Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_pad.c7
-rw-r--r--drivers/acpi/acpica/hwsleep.c22
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/apei/apei-base.c17
-rw-r--r--drivers/acpi/apei/apei-internal.h9
-rw-r--r--drivers/acpi/apei/ghes.c6
-rw-r--r--drivers/acpi/battery.c10
-rw-r--r--drivers/acpi/bgrt.c1
-rw-r--r--drivers/acpi/bus.c88
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/processor_core.c6
-rw-r--r--drivers/acpi/processor_idle.c32
-rw-r--r--drivers/acpi/processor_perflib.c30
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c55
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/video.c35
-rw-r--r--drivers/amba/Makefile4
-rw-r--r--drivers/amba/tegra-ahb.c293
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c36
-rw-r--r--drivers/ata/libata-core.c7
-rw-r--r--drivers/ata/libata-eh.c24
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/ata/pata_ep93xx.c1044
-rw-r--r--drivers/ata/sata_mv.c40
-rw-r--r--drivers/atm/solos-pci.c4
-rw-r--r--drivers/base/Kconfig89
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/dd.c4
-rw-r--r--drivers/base/dma-buf.c99
-rw-r--r--drivers/base/dma-coherent.c42
-rw-r--r--drivers/base/dma-contiguous.c401
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/base/power/domain.c176
-rw-r--r--drivers/base/power/domain_governor.c166
-rw-r--r--drivers/base/power/main.c16
-rw-r--r--drivers/base/power/qos.c19
-rw-r--r--drivers/base/power/runtime.c103
-rw-r--r--drivers/base/power/sysfs.c54
-rw-r--r--drivers/base/power/wakeup.c174
-rw-r--r--drivers/base/regmap/Kconfig1
-rw-r--r--drivers/base/regmap/regmap-i2c.c2
-rw-r--r--drivers/base/regmap/regmap.c10
-rw-r--r--drivers/base/soc.c2
-rw-r--r--drivers/bcma/core.c3
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c4
-rw-r--r--drivers/bcma/driver_pci.c59
-rw-r--r--drivers/bcma/driver_pci_host.c10
-rw-r--r--drivers/bcma/host_pci.c7
-rw-r--r--drivers/bcma/scan.c54
-rw-r--r--drivers/bcma/sprom.c153
-rw-r--r--drivers/block/drbd/drbd_actlog.c104
-rw-r--r--drivers/block/drbd/drbd_bitmap.c157
-rw-r--r--drivers/block/drbd/drbd_int.h90
-rw-r--r--drivers/block/drbd/drbd_main.c357
-rw-r--r--drivers/block/drbd/drbd_nl.c48
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c95
-rw-r--r--drivers/block/drbd/drbd_req.c198
-rw-r--r--drivers/block/drbd/drbd_req.h19
-rw-r--r--drivers/block/drbd/drbd_worker.c31
-rw-r--r--drivers/block/floppy.c162
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c390
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h53
-rw-r--r--drivers/block/rbd.c72
-rw-r--r--drivers/block/umem.c40
-rw-r--r--drivers/block/xen-blkback/common.h2
-rw-r--r--drivers/block/xen-blkfront.c102
-rw-r--r--drivers/bluetooth/ath3k.c9
-rw-r--r--drivers/bluetooth/btmrvl_drv.h5
-rw-r--r--drivers/bluetooth/btmrvl_main.c70
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c120
-rw-r--r--drivers/bluetooth/btusb.c18
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c3
-rw-r--r--drivers/char/agp/generic.c4
-rw-r--r--drivers/char/agp/intel-agp.c6
-rw-r--r--drivers/char/agp/intel-agp.h15
-rw-r--r--drivers/char/agp/intel-gtt.c45
-rw-r--r--drivers/char/agp/sgi-agp.c1
-rw-r--r--drivers/char/hw_random/Kconfig15
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/atmel-rng.c9
-rw-r--r--drivers/char/hw_random/omap-rng.c22
-rw-r--r--drivers/char/hw_random/pseries-rng.c96
-rw-r--r--drivers/clk/Kconfig12
-rw-r--r--drivers/clk/Makefile5
-rw-r--r--drivers/clk/clk-divider.c68
-rw-r--r--drivers/clk/clk-fixed-factor.c95
-rw-r--r--drivers/clk/clk-fixed-rate.c49
-rw-r--r--drivers/clk/clk-gate.c104
-rw-r--r--drivers/clk/clk-mux.c27
-rw-r--r--drivers/clk/clk.c315
-rw-r--r--drivers/clk/mxs/Makefile8
-rw-r--r--drivers/clk/mxs/clk-div.c110
-rw-r--r--drivers/clk/mxs/clk-frac.c139
-rw-r--r--drivers/clk/mxs/clk-imx23.c205
-rw-r--r--drivers/clk/mxs/clk-imx28.c338
-rw-r--r--drivers/clk/mxs/clk-pll.c116
-rw-r--r--drivers/clk/mxs/clk-ref.c154
-rw-r--r--drivers/clk/mxs/clk.c28
-rw-r--r--drivers/clk/mxs/clk.h66
-rw-r--r--drivers/clk/spear/Makefile10
-rw-r--r--drivers/clk/spear/clk-aux-synth.c198
-rw-r--r--drivers/clk/spear/clk-frac-synth.c165
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c154
-rw-r--r--drivers/clk/spear/clk-vco-pll.c363
-rw-r--r--drivers/clk/spear/clk.c36
-rw-r--r--drivers/clk/spear/clk.h134
-rw-r--r--drivers/clk/spear/spear1310_clock.c1104
-rw-r--r--drivers/clk/spear/spear1340_clock.c961
-rw-r--r--drivers/clk/spear/spear3xx_clock.c604
-rw-r--r--drivers/clk/spear/spear6xx_clock.c340
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/em_sti.c406
-rw-r--r--drivers/clocksource/sh_cmt.c26
-rw-r--r--drivers/clocksource/sh_mtu2.c6
-rw-r--r--drivers/clocksource/sh_tmu.c16
-rw-r--r--drivers/crypto/Kconfig28
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c2
-rw-r--r--drivers/crypto/caam/caamalg.c14
-rw-r--r--drivers/crypto/caam/ctrl.c16
-rw-r--r--drivers/crypto/mv_cesa.c14
-rw-r--r--drivers/crypto/nx/Makefile11
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c141
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c468
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c178
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c139
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c353
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c236
-rw-r--r--drivers/crypto/nx/nx-sha256.c246
-rw-r--r--drivers/crypto/nx/nx-sha512.c265
-rw-r--r--drivers/crypto/nx/nx.c716
-rw-r--r--drivers/crypto/nx/nx.h193
-rw-r--r--drivers/crypto/nx/nx_csbcpb.h205
-rw-r--r--drivers/crypto/nx/nx_debugfs.c103
-rw-r--r--drivers/crypto/ux500/Kconfig30
-rw-r--r--drivers/crypto/ux500/Makefile8
-rw-r--r--drivers/crypto/ux500/cryp/Makefile13
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c389
-rw-r--r--drivers/crypto/ux500/cryp/cryp.h308
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c1784
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.c45
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irq.h31
-rw-r--r--drivers/crypto/ux500/cryp/cryp_irqp.h125
-rw-r--r--drivers/crypto/ux500/cryp/cryp_p.h123
-rw-r--r--drivers/crypto/ux500/hash/Makefile11
-rw-r--r--drivers/crypto/ux500/hash/hash_alg.h395
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2009
-rw-r--r--drivers/devfreq/Kconfig2
-rw-r--r--drivers/devfreq/governor_performance.c7
-rw-r--r--drivers/devfreq/governor_powersave.c7
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/amba-pl08x.c52
-rw-r--r--drivers/dma/at_hdmac.c15
-rw-r--r--drivers/dma/at_hdmac_regs.h21
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/coh901318_lli.c4
-rw-r--r--drivers/dma/dw_dmac.c28
-rw-r--r--drivers/dma/ep93xx_dma.c117
-rw-r--r--drivers/dma/imx-dma.c12
-rw-r--r--drivers/dma/imx-sdma.c114
-rw-r--r--drivers/dma/intel_mid_dma.c8
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/mv_xor.c15
-rw-r--r--drivers/dma/mv_xor.h1
-rw-r--r--drivers/dma/mxs-dma.c194
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/pl330.c31
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/edac/amd64_edac.c200
-rw-r--r--drivers/edac/amd76x_edac.c42
-rw-r--r--drivers/edac/cell_edac.c42
-rw-r--r--drivers/edac/cpc925_edac.c91
-rw-r--r--drivers/edac/e752x_edac.c116
-rw-r--r--drivers/edac/e7xxx_edac.c86
-rw-r--r--drivers/edac/edac_core.h51
-rw-r--r--drivers/edac/edac_device.c33
-rw-r--r--drivers/edac/edac_mc.c718
-rw-r--r--drivers/edac/edac_mc_sysfs.c70
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci.c6
-rw-r--r--drivers/edac/i3000_edac.c49
-rw-r--r--drivers/edac/i3200_edac.c56
-rw-r--r--drivers/edac/i5000_edac.c236
-rw-r--r--drivers/edac/i5100_edac.c106
-rw-r--r--drivers/edac/i5400_edac.c265
-rw-r--r--drivers/edac/i7300_edac.c115
-rw-r--r--drivers/edac/i7core_edac.c295
-rw-r--r--drivers/edac/i82443bxgx_edac.c41
-rw-r--r--drivers/edac/i82860_edac.c55
-rw-r--r--drivers/edac/i82875p_edac.c51
-rw-r--r--drivers/edac/i82975x_edac.c58
-rw-r--r--drivers/edac/mce_amd.h2
-rw-r--r--drivers/edac/mpc85xx_edac.c38
-rw-r--r--drivers/edac/mv64x60_edac.c47
-rw-r--r--drivers/edac/pasemi_edac.c49
-rw-r--r--drivers/edac/ppc4xx_edac.c50
-rw-r--r--drivers/edac/r82600_edac.c40
-rw-r--r--drivers/edac/sb_edac.c228
-rw-r--r--drivers/edac/tile_edac.c33
-rw-r--r--drivers/edac/x38_edac.c52
-rw-r--r--drivers/extcon/extcon-max8997.c5
-rw-r--r--drivers/extcon/extcon_class.c2
-rw-r--r--drivers/extcon/extcon_gpio.c2
-rw-r--r--drivers/firewire/core-card.c5
-rw-r--r--drivers/firewire/core-cdev.c51
-rw-r--r--drivers/firewire/core-device.c116
-rw-r--r--drivers/firewire/core-iso.c80
-rw-r--r--drivers/firewire/core-transaction.c35
-rw-r--r--drivers/firewire/core.h22
-rw-r--r--drivers/firewire/nosy.c20
-rw-r--r--drivers/firewire/ohci.c42
-rw-r--r--drivers/firewire/sbp2.c28
-rw-r--r--drivers/gpio/Kconfig89
-rw-r--r--drivers/gpio/Makefile7
-rw-r--r--drivers/gpio/devres.c30
-rw-r--r--drivers/gpio/gpio-bt8xx.c12
-rw-r--r--drivers/gpio/gpio-ep93xx.c2
-rw-r--r--drivers/gpio/gpio-generic.c16
-rw-r--r--drivers/gpio/gpio-ich.c419
-rw-r--r--drivers/gpio/gpio-langwell.c91
-rw-r--r--drivers/gpio/gpio-lpc32xx.c52
-rw-r--r--drivers/gpio/gpio-mcp23s08.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c12
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c158
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c3
-rw-r--r--drivers/gpio/gpio-msic.c339
-rw-r--r--drivers/gpio/gpio-mxc.c12
-rw-r--r--drivers/gpio/gpio-mxs.c156
-rw-r--r--drivers/gpio/gpio-omap.c120
-rw-r--r--drivers/gpio/gpio-pca953x.c43
-rw-r--r--drivers/gpio/gpio-pch.c12
-rw-r--r--drivers/gpio/gpio-rc5t583.c180
-rw-r--r--drivers/gpio/gpio-samsung.c416
-rw-r--r--drivers/gpio/gpio-sch.c8
-rw-r--r--drivers/gpio/gpio-sodaville.c14
-rw-r--r--drivers/gpio/gpio-sta2x11.c436
-rw-r--r--drivers/gpio/gpio-stp-xway.c301
-rw-r--r--drivers/gpio/gpio-tps65910.c191
-rw-r--r--drivers/gpio/gpio-wm831x.c6
-rw-r--r--drivers/gpio/gpio-wm8994.c5
-rw-r--r--drivers/gpio/gpiolib-of.c (renamed from drivers/of/gpio.c)80
-rw-r--r--drivers/gpio/gpiolib.c18
-rw-r--r--drivers/gpu/drm/Kconfig6
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/ast/Kconfig16
-rw-r--r--drivers/gpu/drm/ast/Makefile9
-rw-r--r--drivers/gpu/drm/ast/ast_dram_tables.h144
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c244
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h356
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c341
-rw-r--r--drivers/gpu/drm/ast/ast_main.c527
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1160
-rw-r--r--drivers/gpu/drm/ast/ast_post.c1780
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h265
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c453
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig12
-rw-r--r--drivers/gpu/drm/cirrus/Makefile5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c127
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h246
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c307
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c335
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c629
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c458
-rw-r--r--drivers/gpu/drm/drm_cache.c23
-rw-r--r--drivers/gpu/drm/drm_context.c9
-rw-r--r--drivers/gpu/drm/drm_crtc.c585
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c35
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c280
-rw-r--r--drivers/gpu/drm/drm_edid_load.c12
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h292
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c35
-rw-r--r--drivers/gpu/drm/drm_ioctl.c4
-rw-r--r--drivers/gpu/drm/drm_irq.c23
-rw-r--r--drivers/gpu/drm/drm_lock.c2
-rw-r--r--drivers/gpu/drm/drm_prime.c48
-rw-r--r--drivers/gpu/drm/drm_stub.c7
-rw-r--r--drivers/gpu/drm/drm_sysfs.c10
-rw-r--r--drivers/gpu/drm/drm_vm.c18
-rw-r--r--drivers/gpu/drm/exynos/Kconfig12
-rw-r--r--drivers/gpu/drm/exynos/Makefile2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c272
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.h39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c45
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c937
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.h36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c98
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c77
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c429
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c405
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h6
-rw-r--r--drivers/gpu/drm/gma500/Makefile5
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c232
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c30
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c697
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c7
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c76
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c57
-rw-r--r--drivers/gpu/drm/gma500/gem.c2
-rw-r--r--drivers/gpu/drm/gma500/gtt.c32
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c274
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h161
-rw-r--r--drivers/gpu/drm/gma500/mdfld_device.c452
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c1
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c24
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c330
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c295
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h25
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c134
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c138
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c66
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c4
-rw-r--r--drivers/gpu/drm/gma500/opregion.c342
-rw-r--r--drivers/gpu/drm/gma500/opregion.h (renamed from drivers/gpu/drm/gma500/intel_opregion.c)67
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c71
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c67
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h208
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c348
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h9
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h35
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c9
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c36
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c14
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c385
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c1181
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c282
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h251
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c1932
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c232
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c38
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c200
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c96
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c202
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c18
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1798
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h531
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c23
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c111
-rw-r--r--drivers/gpu/drm/i915/i915_trace_points.c2
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c45
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c76
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c755
-rw-r--r--drivers/gpu/drm/i915/intel_display.c4377
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c128
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h105
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c6
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c310
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c389
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c19
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c3
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c71
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c209
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c29
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c3820
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c744
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h23
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c119
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h5
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c102
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c69
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig15
-rw-r--r--drivers/gpu/drm/mgag200/Makefile5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c135
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h276
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c294
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c156
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c388
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c1533
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h661
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c452
-rw-r--r--drivers/gpu/drm/nouveau/Makefile11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c385
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c88
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h179
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c578
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h52
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fifo.h32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c215
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c231
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_software.h69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c270
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c48
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c140
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c419
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c39
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv04_software.c147
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c214
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c278
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fifo.c177
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv31_mpeg.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c351
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c37
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c102
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c75
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c59
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c596
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c229
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c33
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mpeg.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv50_software.c214
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c177
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fifo.c241
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.c166
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.fuc698
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.fuc.h584
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.c31
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c290
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c184
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c310
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_software.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c16
-rw-r--r--drivers/gpu/drm/nouveau/nve0_fifo.c423
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.c831
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.h89
-rw-r--r--drivers/gpu/drm/nouveau/nve0_grctx.c2777
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c17
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c27
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c22
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c540
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c59
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c213
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h232
-rw-r--r--drivers/gpu/drm/radeon/ni.c429
-rw-r--r--drivers/gpu/drm/radeon/nid.h11
-rw-r--r--drivers/gpu/drm/radeon/r100.c121
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c34
-rw-r--r--drivers/gpu/drm/radeon/r420.c7
-rw-r--r--drivers/gpu/drm/radeon/r520.c8
-rw-r--r--drivers/gpu/drm/radeon/r600.c392
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c222
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c101
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c64
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c470
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h45
-rw-r--r--drivers/gpu/drm/radeon/r600d.h237
-rw-r--r--drivers/gpu/drm/radeon/radeon.h249
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h27
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c201
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c113
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c621
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h30
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c226
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c415
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c325
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c187
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c67
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c54
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h14
-rw-r--r--drivers/gpu/drm/radeon/rs690.c13
-rw-r--r--drivers/gpu/drm/radeon/rv515.c8
-rw-r--r--drivers/gpu/drm/radeon/rv770.c305
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h198
-rw-r--r--drivers/gpu/drm/radeon/si.c511
-rw-r--r--drivers/gpu/drm/radeon/si_reg.h72
-rw-r--r--drivers/gpu/drm/radeon/sid.h19
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c31
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c23
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h3
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c22
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c94
-rw-r--r--drivers/gpu/drm/udl/udl_main.c2
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/via/via_map.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/gpu/vga/Kconfig1
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c299
-rw-r--r--drivers/gpu/vga/vgaarb.c9
-rw-r--r--drivers/hid/Kconfig64
-rw-r--r--drivers/hid/Makefile5
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-chicony.c1
-rw-r--r--drivers/hid/hid-core.c29
-rw-r--r--drivers/hid/hid-cypress.c2
-rw-r--r--drivers/hid/hid-holtek-kbd.c183
-rw-r--r--drivers/hid/hid-ids.h21
-rw-r--r--drivers/hid/hid-input.c12
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c564
-rw-r--r--drivers/hid/hid-logitech-dj.c38
-rw-r--r--drivers/hid/hid-magicmouse.c151
-rw-r--r--drivers/hid/hid-picolcd.c6
-rw-r--r--drivers/hid/hid-roccat-arvo.c16
-rw-r--r--drivers/hid/hid-roccat-common.c72
-rw-r--r--drivers/hid/hid-roccat-common.h16
-rw-r--r--drivers/hid/hid-roccat-isku.c52
-rw-r--r--drivers/hid/hid-roccat-isku.h7
-rw-r--r--drivers/hid/hid-roccat-kone.c6
-rw-r--r--drivers/hid/hid-roccat-koneplus.c98
-rw-r--r--drivers/hid/hid-roccat-koneplus.h22
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c71
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h15
-rw-r--r--drivers/hid/hid-roccat-pyra.c59
-rw-r--r--drivers/hid/hid-roccat-pyra.h12
-rw-r--r--drivers/hid/hid-roccat-savu.c316
-rw-r--r--drivers/hid/hid-roccat-savu.h87
-rw-r--r--drivers/hid/hid-wiimote-ext.c2
-rw-r--r--drivers/hid/hidraw.c12
-rw-r--r--drivers/hid/usbhid/Kconfig8
-rw-r--r--drivers/hid/usbhid/hid-core.c294
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig21
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/acpi_power_meter.c166
-rw-r--r--drivers/hwmon/ad7314.c11
-rw-r--r--drivers/hwmon/applesmc.c6
-rw-r--r--drivers/hwmon/coretemp.c37
-rw-r--r--drivers/hwmon/emc2103.c12
-rw-r--r--drivers/hwmon/fam15h_power.c13
-rw-r--r--drivers/hwmon/ina2xx.c368
-rw-r--r--drivers/hwmon/it87.c386
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/k10temp.c13
-rw-r--r--drivers/hwmon/k8temp.c13
-rw-r--r--drivers/hwmon/lineage-pem.c2
-rw-r--r--drivers/hwmon/ltc4261.c2
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c191
-rw-r--r--drivers/hwmon/sch5627.c2
-rw-r--r--drivers/hwmon/sch5636.c2
-rw-r--r--drivers/hwmon/sch56xx-common.c406
-rw-r--r--drivers/hwmon/sch56xx-common.h2
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c4
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c2
-rw-r--r--drivers/i2c/busses/Kconfig17
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c31
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h5
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c33
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c246
-rw-r--r--drivers/i2c/busses/i2c-gpio.c7
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c157
-rw-r--r--drivers/i2c/busses/i2c-mpc.c30
-rw-r--r--drivers/i2c/busses/i2c-mxs.c22
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c3
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-powermac.c98
-rw-r--r--drivers/i2c/busses/i2c-pxa.c5
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c112
-rw-r--r--drivers/i2c/busses/i2c-s6000.c2
-rw-r--r--drivers/i2c/busses/i2c-s6000.h2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c11
-rw-r--r--drivers/i2c/busses/i2c-tegra.c24
-rw-r--r--drivers/i2c/busses/i2c-versatile.c9
-rw-r--r--drivers/i2c/busses/i2c-xiic.c23
-rw-r--r--drivers/i2c/i2c-core.c17
-rw-r--r--drivers/i2c/i2c-dev.c30
-rw-r--r--drivers/i2c/i2c-mux.c42
-rw-r--r--drivers/i2c/muxes/Kconfig18
-rw-r--r--drivers/i2c/muxes/Makefile7
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c (renamed from drivers/i2c/muxes/gpio-i2cmux.c)42
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c (renamed from drivers/i2c/muxes/pca9541.c)3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c (renamed from drivers/i2c/muxes/pca954x.c)2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c279
-rw-r--r--drivers/ide/icside.c17
-rw-r--r--drivers/ide/ide-cs.c3
-rw-r--r--drivers/iio/Kconfig3
-rw-r--r--drivers/iio/industrialio-core.c16
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c21
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h8
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c21
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c27
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c64
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c17
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/input/Kconfig17
-rw-r--r--drivers/input/Makefile2
-rw-r--r--drivers/input/evdev.c69
-rw-r--r--drivers/input/ff-memless.c3
-rw-r--r--drivers/input/fixp-arith.h87
-rw-r--r--drivers/input/gameport/emu10k1-gp.c13
-rw-r--r--drivers/input/gameport/fm801-gp.c16
-rw-r--r--drivers/input/joystick/a3d.c13
-rw-r--r--drivers/input/joystick/adi.c17
-rw-r--r--drivers/input/joystick/as5011.c6
-rw-r--r--drivers/input/joystick/cobra.c13
-rw-r--r--drivers/input/joystick/gf2k.c13
-rw-r--r--drivers/input/joystick/grip.c13
-rw-r--r--drivers/input/joystick/grip_mp.c13
-rw-r--r--drivers/input/joystick/guillemot.c13
-rw-r--r--drivers/input/joystick/interact.c13
-rw-r--r--drivers/input/joystick/joydump.c13
-rw-r--r--drivers/input/joystick/magellan.c17
-rw-r--r--drivers/input/joystick/sidewinder.c13
-rw-r--r--drivers/input/joystick/spaceball.c17
-rw-r--r--drivers/input/joystick/spaceorb.c17
-rw-r--r--drivers/input/joystick/stinger.c17
-rw-r--r--drivers/input/joystick/tmdc.c13
-rw-r--r--drivers/input/joystick/twidjoy.c17
-rw-r--r--drivers/input/joystick/warrior.c17
-rw-r--r--drivers/input/joystick/xpad.c6
-rw-r--r--drivers/input/joystick/zhenhua.c17
-rw-r--r--drivers/input/keyboard/Kconfig32
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5588-keys.c1
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c44
-rw-r--r--drivers/input/keyboard/hil_kbd.c13
-rw-r--r--drivers/input/keyboard/imx_keypad.c17
-rw-r--r--drivers/input/keyboard/lkkbd.c17
-rw-r--r--drivers/input/keyboard/lm8333.c235
-rw-r--r--drivers/input/keyboard/matrix_keypad.c99
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c3
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c2
-rw-r--r--drivers/input/keyboard/newtonkbd.c13
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c20
-rw-r--r--drivers/input/keyboard/omap-keypad.c20
-rw-r--r--drivers/input/keyboard/omap4-keypad.c133
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c20
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c52
-rw-r--r--drivers/input/keyboard/qt1070.c3
-rw-r--r--drivers/input/keyboard/samsung-keypad.c20
-rw-r--r--drivers/input/keyboard/spear-keyboard.c92
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c16
-rw-r--r--drivers/input/keyboard/stowaway.c13
-rw-r--r--drivers/input/keyboard/sunkbd.c17
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c41
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c3
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c17
-rw-r--r--drivers/input/keyboard/tegra-kbc.c68
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c29
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c25
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c27
-rw-r--r--drivers/input/keyboard/xtkbd.c13
-rw-r--r--drivers/input/matrix-keymap.c163
-rw-r--r--drivers/input/misc/ad714x.c8
-rw-r--r--drivers/input/misc/cma3000_d0x.c2
-rw-r--r--drivers/input/misc/dm355evm_keys.c3
-rw-r--r--drivers/input/misc/mpu3050.c2
-rw-r--r--drivers/input/misc/twl6040-vibra.c46
-rw-r--r--drivers/input/misc/wm831x-on.c2
-rw-r--r--drivers/input/mouse/Kconfig12
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/alps.c81
-rw-r--r--drivers/input/mouse/alps.h2
-rw-r--r--drivers/input/mouse/bcm5974.c20
-rw-r--r--drivers/input/mouse/navpoint.c369
-rw-r--r--drivers/input/mouse/sentelic.c34
-rw-r--r--drivers/input/mouse/sentelic.h8
-rw-r--r--drivers/input/mouse/sermouse.c13
-rw-r--r--drivers/input/mouse/synaptics.c20
-rw-r--r--drivers/input/mouse/vsxxxaa.c14
-rw-r--r--drivers/input/of_keymap.c87
-rw-r--r--drivers/input/serio/pcips2.c15
-rw-r--r--drivers/input/serio/ps2mult.c13
-rw-r--r--drivers/input/serio/serio_raw.c65
-rw-r--r--drivers/input/serio/xilinx_ps2.c35
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/tablet/wacom.h4
-rw-r--r--drivers/input/tablet/wacom_sys.c246
-rw-r--r--drivers/input/tablet/wacom_wac.c304
-rw-r--r--drivers/input/tablet/wacom_wac.h13
-rw-r--r--drivers/input/touchscreen/Kconfig34
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ad7879.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c36
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c3
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c3
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c370
-rw-r--r--drivers/input/touchscreen/dynapro.c17
-rw-r--r--drivers/input/touchscreen/elo.c17
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c13
-rw-r--r--drivers/input/touchscreen/gunze.c17
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c17
-rw-r--r--drivers/input/touchscreen/hampshire.c17
-rw-r--r--drivers/input/touchscreen/inexio.c17
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c2
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c10
-rw-r--r--drivers/input/touchscreen/mtouch.c17
-rw-r--r--drivers/input/touchscreen/penmount.c17
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/st1232.c20
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c2
-rw-r--r--drivers/input/touchscreen/touchit213.c17
-rw-r--r--drivers/input/touchscreen/touchright.c17
-rw-r--r--drivers/input/touchscreen/touchwin.c17
-rw-r--r--drivers/input/touchscreen/tsc2005.c3
-rw-r--r--drivers/input/touchscreen/tsc40.c12
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c282
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c13
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c9
-rw-r--r--drivers/iommu/Kconfig25
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c119
-rw-r--r--drivers/iommu/amd_iommu_init.c19
-rw-r--r--drivers/iommu/amd_iommu_types.h5
-rw-r--r--drivers/iommu/exynos-iommu.c1076
-rw-r--r--drivers/iommu/intel-iommu.c40
-rw-r--r--drivers/iommu/iommu.c5
-rw-r--r--drivers/iommu/omap-iommu.c32
-rw-r--r--drivers/iommu/tegra-gart.c20
-rw-r--r--drivers/iommu/tegra-smmu.c6
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.h6
-rw-r--r--drivers/isdn/mISDN/stack.c4
-rw-r--r--drivers/leds/Kconfig33
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c23
-rw-r--r--drivers/leds/led-core.c7
-rw-r--r--drivers/leds/leds-da9052.c214
-rw-r--r--drivers/leds/leds-lm3530.c100
-rw-r--r--drivers/leds/leds-lm3533.c785
-rw-r--r--drivers/leds/leds-lp5521.c12
-rw-r--r--drivers/leds/leds-mc13783.c2
-rw-r--r--drivers/leds/leds-pca955x.c95
-rw-r--r--drivers/leds/ledtrig-backlight.c4
-rw-r--r--drivers/leds/ledtrig-gpio.c4
-rw-r--r--drivers/leds/ledtrig-heartbeat.c46
-rw-r--r--drivers/leds/ledtrig-timer.c54
-rw-r--r--drivers/leds/ledtrig-transient.c237
-rw-r--r--drivers/macintosh/Kconfig23
-rw-r--r--drivers/macintosh/Makefile14
-rw-r--r--drivers/macintosh/ams/ams-i2c.c2
-rw-r--r--drivers/macintosh/therm_adt746x.c480
-rw-r--r--drivers/macintosh/windfarm.h51
-rw-r--r--drivers/macintosh/windfarm_ad7417_sensor.c347
-rw-r--r--drivers/macintosh/windfarm_core.c23
-rw-r--r--drivers/macintosh/windfarm_cpufreq_clamp.c6
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c613
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c135
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c201
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c109
-rw-r--r--drivers/macintosh/windfarm_mpu.h105
-rw-r--r--drivers/macintosh/windfarm_pm72.c847
-rw-r--r--drivers/macintosh/windfarm_pm81.c25
-rw-r--r--drivers/macintosh/windfarm_pm91.c33
-rw-r--r--drivers/macintosh/windfarm_rm31.c740
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c1
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c132
-rw-r--r--drivers/mca/Kconfig14
-rw-r--r--drivers/mca/Makefile7
-rw-r--r--drivers/mca/mca-bus.c169
-rw-r--r--drivers/mca/mca-device.c218
-rw-r--r--drivers/mca/mca-driver.c63
-rw-r--r--drivers/mca/mca-legacy.c329
-rw-r--r--drivers/mca/mca-proc.c249
-rw-r--r--drivers/md/bitmap.c1100
-rw-r--r--drivers/md/bitmap.h60
-rw-r--r--drivers/md/dm-mpath.c47
-rw-r--r--drivers/md/dm-raid.c22
-rw-r--r--drivers/md/dm-thin-metadata.c136
-rw-r--r--drivers/md/dm-thin-metadata.h13
-rw-r--r--drivers/md/dm-thin.c210
-rw-r--r--drivers/md/md.c378
-rw-r--r--drivers/md/md.h12
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.c54
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c11
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c13
-rw-r--r--drivers/md/raid1.c42
-rw-r--r--drivers/md/raid10.c1311
-rw-r--r--drivers/md/raid10.h34
-rw-r--r--drivers/md/raid5.c317
-rw-r--r--drivers/md/raid5.h7
-rw-r--r--drivers/media/common/saa7146_fops.c131
-rw-r--r--drivers/media/common/saa7146_hlp.c23
-rw-r--r--drivers/media/common/saa7146_vbi.c54
-rw-r--r--drivers/media/common/saa7146_video.c367
-rw-r--r--drivers/media/common/tuners/Kconfig27
-rw-r--r--drivers/media/common/tuners/Makefile4
-rw-r--r--drivers/media/common/tuners/fc0011.c524
-rw-r--r--drivers/media/common/tuners/fc0011.h41
-rw-r--r--drivers/media/common/tuners/fc0012-priv.h43
-rw-r--r--drivers/media/common/tuners/fc0012.c467
-rw-r--r--drivers/media/common/tuners/fc0012.h44
-rw-r--r--drivers/media/common/tuners/fc0013-priv.h44
-rw-r--r--drivers/media/common/tuners/fc0013.c634
-rw-r--r--drivers/media/common/tuners/fc0013.h57
-rw-r--r--drivers/media/common/tuners/fc001x-common.h39
-rw-r--r--drivers/media/common/tuners/tua9001.c215
-rw-r--r--drivers/media/common/tuners/tua9001.h46
-rw-r--r--drivers/media/common/tuners/tua9001_priv.h34
-rw-r--r--drivers/media/common/tuners/xc5000.c7
-rw-r--r--drivers/media/common/tuners/xc5000.h2
-rw-r--r--drivers/media/dvb/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c10
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.h2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c80
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h18
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c1
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig13
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile3
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c495
-rw-r--r--drivers/media/dvb/dvb-usb/af9035.c1242
-rw-r--r--drivers/media/dvb/dvb-usb/af9035.h113
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c24
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c7
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h12
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c12
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h3
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c76
-rw-r--r--drivers/media/dvb/dvb-usb/it913x.c4
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c5
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf-tuner.c1
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf.c850
-rw-r--r--drivers/media/dvb/dvb-usb/rtl28xxu.c28
-rw-r--r--drivers/media/dvb/frontends/Kconfig35
-rw-r--r--drivers/media/dvb/frontends/Makefile7
-rw-r--r--drivers/media/dvb/frontends/af9013.c13
-rw-r--r--drivers/media/dvb/frontends/af9033.c980
-rw-r--r--drivers/media/dvb/frontends/af9033.h75
-rw-r--r--drivers/media/dvb/frontends/af9033_priv.h470
-rw-r--r--drivers/media/dvb/frontends/au8522_common.c259
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c215
-rw-r--r--drivers/media/dvb/frontends/au8522_priv.h2
-rw-r--r--drivers/media/dvb/frontends/cx24110.c7
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c5
-rw-r--r--drivers/media/dvb/frontends/dib9000.c131
-rw-r--r--drivers/media/dvb/frontends/drxd.h14
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c18
-rw-r--r--drivers/media/dvb/frontends/drxk_map.h2
-rw-r--r--drivers/media/dvb/frontends/ds3000.c5
-rw-r--r--drivers/media/dvb/frontends/it913x-fe.c26
-rw-r--r--drivers/media/dvb/frontends/lg2160.c1468
-rw-r--r--drivers/media/dvb/frontends/lg2160.h84
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c3
-rw-r--r--drivers/media/dvb/frontends/m88rs2000.c29
-rw-r--r--drivers/media/dvb/frontends/rtl2830.c201
-rw-r--r--drivers/media/dvb/frontends/rtl2830_priv.h1
-rw-r--r--drivers/media/dvb/frontends/stb0899_drv.c8
-rw-r--r--drivers/media/dvb/frontends/stb6100.c3
-rw-r--r--drivers/media/dvb/frontends/stv0297.c2
-rw-r--r--drivers/media/dvb/frontends/stv0900_sw.c2
-rw-r--r--drivers/media/dvb/frontends/stv090x.c2
-rw-r--r--drivers/media/dvb/frontends/zl10353.c5
-rw-r--r--drivers/media/dvb/mantis/hopper_cards.c3
-rw-r--r--drivers/media/dvb/mantis/mantis_cards.c3
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.c4
-rw-r--r--drivers/media/dvb/mantis/mantis_evm.c3
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c4
-rw-r--r--drivers/media/dvb/pluto2/pluto2.c8
-rw-r--r--drivers/media/dvb/siano/smssdio.c4
-rw-r--r--drivers/media/dvb/siano/smsusb.c4
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c72
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c6
-rw-r--r--drivers/media/media-entity.c57
-rw-r--r--drivers/media/radio/Kconfig4
-rw-r--r--drivers/media/radio/dsbr100.c528
-rw-r--r--drivers/media/radio/radio-gemtek.c25
-rw-r--r--drivers/media/radio/radio-isa.c173
-rw-r--r--drivers/media/radio/radio-isa.h9
-rw-r--r--drivers/media/radio/radio-keene.c36
-rw-r--r--drivers/media/radio/radio-maxiradio.c2
-rw-r--r--drivers/media/radio/radio-mr800.c524
-rw-r--r--drivers/media/radio/radio-rtrack2.c1
-rw-r--r--drivers/media/radio/radio-sf16fmi.c14
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c146
-rw-r--r--drivers/media/radio/radio-timb.c2
-rw-r--r--drivers/media/radio/saa7706h.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c305
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c65
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c267
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h14
-rw-r--r--drivers/media/radio/tef6862.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c4
-rw-r--r--drivers/media/rc/Kconfig1
-rw-r--r--drivers/media/rc/ati_remote.c146
-rw-r--r--drivers/media/rc/fintek-cir.c13
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/ir-raw.c8
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c4
-rw-r--r--drivers/media/rc/ite-cir.c14
-rw-r--r--drivers/media/rc/keymaps/Makefile3
-rw-r--r--drivers/media/rc/keymaps/rc-asus-ps3-100.c91
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v2.c2
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-digitainer.c123
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-or2x.c108
-rw-r--r--drivers/media/rc/mceusb.c5
-rw-r--r--drivers/media/rc/nuvoton-cir.c26
-rw-r--r--drivers/media/rc/rc-loopback.c1
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/winbond-cir.c4
-rw-r--r--drivers/media/video/Kconfig48
-rw-r--r--drivers/media/video/Makefile5
-rw-r--r--drivers/media/video/adp1653.c9
-rw-r--r--drivers/media/video/adv7180.c417
-rw-r--r--drivers/media/video/adv7343.c4
-rw-r--r--drivers/media/video/aptina-pll.c5
-rw-r--r--drivers/media/video/arv.c7
-rw-r--r--drivers/media/video/as3645a.c10
-rw-r--r--drivers/media/video/atmel-isi.c18
-rw-r--r--drivers/media/video/au0828/Kconfig3
-rw-r--r--drivers/media/video/au0828/au0828-cards.c2
-rw-r--r--drivers/media/video/au0828/au0828-dvb.c27
-rw-r--r--drivers/media/video/au0828/au0828-video.c25
-rw-r--r--drivers/media/video/blackfin/bfin_capture.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c84
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c9
-rw-r--r--drivers/media/video/bt8xx/bttv.h1
-rw-r--r--drivers/media/video/bt8xx/bttvp.h1
-rw-r--r--drivers/media/video/bw-qcam.c177
-rw-r--r--drivers/media/video/c-qcam.c140
-rw-r--r--drivers/media/video/cpia2/cpia2.h34
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c142
-rw-r--r--drivers/media/video/cpia2/cpia2_usb.c78
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c850
-rw-r--r--drivers/media/video/cpia2/cpia2dev.h50
-rw-r--r--drivers/media/video/cx18/cx18-alsa-main.c1
-rw-r--r--drivers/media/video/cx18/cx18-alsa-pcm.c10
-rw-r--r--drivers/media/video/cx18/cx18-driver.c10
-rw-r--r--drivers/media/video/cx18/cx18-driver.h2
-rw-r--r--drivers/media/video/cx18/cx18-firmware.c9
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c21
-rw-r--r--drivers/media/video/cx18/cx18-streams.c3
-rw-r--r--drivers/media/video/cx231xx/cx231xx-417.c18
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c22
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c148
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c76
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.c8
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c20
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c98
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c7
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c13
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c9
-rw-r--r--drivers/media/video/cx23885/cx23885.h2
-rw-r--r--drivers/media/video/cx23885/cx23888-ir.c4
-rw-r--r--drivers/media/video/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/video/cx25821/cx25821-audio-upstream.c3
-rw-r--r--drivers/media/video/cx25821/cx25821-core.c17
-rw-r--r--drivers/media/video/cx25821/cx25821-i2c.c3
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-video.c13
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream-ch2.c3
-rw-r--r--drivers/media/video/cx25821/cx25821-video-upstream.c3
-rw-r--r--drivers/media/video/cx25821/cx25821-video.c25
-rw-r--r--drivers/media/video/cx25821/cx25821-video.h2
-rw-r--r--drivers/media/video/cx25821/cx25821.h2
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c76
-rw-r--r--drivers/media/video/cx25840/cx25840-ir.c6
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/video/davinci/Kconfig1
-rw-r--r--drivers/media/video/davinci/vpbe_display.c4
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/video/davinci/vpif_capture.c4
-rw-r--r--drivers/media/video/davinci/vpif_display.c4
-rw-r--r--drivers/media/video/em28xx/Kconfig4
-rw-r--r--drivers/media/video/em28xx/Makefile5
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c82
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c30
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c11
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c3
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c252
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c13
-rw-r--r--drivers/media/video/em28xx/em28xx.h60
-rw-r--r--drivers/media/video/et61x251/Kconfig18
-rw-r--r--drivers/media/video/et61x251/Makefile4
-rw-r--r--drivers/media/video/et61x251/et61x251.h213
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c2683
-rw-r--r--drivers/media/video/et61x251/et61x251_sensor.h108
-rw-r--r--drivers/media/video/et61x251/et61x251_tas5130d1b.c143
-rw-r--r--drivers/media/video/fsl-viu.c4
-rw-r--r--drivers/media/video/gspca/Makefile2
-rw-r--r--drivers/media/video/gspca/autogain_functions.c178
-rw-r--r--drivers/media/video/gspca/autogain_functions.h6
-rw-r--r--drivers/media/video/gspca/conex.c4
-rw-r--r--drivers/media/video/gspca/finepix.c18
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c3
-rw-r--r--drivers/media/video/gspca/gspca.c549
-rw-r--r--drivers/media/video/gspca/gspca.h26
-rw-r--r--drivers/media/video/gspca/jl2005bcd.c10
-rw-r--r--drivers/media/video/gspca/mars.c292
-rw-r--r--drivers/media/video/gspca/nw80x.c2
-rw-r--r--drivers/media/video/gspca/ov519.c10
-rw-r--r--drivers/media/video/gspca/ov534.c178
-rw-r--r--drivers/media/video/gspca/ov534_9.c1
-rw-r--r--drivers/media/video/gspca/pac207.c336
-rw-r--r--drivers/media/video/gspca/pac7302.c185
-rw-r--r--drivers/media/video/gspca/pac7311.c505
-rw-r--r--drivers/media/video/gspca/sn9c20x.c601
-rw-r--r--drivers/media/video/gspca/sonixb.c2
-rw-r--r--drivers/media/video/gspca/sonixj.c7
-rw-r--r--drivers/media/video/gspca/sq905.c12
-rw-r--r--drivers/media/video/gspca/sq905c.c10
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c21
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.h3
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c143
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h7
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c359
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h12
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_sensor.h4
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.c236
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_st6422.h4
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c198
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h8
-rw-r--r--drivers/media/video/gspca/topro.c6
-rw-r--r--drivers/media/video/gspca/vicam.c13
-rw-r--r--drivers/media/video/gspca/zc3xx.c620
-rw-r--r--drivers/media/video/hdpvr/hdpvr-control.c2
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/hexium_gemini.c129
-rw-r--r--drivers/media/video/hexium_orion.c24
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c22
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h2
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c6
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c8
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/m5mols/m5mols.h81
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c11
-rw-r--r--drivers/media/video/m5mols/m5mols_controls.c479
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c93
-rw-r--r--drivers/media/video/m5mols/m5mols_reg.h1
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.c1
-rw-r--r--drivers/media/video/mem2mem_testdev.c56
-rw-r--r--drivers/media/video/meye.c2
-rw-r--r--drivers/media/video/msp3400-driver.c15
-rw-r--r--drivers/media/video/mt9m032.c4
-rw-r--r--drivers/media/video/mt9p031.c161
-rw-r--r--drivers/media/video/mt9t112.c1
-rw-r--r--drivers/media/video/mt9v032.c2
-rw-r--r--drivers/media/video/mx1_camera.c14
-rw-r--r--drivers/media/video/mx2_camera.c53
-rw-r--r--drivers/media/video/mx2_emmaprp.c8
-rw-r--r--drivers/media/video/mx3_camera.c45
-rw-r--r--drivers/media/video/mxb.c351
-rw-r--r--drivers/media/video/mxb.h42
-rw-r--r--drivers/media/video/omap1_camera.c22
-rw-r--r--drivers/media/video/omap24xxcam-dma.c20
-rw-r--r--drivers/media/video/omap24xxcam.c3
-rw-r--r--drivers/media/video/omap24xxcam.h14
-rw-r--r--drivers/media/video/omap3isp/isp.c59
-rw-r--r--drivers/media/video/omap3isp/isp.h8
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c256
-rw-r--r--drivers/media/video/omap3isp/ispccdc.h12
-rw-r--r--drivers/media/video/omap3isp/ispccp2.c24
-rw-r--r--drivers/media/video/omap3isp/ispcsi2.c21
-rw-r--r--drivers/media/video/omap3isp/ispcsi2.h1
-rw-r--r--drivers/media/video/omap3isp/ispcsiphy.c4
-rw-r--r--drivers/media/video/omap3isp/ispcsiphy.h15
-rw-r--r--drivers/media/video/omap3isp/isppreview.c634
-rw-r--r--drivers/media/video/omap3isp/isppreview.h76
-rw-r--r--drivers/media/video/omap3isp/ispqueue.h2
-rw-r--r--drivers/media/video/omap3isp/ispresizer.c139
-rw-r--r--drivers/media/video/omap3isp/ispstat.c2
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c303
-rw-r--r--drivers/media/video/omap3isp/ispvideo.h5
-rw-r--r--drivers/media/video/ov5642.c2
-rw-r--r--drivers/media/video/pms.c241
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c193
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.h9
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c1341
-rw-r--r--drivers/media/video/pwc/pwc-if.c191
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c145
-rw-r--r--drivers/media/video/pwc/pwc.h21
-rw-r--r--drivers/media/video/pxa_camera.c15
-rw-r--r--drivers/media/video/s2255drv.c11
-rw-r--r--drivers/media/video/s5p-fimc/Kconfig48
-rw-r--r--drivers/media/video/s5p-fimc/Makefile6
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c569
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c1174
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h272
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite-reg.c300
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite-reg.h150
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite.c1607
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite.h213
-rw-r--r--drivers/media/video/s5p-fimc/fimc-m2m.c824
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c512
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.h20
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c616
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.h326
-rw-r--r--drivers/media/video/s5p-fimc/mipi-csis.c21
-rw-r--r--drivers/media/video/s5p-fimc/regs-fimc.h301
-rw-r--r--drivers/media/video/s5p-g2d/g2d.c69
-rw-r--r--drivers/media/video/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-core.c68
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/video/s5p-mfc/regs-mfc.h5
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc.c81
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_common.h2
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c16
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c5
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c19
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.c28
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.h4
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.h3
-rw-r--r--drivers/media/video/s5p-tv/hdmi_drv.c480
-rw-r--r--drivers/media/video/s5p-tv/hdmiphy_drv.c225
-rw-r--r--drivers/media/video/s5p-tv/mixer.h3
-rw-r--r--drivers/media/video/s5p-tv/mixer_drv.c2
-rw-r--r--drivers/media/video/s5p-tv/mixer_reg.c15
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c10
-rw-r--r--drivers/media/video/s5p-tv/regs-hdmi.h1
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c45
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c39
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c7
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/video/saa7134/saa7134.h1
-rw-r--r--drivers/media/video/saa7164/saa7164-vbi.c4
-rw-r--r--drivers/media/video/saa7164/saa7164.h5
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c92
-rw-r--r--drivers/media/video/sh_vou.c4
-rw-r--r--drivers/media/video/smiapp-pll.c418
-rw-r--r--drivers/media/video/smiapp-pll.h103
-rw-r--r--drivers/media/video/smiapp/Kconfig6
-rw-r--r--drivers/media/video/smiapp/Makefile5
-rw-r--r--drivers/media/video/smiapp/smiapp-core.c2896
-rw-r--r--drivers/media/video/smiapp/smiapp-limits.c132
-rw-r--r--drivers/media/video/smiapp/smiapp-limits.h128
-rw-r--r--drivers/media/video/smiapp/smiapp-quirk.c306
-rw-r--r--drivers/media/video/smiapp/smiapp-quirk.h83
-rw-r--r--drivers/media/video/smiapp/smiapp-reg-defs.h503
-rw-r--r--drivers/media/video/smiapp/smiapp-reg.h122
-rw-r--r--drivers/media/video/smiapp/smiapp-regs.c273
-rw-r--r--drivers/media/video/smiapp/smiapp-regs.h49
-rw-r--r--drivers/media/video/smiapp/smiapp.h252
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c4
-rw-r--r--drivers/media/video/soc_camera.c55
-rw-r--r--drivers/media/video/soc_mediabus.c54
-rw-r--r--drivers/media/video/sta2x11_vip.c1550
-rw-r--r--drivers/media/video/sta2x11_vip.h40
-rw-r--r--drivers/media/video/stk-webcam.c8
-rw-r--r--drivers/media/video/tda9840.c75
-rw-r--r--drivers/media/video/tlg2300/pd-video.c1
-rw-r--r--drivers/media/video/tm6000/tm6000-input.c3
-rw-r--r--drivers/media/video/tm6000/tm6000-stds.c2
-rw-r--r--drivers/media/video/tm6000/tm6000-video.c14
-rw-r--r--drivers/media/video/tm6000/tm6000.h2
-rw-r--r--drivers/media/video/tuner-core.c17
-rw-r--r--drivers/media/video/tvp5150.c11
-rw-r--r--drivers/media/video/tvp7002.c105
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c12
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c330
-rw-r--r--drivers/media/video/uvc/uvc_queue.c43
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c50
-rw-r--r--drivers/media/video/uvc/uvcvideo.h26
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c15
-rw-r--r--drivers/media/video/v4l2-ctrls.c302
-rw-r--r--drivers/media/video/v4l2-dev.c221
-rw-r--r--drivers/media/video/v4l2-event.c71
-rw-r--r--drivers/media/video/v4l2-ioctl.c663
-rw-r--r--drivers/media/video/v4l2-subdev.c143
-rw-r--r--drivers/media/video/via-camera.c15
-rw-r--r--drivers/media/video/videobuf-core.c3
-rw-r--r--drivers/media/video/videobuf-dma-contig.c199
-rw-r--r--drivers/media/video/videobuf-dvb.c3
-rw-r--r--drivers/media/video/videobuf2-core.c50
-rw-r--r--drivers/media/video/vino.c4
-rw-r--r--drivers/media/video/vivi.c229
-rw-r--r--drivers/media/video/w9966.c94
-rw-r--r--drivers/media/video/zoran/zoran_device.c2
-rw-r--r--drivers/media/video/zoran/zoran_driver.c20
-rw-r--r--drivers/media/video/zr364xx.c2
-rw-r--r--drivers/message/fusion/mptbase.c13
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/i2o/i2o_proc.c13
-rw-r--r--drivers/mfd/Kconfig86
-rw-r--r--drivers/mfd/Makefile10
-rw-r--r--drivers/mfd/ab5500-core.h87
-rw-r--r--drivers/mfd/ab8500-core.c423
-rw-r--r--drivers/mfd/ab8500-debugfs.c6
-rw-r--r--drivers/mfd/ab8500-gpadc.c8
-rw-r--r--drivers/mfd/ab8500-i2c.c128
-rw-r--r--drivers/mfd/ab8500-sysctrl.c6
-rw-r--r--drivers/mfd/anatop-mfd.c35
-rw-r--r--drivers/mfd/asic3.c33
-rw-r--r--drivers/mfd/cs5535-mfd.c13
-rw-r--r--drivers/mfd/da9052-core.c140
-rw-r--r--drivers/mfd/da9052-i2c.c72
-rw-r--r--drivers/mfd/da9052-spi.c19
-rw-r--r--drivers/mfd/db8500-prcmu.c36
-rw-r--r--drivers/mfd/intel_msic.c31
-rw-r--r--drivers/mfd/janz-cmodio.c17
-rw-r--r--drivers/mfd/lm3533-core.c667
-rw-r--r--drivers/mfd/lm3533-ctrlbank.c148
-rw-r--r--drivers/mfd/lpc_ich.c888
-rw-r--r--drivers/mfd/lpc_sch.c26
-rw-r--r--drivers/mfd/max77693-irq.c309
-rw-r--r--drivers/mfd/max77693.c249
-rw-r--r--drivers/mfd/mc13xxx-core.c242
-rw-r--r--drivers/mfd/mc13xxx-i2c.c128
-rw-r--r--drivers/mfd/mc13xxx-spi.c203
-rw-r--r--drivers/mfd/mc13xxx.h45
-rw-r--r--drivers/mfd/omap-usb-host.c48
-rw-r--r--drivers/mfd/palmas.c13
-rw-r--r--drivers/mfd/pcf50633-core.c36
-rw-r--r--drivers/mfd/rc5t583.c8
-rw-r--r--drivers/mfd/rdc321x-southbridge.c13
-rw-r--r--drivers/mfd/s5m-core.c6
-rw-r--r--drivers/mfd/sta2x11-mfd.c467
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c5
-rw-r--r--drivers/mfd/tps65090.c33
-rw-r--r--drivers/mfd/tps65217.c17
-rw-r--r--drivers/mfd/tps65910-irq.c130
-rw-r--r--drivers/mfd/tps65910.c205
-rw-r--r--drivers/mfd/tps65911-comparator.c2
-rw-r--r--drivers/mfd/twl4030-irq.c1
-rw-r--r--drivers/mfd/twl6040-core.c120
-rw-r--r--drivers/mfd/twl6040-irq.c32
-rw-r--r--drivers/mfd/vx855.c12
-rw-r--r--drivers/mfd/wm831x-auxadc.c6
-rw-r--r--drivers/mfd/wm831x-core.c45
-rw-r--r--drivers/mfd/wm831x-irq.c148
-rw-r--r--drivers/mfd/wm8350-core.c31
-rw-r--r--drivers/mfd/wm8350-i2c.c61
-rw-r--r--drivers/mfd/wm8400-core.c250
-rw-r--r--drivers/mfd/wm8994-core.c25
-rw-r--r--drivers/mfd/wm8994-regmap.c1
-rw-r--r--drivers/misc/ab8500-pwm.c6
-rw-r--r--drivers/misc/c2port/Kconfig6
-rw-r--r--drivers/misc/mei/interrupt.c2
-rw-r--r--drivers/misc/mei/main.c11
-rw-r--r--drivers/misc/mei/wd.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c4
-rw-r--r--drivers/mmc/card/block.c36
-rw-r--r--drivers/mmc/card/queue.c6
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/cd-gpio.c7
-rw-r--r--drivers/mmc/core/core.c18
-rw-r--r--drivers/mmc/core/mmc.c103
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/core/sdio.c8
-rw-r--r--drivers/mmc/core/sdio_irq.c11
-rw-r--r--drivers/mmc/host/Kconfig17
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h14
-rw-r--r--drivers/mmc/host/atmel-mci.c477
-rw-r--r--drivers/mmc/host/davinci_mmc.c1
-rw-r--r--drivers/mmc/host/dw_mmc.c54
-rw-r--r--drivers/mmc/host/imxmmc.c1169
-rw-r--r--drivers/mmc/host/imxmmc.h64
-rw-r--r--drivers/mmc/host/mmci.c72
-rw-r--r--drivers/mmc/host/mvsdio.c14
-rw-r--r--drivers/mmc/host/mxcmmc.c39
-rw-r--r--drivers/mmc/host/mxs-mmc.c197
-rw-r--r--drivers/mmc/host/omap.c58
-rw-r--r--drivers/mmc/host/omap_hsmmc.c71
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c44
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c8
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci-spear.c86
-rw-r--r--drivers/mmc/host/sdhci-tegra.c26
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm63xxpart.c41
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c18
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c7
-rw-r--r--drivers/mtd/devices/docg3.c56
-rw-r--r--drivers/mtd/devices/m25p80.c5
-rw-r--r--drivers/mtd/devices/spear_smi.c14
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c2
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c13
-rw-r--r--drivers/mtd/maps/lantiq-flash.c76
-rw-r--r--drivers/mtd/maps/pci.c13
-rw-r--r--drivers/mtd/maps/scb2_flash.c15
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtdcore.c57
-rw-r--r--drivers/mtd/mtdoops.c22
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/nand/Kconfig42
-rw-r--r--drivers/mtd/nand/alauda.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c14
-rw-r--r--drivers/mtd/nand/au1550nd.c2
-rw-r--r--drivers/mtd/nand/bcm_umi_bch.c14
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c4
-rw-r--r--drivers/mtd/nand/cafe_nand.c37
-rw-r--r--drivers/mtd/nand/cs553x_nand.c1
-rw-r--r--drivers/mtd/nand/denali.c38
-rw-r--r--drivers/mtd/nand/docg4.c22
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c37
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c47
-rw-r--r--drivers/mtd/nand/fsmc_nand.c26
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h42
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c27
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c176
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h6
-rw-r--r--drivers/mtd/nand/h1910.c1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c6
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c1
-rw-r--r--drivers/mtd/nand/mxc_nand.c649
-rw-r--r--drivers/mtd/nand/nand_base.c240
-rw-r--r--drivers/mtd/nand/nand_bbt.c1
-rw-r--r--drivers/mtd/nand/nand_ids.c6
-rw-r--r--drivers/mtd/nand/nandsim.c40
-rw-r--r--drivers/mtd/nand/omap2.c253
-rw-r--r--drivers/mtd/nand/orion_nand.c18
-rw-r--r--drivers/mtd/nand/pasemi_nand.c1
-rw-r--r--drivers/mtd/nand/plat_nand.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/nand/r852.c22
-rw-r--r--drivers/mtd/nand/sh_flctl.c8
-rw-r--r--drivers/mtd/nand/sm_common.c9
-rw-r--r--drivers/mtd/onenand/onenand_base.c6
-rw-r--r--drivers/mtd/ubi/Kconfig8
-rw-r--r--drivers/mtd/ubi/Makefile5
-rw-r--r--drivers/mtd/ubi/attach.c (renamed from drivers/mtd/ubi/scan.c)970
-rw-r--r--drivers/mtd/ubi/build.c85
-rw-r--r--drivers/mtd/ubi/cdev.c36
-rw-r--r--drivers/mtd/ubi/debug.c149
-rw-r--r--drivers/mtd/ubi/debug.h98
-rw-r--r--drivers/mtd/ubi/eba.c68
-rw-r--r--drivers/mtd/ubi/gluebi.c2
-rw-r--r--drivers/mtd/ubi/io.c211
-rw-r--r--drivers/mtd/ubi/kapi.c61
-rw-r--r--drivers/mtd/ubi/scan.h174
-rw-r--r--drivers/mtd/ubi/ubi-media.h8
-rw-r--r--drivers/mtd/ubi/ubi.h179
-rw-r--r--drivers/mtd/ubi/upd.c16
-rw-r--r--drivers/mtd/ubi/vmt.c62
-rw-r--r--drivers/mtd/ubi/vtbl.c228
-rw-r--r--drivers/mtd/ubi/wl.c261
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c18
-rw-r--r--drivers/net/bonding/bond_procfs.c15
-rw-r--r--drivers/net/bonding/bond_sysfs.c8
-rw-r--r--drivers/net/caif/caif_hsi.c5
-rw-r--r--drivers/net/can/c_can/c_can.c20
-rw-r--r--drivers/net/can/c_can/c_can.h1
-rw-r--r--drivers/net/can/cc770/cc770_platform.c2
-rw-r--r--drivers/net/can/flexcan.c4
-rw-r--r--drivers/net/can/sja1000/Kconfig2
-rw-r--r--drivers/net/cris/eth_v10.c1
-rw-r--r--drivers/net/dummy.c4
-rw-r--r--drivers/net/ethernet/3com/typhoon.c3
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c54
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c10
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/freescale/fec.c35
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c19
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c60
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c79
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c8
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c43
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c15
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c47
-rw-r--r--drivers/net/ethernet/marvell/sky2.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c9
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c11
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c15
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c24
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c12
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c14
-rw-r--r--drivers/net/ethernet/s6gmac.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h63
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c12
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c4
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c1
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/Makefile4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1898
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c12
-rw-r--r--drivers/net/irda/Kconfig2
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/mdio-mux.c10
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/micrel.c8
-rw-r--r--drivers/net/usb/asix.c3
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/mcs7830.c25
-rw-r--r--drivers/net/usb/qmi_wwan.c107
-rw-r--r--drivers/net/usb/sierra_net.c14
-rw-r--r--drivers/net/usb/usbnet.c53
-rw-r--r--drivers/net/virtio_net.c24
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c238
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h33
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c45
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c11
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c29
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c104
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c94
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h178
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c47
-rw-r--r--drivers/net/wireless/ath/key.c4
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/bus.c6
-rw-r--r--drivers/net/wireless/b43/dma.c2
-rw-r--r--drivers/net/wireless/b43/main.c25
-rw-r--r--drivers/net/wireless/b43legacy/dma.c2
-rw-r--r--drivers/net/wireless/b43legacy/main.c6
-rw-r--r--drivers/net/wireless/b43legacy/phy.c4
-rw-r--r--drivers/net/wireless/b43legacy/radio.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c248
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c350
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c265
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h37
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c479
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h24
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c142
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.c826
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.h77
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.c410
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.h36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c67
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c333
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h228
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c980
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.c6
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c20
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c4
-rw-r--r--drivers/net/wireless/iwlegacy/common.c14
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig8
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c9
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c27
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c5
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h7
-rw-r--r--drivers/net/wireless/mwifiex/Makefile2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c531
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h2
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c21
-rw-r--r--drivers/net/wireless/mwifiex/decl.h13
-rw-r--r--drivers/net/wireless/mwifiex/fw.h165
-rw-r--r--drivers/net/wireless/mwifiex/ie.c397
-rw-r--r--drivers/net/wireless/mwifiex/init.c1
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h32
-rw-r--r--drivers/net/wireless/mwifiex/join.c26
-rw-r--r--drivers/net/wireless/mwifiex/main.c57
-rw-r--r--drivers/net/wireless/mwifiex/main.h26
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c69
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c8
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c50
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c9
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c10
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c453
-rw-r--r--drivers/net/wireless/mwifiex/usb.c28
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c7
-rw-r--r--drivers/net/wireless/rndis_wlan.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c7
-rw-r--r--drivers/net/wireless/ti/wl12xx/Kconfig1
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c82
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h32
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c29
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c323
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c38
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx.h41
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h6
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/net/xen-netfront.c14
-rw-r--r--drivers/nfc/Kconfig13
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/pn533.c19
-rw-r--r--drivers/nfc/pn544_hci.c947
-rw-r--r--drivers/of/Kconfig12
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/of_i2c.c16
-rw-r--r--drivers/of/of_pci_irq.c2
-rw-r--r--drivers/of/of_spi.c99
-rw-r--r--drivers/of/platform.c8
-rw-r--r--drivers/oprofile/oprofile_perf.c2
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/pci/pci-driver.c12
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx.c32
-rw-r--r--drivers/pinctrl/pinctrl-imx6q.c2
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c13
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c45
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c2
-rw-r--r--drivers/pinctrl/spear/Kconfig10
-rw-r--r--drivers/pinctrl/spear/Makefile2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h253
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c2198
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c1989
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c105
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/platform/x86/acer-wmi.c24
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/apple-gmux.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c308
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c2
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c34
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c10
-rw-r--r--drivers/platform/x86/ideapad-laptop.c15
-rw-r--r--drivers/platform/x86/intel_ips.c22
-rw-r--r--drivers/platform/x86/sony-laptop.c1568
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c140
-rw-r--r--drivers/platform/x86/xo1-rfkill.c13
-rw-r--r--drivers/power/Kconfig10
-rw-r--r--drivers/power/ab8500_btemp.c12
-rw-r--r--drivers/power/ab8500_charger.c13
-rw-r--r--drivers/power/ab8500_fg.c12
-rw-r--r--drivers/power/bq27x00_battery.c2
-rw-r--r--drivers/power/charger-manager.c392
-rw-r--r--drivers/power/ds2781_battery.c20
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17042_battery.c148
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/sbs-battery.c2
-rw-r--r--drivers/power/smb347-charger.c712
-rw-r--r--drivers/power/wm831x_power.c21
-rw-r--r--drivers/ps3/ps3av.c24
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/rapidio/Kconfig14
-rw-r--r--drivers/rapidio/devices/Makefile3
-rw-r--r--drivers/rapidio/devices/tsi721.c211
-rw-r--r--drivers/rapidio/devices/tsi721.h105
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c823
-rw-r--r--drivers/rapidio/rio.c81
-rw-r--r--drivers/regulator/ab8500.c22
-rw-r--r--drivers/regulator/anatop-regulator.c20
-rw-r--r--drivers/regulator/core.c13
-rw-r--r--drivers/regulator/db8500-prcmu.c40
-rw-r--r--drivers/regulator/gpio-regulator.c16
-rw-r--r--drivers/regulator/max8649.c1
-rw-r--r--drivers/regulator/palmas-regulator.c14
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/regulator/tps65023-regulator.c2
-rw-r--r--drivers/regulator/tps6524x-regulator.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c102
-rw-r--r--drivers/regulator/wm831x-dcdc.c24
-rw-r--r--drivers/regulator/wm831x-isink.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c10
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c17
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c57
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ab8500.c10
-rw-r--r--drivers/rtc/rtc-cmos.c9
-rw-r--r--drivers/rtc/rtc-ds1307.c20
-rw-r--r--drivers/rtc/rtc-ep93xx.c24
-rw-r--r--drivers/rtc/rtc-imxdi.c6
-rw-r--r--drivers/rtc/rtc-lpc32xx.c12
-rw-r--r--drivers/rtc/rtc-m41t93.c46
-rw-r--r--drivers/rtc/rtc-mxc.c5
-rw-r--r--drivers/rtc/rtc-pcf8563.c44
-rw-r--r--drivers/rtc/rtc-pl031.c14
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/rtc/rtc-spear.c12
-rw-r--r--drivers/rtc/rtc-tegra.c50
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/s390/char/sclp_cmd.c12
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/scsi/Kconfig83
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/aha1542.c66
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.c3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfad_attr.c17
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c18
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c173
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c39
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c122
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c21
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c10
-rw-r--r--drivers/scsi/esp_scsi.c2
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c200
-rw-r--r--drivers/scsi/fcoe/fcoe.h8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c159
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c832
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c13
-rw-r--r--drivers/scsi/fd_mcs.c1354
-rw-r--r--drivers/scsi/ibmmca.c2379
-rw-r--r--drivers/scsi/libsas/sas_ata.c12
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/qla1280.c7
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h78
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c615
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c176
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c4973
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1004
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c1919
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h82
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c134
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h28
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c95
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c111
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c738
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h192
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c78
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_error.c6
-rw-r--r--drivers/scsi/scsi_lib.c11
-rw-r--r--drivers/scsi/scsi_pm.c5
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_wait_scan.c7
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sim710.c124
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
-rw-r--r--drivers/sh/clk/cpg.c77
-rw-r--r--drivers/sh/intc/dynamic.c8
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-ath79.c3
-rw-r--r--drivers/spi/spi-coldfire-qspi.c255
-rw-r--r--drivers/spi/spi-dw-pci.c13
-rw-r--r--drivers/spi/spi-ep93xx.c37
-rw-r--r--drivers/spi/spi-fsl-espi.c1
-rw-r--r--drivers/spi/spi-fsl-lib.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-imx.c30
-rw-r--r--drivers/spi/spi-lm70llp.c3
-rw-r--r--drivers/spi/spi-mpc52xx.c3
-rw-r--r--drivers/spi/spi-omap2-mcspi.c372
-rw-r--r--drivers/spi/spi-orion.c30
-rw-r--r--drivers/spi/spi-ppc4xx.c4
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c12
-rw-r--r--drivers/spi/spi-rspi.c320
-rw-r--r--drivers/spi/spi-sirf.c20
-rw-r--r--drivers/spi/spi-topcliff-pch.c3
-rw-r--r--drivers/spi/spi.c98
-rw-r--r--drivers/ssb/b43_pci_bridge.c2
-rw-r--r--drivers/ssb/pci.c88
-rw-r--r--drivers/staging/android/ashmem.c10
-rw-r--r--drivers/staging/comedi/drivers.c5
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c2
-rw-r--r--drivers/staging/iio/Documentation/device.txt2
-rw-r--r--drivers/staging/iio/adc/Kconfig1
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c3
-rw-r--r--drivers/staging/media/as102/as10x_cmd.c28
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c4
-rw-r--r--drivers/staging/media/easycap/easycap_main.c1639
-rw-r--r--drivers/staging/media/go7007/go7007-v4l2.c2
-rw-r--r--drivers/staging/media/go7007/s2250-loader.c2
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c4
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c4
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c6
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c7
-rw-r--r--drivers/staging/omapdrm/omap_drv.c4
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c10
-rw-r--r--drivers/staging/ramster/zcache-main.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c10
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/Makefile1
-rw-r--r--drivers/target/sbp/Kconfig11
-rw-r--r--drivers/target/sbp/Makefile1
-rw-r--r--drivers/target/sbp/sbp_target.c2621
-rw-r--r--drivers/target/sbp/sbp_target.h251
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_cdb.c2
-rw-r--r--drivers/target/target_core_file.c70
-rw-r--r--drivers/target/target_core_file.h1
-rw-r--r--drivers/target/target_core_pr.c7
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c2
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c3
-rw-r--r--drivers/tty/amiserial.c14
-rw-r--r--drivers/tty/cyclades.c2
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c31
-rw-r--r--drivers/tty/n_r3964.c11
-rw-r--r--drivers/tty/pty.c25
-rw-r--r--drivers/tty/serial/8250/8250.c2
-rw-r--r--drivers/tty/serial/8250/8250_mca.c61
-rw-r--r--drivers/tty/serial/8250/Kconfig9
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/amba-pl011.c45
-rw-r--r--drivers/tty/serial/crisv10.c8
-rw-r--r--drivers/tty/serial/imx.c38
-rw-r--r--drivers/tty/serial/lantiq.c83
-rw-r--r--drivers/tty/serial/sb1250-duart.c1
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c87
-rw-r--r--drivers/tty/serial/zs.c1
-rw-r--r--drivers/tty/synclink.c4
-rw-r--r--drivers/tty/synclink_gt.c4
-rw-r--r--drivers/tty/synclinkmp.c4
-rw-r--r--drivers/tty/tty_io.c67
-rw-r--r--drivers/tty/tty_ldisc.c30
-rw-r--r--drivers/tty/tty_mutex.c60
-rw-r--r--drivers/tty/tty_port.c6
-rw-r--r--drivers/tty/vt/keyboard.c20
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/class/cdc-acm.c8
-rw-r--r--drivers/usb/class/cdc-wdm.c11
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/hub.c20
-rw-r--r--drivers/usb/core/message.c3
-rw-r--r--drivers/usb/dwc3/gadget.c3
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c6
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.h4
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h4
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c1
-rw-r--r--drivers/usb/gadget/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/gadget/u_serial.c2
-rw-r--r--drivers/usb/gadget/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mxc.c62
-rw-r--r--drivers/usb/host/ehci-omap.c184
-rw-r--r--drivers/usb/host/ehci-orion.c16
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-sh.c3
-rw-r--r--drivers/usb/host/ehci-tegra.c5
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c4
-rw-r--r--drivers/usb/host/ohci-hub.c2
-rw-r--r--drivers/usb/host/xhci-hub.c44
-rw-r--r--drivers/usb/host/xhci-mem.c74
-rw-r--r--drivers/usb/host/xhci-ring.c11
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/host/xhci.h6
-rw-r--r--drivers/usb/musb/davinci.c1
-rw-r--r--drivers/usb/musb/davinci.h4
-rw-r--r--drivers/usb/musb/musb_gadget.c1
-rw-r--r--drivers/usb/musb/musb_host.c14
-rw-r--r--drivers/usb/otg/twl6030-usb.c15
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/serial/cp210x.c12
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/generic.c10
-rw-r--r--drivers/usb/serial/mct_u232.c13
-rw-r--r--drivers/usb/serial/metro-usb.c8
-rw-r--r--drivers/usb/serial/mos7840.c2
-rw-r--r--drivers/usb/serial/option.c130
-rw-r--r--drivers/usb/serial/qcserial.c6
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/usb-serial.c12
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/video/Kconfig35
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/au1100fb.c2
-rw-r--r--drivers/video/auo_k1900fb.c198
-rw-r--r--drivers/video/auo_k1901fb.c251
-rw-r--r--drivers/video/auo_k190x.c1046
-rw-r--r--drivers/video/auo_k190x.h129
-rw-r--r--drivers/video/backlight/Kconfig14
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c4
-rw-r--r--drivers/video/backlight/adp8860_bl.c28
-rw-r--r--drivers/video/backlight/adp8870_bl.c28
-rw-r--r--drivers/video/backlight/ams369fg06.c16
-rw-r--r--drivers/video/backlight/apple_bl.c21
-rw-r--r--drivers/video/backlight/backlight.c11
-rw-r--r--drivers/video/backlight/corgi_lcd.c12
-rw-r--r--drivers/video/backlight/cr_bllcd.c9
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c6
-rw-r--r--drivers/video/backlight/ili9320.c11
-rw-r--r--drivers/video/backlight/jornada720_bl.c14
-rw-r--r--drivers/video/backlight/jornada720_lcd.c8
-rw-r--r--drivers/video/backlight/l4f00242t03.c27
-rw-r--r--drivers/video/backlight/lcd.c20
-rw-r--r--drivers/video/backlight/ld9040.c15
-rw-r--r--drivers/video/backlight/lm3533_bl.c423
-rw-r--r--drivers/video/backlight/lms283gf05.c9
-rw-r--r--drivers/video/backlight/ltv350qv.c24
-rw-r--r--drivers/video/backlight/omap1_bl.c4
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c1
-rw-r--r--drivers/video/backlight/progear_bl.c6
-rw-r--r--drivers/video/backlight/s6e63m0.c16
-rw-r--r--drivers/video/backlight/tdo24m.c21
-rw-r--r--drivers/video/backlight/tosa_bl.c11
-rw-r--r--drivers/video/backlight/tosa_lcd.c8
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/bfin_adv7393fb.c49
-rw-r--r--drivers/video/broadsheetfb.c2
-rw-r--r--drivers/video/cobalt_lcdfb.c45
-rw-r--r--drivers/video/console/Kconfig14
-rw-r--r--drivers/video/efifb.c79
-rw-r--r--drivers/video/ep93xx-fb.c32
-rw-r--r--drivers/video/exynos/exynos_dp_core.c69
-rw-r--r--drivers/video/exynos/exynos_dp_core.h3
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c45
-rw-r--r--drivers/video/exynos/exynos_dp_reg.h29
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c49
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c36
-rw-r--r--drivers/video/exynos/s6e8ax0.c15
-rw-r--r--drivers/video/fb_defio.c6
-rw-r--r--drivers/video/fbmem.c21
-rw-r--r--drivers/video/fbsysfs.c2
-rw-r--r--drivers/video/fsl-diu-fb.c1
-rw-r--r--drivers/video/imxfb.c50
-rw-r--r--drivers/video/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/matrox/matroxfb_maven.c1
-rw-r--r--drivers/video/mb862xx/mb862xx-i2c.c2
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/mbx/mbxfb.c4
-rw-r--r--drivers/video/mxsfb.c13
-rw-r--r--drivers/video/omap/Kconfig8
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c8
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c107
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c8
-rw-r--r--drivers/video/omap2/displays/panel-taal.c90
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c76
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c22
-rw-r--r--drivers/video/omap2/dss/Kconfig13
-rw-r--r--drivers/video/omap2/dss/apply.c134
-rw-r--r--drivers/video/omap2/dss/core.c293
-rw-r--r--drivers/video/omap2/dss/dispc.c749
-rw-r--r--drivers/video/omap2/dss/dispc.h72
-rw-r--r--drivers/video/omap2/dss/display.c49
-rw-r--r--drivers/video/omap2/dss/dpi.c75
-rw-r--r--drivers/video/omap2/dss/dsi.c406
-rw-r--r--drivers/video/omap2/dss/dss.c67
-rw-r--r--drivers/video/omap2/dss/dss.h151
-rw-r--r--drivers/video/omap2/dss/dss_features.c30
-rw-r--r--drivers/video/omap2/dss/dss_features.h5
-rw-r--r--drivers/video/omap2/dss/hdmi.c445
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c236
-rw-r--r--drivers/video/omap2/dss/manager.c19
-rw-r--r--drivers/video/omap2/dss/overlay.c16
-rw-r--r--drivers/video/omap2/dss/rfbi.c86
-rw-r--r--drivers/video/omap2/dss/sdi.c63
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h32
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c480
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h161
-rw-r--r--drivers/video/omap2/dss/venc.c135
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c17
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c12
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h1
-rw-r--r--drivers/video/omap2/vrfb.c4
-rw-r--r--drivers/video/pxa3xx-gcu.c5
-rw-r--r--drivers/video/s3c-fb.c160
-rw-r--r--drivers/video/savage/savagefb_driver.c10
-rw-r--r--drivers/video/sh_mobile_hdmi.c219
-rw-r--r--drivers/video/sis/init.h45
-rw-r--r--drivers/video/sis/sis_main.c41
-rw-r--r--drivers/video/skeletonfb.c2
-rw-r--r--drivers/video/smscufx.c4
-rw-r--r--drivers/video/udlfb.c2
-rw-r--r--drivers/video/via/viafbdev.c34
-rw-r--r--drivers/virtio/virtio_balloon.c24
-rw-r--r--drivers/w1/Kconfig2
-rw-r--r--drivers/w1/masters/mxc_w1.c4
-rw-r--r--drivers/watchdog/Kconfig52
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/ar7_wdt.c33
-rw-r--r--drivers/watchdog/da9052_wdt.c251
-rw-r--r--drivers/watchdog/hpwdt.c17
-rw-r--r--drivers/watchdog/i6300esb.c14
-rw-r--r--drivers/watchdog/iTCO_vendor.h6
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c43
-rw-r--r--drivers/watchdog/iTCO_wdt.c534
-rw-r--r--drivers/watchdog/ie6xx_wdt.c348
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/it87_wdt.c7
-rw-r--r--drivers/watchdog/ixp2000_wdt.c215
-rw-r--r--drivers/watchdog/lantiq_wdt.c56
-rw-r--r--drivers/watchdog/orion_wdt.c16
-rw-r--r--drivers/watchdog/pcwd_pci.c18
-rw-r--r--drivers/watchdog/pnx4008_wdt.c10
-rw-r--r--drivers/watchdog/s3c2410_wdt.c7
-rw-r--r--drivers/watchdog/sch311x_wdt.c39
-rw-r--r--drivers/watchdog/shwdt.c306
-rw-r--r--drivers/watchdog/sp5100_tco.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c253
-rw-r--r--drivers/watchdog/via_wdt.c20
-rw-r--r--drivers/watchdog/watchdog_core.c74
-rw-r--r--drivers/watchdog/watchdog_core.h (renamed from drivers/watchdog/watchdog_dev.h)8
-rw-r--r--drivers/watchdog/watchdog_dev.c375
-rw-r--r--drivers/watchdog/wdt_pci.c34
-rw-r--r--drivers/watchdog/wm831x_wdt.c13
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/acpi.c62
-rw-r--r--drivers/xen/events.c14
-rw-r--r--drivers/xen/grant-table.c125
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/tmem.c8
-rw-r--r--drivers/xen/xen-acpi-processor.c1
-rw-r--r--drivers/xen/xen-selfballoon.c34
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c6
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c51
2103 files changed, 162406 insertions, 64991 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 35d5181c8ef2..2ba29ffef2cb 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_SFI) += sfi/
# PnP must come after ACPI since it will eventually need to check if acpi
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
-obj-$(CONFIG_ARM_AMBA) += amba/
+obj-y += amba/
# Many drivers will want to use DMA so this has to be made available
# really early.
obj-$(CONFIG_DMA_ENGINE) += dma/
@@ -92,7 +92,6 @@ obj-$(CONFIG_BT) += bluetooth/
obj-$(CONFIG_ACCESSIBILITY) += accessibility/
obj-$(CONFIG_ISDN) += isdn/
obj-$(CONFIG_EDAC) += edac/
-obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 47768ff87343..80998958cf45 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -208,7 +208,7 @@ config ACPI_IPMI
config ACPI_HOTPLUG_CPU
bool
- depends on ACPI_PROCESSOR && HOTPLUG_CPU
+ depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
select ACPI_CONTAINER
default y
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index a43fa1a57d57..1502c50273b5 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -36,6 +36,7 @@
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
static unsigned long power_saving_mwait_eax;
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
- mutex_lock(&isolated_cpus_lock);
+ mutex_lock(&round_robin_lock);
cpumask_clear(tmp);
for_each_cpu(cpu, pad_busy_cpus)
cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
if (cpumask_empty(tmp))
cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
if (cpumask_empty(tmp)) {
- mutex_unlock(&isolated_cpus_lock);
+ mutex_unlock(&round_robin_lock);
return;
}
for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
tsk_in_cpu[tsk_index] = preferred_cpu;
cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
cpu_weight[preferred_cpu]++;
- mutex_unlock(&isolated_cpus_lock);
+ mutex_unlock(&round_robin_lock);
set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 0ed85cac3231..615996a36bed 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -95,18 +95,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
return_ACPI_STATUS(status);
}
- if (sleep_state != ACPI_STATE_S5) {
- /*
- * Disable BM arbitration. This feature is contained within an
- * optional register (PM2 Control), so ignore a BAD_ADDRESS
- * exception.
- */
- status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
- if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
- return_ACPI_STATUS(status);
- }
- }
-
/*
* 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
@@ -364,16 +352,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
[ACPI_EVENT_POWER_BUTTON].
status_register_id, ACPI_CLEAR_STATUS);
- /*
- * Enable BM arbitration. This feature is contained within an
- * optional register (PM2 Control), so ignore a BAD_ADDRESS
- * exception.
- */
- status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
- if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
- return_ACPI_STATUS(status);
- }
-
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 23ce09686418..fe6626035495 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
/* Create the new outer package and populate it */
status =
- acpi_ns_wrap_with_package(data, *elements,
+ acpi_ns_wrap_with_package(data, return_object,
return_object_ptr);
if (ACPI_FAILURE(status)) {
return (status);
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 5577762daee1..6686b1eaf13e 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- return acpi_os_map_generic_address(&entry->register_region);
+ return apei_map_generic_address(&entry->register_region);
return 0;
}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- acpi_os_unmap_generic_address(&entry->register_region);
+ apei_unmap_generic_address(&entry->register_region);
return 0;
}
@@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
return 0;
}
+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+ int rc;
+ u32 access_bit_width;
+ u64 address;
+
+ rc = apei_check_gar(reg, &address, &access_bit_width);
+ if (rc)
+ return rc;
+ return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cca240a33038..f220d642136e 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -7,6 +7,8 @@
#define APEI_INTERNAL_H
#include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
struct apei_exec_context;
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+ acpi_os_unmap_generic_address(reg);
+}
+
int apei_read(u64 *val, struct acpi_generic_address *reg);
int apei_write(u64 val, struct acpi_generic_address *reg);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 9b3cac0abecc..1599566ed1fe 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- rc = acpi_os_map_generic_address(&generic->error_status_address);
+ rc = apei_map_generic_address(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap:
- acpi_os_unmap_generic_address(&generic->error_status_address);
+ apei_unmap_generic_address(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
- acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+ apei_unmap_generic_address(&ghes->generic->error_status_address);
}
enum {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 86933ca8b472..7dd3f9fb9f3f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
static void acpi_battery_refresh(struct acpi_battery *battery)
{
+ int power_unit;
+
if (!battery->bat.dev)
return;
+ power_unit = battery->power_unit;
+
acpi_battery_get_info(battery);
- /* The battery may have changed its reporting units. */
+
+ if (power_unit == battery->power_unit)
+ return;
+
+ /* The battery has changed its reporting units. */
sysfs_remove_battery(battery);
sysfs_add_battery(battery);
}
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 8cf6c46e99fb..6680df36b963 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/io.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3188da3df8da..adceafda9c17 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
Power Management
-------------------------------------------------------------------------- */
+static const char *state_string(int state)
+{
+ switch (state) {
+ case ACPI_STATE_D0:
+ return "D0";
+ case ACPI_STATE_D1:
+ return "D1";
+ case ACPI_STATE_D2:
+ return "D2";
+ case ACPI_STATE_D3_HOT:
+ return "D3hot";
+ case ACPI_STATE_D3_COLD:
+ return "D3";
+ default:
+ return "(unknown)";
+ }
+}
+
static int __acpi_bus_get_power(struct acpi_device *device, int *state)
{
- int result = 0;
- acpi_status status = 0;
- unsigned long long psc = 0;
+ int result = ACPI_STATE_UNKNOWN;
if (!device || !state)
return -EINVAL;
- *state = ACPI_STATE_UNKNOWN;
-
- if (device->flags.power_manageable) {
- /*
- * Get the device's power state either directly (via _PSC) or
- * indirectly (via power resources).
- */
- if (device->power.flags.power_resources) {
- result = acpi_power_get_inferred_state(device, state);
- if (result)
- return result;
- } else if (device->power.flags.explicit_get) {
- status = acpi_evaluate_integer(device->handle, "_PSC",
- NULL, &psc);
- if (ACPI_FAILURE(status))
- return -ENODEV;
- *state = (int)psc;
- }
- } else {
+ if (!device->flags.power_manageable) {
/* TBD: Non-recursive algorithm for walking up hierarchy. */
*state = device->parent ?
device->parent->power.state : ACPI_STATE_D0;
+ goto out;
+ }
+
+ /*
+ * Get the device's power state either directly (via _PSC) or
+ * indirectly (via power resources).
+ */
+ if (device->power.flags.explicit_get) {
+ unsigned long long psc;
+ acpi_status status = acpi_evaluate_integer(device->handle,
+ "_PSC", NULL, &psc);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ result = psc;
+ }
+ /* The test below covers ACPI_STATE_UNKNOWN too. */
+ if (result <= ACPI_STATE_D2) {
+ ; /* Do nothing. */
+ } else if (device->power.flags.power_resources) {
+ int error = acpi_power_get_inferred_state(device, &result);
+ if (error)
+ return error;
+ } else if (result == ACPI_STATE_D3_HOT) {
+ result = ACPI_STATE_D3;
}
+ *state = result;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
- device->pnp.bus_id, *state));
+ out:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+ device->pnp.bus_id, state_string(*state)));
return 0;
}
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
/* Make sure this is a valid target state */
if (state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
- state));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+ state_string(state)));
return 0;
}
if (!device->power.states[state].flags.valid) {
- printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+ printk(KERN_WARNING PREFIX "Device does not support %s\n",
+ state_string(state));
return -ENODEV;
}
if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
end:
if (result)
printk(KERN_WARNING PREFIX
- "Device [%s] failed to transition to D%d\n",
- device->pnp.bus_id, state);
+ "Device [%s] failed to transition to %s\n",
+ device->pnp.bus_id, state_string(state));
else {
device->power.state = state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] transitioned to D%d\n",
- device->pnp.bus_id, state));
+ "Device [%s] transitioned to %s\n",
+ device->pnp.bus_id, state_string(state)));
}
return result;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0500f719f63e..dd6d6a3c6780 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
* We know a device's inferred power state when all the resources
* required for a given D-state are 'on'.
*/
- for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
list = &device->power.states[i].resources;
if (list->count < 1)
continue;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c850de4c9a14..eff722278ff5 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always return 0 for CPU0's handle.
+ * Ignores apic_id and always returns 0 for the processor
+ * handle with acpi id 0 if nr_cpu_ids is 1.
+ * This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
*/
- if (acpi_id == 0)
+ if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
return apic_id;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f3decb30223f..47a8caa89dbe 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -224,6 +224,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
/*
* Suspend / resume control
*/
+static int acpi_idle_suspend;
static u32 saved_bm_rld;
static void acpi_idle_bm_rld_save(void)
@@ -242,13 +243,21 @@ static void acpi_idle_bm_rld_restore(void)
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
+ if (acpi_idle_suspend == 1)
+ return 0;
+
acpi_idle_bm_rld_save();
+ acpi_idle_suspend = 1;
return 0;
}
int acpi_processor_resume(struct acpi_device * device)
{
+ if (acpi_idle_suspend == 0)
+ return 0;
+
acpi_idle_bm_rld_restore();
+ acpi_idle_suspend = 0;
return 0;
}
@@ -754,6 +763,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
local_irq_disable();
+ if (acpi_idle_suspend) {
+ local_irq_enable();
+ cpu_relax();
+ return -EBUSY;
+ }
+
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
@@ -823,6 +838,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
local_irq_disable();
+ if (acpi_idle_suspend) {
+ local_irq_enable();
+ cpu_relax();
+ return -EBUSY;
+ }
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -907,14 +928,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
drv, drv->safe_state_index);
} else {
local_irq_disable();
- acpi_safe_halt();
+ if (!acpi_idle_suspend)
+ acpi_safe_halt();
local_irq_enable();
- return -EINVAL;
+ return -EBUSY;
}
}
local_irq_disable();
+ if (acpi_idle_suspend) {
+ local_irq_enable();
+ cpu_relax();
+ return -EBUSY;
+ }
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0af48a8554cd..a093dc163a42 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
struct acpi_buffer state = { 0, NULL };
union acpi_object *pss = NULL;
int i;
+ int last_invalid = -1;
status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
@@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
((u32)(px->core_frequency * 1000) !=
(px->core_frequency * 1000))) {
printk(KERN_ERR FW_BUG PREFIX
- "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
- px->core_frequency);
- result = -EFAULT;
- kfree(pr->performance->states);
- goto end;
+ "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
+ pr->id, px->core_frequency);
+ if (last_invalid == -1)
+ last_invalid = i;
+ } else {
+ if (last_invalid != -1) {
+ /*
+ * Copy this valid entry over last_invalid entry
+ */
+ memcpy(&(pr->performance->states[last_invalid]),
+ px, sizeof(struct acpi_processor_px));
+ ++last_invalid;
+ }
}
}
+ if (last_invalid == 0) {
+ printk(KERN_ERR FW_BUG PREFIX
+ "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
+ result = -EFAULT;
+ kfree(pr->performance->states);
+ pr->performance->states = NULL;
+ }
+
+ if (last_invalid > 0)
+ pr->performance->state_count = last_invalid;
+
end:
kfree(buffer.pointer);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 85cbfdccc97c..c8a1f3b68110 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void)
ACPI_BUS_TYPE_POWER_BUTTON,
ACPI_STA_DEFAULT,
&ops);
+ device_init_wakeup(&device->dev, true);
}
if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 06527c526618..88561029cca8 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
static void acpi_sleep_tts_switch(u32 acpi_state)
{
@@ -93,11 +94,9 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
/* do we have a wakeup address for S2 and S3? */
if (acpi_state == ACPI_STATE_S3) {
- if (!acpi_wakeup_address) {
+ if (!acpi_wakeup_address)
return -EFAULT;
- }
- acpi_set_firmware_waking_vector(
- (acpi_physical_address)acpi_wakeup_address);
+ acpi_set_firmware_waking_vector(acpi_wakeup_address);
}
ACPI_FLUSH_CPU_CACHE();
@@ -186,6 +185,14 @@ static int acpi_pm_prepare(void)
return error;
}
+static int find_powerf_dev(struct device *dev, void *data)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ const char *hid = acpi_device_hid(device);
+
+ return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
+}
+
/**
* acpi_pm_finish - Instruct the platform to leave a sleep state.
*
@@ -194,6 +201,7 @@ static int acpi_pm_prepare(void)
*/
static void acpi_pm_finish(void)
{
+ struct device *pwr_btn_dev;
u32 acpi_state = acpi_target_sleep_state;
acpi_ec_unblock_transactions();
@@ -211,6 +219,23 @@ static void acpi_pm_finish(void)
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
acpi_target_sleep_state = ACPI_STATE_S0;
+
+ /* If we were woken with the fixed power button, provide a small
+ * hint to userspace in the form of a wakeup event on the fixed power
+ * button device (if it can be found).
+ *
+ * We delay the event generation til now, as the PM layer requires
+ * timekeeping to be running before we generate events. */
+ if (!pwr_btn_event_pending)
+ return;
+
+ pwr_btn_event_pending = false;
+ pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
+ find_powerf_dev);
+ if (pwr_btn_dev) {
+ pm_wakeup_event(pwr_btn_dev, 0);
+ put_device(pwr_btn_dev);
+ }
}
/**
@@ -300,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/* ACPI 3.0 spec (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
* POWER_BUTTON event should not reach userspace ]
+ *
+ * However, we do generate a small hint for userspace in the form of
+ * a wakeup event. We flag this condition for now and generate the
+ * event later, as we're currently too early in resume to be able to
+ * generate wakeup events.
*/
- if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
- acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
+ acpi_event_status pwr_btn_status;
+
+ acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
+
+ if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
+ acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+ /* Flag for later */
+ pwr_btn_event_pending = true;
+ }
+ }
/*
* Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -732,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
* can wake the system. _S0W may be valid, too.
*/
if (acpi_target_sleep_state == ACPI_STATE_S0 ||
- (device_may_wakeup(dev) &&
- adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+ (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+ adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
acpi_status status;
acpi_method[3] = 'W';
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 9f66181c814e..240a24400976 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
int result = 0;
- if (!strncmp(val, "enable", strlen("enable") - 1)) {
+ if (!strncmp(val, "enable", strlen("enable"))) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 0);
if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
goto exit;
}
- if (!strncmp(val, "disable", strlen("disable") - 1)) {
+ if (!strncmp(val, "disable", strlen("disable"))) {
int name = 0;
result = acpi_debug_trace((char *)&name, trace_debug_level,
trace_debug_layer, 0);
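
The fix above removes an off-by-one: strlen("enable") - 1 made strncmp() compare only "enabl", so any string beginning with those five characters was accepted. A quick user-space check of the two lengths:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *val = "enablx";

        /* old check: only 5 characters compared, "enablx" wrongly matches */
        printf("old: %d\n", !strncmp(val, "enable", strlen("enable") - 1));
        /* new check: all 6 characters compared, "enablx" is rejected */
        printf("new: %d\n", !strncmp(val, "enable", strlen("enable")));
        return 0;
}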
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9577b6fa2650..1e0a9e17c31d 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
+ if (!video->cap._DOS)
+ return 0;
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
return -EINVAL;
@@ -1687,10 +1689,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
set_bit(KEY_DISPLAY_OFF, input->keybit);
- error = input_register_device(input);
- if (error)
- goto err_stop_video;
-
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
@@ -1701,12 +1699,16 @@ static int acpi_video_bus_add(struct acpi_device *device)
video->pm_nb.priority = 0;
error = register_pm_notifier(&video->pm_nb);
if (error)
- goto err_unregister_input_dev;
+ goto err_stop_video;
+
+ error = input_register_device(input);
+ if (error)
+ goto err_unregister_pm_notifier;
return 0;
- err_unregister_input_dev:
- input_unregister_device(input);
+ err_unregister_pm_notifier:
+ unregister_pm_notifier(&video->pm_nb);
err_stop_video:
acpi_video_bus_stop_devices(video);
err_free_input_dev:
@@ -1743,9 +1745,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
return 0;
}
+static int __init is_i740(struct pci_dev *dev)
+{
+ if (dev->device == 0x00D1)
+ return 1;
+ if (dev->device == 0x7000)
+ return 1;
+ return 0;
+}
+
static int __init intel_opregion_present(void)
{
-#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
+ int opregion = 0;
struct pci_dev *dev = NULL;
u32 address;
@@ -1754,13 +1765,15 @@ static int __init intel_opregion_present(void)
continue;
if (dev->vendor != PCI_VENDOR_ID_INTEL)
continue;
+ /* We don't want to poke around undefined i740 registers */
+ if (is_i740(dev))
+ continue;
pci_read_config_dword(dev, 0xfc, &address);
if (!address)
continue;
- return 1;
+ opregion = 1;
}
-#endif
- return 0;
+ return opregion;
}
int acpi_video_register(void)
diff --git a/drivers/amba/Makefile b/drivers/amba/Makefile
index 40fe74097be2..66e81c2f1e3c 100644
--- a/drivers/amba/Makefile
+++ b/drivers/amba/Makefile
@@ -1,2 +1,2 @@
-obj-y += bus.o
-
+obj-$(CONFIG_ARM_AMBA) += bus.o
+obj-$(CONFIG_TEGRA_AHB) += tegra-ahb.o
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
new file mode 100644
index 000000000000..aa0b1f160528
--- /dev/null
+++ b/drivers/amba/tegra-ahb.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Jay Cheng <jacheng@nvidia.com>
+ * James Wylder <james.wylder@motorola.com>
+ * Benoit Goby <benoit@android.com>
+ * Colin Cross <ccross@android.com>
+ * Hiroshi DOYU <hdoyu@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_NAME "tegra-ahb"
+
+#define AHB_ARBITRATION_DISABLE 0x00
+#define AHB_ARBITRATION_PRIORITY_CTRL 0x04
+#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
+#define PRIORITY_SELECT_USB BIT(6)
+#define PRIORITY_SELECT_USB2 BIT(18)
+#define PRIORITY_SELECT_USB3 BIT(17)
+
+#define AHB_GIZMO_AHB_MEM 0x0c
+#define ENB_FAST_REARBITRATE BIT(2)
+#define DONT_SPLIT_AHB_WR BIT(7)
+
+#define AHB_GIZMO_APB_DMA 0x10
+#define AHB_GIZMO_IDE 0x18
+#define AHB_GIZMO_USB 0x1c
+#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x20
+#define AHB_GIZMO_CPU_AHB_BRIDGE 0x24
+#define AHB_GIZMO_COP_AHB_BRIDGE 0x28
+#define AHB_GIZMO_XBAR_APB_CTLR 0x2c
+#define AHB_GIZMO_VCP_AHB_BRIDGE 0x30
+#define AHB_GIZMO_NAND 0x3c
+#define AHB_GIZMO_SDMMC4 0x44
+#define AHB_GIZMO_XIO 0x48
+#define AHB_GIZMO_BSEV 0x60
+#define AHB_GIZMO_BSEA 0x70
+#define AHB_GIZMO_NOR 0x74
+#define AHB_GIZMO_USB2 0x78
+#define AHB_GIZMO_USB3 0x7c
+#define IMMEDIATE BIT(18)
+
+#define AHB_GIZMO_SDMMC1 0x80
+#define AHB_GIZMO_SDMMC2 0x84
+#define AHB_GIZMO_SDMMC3 0x88
+#define AHB_MEM_PREFETCH_CFG_X 0xd8
+#define AHB_ARBITRATION_XBAR_CTRL 0xdc
+#define AHB_MEM_PREFETCH_CFG3 0xe0
+#define AHB_MEM_PREFETCH_CFG4 0xe4
+#define AHB_MEM_PREFETCH_CFG1 0xec
+#define AHB_MEM_PREFETCH_CFG2 0xf0
+#define PREFETCH_ENB BIT(31)
+#define MST_ID(x) (((x) & 0x1f) << 26)
+#define AHBDMA_MST_ID MST_ID(5)
+#define USB_MST_ID MST_ID(6)
+#define USB2_MST_ID MST_ID(18)
+#define USB3_MST_ID MST_ID(17)
+#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
+#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
+
+#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xf8
+
+#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
+
+static struct platform_driver tegra_ahb_driver;
+
+static const u32 tegra_ahb_gizmo[] = {
+ AHB_ARBITRATION_DISABLE,
+ AHB_ARBITRATION_PRIORITY_CTRL,
+ AHB_GIZMO_AHB_MEM,
+ AHB_GIZMO_APB_DMA,
+ AHB_GIZMO_IDE,
+ AHB_GIZMO_USB,
+ AHB_GIZMO_AHB_XBAR_BRIDGE,
+ AHB_GIZMO_CPU_AHB_BRIDGE,
+ AHB_GIZMO_COP_AHB_BRIDGE,
+ AHB_GIZMO_XBAR_APB_CTLR,
+ AHB_GIZMO_VCP_AHB_BRIDGE,
+ AHB_GIZMO_NAND,
+ AHB_GIZMO_SDMMC4,
+ AHB_GIZMO_XIO,
+ AHB_GIZMO_BSEV,
+ AHB_GIZMO_BSEA,
+ AHB_GIZMO_NOR,
+ AHB_GIZMO_USB2,
+ AHB_GIZMO_USB3,
+ AHB_GIZMO_SDMMC1,
+ AHB_GIZMO_SDMMC2,
+ AHB_GIZMO_SDMMC3,
+ AHB_MEM_PREFETCH_CFG_X,
+ AHB_ARBITRATION_XBAR_CTRL,
+ AHB_MEM_PREFETCH_CFG3,
+ AHB_MEM_PREFETCH_CFG4,
+ AHB_MEM_PREFETCH_CFG1,
+ AHB_MEM_PREFETCH_CFG2,
+ AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID,
+};
+
+struct tegra_ahb {
+ void __iomem *regs;
+ struct device *dev;
+ u32 ctx[0];
+};
+
+static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
+{
+ return readl(ahb->regs + offset);
+}
+
+static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
+{
+ writel(value, ahb->regs + offset);
+}
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
+{
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+ struct device_node *dn = data;
+
+ return (ahb->dev->of_node == dn) ? 1 : 0;
+}
+
+int tegra_ahb_enable_smmu(struct device_node *dn)
+{
+ struct device *dev;
+ u32 val;
+ struct tegra_ahb *ahb;
+
+ dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn,
+ tegra_ahb_match_by_smmu);
+ if (!dev)
+ return -EPROBE_DEFER;
+ ahb = dev_get_drvdata(dev);
+ val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
+ val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
+ gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ahb_enable_smmu);
+#endif
+
+static int tegra_ahb_suspend(struct device *dev)
+{
+ int i;
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+ ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]);
+ return 0;
+}
+
+static int tegra_ahb_resume(struct device *dev)
+{
+ int i;
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+ gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
+ tegra_ahb_suspend,
+ tegra_ahb_resume, NULL);
+
+static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
+{
+ u32 val;
+
+ val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM);
+ val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
+ gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB2);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB2);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB3);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB3);
+
+ val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL);
+ val |= PRIORITY_SELECT_USB |
+ PRIORITY_SELECT_USB2 |
+ PRIORITY_SELECT_USB3 |
+ AHB_PRIORITY_WEIGHT(7);
+ gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ AHBDMA_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB3_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB2_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
+}
+
+static int __devinit tegra_ahb_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct tegra_ahb *ahb;
+ size_t bytes;
+
+ bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo);
+ ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
+ if (!ahb)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (!ahb->regs)
+ return -EBUSY;
+
+ ahb->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ahb);
+ tegra_ahb_gizmo_init(ahb);
+ return 0;
+}
+
+static int __devexit tegra_ahb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
+ { .compatible = "nvidia,tegra30-ahb", },
+ { .compatible = "nvidia,tegra20-ahb", },
+ {},
+};
+
+static struct platform_driver tegra_ahb_driver = {
+ .probe = tegra_ahb_probe,
+ .remove = __devexit_p(tegra_ahb_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_ahb_of_match,
+ .pm = &tegra_ahb_pm,
+ },
+};
+module_platform_driver(tegra_ahb_driver);
+
+MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_DESCRIPTION("Tegra AHB driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6bdedd7cca2c..2be8ef1d3093 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -416,6 +416,15 @@ config PATA_EFAR
If unsure, say N.
+config PATA_EP93XX
+ tristate "Cirrus Logic EP93xx PATA support"
+ depends on ARCH_EP93XX
+ help
+ This option enables support for the PATA controller in
+ the Cirrus Logic EP9312 and EP9315 ARM CPUs.
+
+ If unsure, say N.
+
config PATA_HPT366
tristate "HPT 366/368 PATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 6ece5b7231a3..a454a139b1d2 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_PATA_CS5535) += pata_cs5535.o
obj-$(CONFIG_PATA_CS5536) += pata_cs5536.o
obj-$(CONFIG_PATA_CYPRESS) += pata_cypress.o
obj-$(CONFIG_PATA_EFAR) += pata_efar.o
+obj-$(CONFIG_PATA_EP93XX) += pata_ep93xx.o
obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 7df56ec31819..aae115600b74 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -177,7 +177,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
return -ENODEV;
- if (id->driver_data & ATA_GEN_INTEL_IDER)
+ if ((id->driver_data & ATA_GEN_INTEL_IDER) && !all_generic_ide)
if (!is_intel_ider(dev))
return -ENODEV;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 7857e8fd0a3e..3c809bfbccf5 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1554,6 +1554,39 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
return false;
}
+static int prefer_ms_hyperv = 1;
+module_param(prefer_ms_hyperv, int, 0);
+
+static void piix_ignore_devices_quirk(struct ata_host *host)
+{
+#if IS_ENABLED(CONFIG_HYPERV_STORAGE)
+ static const struct dmi_system_id ignore_hyperv[] = {
+ {
+ /* On Hyper-V hypervisors the disks are exposed on
+ * both the emulated SATA controller and on the
+ * paravirtualised drivers. The CD/DVD devices
+ * are only exposed on the emulated controller.
+ * Request we ignore ATA devices on this host.
+ */
+ .ident = "Hyper-V Virtual Machine",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ },
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *dmi = dmi_first_match(ignore_hyperv);
+
+ if (dmi && prefer_ms_hyperv) {
+ host->flags |= ATA_HOST_IGNORE_ATA;
+ dev_info(host->dev, "%s detected, ATA device ignore set\n",
+ dmi->ident);
+ }
+#endif
+}
+
/**
* piix_init_one - Register PIIX ATA PCI device with kernel services
* @pdev: PCI device to register
@@ -1669,6 +1702,9 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
}
host->flags |= ATA_HOST_PARALLEL_SCAN;
+ /* Allow hosts to specify device types to ignore when scanning. */
+ piix_ignore_devices_quirk(host);
+
pci_set_master(pdev);
return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 23763a1ec570..cece3a4d11ea 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1973,6 +1973,12 @@ retry:
if (class == ATA_DEV_ATA) {
if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
goto err_out;
+ if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
+ ata_id_is_ata(id)) {
+ ata_dev_dbg(dev,
+ "host indicates ignore ATA devices, ignored\n");
+ return -ENOENT;
+ }
} else {
if (ata_id_is_ata(id))
goto err_out;
@@ -4051,6 +4057,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
{ "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
+ { "2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
/* Odd clown on sil3726/4726 PMPs */
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index d1fbd59ead16..6d53cf9b3b6e 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2047,6 +2047,26 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
}
/**
+ * ata_eh_worth_retry - analyze error and decide whether to retry
+ * @qc: qc to possibly retry
+ *
+ * Look at the cause of the error and decide if a retry
+ * might be useful or not. We don't want to retry media errors
+ * because the drive itself has probably already taken 10-30 seconds
+ * doing its own internal retries before reporting the failure.
+ */
+static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
+{
+ if (qc->err_mask & AC_ERR_MEDIA)
+ return 0; /* don't retry media errors */
+ if (qc->flags & ATA_QCFLAG_IO)
+ return 1; /* otherwise retry anything from fs stack */
+ if (qc->err_mask & AC_ERR_INVALID)
+ return 0; /* don't retry these */
+ return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
+}
+
+/**
* ata_eh_link_autopsy - analyze error and determine recovery action
* @link: host link to perform autopsy on
*
@@ -2120,9 +2140,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
/* determine whether the command is worth retrying */
- if (qc->flags & ATA_QCFLAG_IO ||
- (!(qc->err_mask & AC_ERR_INVALID) &&
- qc->err_mask != AC_ERR_DEV))
+ if (ata_eh_worth_retry(qc))
qc->flags |= ATA_QCFLAG_RETRY;
/* accumulate error info */
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 3239517f4d90..ac6a5beb28f3 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
* Arasan Compact Flash host controller source file
*
* Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = {
module_platform_driver(arasan_cf_driver);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
new file mode 100644
index 000000000000..6ef2e3741f76
--- /dev/null
+++ b/drivers/ata/pata_ep93xx.c
@@ -0,0 +1,1044 @@
+/*
+ * EP93XX PATA controller driver.
+ *
+ * Copyright (c) 2012, Metasoft s.c.
+ * Rafal Prylowski <prylowski@metasoft.pl>
+ *
+ * Based on pata_scc.c, pata_icside.c and on earlier version of EP93XX
+ * PATA driver by Lennert Buytenhek and Alessandro Zummo.
+ * Read/Write timings, resource management and other improvements
+ * from driver by Joao Ramos and Bartlomiej Zolnierkiewicz.
+ * DMA engine support based on spi-ep93xx.c by Mika Westerberg.
+ *
+ * Original copyrights:
+ *
+ * Support for Cirrus Logic's EP93xx (EP9312, EP9315) CPUs
+ * PATA host controller driver.
+ *
+ * Copyright (c) 2009, Bartlomiej Zolnierkiewicz
+ *
+ * Heavily based on the ep93xx-ide.c driver:
+ *
+ * Copyright (c) 2009, Joao Ramos <joao.ramos@inov.pt>
+ * INESC Inovacao (INOV)
+ *
+ * EP93XX PATA controller driver.
+ * Copyright (C) 2007 Lennert Buytenhek <buytenh@wantstofly.org>
+ *
+ * An ATA driver for the Cirrus Logic EP93xx PATA controller.
+ *
+ * Based on an earlier version by Alessandro Zummo, which is:
+ * Copyright (C) 2006 Tower Technologies
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/ktime.h>
+
+#include <mach/dma.h>
+#include <mach/platform.h>
+
+#define DRV_NAME "ep93xx-ide"
+#define DRV_VERSION "1.0"
+
+enum {
+ /* IDE Control Register */
+ IDECTRL = 0x00,
+ IDECTRL_CS0N = (1 << 0),
+ IDECTRL_CS1N = (1 << 1),
+ IDECTRL_DIORN = (1 << 5),
+ IDECTRL_DIOWN = (1 << 6),
+ IDECTRL_INTRQ = (1 << 9),
+ IDECTRL_IORDY = (1 << 10),
+ /*
+ * the device IDE register to be accessed is selected through
+ * IDECTRL register's specific bitfields 'DA', 'CS1N' and 'CS0N':
+ * b4 b3 b2 b1 b0
+ * A2 A1 A0 CS1N CS0N
+ * the values filled into this structure allow the value to be ORed
+ * directly into the IDECTRL register, giving the A[2:0] and
+ * CS1N/CS0N values for each IDE register.
+ * The values correspond to the transformation:
+ * ((real IDE address) << 2) | CS1N value << 1 | CS0N value
+ */
+ IDECTRL_ADDR_CMD = 0 + 2, /* CS1 */
+ IDECTRL_ADDR_DATA = (ATA_REG_DATA << 2) + 2,
+ IDECTRL_ADDR_ERROR = (ATA_REG_ERR << 2) + 2,
+ IDECTRL_ADDR_FEATURE = (ATA_REG_FEATURE << 2) + 2,
+ IDECTRL_ADDR_NSECT = (ATA_REG_NSECT << 2) + 2,
+ IDECTRL_ADDR_LBAL = (ATA_REG_LBAL << 2) + 2,
+ IDECTRL_ADDR_LBAM = (ATA_REG_LBAM << 2) + 2,
+ IDECTRL_ADDR_LBAH = (ATA_REG_LBAH << 2) + 2,
+ IDECTRL_ADDR_DEVICE = (ATA_REG_DEVICE << 2) + 2,
+ IDECTRL_ADDR_STATUS = (ATA_REG_STATUS << 2) + 2,
+ IDECTRL_ADDR_COMMAND = (ATA_REG_CMD << 2) + 2,
+ IDECTRL_ADDR_ALTSTATUS = (0x06 << 2) + 1, /* CS0 */
+ IDECTRL_ADDR_CTL = (0x06 << 2) + 1, /* CS0 */
+
+ /* IDE Configuration Register */
+ IDECFG = 0x04,
+ IDECFG_IDEEN = (1 << 0),
+ IDECFG_PIO = (1 << 1),
+ IDECFG_MDMA = (1 << 2),
+ IDECFG_UDMA = (1 << 3),
+ IDECFG_MODE_SHIFT = 4,
+ IDECFG_MODE_MASK = (0xf << 4),
+ IDECFG_WST_SHIFT = 8,
+ IDECFG_WST_MASK = (0x3 << 8),
+
+ /* MDMA Operation Register */
+ IDEMDMAOP = 0x08,
+
+ /* UDMA Operation Register */
+ IDEUDMAOP = 0x0c,
+ IDEUDMAOP_UEN = (1 << 0),
+ IDEUDMAOP_RWOP = (1 << 1),
+
+ /* PIO/MDMA/UDMA Data Registers */
+ IDEDATAOUT = 0x10,
+ IDEDATAIN = 0x14,
+ IDEMDMADATAOUT = 0x18,
+ IDEMDMADATAIN = 0x1c,
+ IDEUDMADATAOUT = 0x20,
+ IDEUDMADATAIN = 0x24,
+
+ /* UDMA Status Register */
+ IDEUDMASTS = 0x28,
+ IDEUDMASTS_DMAIDE = (1 << 16),
+ IDEUDMASTS_INTIDE = (1 << 17),
+ IDEUDMASTS_SBUSY = (1 << 18),
+ IDEUDMASTS_NDO = (1 << 24),
+ IDEUDMASTS_NDI = (1 << 25),
+ IDEUDMASTS_N4X = (1 << 26),
+
+ /* UDMA Debug Status Register */
+ IDEUDMADEBUG = 0x2c,
+};
+
+struct ep93xx_pata_data {
+ const struct platform_device *pdev;
+ void __iomem *ide_base;
+ struct ata_timing t;
+ bool iordy;
+
+ unsigned long udma_in_phys;
+ unsigned long udma_out_phys;
+
+ struct dma_chan *dma_rx_channel;
+ struct ep93xx_dma_data dma_rx_data;
+ struct dma_chan *dma_tx_channel;
+ struct ep93xx_dma_data dma_tx_data;
+};
+
+static void ep93xx_pata_clear_regs(void __iomem *base)
+{
+ writel(IDECTRL_CS0N | IDECTRL_CS1N | IDECTRL_DIORN |
+ IDECTRL_DIOWN, base + IDECTRL);
+
+ writel(0, base + IDECFG);
+ writel(0, base + IDEMDMAOP);
+ writel(0, base + IDEUDMAOP);
+ writel(0, base + IDEDATAOUT);
+ writel(0, base + IDEDATAIN);
+ writel(0, base + IDEMDMADATAOUT);
+ writel(0, base + IDEMDMADATAIN);
+ writel(0, base + IDEUDMADATAOUT);
+ writel(0, base + IDEUDMADATAIN);
+ writel(0, base + IDEUDMADEBUG);
+}
+
+static bool ep93xx_pata_check_iordy(void __iomem *base)
+{
+ return !!(readl(base + IDECTRL) & IDECTRL_IORDY);
+}
+
+/*
+ * According to EP93xx User's Guide, WST field of IDECFG specifies number
+ * of HCLK cycles to hold the data bus after a PIO write operation.
+ * It should be programmed to guarantee following delays:
+ *
+ * PIO Mode [ns]
+ * 0 30
+ * 1 20
+ * 2 15
+ * 3 10
+ * 4 5
+ *
+ * Maximum possible value for HCLK is 100MHz.
+ */
+static int ep93xx_pata_get_wst(int pio_mode)
+{
+ int val;
+
+ if (pio_mode == 0)
+ val = 3;
+ else if (pio_mode < 3)
+ val = 2;
+ else
+ val = 1;
+
+ return val << IDECFG_WST_SHIFT;
+}
+
+static void ep93xx_pata_enable_pio(void __iomem *base, int pio_mode)
+{
+ writel(IDECFG_IDEEN | IDECFG_PIO |
+ ep93xx_pata_get_wst(pio_mode) |
+ (pio_mode << IDECFG_MODE_SHIFT), base + IDECFG);
+}
+
+/*
+ * Based on delay loop found in mach-pxa/mp900.c.
+ *
+ * Single iteration should take 5 cpu cycles. This is 25ns assuming the
+ * fastest ep93xx cpu speed (200MHz) and is better optimized for PIO4 timings
+ * than e.g. 20ns.
+ */
+static void ep93xx_pata_delay(unsigned long count)
+{
+ __asm__ volatile (
+ "0:\n"
+ "mov r0, r0\n"
+ "subs %0, %1, #1\n"
+ "bge 0b\n"
+ : "=r" (count)
+ : "0" (count)
+ );
+}
+
+static unsigned long ep93xx_pata_wait_for_iordy(void __iomem *base,
+ unsigned long t2)
+{
+ /*
+ * According to ATA specification, IORDY pin can be first sampled
+ * tA = 35ns after activation of DIOR-/DIOW-. Maximum IORDY pulse
+ * width is tB = 1250ns.
+ *
+ * We are already t2 delay loop iterations after activation of
+ * DIOR-/DIOW-, so we set timeout to (1250 + 35) / 25 - t2 additional
+ * delay loop iterations.
+ */
+ unsigned long start = (1250 + 35) / 25 - t2;
+ unsigned long counter = start;
+
+ while (!ep93xx_pata_check_iordy(base) && counter--)
+ ep93xx_pata_delay(1);
+ return start - counter;
+}
+
+/* common part at start of ep93xx_pata_read/write() */
+static void ep93xx_pata_rw_begin(void __iomem *base, unsigned long addr,
+ unsigned long t1)
+{
+ writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
+ ep93xx_pata_delay(t1);
+}
+
+/* common part at end of ep93xx_pata_read/write() */
+static void ep93xx_pata_rw_end(void __iomem *base, unsigned long addr,
+ bool iordy, unsigned long t0, unsigned long t2,
+ unsigned long t2i)
+{
+ ep93xx_pata_delay(t2);
+ /* lengthen t2 if needed */
+ if (iordy)
+ t2 += ep93xx_pata_wait_for_iordy(base, t2);
+ writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
+ if (t0 > t2 && t0 - t2 > t2i)
+ ep93xx_pata_delay(t0 - t2);
+ else
+ ep93xx_pata_delay(t2i);
+}
+
+static u16 ep93xx_pata_read(struct ep93xx_pata_data *drv_data,
+ unsigned long addr,
+ bool reg)
+{
+ void __iomem *base = drv_data->ide_base;
+ const struct ata_timing *t = &drv_data->t;
+ unsigned long t0 = reg ? t->cyc8b : t->cycle;
+ unsigned long t2 = reg ? t->act8b : t->active;
+ unsigned long t2i = reg ? t->rec8b : t->recover;
+
+ ep93xx_pata_rw_begin(base, addr, t->setup);
+ writel(IDECTRL_DIOWN | addr, base + IDECTRL);
+ /*
+ * The IDEDATAIN register is loaded from the DD pins at the positive
+ * edge of the DIORN signal. (EP93xx UG p27-14)
+ */
+ ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
+ return readl(base + IDEDATAIN);
+}
+
+/* IDE register read */
+static u16 ep93xx_pata_read_reg(struct ep93xx_pata_data *drv_data,
+ unsigned long addr)
+{
+ return ep93xx_pata_read(drv_data, addr, true);
+}
+
+/* PIO data read */
+static u16 ep93xx_pata_read_data(struct ep93xx_pata_data *drv_data,
+ unsigned long addr)
+{
+ return ep93xx_pata_read(drv_data, addr, false);
+}
+
+static void ep93xx_pata_write(struct ep93xx_pata_data *drv_data,
+ u16 value, unsigned long addr,
+ bool reg)
+{
+ void __iomem *base = drv_data->ide_base;
+ const struct ata_timing *t = &drv_data->t;
+ unsigned long t0 = reg ? t->cyc8b : t->cycle;
+ unsigned long t2 = reg ? t->act8b : t->active;
+ unsigned long t2i = reg ? t->rec8b : t->recover;
+
+ ep93xx_pata_rw_begin(base, addr, t->setup);
+ /*
+ * Value from IDEDATAOUT register is driven onto the DD pins when
+ * DIOWN is low. (EP93xx UG p27-13)
+ */
+ writel(value, base + IDEDATAOUT);
+ writel(IDECTRL_DIORN | addr, base + IDECTRL);
+ ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
+}
+
+/* IDE register write */
+static void ep93xx_pata_write_reg(struct ep93xx_pata_data *drv_data,
+ u16 value, unsigned long addr)
+{
+ ep93xx_pata_write(drv_data, value, addr, true);
+}
+
+/* PIO data write */
+static void ep93xx_pata_write_data(struct ep93xx_pata_data *drv_data,
+ u16 value, unsigned long addr)
+{
+ ep93xx_pata_write(drv_data, value, addr, false);
+}
+
+static void ep93xx_pata_set_piomode(struct ata_port *ap,
+ struct ata_device *adev)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+ struct ata_device *pair = ata_dev_pair(adev);
+ /*
+ * Calculate timings for the delay loop, assuming ep93xx cpu speed
+ * is 200MHz (maximum possible for ep93xx). If actual cpu speed is
+ * slower, we will wait a bit longer in each delay.
+ * Additional division of cpu speed by 5, because single iteration
+ * of our delay loop takes 5 cpu cycles (25ns).
+ */
+ unsigned long T = 1000000 / (200 / 5);
+
+ ata_timing_compute(adev, adev->pio_mode, &drv_data->t, T, 0);
+ if (pair && pair->pio_mode) {
+ struct ata_timing t;
+ ata_timing_compute(pair, pair->pio_mode, &t, T, 0);
+ ata_timing_merge(&t, &drv_data->t, &drv_data->t,
+ ATA_TIMING_SETUP | ATA_TIMING_8BIT);
+ }
+ drv_data->iordy = ata_pio_need_iordy(adev);
+
+ ep93xx_pata_enable_pio(drv_data->ide_base,
+ adev->pio_mode - XFER_PIO_0);
+}
+
+/* Note: original code is ata_sff_check_status */
+static u8 ep93xx_pata_check_status(struct ata_port *ap)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+ return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_STATUS);
+}
+
+static u8 ep93xx_pata_check_altstatus(struct ata_port *ap)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+ return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_ALTSTATUS);
+}
+
+/* Note: original code is ata_sff_tf_load */
+static void ep93xx_pata_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ if (tf->ctl != ap->last_ctl) {
+ ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+ ep93xx_pata_write_reg(drv_data, tf->hob_feature,
+ IDECTRL_ADDR_FEATURE);
+ ep93xx_pata_write_reg(drv_data, tf->hob_nsect,
+ IDECTRL_ADDR_NSECT);
+ ep93xx_pata_write_reg(drv_data, tf->hob_lbal,
+ IDECTRL_ADDR_LBAL);
+ ep93xx_pata_write_reg(drv_data, tf->hob_lbam,
+ IDECTRL_ADDR_LBAM);
+ ep93xx_pata_write_reg(drv_data, tf->hob_lbah,
+ IDECTRL_ADDR_LBAH);
+ }
+
+ if (is_addr) {
+ ep93xx_pata_write_reg(drv_data, tf->feature,
+ IDECTRL_ADDR_FEATURE);
+ ep93xx_pata_write_reg(drv_data, tf->nsect, IDECTRL_ADDR_NSECT);
+ ep93xx_pata_write_reg(drv_data, tf->lbal, IDECTRL_ADDR_LBAL);
+ ep93xx_pata_write_reg(drv_data, tf->lbam, IDECTRL_ADDR_LBAM);
+ ep93xx_pata_write_reg(drv_data, tf->lbah, IDECTRL_ADDR_LBAH);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ ep93xx_pata_write_reg(drv_data, tf->device,
+ IDECTRL_ADDR_DEVICE);
+
+ ata_wait_idle(ap);
+}
+
+/* Note: original code is ata_sff_tf_read */
+static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+ tf->command = ep93xx_pata_check_status(ap);
+ tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
+ tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
+ tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
+ tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM);
+ tf->lbah = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAH);
+ tf->device = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DEVICE);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ ep93xx_pata_write_reg(drv_data, tf->ctl | ATA_HOB,
+ IDECTRL_ADDR_CTL);
+ tf->hob_feature = ep93xx_pata_read_reg(drv_data,
+ IDECTRL_ADDR_FEATURE);
+ tf->hob_nsect = ep93xx_pata_read_reg(drv_data,
+ IDECTRL_ADDR_NSECT);
+ tf->hob_lbal = ep93xx_pata_read_reg(drv_data,
+ IDECTRL_ADDR_LBAL);
+ tf->hob_lbam = ep93xx_pata_read_reg(drv_data,
+ IDECTRL_ADDR_LBAM);
+ tf->hob_lbah = ep93xx_pata_read_reg(drv_data,
+ IDECTRL_ADDR_LBAH);
+ ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+/* Note: original code is ata_sff_exec_command */
+static void ep93xx_pata_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+ ep93xx_pata_write_reg(drv_data, tf->command,
+ IDECTRL_ADDR_COMMAND);
+ ata_sff_pause(ap);
+}
+
+/* Note: original code is ata_sff_dev_select */
+static void ep93xx_pata_dev_select(struct ata_port *ap, unsigned int device)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+ u8 tmp = ATA_DEVICE_OBS;
+
+ if (device != 0)
+ tmp |= ATA_DEV1;
+
+ ep93xx_pata_write_reg(drv_data, tmp, IDECTRL_ADDR_DEVICE);
+ ata_sff_pause(ap); /* needed; also flushes, for mmio */
+}
+
+/* Note: original code is ata_sff_set_devctl */
+static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+ ep93xx_pata_write_reg(drv_data, ctl, IDECTRL_ADDR_CTL);
+}
+
+/* Note: original code is ata_sff_data_xfer */
+static unsigned int ep93xx_pata_data_xfer(struct ata_device *adev,
+ unsigned char *buf,
+ unsigned int buflen, int rw)
+{
+ struct ata_port *ap = adev->link->ap;
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+ u16 *data = (u16 *)buf;
+ unsigned int words = buflen >> 1;
+
+ /* Transfer multiple of 2 bytes */
+ while (words--)
+ if (rw == READ)
+ *data++ = cpu_to_le16(
+ ep93xx_pata_read_data(
+ drv_data, IDECTRL_ADDR_DATA));
+ else
+ ep93xx_pata_write_data(drv_data, le16_to_cpu(*data++),
+ IDECTRL_ADDR_DATA);
+
+ /* Transfer trailing 1 byte, if any. */
+ if (unlikely(buflen & 0x01)) {
+ unsigned char pad[2] = { };
+
+ buf += buflen - 1;
+
+ if (rw == READ) {
+ *pad = cpu_to_le16(
+ ep93xx_pata_read_data(
+ drv_data, IDECTRL_ADDR_DATA));
+ *buf = pad[0];
+ } else {
+ pad[0] = *buf;
+ ep93xx_pata_write_data(drv_data, le16_to_cpu(*pad),
+ IDECTRL_ADDR_DATA);
+ }
+ words++;
+ }
+
+ return words << 1;
+}
+
+/* Note: original code is ata_devchk */
+static bool ep93xx_pata_device_is_present(struct ata_port *ap,
+ unsigned int device)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+ u8 nsect, lbal;
+
+ ap->ops->sff_dev_select(ap, device);
+
+ ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT);
+ ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL);
+
+ ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_NSECT);
+ ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_LBAL);
+
+ ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT);
+ ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL);
+
+ nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
+ lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
+
+ if ((nsect == 0x55) && (lbal == 0xaa))
+ return true;
+
+ return false;
+}
+
+/* Note: original code is ata_sff_wait_after_reset */
+static int ep93xx_pata_wait_after_reset(struct ata_link *link,
+ unsigned int devmask,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+ unsigned int dev0 = devmask & (1 << 0);
+ unsigned int dev1 = devmask & (1 << 1);
+ int rc, ret = 0;
+
+ ata_msleep(ap, ATA_WAIT_AFTER_RESET);
+
+ /* always check readiness of the master device */
+ rc = ata_sff_wait_ready(link, deadline);
+ /*
+ * -ENODEV means the odd clown forgot the D7 pulldown resistor
+ * and TF status is 0xff, bail out on it too.
+ */
+ if (rc)
+ return rc;
+
+ /*
+ * if device 1 was found in ata_devchk, wait for register
+ * access briefly, then wait for BSY to clear.
+ */
+ if (dev1) {
+ int i;
+
+ ap->ops->sff_dev_select(ap, 1);
+
+ /*
+ * Wait for register access. Some ATAPI devices fail
+ * to set nsect/lbal after reset, so don't waste too
+ * much time on it. We're gonna wait for !BSY anyway.
+ */
+ for (i = 0; i < 2; i++) {
+ u8 nsect, lbal;
+
+ nsect = ep93xx_pata_read_reg(drv_data,
+ IDECTRL_ADDR_NSECT);
+ lbal = ep93xx_pata_read_reg(drv_data,
+ IDECTRL_ADDR_LBAL);
+ if (nsect == 1 && lbal == 1)
+ break;
+ msleep(50); /* give drive a breather */
+ }
+
+ rc = ata_sff_wait_ready(link, deadline);
+ if (rc) {
+ if (rc != -ENODEV)
+ return rc;
+ ret = rc;
+ }
+ }
+ /* is all this really necessary? */
+ ap->ops->sff_dev_select(ap, 0);
+ if (dev1)
+ ap->ops->sff_dev_select(ap, 1);
+ if (dev0)
+ ap->ops->sff_dev_select(ap, 0);
+
+ return ret;
+}
+
+/* Note: original code is ata_bus_softreset */
+static int ep93xx_pata_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ unsigned long deadline)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+ ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
+ udelay(20); /* FIXME: flush */
+ ep93xx_pata_write_reg(drv_data, ap->ctl | ATA_SRST, IDECTRL_ADDR_CTL);
+ udelay(20); /* FIXME: flush */
+ ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
+ ap->last_ctl = ap->ctl;
+
+ return ep93xx_pata_wait_after_reset(&ap->link, devmask, deadline);
+}
+
+static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data)
+{
+ if (drv_data->dma_rx_channel) {
+ dma_release_channel(drv_data->dma_rx_channel);
+ drv_data->dma_rx_channel = NULL;
+ }
+ if (drv_data->dma_tx_channel) {
+ dma_release_channel(drv_data->dma_tx_channel);
+ drv_data->dma_tx_channel = NULL;
+ }
+}
+
+static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ if (ep93xx_dma_chan_is_m2p(chan))
+ return false;
+
+ chan->private = filter_param;
+ return true;
+}
+
+static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
+{
+ const struct platform_device *pdev = drv_data->pdev;
+ dma_cap_mask_t mask;
+ struct dma_slave_config conf;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /*
+ * Request two channels for IDE. Another possibility would be
+ * to request only one channel, and reprogram its direction at
+ * start of new transfer.
+ */
+ drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
+ drv_data->dma_rx_data.direction = DMA_FROM_DEVICE;
+ drv_data->dma_rx_data.name = "ep93xx-pata-rx";
+ drv_data->dma_rx_channel = dma_request_channel(mask,
+ ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
+ if (!drv_data->dma_rx_channel)
+ return;
+
+ drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
+ drv_data->dma_tx_data.direction = DMA_TO_DEVICE;
+ drv_data->dma_tx_data.name = "ep93xx-pata-tx";
+ drv_data->dma_tx_channel = dma_request_channel(mask,
+ ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
+ if (!drv_data->dma_tx_channel) {
+ dma_release_channel(drv_data->dma_rx_channel);
+ return;
+ }
+
+ /* Configure receive channel direction and source address */
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = DMA_FROM_DEVICE;
+ conf.src_addr = drv_data->udma_in_phys;
+ conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
+ dev_err(&pdev->dev, "failed to configure rx dma channel\n");
+ ep93xx_pata_release_dma(drv_data);
+ return;
+ }
+
+ /* Configure transmit channel direction and destination address */
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = DMA_TO_DEVICE;
+ conf.dst_addr = drv_data->udma_out_phys;
+ conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
+ dev_err(&pdev->dev, "failed to configure tx dma channel\n");
+ ep93xx_pata_release_dma(drv_data);
+ }
+}
+
+static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
+{
+ struct dma_async_tx_descriptor *txd;
+ struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
+ void __iomem *base = drv_data->ide_base;
+ struct ata_device *adev = qc->dev;
+ u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0;
+ struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE
+ ? drv_data->dma_tx_channel : drv_data->dma_rx_channel;
+
+ txd = channel->device->device_prep_slave_sg(channel, qc->sg,
+ qc->n_elem, qc->dma_dir, DMA_CTRL_ACK, NULL);
+ if (!txd) {
+ dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n");
+ return;
+ }
+ txd->callback = NULL;
+ txd->callback_param = NULL;
+
+ if (dmaengine_submit(txd) < 0) {
+ dev_err(qc->ap->dev, "failed to submit dma transfer\n");
+ return;
+ }
+ dma_async_issue_pending(channel);
+
+ /*
+ * When enabling UDMA operation, IDEUDMAOP register needs to be
+ * programmed in three step sequence:
+ * 1) set or clear the RWOP bit,
+ * 2) perform dummy read of the register,
+ * 3) set the UEN bit.
+ */
+ writel(v, base + IDEUDMAOP);
+ readl(base + IDEUDMAOP);
+ writel(v | IDEUDMAOP_UEN, base + IDEUDMAOP);
+
+ writel(IDECFG_IDEEN | IDECFG_UDMA |
+ ((adev->xfer_mode - XFER_UDMA_0) << IDECFG_MODE_SHIFT),
+ base + IDECFG);
+}
+
+static void ep93xx_pata_dma_stop(struct ata_queued_cmd *qc)
+{
+ struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
+ void __iomem *base = drv_data->ide_base;
+
+ /* terminate all dma transfers, if not yet finished */
+ dmaengine_terminate_all(drv_data->dma_rx_channel);
+ dmaengine_terminate_all(drv_data->dma_tx_channel);
+
+ /*
+ * To properly stop IDE-DMA, IDEUDMAOP register must be cleared
+ * and IDECTRL register must be set to default value.
+ */
+ writel(0, base + IDEUDMAOP);
+ writel(readl(base + IDECTRL) | IDECTRL_DIOWN | IDECTRL_DIORN |
+ IDECTRL_CS0N | IDECTRL_CS1N, base + IDECTRL);
+
+ ep93xx_pata_enable_pio(drv_data->ide_base,
+ qc->dev->pio_mode - XFER_PIO_0);
+
+ ata_sff_dma_pause(qc->ap);
+}
+
+static void ep93xx_pata_dma_setup(struct ata_queued_cmd *qc)
+{
+ qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
+}
+
+static u8 ep93xx_pata_dma_status(struct ata_port *ap)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+ u32 val = readl(drv_data->ide_base + IDEUDMASTS);
+
+ /*
+ * UDMA Status Register bits:
+ *
+ * DMAIDE - DMA request signal from UDMA state machine,
+ * INTIDE - INT line generated by UDMA because of errors in the
+ * state machine,
+ * SBUSY - UDMA state machine busy, not in idle state,
+ * NDO - error for data-out not completed,
+ * NDI - error for data-in not completed,
+ * N4X - error for data transferred not a multiple of four
+ * 32-bit words.
+ * (EP93xx UG p27-17)
+ */
+ if (val & IDEUDMASTS_NDO || val & IDEUDMASTS_NDI ||
+ val & IDEUDMASTS_N4X || val & IDEUDMASTS_INTIDE)
+ return ATA_DMA_ERR;
+
+ /* read INTRQ (INT[3]) pin input state */
+ if (readl(drv_data->ide_base + IDECTRL) & IDECTRL_INTRQ)
+ return ATA_DMA_INTR;
+
+ if (val & IDEUDMASTS_SBUSY || val & IDEUDMASTS_DMAIDE)
+ return ATA_DMA_ACTIVE;
+
+ return 0;
+}
+
+/* Note: original code is ata_sff_softreset */
+static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = al->ap;
+ unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+ unsigned int devmask = 0;
+ int rc;
+ u8 err;
+
+ /* determine if device 0/1 are present */
+ if (ep93xx_pata_device_is_present(ap, 0))
+ devmask |= (1 << 0);
+ if (slave_possible && ep93xx_pata_device_is_present(ap, 1))
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ ap->ops->sff_dev_select(al->ap, 0);
+
+ /* issue bus reset */
+ rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
+ /* if link is occupied, -ENODEV too is an error */
+ if (rc && (rc != -ENODEV || sata_scr_valid(al))) {
+ ata_link_printk(al, KERN_ERR, "SRST failed (errno=%d)\n",
+ rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&al->device[0], devmask & (1 << 0),
+ &err);
+ if (slave_possible && err != 0x81)
+ classes[1] = ata_sff_dev_classify(&al->device[1],
+ devmask & (1 << 1), &err);
+
+ return 0;
+}
+
+/* Note: original code is ata_sff_drain_fifo */
+static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
+{
+ int count;
+ struct ata_port *ap;
+ struct ep93xx_pata_data *drv_data;
+
+ /* We only need to flush incoming data when a command was running */
+ if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+ return;
+
+ ap = qc->ap;
+ drv_data = ap->host->private_data;
+ /* Drain up to 64K of data before we give up this recovery method */
+ for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
+ && count < 65536; count += 2)
+ ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA);
+
+ /* Can become DEBUG later */
+ if (count)
+ ata_port_printk(ap, KERN_DEBUG,
+ "drained %d bytes to clear DRQ.\n", count);
+
+}
+
+static int ep93xx_pata_port_start(struct ata_port *ap)
+{
+ struct ep93xx_pata_data *drv_data = ap->host->private_data;
+
+ /*
+ * Set timings to safe values at startup (= number of ns from ATA
+ * specification), we'll switch to properly calculated values later.
+ */
+ drv_data->t = *ata_timing_find_mode(XFER_PIO_0);
+ return 0;
+}
+
+static struct scsi_host_template ep93xx_pata_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ /* ep93xx dma implementation limit */
+ .sg_tablesize = 32,
+ /* ep93xx dma can't transfer 65536 bytes at once */
+ .dma_boundary = 0x7fff,
+};
+
+static struct ata_port_operations ep93xx_pata_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+
+ .qc_prep = ata_noop_qc_prep,
+
+ .softreset = ep93xx_pata_softreset,
+ .hardreset = ATA_OP_NULL,
+
+ .sff_dev_select = ep93xx_pata_dev_select,
+ .sff_set_devctl = ep93xx_pata_set_devctl,
+ .sff_check_status = ep93xx_pata_check_status,
+ .sff_check_altstatus = ep93xx_pata_check_altstatus,
+ .sff_tf_load = ep93xx_pata_tf_load,
+ .sff_tf_read = ep93xx_pata_tf_read,
+ .sff_exec_command = ep93xx_pata_exec_command,
+ .sff_data_xfer = ep93xx_pata_data_xfer,
+ .sff_drain_fifo = ep93xx_pata_drain_fifo,
+ .sff_irq_clear = ATA_OP_NULL,
+
+ .set_piomode = ep93xx_pata_set_piomode,
+
+ .bmdma_setup = ep93xx_pata_dma_setup,
+ .bmdma_start = ep93xx_pata_dma_start,
+ .bmdma_stop = ep93xx_pata_dma_stop,
+ .bmdma_status = ep93xx_pata_dma_status,
+
+ .cable_detect = ata_cable_unknown,
+ .port_start = ep93xx_pata_port_start,
+};
+
+static int __devinit ep93xx_pata_probe(struct platform_device *pdev)
+{
+ struct ep93xx_pata_data *drv_data;
+ struct ata_host *host;
+ struct ata_port *ap;
+ unsigned int irq;
+ struct resource *mem_res;
+ void __iomem *ide_base;
+ int err;
+
+ err = ep93xx_ide_acquire_gpio(pdev);
+ if (err)
+ return err;
+
+ /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ err = -ENXIO;
+ goto err_rel_gpio;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ err = -ENXIO;
+ goto err_rel_gpio;
+ }
+
+ ide_base = devm_request_and_ioremap(&pdev->dev, mem_res);
+ if (!ide_base) {
+ err = -ENXIO;
+ goto err_rel_gpio;
+ }
+
+ drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data) {
+ err = -ENXIO;
+ goto err_rel_gpio;
+ }
+
+ platform_set_drvdata(pdev, drv_data);
+ drv_data->pdev = pdev;
+ drv_data->ide_base = ide_base;
+ drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN;
+ drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT;
+ ep93xx_pata_dma_init(drv_data);
+
+ /* allocate host */
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host) {
+ err = -ENXIO;
+ goto err_rel_dma;
+ }
+
+ ep93xx_pata_clear_regs(ide_base);
+
+ host->private_data = drv_data;
+
+ ap = host->ports[0];
+ ap->dev = &pdev->dev;
+ ap->ops = &ep93xx_pata_port_ops;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->pio_mask = ATA_PIO4;
+
+ /*
+ * Maximum UDMA modes:
+ * EP931x rev.E0 - UDMA2
+ * EP931x rev.E1 - UDMA3
+ * EP931x rev.E2 - UDMA4
+ *
+ * MWDMA support was removed from EP931x rev.E2,
+ * so this driver supports only UDMA modes.
+ */
+ if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) {
+ int chip_rev = ep93xx_chip_revision();
+
+ if (chip_rev == EP93XX_CHIP_REV_E1)
+ ap->udma_mask = ATA_UDMA3;
+ else if (chip_rev == EP93XX_CHIP_REV_E2)
+ ap->udma_mask = ATA_UDMA4;
+ else
+ ap->udma_mask = ATA_UDMA2;
+ }
+
+ /* defaults, pio 0 */
+ ep93xx_pata_enable_pio(ide_base, 0);
+
+ dev_info(&pdev->dev, "version " DRV_VERSION "\n");
+
+ /* activate host */
+ err = ata_host_activate(host, irq, ata_bmdma_interrupt, 0,
+ &ep93xx_pata_sht);
+ if (err == 0)
+ return 0;
+
+err_rel_dma:
+ ep93xx_pata_release_dma(drv_data);
+err_rel_gpio:
+ ep93xx_ide_release_gpio(pdev);
+ return err;
+}
+
+static int __devexit ep93xx_pata_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+ struct ep93xx_pata_data *drv_data = host->private_data;
+
+ ata_host_detach(host);
+ ep93xx_pata_release_dma(drv_data);
+ ep93xx_pata_clear_regs(drv_data->ide_base);
+ ep93xx_ide_release_gpio(pdev);
+ return 0;
+}
+
+static struct platform_driver ep93xx_pata_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ep93xx_pata_probe,
+ .remove = __devexit_p(ep93xx_pata_remove),
+};
+
+module_platform_driver(ep93xx_pata_platform_driver);
+
+MODULE_AUTHOR("Alessandro Zummo, Lennert Buytenhek, Joao Ramos, "
+ "Bartlomiej Zolnierkiewicz, Rafal Prylowski");
+MODULE_DESCRIPTION("low-level driver for cirrus ep93xx IDE controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:pata_ep93xx");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7336d4a7ab31..24712adf69df 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -553,6 +553,7 @@ struct mv_host_priv {
#if defined(CONFIG_HAVE_CLK)
struct clk *clk;
+ struct clk **port_clks;
#endif
/*
* These consistent DMA memory pools give us guaranteed
@@ -4027,6 +4028,9 @@ static int mv_platform_probe(struct platform_device *pdev)
struct resource *res;
int n_ports = 0;
int rc;
+#if defined(CONFIG_HAVE_CLK)
+ int port;
+#endif
ata_print_version_once(&pdev->dev, DRV_VERSION);
@@ -4054,6 +4058,13 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!host || !hpriv)
return -ENOMEM;
+#if defined(CONFIG_HAVE_CLK)
+ hpriv->port_clks = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * n_ports,
+ GFP_KERNEL);
+ if (!hpriv->port_clks)
+ return -ENOMEM;
+#endif
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
@@ -4066,9 +4077,17 @@ static int mv_platform_probe(struct platform_device *pdev)
#if defined(CONFIG_HAVE_CLK)
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
- dev_notice(&pdev->dev, "cannot get clkdev\n");
+ dev_notice(&pdev->dev, "cannot get optional clkdev\n");
else
- clk_enable(hpriv->clk);
+ clk_prepare_enable(hpriv->clk);
+
+ for (port = 0; port < n_ports; port++) {
+ char port_number[16];
+ sprintf(port_number, "%d", port);
+ hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
+ if (!IS_ERR(hpriv->port_clks[port]))
+ clk_prepare_enable(hpriv->port_clks[port]);
+ }
#endif
/*
@@ -4098,9 +4117,15 @@ static int mv_platform_probe(struct platform_device *pdev)
err:
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
- clk_disable(hpriv->clk);
+ clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
+ for (port = 0; port < n_ports; port++) {
+ if (!IS_ERR(hpriv->port_clks[port])) {
+ clk_disable_unprepare(hpriv->port_clks[port]);
+ clk_put(hpriv->port_clks[port]);
+ }
+ }
#endif
return rc;
@@ -4119,14 +4144,21 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)
struct mv_host_priv *hpriv = host->private_data;
+ int port;
#endif
ata_host_detach(host);
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
- clk_disable(hpriv->clk);
+ clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
+ for (port = 0; port < host->n_ports; port++) {
+ if (!IS_ERR(hpriv->port_clks[port])) {
+ clk_disable_unprepare(hpriv->port_clks[port]);
+ clk_put(hpriv->port_clks[port]);
+ }
+ }
#endif
return 0;
}
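
The sata_mv changes above switch to the clk_prepare_enable()/clk_disable_unprepare() pairs required by the common clock framework and add optional per-port clocks looked up by port number. A condensed sketch of that acquire/release pairing (hypothetical helper names, not part of the driver):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

/* Grab and enable the clock named after a port index, if it exists. */
static struct clk *example_port_clk_get(struct device *dev, int port)
{
        char name[16];
        struct clk *clk;

        snprintf(name, sizeof(name), "%d", port);
        clk = clk_get(dev, name);
        if (!IS_ERR(clk))
                clk_prepare_enable(clk);  /* prepare + enable in one call */
        return clk;                       /* may be an ERR_PTR: clock is optional */
}

static void example_port_clk_put(struct clk *clk)
{
        if (!IS_ERR_OR_NULL(clk)) {
                clk_disable_unprepare(clk);
                clk_put(clk);
        }
}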
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e8cd652d2017..98510931c815 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
} else if (skb && card->using_dma) {
SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + TX_DMA_ADDR(port));
}
@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
db_fpga_upgrade = db_firmware_upgrade = 0;
}
- if (card->fpga_version >= DMA_SUPPORTED){
+ if (card->fpga_version >= DMA_SUPPORTED) {
+ pci_set_master(dev);
card->using_dma = 1;
} else {
card->using_dma = 0;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9aa618acfe97..9b21469482ae 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -192,4 +192,93 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
+config CMA
+ bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+ select MIGRATION
+ help
+ This enables the Contiguous Memory Allocator which allows drivers
+ to allocate big physically-contiguous blocks of memory for use with
+ hardware components that support neither I/O mapping nor scatter-gather.
+
+ For more information see <include/linux/dma-contiguous.h>.
+ If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+ bool "CMA debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL
+ help
+ Turns on debug messages in CMA. This produces KERN_DEBUG
+ messages for every CMA call as well as various messages while
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+ int "Size in Mega Bytes"
+ depends on !CMA_SIZE_SEL_PERCENTAGE
+ default 16
+ help
+ Defines the size (in MiB) of the default memory area for Contiguous
+ Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+ int "Percentage of total memory"
+ depends on !CMA_SIZE_SEL_MBYTES
+ default 10
+ help
+ Defines the size of the default memory area for Contiguous Memory
+ Allocator as a percentage of the total memory in the system.
+
+choice
+ prompt "Selected region size"
+ default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+ bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+ bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+ bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+ bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+ range 4 9
+ default 8
+ help
+ DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+ size. This works well for buffers up to a few hundred kilobytes, but
+ for larger buffers it is just a memory waste. With this parameter you can
+ specify the maximum PAGE_SIZE order for contiguous buffers. Larger
+ buffers will be aligned only to this specified order. The order is
+ expressed as a power of two multiplied by the PAGE_SIZE.
+
+ For example, if your system defaults to 4KiB pages, the order value
+ of 8 means that the buffers will be aligned up to 1MiB only.
+
+ If unsure, leave the default value "8".
+
+config CMA_AREAS
+ int "Maximum count of the CMA device-private areas"
+ default 7
+ help
+ CMA allows creating CMA areas for particular devices. This parameter
+ sets the maximum number of such device private CMA areas in the
+ system.
+
+ If unsure, leave the default value "7".
+
+endif
+
endmenu
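
As the CMA_ALIGNMENT help text above describes, the allocator aligns a buffer to the smallest page order that covers it, capped at the configured maximum order. A small user-space illustration of that arithmetic (assuming 4 KiB pages and the default cap of 8; this is not the kernel routine itself):

#include <stdio.h>

#define PAGE_SHIFT      12      /* assumes 4 KiB pages */
#define CMA_ALIGNMENT   8       /* the default cap from the option above */

/* smallest order such that (1 << order) pages cover 'size' bytes */
static unsigned int size_to_order(unsigned long size)
{
        unsigned int order = 0;

        while ((1UL << (order + PAGE_SHIFT)) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long size = 5UL << 20;         /* a 5 MiB buffer */
        unsigned int order = size_to_order(size);

        if (order > CMA_ALIGNMENT)
                order = CMA_ALIGNMENT;          /* cap: 2^8 pages = 1 MiB */

        printf("buffer %lu bytes -> alignment %lu bytes\n",
               size, 1UL << (order + PAGE_SHIFT));
        return 0;
}

With these numbers the 5 MiB request would otherwise round up to an 8 MiB alignment; the cap keeps it at 1 MiB, which is the trade-off the help text points out.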
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b6d1b9c4200c..5aa2d703d19f 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,6 +6,7 @@ obj-y := core.o bus.o dd.o syscore.o \
attribute_container.o transport_class.o \
topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1b1cbb571d38..4b01ab3d2c24 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
+#include <scsi/scsi_scan.h>
#include "base.h"
#include "power/power.h"
@@ -100,7 +101,7 @@ static void driver_deferred_probe_add(struct device *dev)
mutex_lock(&deferred_probe_mutex);
if (list_empty(&dev->p->deferred_probe)) {
dev_dbg(dev, "Added to deferred list\n");
- list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+ list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
}
mutex_unlock(&deferred_probe_mutex);
}
@@ -332,6 +333,7 @@ void wait_for_device_probe(void)
/* wait for the known devices to complete their probing */
wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
async_synchronize_full();
+ scsi_complete_async_scans();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 05c64c11bad2..24e88fe29ec1 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -44,8 +44,26 @@ static int dma_buf_release(struct inode *inode, struct file *file)
return 0;
}
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ /* check for overflowing the buffer's size */
+ if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ dmabuf->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ return dmabuf->ops->mmap(dmabuf, vma);
+}
+
static const struct file_operations dma_buf_fops = {
.release = dma_buf_release,
+ .mmap = dma_buf_mmap_internal,
};
/*
@@ -82,7 +100,8 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
|| !ops->unmap_dma_buf
|| !ops->release
|| !ops->kmap_atomic
- || !ops->kmap)) {
+ || !ops->kmap
+ || !ops->mmap)) {
return ERR_PTR(-EINVAL);
}
@@ -406,3 +425,81 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
+
+
+/**
+ * dma_buf_mmap - Set up a userspace mmap with the given vma
+ * @dmabuf: [in] buffer that should back the vma
+ * @vma: [in] vma for the mmap
+ * @pgoff: [in] offset in pages where this mmap should start within the
+ * dma-buf buffer.
+ *
+ * This function adjusts the passed-in vma so that it points at the file of the
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporter's mmap function to
+ * set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+ unsigned long pgoff)
+{
+ if (WARN_ON(!dmabuf || !vma))
+ return -EINVAL;
+
+ /* check for offset overflow */
+ if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+ return -EOVERFLOW;
+
+ /* check for overflowing the buffer's size */
+ if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ dmabuf->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ /* readjust the vma */
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ vma->vm_file = dmabuf->file;
+ get_file(vma->vm_file);
+
+ vma->vm_pgoff = pgoff;
+
+ return dmabuf->ops->mmap(dmabuf, vma);
+}
+EXPORT_SYMBOL_GPL(dma_buf_mmap);
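
Not part of the patch: a hedged sketch of the importer-side pattern this helper
enables, where a driver's own mmap file operation simply forwards to
dma_buf_mmap(); using filp->private_data to carry the dma_buf pointer is an
assumption for illustration:

#include <linux/dma-buf.h>
#include <linux/fs.h>

/* Illustrative importer fop; assumes filp->private_data was set to a
 * struct dma_buf obtained earlier (e.g. via dma_buf_get()). */
static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf = filp->private_data;

	/* Map the whole buffer, starting at page offset 0 of the dma-buf. */
	return dma_buf_mmap(dmabuf, vma, 0);
}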
+
+/**
+ * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf: [in] buffer to vmap
+ *
+ * This call may fail due to lack of virtual mapping address space.
+ * These calls are optional in drivers. The intended use for them
+ * is mapping objects linearly into kernel space for heavily used objects.
+ * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ */
+void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ if (WARN_ON(!dmabuf))
+ return NULL;
+
+ if (dmabuf->ops->vmap)
+ return dmabuf->ops->vmap(dmabuf);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_buf_vmap);
+
+/**
+ * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf: [in] buffer to vunmap
+ */
+void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ if (WARN_ON(!dmabuf))
+ return;
+
+ if (dmabuf->ops->vunmap)
+ dmabuf->ops->vunmap(dmabuf, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_vunmap);
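
A short sketch (illustrative, not part of the patch) of the vmap/vunmap pairing
described above; since ->vmap is optional for exporters, a NULL return has to
be handled:

#include <linux/dma-buf.h>
#include <linux/string.h>

/* Illustrative only: clear a dma-buf through a linear kernel mapping. */
static int example_clear(struct dma_buf *dmabuf)
{
	void *vaddr = dma_buf_vmap(dmabuf);

	if (!vaddr)
		return -ENOMEM;	/* exporter has no ->vmap; fall back to kmap */

	memset(vaddr, 0, dmabuf->size);
	dma_buf_vunmap(dmabuf, vaddr);
	return 0;
}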
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bb0025c510b3..1b85949e3d2f 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,6 +10,7 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
+ phys_addr_t pfn_base;
int size;
int flags;
unsigned long *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_coherent
+ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller
+ * should proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ if (mem && vaddr >= mem->virt_base && vaddr + size <=
+ (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ unsigned long off = vma->vm_pgoff;
+ int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+ int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int count = size >> PAGE_SHIFT;
+
+ *ret = -ENXIO;
+ if (off < count && user_count <= count - off) {
+ unsigned pfn = mem->pfn_base + start + off;
+ *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
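
Not part of the patch — a sketch of how an architecture's dma_mmap
implementation might use this helper, following the return convention
documented above (1 means the vma was handled, 0 means fall through to the
generic path); the names and the fallback are assumptions:

#include <linux/dma-mapping.h>

/* Illustrative arch-level helper; the generic fallback is elided. */
static int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, size_t size)
{
	int ret;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;	/* handled by the per-device coherent pool */

	/* otherwise map from the generic pools (arch specific, elided) */
	return -ENXIO;
}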
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644
index 000000000000..78efb0306a44
--- /dev/null
+++ b/drivers/base/dma-contiguous.c
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the license.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+# define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users who want to set the size of the global CMA area for their system
+ * should use the cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+ pr_debug("%s(%s)\n", __func__, p);
+ size_cmdline = memparse(p, &p);
+ return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+ struct memblock_region *reg;
+ unsigned long total_pages = 0;
+
+ /*
+ * We cannot use memblock_phys_mem_size() here, because
+ * memblock_analyze() has not been called yet.
+ */
+ for_each_memblock(memory, reg)
+ total_pages += memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+
+ return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+ return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch-specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+ unsigned long selected_size = 0;
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+ if (size_cmdline != -1) {
+ selected_size = size_cmdline;
+ } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+ selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+ selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+ selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+ selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+ }
+
+ if (selected_size) {
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ selected_size / SZ_1M);
+
+ dma_declare_contiguous(NULL, selected_size, 0, limit);
+ }
+};
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+ unsigned long pfn = base_pfn;
+ unsigned i = count >> pageblock_order;
+ struct zone *zone;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+
+ do {
+ unsigned j;
+ base_pfn = pfn;
+ for (j = pageblock_nr_pages; j; --j, pfn++) {
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ if (page_zone(pfn_to_page(pfn)) != zone)
+ return -EINVAL;
+ }
+ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ } while (--i);
+ return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+ unsigned long count)
+{
+ int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct cma *cma;
+ int ret = -ENOMEM;
+
+ pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+ cma = kmalloc(sizeof *cma, GFP_KERNEL);
+ if (!cma)
+ return ERR_PTR(-ENOMEM);
+
+ cma->base_pfn = base_pfn;
+ cma->count = count;
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ goto no_mem;
+
+ ret = cma_activate_area(base_pfn, count);
+ if (ret)
+ goto error;
+
+ pr_debug("%s: returned %p\n", __func__, (void *)cma);
+ return cma;
+
+error:
+ kfree(cma->bitmap);
+no_mem:
+ kfree(cma);
+ return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+ phys_addr_t start;
+ unsigned long size;
+ struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+ struct cma_reserved *r = cma_reserved;
+ unsigned i = cma_reserved_count;
+
+ pr_debug("%s()\n", __func__);
+
+ for (; i; --i, ++r) {
+ struct cma *cma;
+ cma = cma_create_area(PFN_DOWN(r->start),
+ r->size >> PAGE_SHIFT);
+ if (!IS_ERR(cma))
+ dev_set_cma_area(r->dev, cma);
+ }
+ return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board-specific code while the early allocator (memblock or bootmem)
+ * is still active.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ unsigned long alignment;
+
+ pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+ (unsigned long)size, (unsigned long)base,
+ (unsigned long)limit);
+
+ /* Sanity checks */
+ if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ pr_err("Not enough slots for CMA reserved regions!\n");
+ return -ENOSPC;
+ }
+
+ if (!size)
+ return -EINVAL;
+
+ /* Sanitise input arguments */
+ alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+ base = ALIGN(base, alignment);
+ size = ALIGN(size, alignment);
+ limit &= ~(alignment - 1);
+
+ /* Reserve memory */
+ if (base) {
+ if (memblock_is_region_reserved(base, size) ||
+ memblock_reserve(base, size) < 0) {
+ base = -EBUSY;
+ goto err;
+ }
+ } else {
+ /*
+ * Use __memblock_alloc_base() since
+ * memblock_alloc_base() panic()s.
+ */
+ phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+ if (!addr) {
+ base = -ENOMEM;
+ goto err;
+ } else if (addr + size > ~(unsigned long)0) {
+ memblock_free(addr, size);
+ base = -EINVAL;
+ goto err;
+ } else {
+ base = addr;
+ }
+ }
+
+ /*
+ * Each reserved area must be initialised later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ r->start = base;
+ r->size = size;
+ r->dev = dev;
+ cma_reserved_count++;
+ pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+ (unsigned long)base);
+
+ /* Architecture specific contiguous memory fixup. */
+ dma_contiguous_early_fixup(base, size);
+ return 0;
+err:
+ pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+ return base;
+}
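
As a hedged illustration of the kernel-doc above (not part of the patch), board
code could reserve a device-private area while memblock is still active; the
device and size are assumptions:

#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device example_camera_device;

/* Illustrative board reserve hook, run while the early allocator is active. */
void __init example_board_reserve(void)
{
	/* 16 MiB private CMA area for the camera, no base/limit restriction */
	dma_declare_contiguous(&example_camera_device.dev,
			       16 * 1024 * 1024, 0, 0);
}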
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev: Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. Requires the architecture-specific dev_get_cma_area() helper
+ * function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int align)
+{
+ unsigned long mask, pfn, pageno, start = 0;
+ struct cma *cma = dev_get_cma_area(dev);
+ int ret;
+
+ if (!cma || !cma->count)
+ return NULL;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+ count, align);
+
+ if (!count)
+ return NULL;
+
+ mask = (1 << align) - 1;
+
+ mutex_lock(&cma_mutex);
+
+ for (;;) {
+ pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+ start, count, mask);
+ if (pageno >= cma->count) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ pfn = cma->base_pfn + pageno;
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ if (ret == 0) {
+ bitmap_set(cma->bitmap, pageno, count);
+ break;
+ } else if (ret != -EBUSY) {
+ goto error;
+ }
+ pr_debug("%s(): memory range at %p is busy, retrying\n",
+ __func__, pfn_to_page(pfn));
+ /* try again with a bit different memory target */
+ start = pageno + mask + 1;
+ }
+
+ mutex_unlock(&cma_mutex);
+
+ pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+ return pfn_to_page(pfn);
+error:
+ mutex_unlock(&cma_mutex);
+ return NULL;
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to the contiguous
+ * area, and true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ struct cma *cma = dev_get_cma_area(dev);
+ unsigned long pfn;
+
+ if (!cma || !pages)
+ return false;
+
+ pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+ pfn = page_to_pfn(pages);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+ mutex_lock(&cma_mutex);
+ bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+ free_contig_range(pfn, count);
+ mutex_unlock(&cma_mutex);
+
+ return true;
+}
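
Taken together, an arch DMA backend would pair the two entry points roughly as
below (a sketch under the signatures above; the fallback to the normal page
allocator is an assumption):

#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative pairing of the allocate/release entry points. */
static struct page *example_alloc_pages(struct device *dev, size_t size)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* alignment order 4: 16 pages, i.e. 64 KiB with 4 KiB pages */
	return dma_alloc_from_contiguous(dev, count, 4);
}

static void example_free_pages(struct device *dev, struct page *pages,
			       size_t size)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, pages, count))
		__free_pages(pages, get_order(size));	/* not CMA memory */
}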
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 90aa2a11a933..af1a177216f1 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
{
int n;
- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
- if (n > 0 && PAGE_SIZE > n + 1) {
- *(buf + n++) = '\n';
- *(buf + n++) = '\0';
- }
+ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
+ buf[n++] = '\n';
+ buf[n] = '\0';
return n;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 73ce9fbe9839..83aa694a8efe 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
@@ -38,11 +39,13 @@
ktime_t __start = ktime_get(); \
type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
- struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \
- if (__elapsed > __gpd_data->td.field) { \
- __gpd_data->td.field = __elapsed; \
+ struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
+ if (!__retval && __elapsed > __td->field) { \
+ __td->field = __elapsed; \
dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
__elapsed); \
+ genpd->max_off_time_changed = true; \
+ __td->constraint_changed = true; \
} \
__retval; \
})
@@ -211,6 +214,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > genpd->power_on_latency_ns) {
genpd->power_on_latency_ns = elapsed_ns;
+ genpd->max_off_time_changed = true;
if (genpd->name)
pr_warning("%s: Power-on latency exceeded, "
"new value %lld ns\n", genpd->name,
@@ -247,6 +251,53 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_RUNTIME
+static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct device *dev;
+
+ gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
+
+ mutex_lock(&gpd_data->lock);
+ dev = gpd_data->base.dev;
+ if (!dev) {
+ mutex_unlock(&gpd_data->lock);
+ return NOTIFY_DONE;
+ }
+ mutex_unlock(&gpd_data->lock);
+
+ for (;;) {
+ struct generic_pm_domain *genpd;
+ struct pm_domain_data *pdd;
+
+ spin_lock_irq(&dev->power.lock);
+
+ pdd = dev->power.subsys_data ?
+ dev->power.subsys_data->domain_data : NULL;
+ if (pdd) {
+ to_gpd_data(pdd)->td.constraint_changed = true;
+ genpd = dev_to_genpd(dev);
+ } else {
+ genpd = ERR_PTR(-ENODATA);
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!IS_ERR(genpd)) {
+ mutex_lock(&genpd->lock);
+ genpd->max_off_time_changed = true;
+ mutex_unlock(&genpd->lock);
+ }
+
+ dev = dev->parent;
+ if (!dev || dev->power.ignore_children)
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
/**
* __pm_genpd_save_device - Save the pre-suspend state of a device.
* @pdd: Domain data of the device to save the state of.
@@ -435,6 +486,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > genpd->power_off_latency_ns) {
genpd->power_off_latency_ns = elapsed_ns;
+ genpd->max_off_time_changed = true;
if (genpd->name)
pr_warning("%s: Power-off latency exceeded, "
"new value %lld ns\n", genpd->name,
@@ -443,17 +495,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
genpd->status = GPD_STATE_POWER_OFF;
- genpd->power_off_time = ktime_get();
-
- /* Update PM QoS information for devices in the domain. */
- list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
- struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
-
- pm_runtime_update_max_time_suspended(pdd->dev,
- td->start_latency_ns +
- td->restore_state_latency_ns +
- genpd->power_on_latency_ns);
- }
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -514,9 +555,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (ret)
return ret;
- pm_runtime_update_max_time_suspended(dev,
- dev_gpd_data(dev)->td.start_latency_ns);
-
/*
* If power.irq_safe is set, this routine will be run with interrupts
* off, so it can't use mutexes.
@@ -613,6 +651,12 @@ void pm_genpd_poweroff_unused(void)
#else
+static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ return NOTIFY_DONE;
+}
+
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#define pm_genpd_runtime_suspend NULL
@@ -1209,12 +1253,15 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- genpd_acquire_lock(genpd);
+ gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+ if (!gpd_data)
+ return -ENOMEM;
- if (genpd->status == GPD_STATE_POWER_OFF) {
- ret = -EINVAL;
- goto out;
- }
+ mutex_init(&gpd_data->lock);
+ gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+
+ genpd_acquire_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1227,26 +1274,35 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
goto out;
}
- gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
- if (!gpd_data) {
- ret = -ENOMEM;
- goto out;
- }
-
genpd->device_count++;
+ genpd->max_off_time_changed = true;
- dev->pm_domain = &genpd->domain;
dev_pm_get_subsys_data(dev);
+
+ mutex_lock(&gpd_data->lock);
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = &genpd->domain;
dev->power.subsys_data->domain_data = &gpd_data->base;
gpd_data->base.dev = dev;
- gpd_data->need_restore = false;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
if (td)
gpd_data->td = *td;
+ gpd_data->td.constraint_changed = true;
+ gpd_data->td.effective_constraint_ns = -1;
+ spin_unlock_irq(&dev->power.lock);
+ mutex_unlock(&gpd_data->lock);
+
+ genpd_release_lock(genpd);
+
+ return 0;
+
out:
genpd_release_lock(genpd);
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ kfree(gpd_data);
return ret;
}
@@ -1290,12 +1346,15 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev)
{
+ struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
- int ret = -EINVAL;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
+ || IS_ERR_OR_NULL(dev->pm_domain)
+ || pd_to_genpd(dev->pm_domain) != genpd)
return -EINVAL;
genpd_acquire_lock(genpd);
@@ -1305,21 +1364,27 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- if (pdd->dev != dev)
- continue;
+ genpd->device_count--;
+ genpd->max_off_time_changed = true;
- list_del_init(&pdd->list_node);
- pdd->dev = NULL;
- dev_pm_put_subsys_data(dev);
- dev->pm_domain = NULL;
- kfree(to_gpd_data(pdd));
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = NULL;
+ pdd = dev->power.subsys_data->domain_data;
+ list_del_init(&pdd->list_node);
+ dev->power.subsys_data->domain_data = NULL;
+ spin_unlock_irq(&dev->power.lock);
- genpd->device_count--;
+ gpd_data = to_gpd_data(pdd);
+ mutex_lock(&gpd_data->lock);
+ pdd->dev = NULL;
+ mutex_unlock(&gpd_data->lock);
- ret = 0;
- break;
- }
+ genpd_release_lock(genpd);
+
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ kfree(gpd_data);
+ dev_pm_put_subsys_data(dev);
+ return 0;
out:
genpd_release_lock(genpd);
@@ -1348,6 +1413,26 @@ void pm_genpd_dev_always_on(struct device *dev, bool val)
EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
/**
+ * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
+ * @dev: Device to set/unset the flag for.
+ * @val: The new value of the device's "need restore" flag.
+ */
+void pm_genpd_dev_need_restore(struct device *dev, bool val)
+{
+ struct pm_subsys_data *psd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ psd = dev_to_psd(dev);
+ if (psd && psd->domain_data)
+ to_gpd_data(psd->domain_data)->need_restore = val;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
+
+/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Master PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
@@ -1378,7 +1463,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->slave == subdomain && link->master == genpd) {
ret = -EINVAL;
goto out;
@@ -1690,6 +1775,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->resume_count = 0;
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
+ genpd->max_off_time_changed = true;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 66a265bf5867..28dee3053f1f 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,6 +14,31 @@
#ifdef CONFIG_PM_RUNTIME
+static int dev_update_qos_constraint(struct device *dev, void *data)
+{
+ s64 *constraint_ns_p = data;
+ s32 constraint_ns = -1;
+
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+ constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
+
+ if (constraint_ns < 0) {
+ constraint_ns = dev_pm_qos_read_value(dev);
+ constraint_ns *= NSEC_PER_USEC;
+ }
+ if (constraint_ns == 0)
+ return 0;
+
+ /*
+ * constraint_ns cannot be negative here, because the device has been
+ * suspended.
+ */
+ if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+ *constraint_ns_p = constraint_ns;
+
+ return 0;
+}
+
/**
* default_stop_ok - Default PM domain governor routine for stopping devices.
* @dev: Device to check.
@@ -21,14 +46,52 @@
bool default_stop_ok(struct device *dev)
{
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ unsigned long flags;
+ s64 constraint_ns;
dev_dbg(dev, "%s()\n", __func__);
- if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
- return true;
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (!td->constraint_changed) {
+ bool ret = td->cached_stop_ok;
- return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
- && td->break_even_ns < dev->power.max_time_suspended_ns;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return ret;
+ }
+ td->constraint_changed = false;
+ td->cached_stop_ok = false;
+ td->effective_constraint_ns = -1;
+ constraint_ns = __dev_pm_qos_read_value(dev);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (constraint_ns < 0)
+ return false;
+
+ constraint_ns *= NSEC_PER_USEC;
+ /*
+ * We can walk the children without any additional locking, because
+ * they all have been suspended at this point and their
+ * effective_constraint_ns fields won't be modified in parallel with us.
+ */
+ if (!dev->power.ignore_children)
+ device_for_each_child(dev, &constraint_ns,
+ dev_update_qos_constraint);
+
+ if (constraint_ns > 0) {
+ constraint_ns -= td->start_latency_ns;
+ if (constraint_ns == 0)
+ return false;
+ }
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
+ constraint_ns == 0;
+ /*
+ * The children have been suspended already, so we don't need to take
+ * their stop latencies into account here.
+ */
+ return td->cached_stop_ok;
}
/**
@@ -42,9 +105,27 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
struct pm_domain_data *pdd;
- s64 min_dev_off_time_ns;
+ s64 min_off_time_ns;
s64 off_on_time_ns;
- ktime_t time_now = ktime_get();
+
+ if (genpd->max_off_time_changed) {
+ struct gpd_link *link;
+
+ /*
+ * We have to invalidate the cached results for the masters, so
+ * use the observation that default_power_down_ok() is not
+ * going to be called for any master until this instance
+ * returns.
+ */
+ list_for_each_entry(link, &genpd->slave_links, slave_node)
+ link->master->max_off_time_changed = true;
+
+ genpd->max_off_time_changed = false;
+ genpd->cached_power_down_ok = false;
+ genpd->max_off_time_ns = -1;
+ } else {
+ return genpd->cached_power_down_ok;
+ }
off_on_time_ns = genpd->power_off_latency_ns +
genpd->power_on_latency_ns;
@@ -61,6 +142,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
to_gpd_data(pdd)->td.save_state_latency_ns;
}
+ min_off_time_ns = -1;
/*
* Check if subdomains can be off for enough time.
*
@@ -73,8 +155,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
if (sd_max_off_ns < 0)
continue;
- sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
- sd->power_off_time));
/*
* Check if the subdomain is allowed to be off long enough for
* the current domain to turn off and on (that's how much time
@@ -82,60 +162,64 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
*/
if (sd_max_off_ns <= off_on_time_ns)
return false;
+
+ if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
+ min_off_time_ns = sd_max_off_ns;
}
/*
* Check if the devices in the domain can be off enough time.
*/
- min_dev_off_time_ns = -1;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
struct gpd_timing_data *td;
- struct device *dev = pdd->dev;
- s64 dev_off_time_ns;
+ s64 constraint_ns;
- if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+ if (!pdd->dev->driver)
continue;
+ /*
+ * Check if the device is allowed to be off long enough for the
+ * domain to turn off and on (that's how much time it will
+ * have to wait worst case).
+ */
td = &to_gpd_data(pdd)->td;
- dev_off_time_ns = dev->power.max_time_suspended_ns -
- (td->start_latency_ns + td->restore_state_latency_ns +
- ktime_to_ns(ktime_sub(time_now,
- dev->power.suspend_time)));
- if (dev_off_time_ns <= off_on_time_ns)
- return false;
-
- if (min_dev_off_time_ns > dev_off_time_ns
- || min_dev_off_time_ns < 0)
- min_dev_off_time_ns = dev_off_time_ns;
- }
+ constraint_ns = td->effective_constraint_ns;
+ /* default_stop_ok() need not be called before us. */
+ if (constraint_ns < 0) {
+ constraint_ns = dev_pm_qos_read_value(pdd->dev);
+ constraint_ns *= NSEC_PER_USEC;
+ }
+ if (constraint_ns == 0)
+ continue;
- if (min_dev_off_time_ns < 0) {
/*
- * There are no latency constraints, so the domain can spend
- * arbitrary time in the "off" state.
+ * constraint_ns cannot be negative here, because the device has
+ * been suspended.
*/
- genpd->max_off_time_ns = -1;
- return true;
+ constraint_ns -= td->restore_state_latency_ns;
+ if (constraint_ns <= off_on_time_ns)
+ return false;
+
+ if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
+ min_off_time_ns = constraint_ns;
}
+ genpd->cached_power_down_ok = true;
+
/*
- * The difference between the computed minimum delta and the time needed
- * to turn the domain on is the maximum theoretical time this domain can
- * spend in the "off" state.
+ * If the computed minimum device off time is negative, there are no
+ * latency constraints, so the domain can spend arbitrary time in the
+ * "off" state.
*/
- min_dev_off_time_ns -= genpd->power_on_latency_ns;
+ if (min_off_time_ns < 0)
+ return true;
/*
- * If the difference between the computed minimum delta and the time
- * needed to turn the domain off and back on on is smaller than the
- * domain's power break even time, removing power from the domain is not
- * worth it.
+ * The difference between the computed minimum subdomain or device off
+ * time and the time needed to turn the domain on is the maximum
+ * theoretical time this domain can spend in the "off" state.
*/
- if (genpd->break_even_ns >
- min_dev_off_time_ns - genpd->power_off_latency_ns)
- return false;
-
- genpd->max_off_time_ns = min_dev_off_time_ns;
+ genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
return true;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b462c0e341cb..9cb845e49334 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -889,6 +889,11 @@ static int dpm_suspend_noirq(pm_message_t state)
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
+
+ if (pm_wakeup_pending()) {
+ error = -EBUSY;
+ break;
+ }
}
mutex_unlock(&dpm_list_mtx);
if (error)
@@ -962,6 +967,11 @@ static int dpm_suspend_late(pm_message_t state)
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
+
+ if (pm_wakeup_pending()) {
+ error = -EBUSY;
+ break;
+ }
}
mutex_unlock(&dpm_list_mtx);
if (error)
@@ -1021,7 +1031,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_children(dev, async);
if (async_error)
- return 0;
+ goto Complete;
pm_runtime_get_noresume(dev);
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1030,7 +1040,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
pm_runtime_put_sync(dev);
async_error = -EBUSY;
- return 0;
+ goto Complete;
}
device_lock(dev);
@@ -1087,6 +1097,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
device_unlock(dev);
+
+ Complete:
complete_all(&dev->power.completion);
if (error) {
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 71855570922d..fd849a2c4fa8 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -352,21 +352,26 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
*
* Will register the notifier into a notification chain that gets called
* upon changes to the target value for the device.
+ *
+ * If the device's constraints object doesn't exist when this routine is called,
+ * it will be created (or an error code will be returned if that fails).
*/
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
- int retval = 0;
+ int ret = 0;
mutex_lock(&dev_pm_qos_mtx);
- /* Silently return if the constraints object is not present. */
- if (dev->power.constraints)
- retval = blocking_notifier_chain_register(
- dev->power.constraints->notifiers,
- notifier);
+ if (!dev->power.constraints)
+ ret = dev->power.power_state.event != PM_EVENT_INVALID ?
+ dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+
+ if (!ret)
+ ret = blocking_notifier_chain_register(
+ dev->power.constraints->notifiers, notifier);
mutex_unlock(&dev_pm_qos_mtx);
- return retval;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
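
For reference — not part of the patch — the registration pattern this change
enables looks roughly like the genpd code above (genpd_dev_pm_qos_notifier);
the callback body and the names here are illustrative:

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

/* Illustrative PM QoS notifier user. */
static int example_qos_notifier(struct notifier_block *nb,
				unsigned long val, void *ptr)
{
	/* 'val' carries the new aggregated constraint target for the device */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_qos_notifier,
};

static int example_register(struct device *dev)
{
	return dev_pm_qos_add_notifier(dev, &example_nb);
}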
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index bd0f3949bcf9..59894873a3b3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -282,47 +282,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
return retval != -EACCES ? retval : -EIO;
}
-struct rpm_qos_data {
- ktime_t time_now;
- s64 constraint_ns;
-};
-
-/**
- * rpm_update_qos_constraint - Update a given PM QoS constraint data.
- * @dev: Device whose timing data to use.
- * @data: PM QoS constraint data to update.
- *
- * Use the suspend timing data of @dev to update PM QoS constraint data pointed
- * to by @data.
- */
-static int rpm_update_qos_constraint(struct device *dev, void *data)
-{
- struct rpm_qos_data *qos = data;
- unsigned long flags;
- s64 delta_ns;
- int ret = 0;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- if (dev->power.max_time_suspended_ns < 0)
- goto out;
-
- delta_ns = dev->power.max_time_suspended_ns -
- ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
- if (delta_ns <= 0) {
- ret = -EBUSY;
- goto out;
- }
-
- if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
- qos->constraint_ns = delta_ns;
-
- out:
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- return ret;
-}
-
/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
@@ -349,7 +308,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
struct device *parent = NULL;
- struct rpm_qos_data qos;
int retval;
trace_rpm_suspend(dev, rpmflags);
@@ -445,38 +403,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
- qos.constraint_ns = __dev_pm_qos_read_value(dev);
- if (qos.constraint_ns < 0) {
- /* Negative constraint means "never suspend". */
+ if (__dev_pm_qos_read_value(dev) < 0) {
+ /* Negative PM QoS constraint means "never suspend". */
retval = -EPERM;
goto out;
}
- qos.constraint_ns *= NSEC_PER_USEC;
- qos.time_now = ktime_get();
__update_runtime_status(dev, RPM_SUSPENDING);
- if (!dev->power.ignore_children) {
- if (dev->power.irq_safe)
- spin_unlock(&dev->power.lock);
- else
- spin_unlock_irq(&dev->power.lock);
-
- retval = device_for_each_child(dev, &qos,
- rpm_update_qos_constraint);
-
- if (dev->power.irq_safe)
- spin_lock(&dev->power.lock);
- else
- spin_lock_irq(&dev->power.lock);
-
- if (retval)
- goto fail;
- }
-
- dev->power.suspend_time = qos.time_now;
- dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
-
if (dev->pm_domain)
callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
@@ -529,8 +463,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
fail:
__update_runtime_status(dev, RPM_ACTIVE);
- dev->power.suspend_time = ktime_set(0, 0);
- dev->power.max_time_suspended_ns = -1;
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -704,9 +636,6 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
- dev->power.suspend_time = ktime_set(0, 0);
- dev->power.max_time_suspended_ns = -1;
-
__update_runtime_status(dev, RPM_RESUMING);
if (dev->pm_domain)
@@ -1369,9 +1298,6 @@ void pm_runtime_init(struct device *dev)
setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
(unsigned long)dev);
- dev->power.suspend_time = ktime_set(0, 0);
- dev->power.max_time_suspended_ns = -1;
-
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1389,28 +1315,3 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put_sync(dev->parent);
}
-
-/**
- * pm_runtime_update_max_time_suspended - Update device's suspend time data.
- * @dev: Device to handle.
- * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
- *
- * Update the device's power.max_time_suspended_ns field by subtracting
- * @delta_ns from it. The resulting value of power.max_time_suspended_ns is
- * never negative.
- */
-void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
- if (dev->power.max_time_suspended_ns > delta_ns)
- dev->power.max_time_suspended_ns -= delta_ns;
- else
- dev->power.max_time_suspended_ns = 0;
- }
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 95c12f6cb5b9..48be2ad4dd2c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -314,22 +314,41 @@ static ssize_t wakeup_active_count_show(struct device *dev,
static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
-static ssize_t wakeup_hit_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_abort_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long count = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->wakeup_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+
+static ssize_t wakeup_expire_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
- count = dev->power.wakeup->hit_count;
+ count = dev->power.wakeup->expire_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL);
+static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
static ssize_t wakeup_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -398,6 +417,27 @@ static ssize_t wakeup_last_time_show(struct device *dev,
}
static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s64 msec = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
+ wakeup_prevent_sleep_time_show, NULL);
+#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
@@ -486,11 +526,15 @@ static struct attribute *wakeup_attrs[] = {
&dev_attr_wakeup.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_wakeup_active_count.attr,
- &dev_attr_wakeup_hit_count.attr,
+ &dev_attr_wakeup_abort_count.attr,
+ &dev_attr_wakeup_expire_count.attr,
&dev_attr_wakeup_active.attr,
&dev_attr_wakeup_total_time_ms.attr,
&dev_attr_wakeup_max_time_ms.attr,
&dev_attr_wakeup_last_time_ms.attr,
+#ifdef CONFIG_PM_AUTOSLEEP
+ &dev_attr_wakeup_prevent_sleep_time_ms.attr,
+#endif
#endif
NULL,
};
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 2a3e581b8dcd..cbb463b3a750 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -14,16 +14,15 @@
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <trace/events/power.h>
#include "power.h"
-#define TIMEOUT 100
-
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
*/
-bool events_check_enabled;
+bool events_check_enabled __read_mostly;
/*
* Combined counters of registered wakeup events and wakeup events in progress.
@@ -52,6 +51,8 @@ static void pm_wakeup_timer_fn(unsigned long data);
static LIST_HEAD(wakeup_sources);
+static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+
/**
* wakeup_source_prepare - Prepare a new wakeup source for initialization.
* @ws: Wakeup source to prepare.
@@ -132,6 +133,7 @@ void wakeup_source_add(struct wakeup_source *ws)
spin_lock_init(&ws->lock);
setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
ws->active = false;
+ ws->last_time = ktime_get();
spin_lock_irq(&events_lock);
list_add_rcu(&ws->entry, &wakeup_sources);
@@ -374,12 +376,33 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
*/
static void wakeup_source_activate(struct wakeup_source *ws)
{
+ unsigned int cec;
+
ws->active = true;
ws->active_count++;
ws->last_time = ktime_get();
+ if (ws->autosleep_enabled)
+ ws->start_prevent_time = ws->last_time;
/* Increment the counter of events in progress. */
- atomic_inc(&combined_event_count);
+ cec = atomic_inc_return(&combined_event_count);
+
+ trace_wakeup_source_activate(ws->name, cec);
+}
+
+/**
+ * wakeup_source_report_event - Report wakeup event using the given source.
+ * @ws: Wakeup source to report the event for.
+ */
+static void wakeup_source_report_event(struct wakeup_source *ws)
+{
+ ws->event_count++;
+ /* This is racy, but the counter is approximate anyway. */
+ if (events_check_enabled)
+ ws->wakeup_count++;
+
+ if (!ws->active)
+ wakeup_source_activate(ws);
}
/**
@@ -397,10 +420,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
spin_lock_irqsave(&ws->lock, flags);
- ws->event_count++;
- if (!ws->active)
- wakeup_source_activate(ws);
-
+ wakeup_source_report_event(ws);
del_timer(&ws->timer);
ws->timer_expires = 0;
@@ -432,6 +452,17 @@ void pm_stay_awake(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_stay_awake);
+#ifdef CONFIG_PM_AUTOSLEEP
+static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
+{
+ ktime_t delta = ktime_sub(now, ws->start_prevent_time);
+ ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
+}
+#else
+static inline void update_prevent_sleep_time(struct wakeup_source *ws,
+ ktime_t now) {}
+#endif
+
/**
* wakeup_source_deactivate - Mark given wakeup source as inactive.
* @ws: Wakeup source to handle.
@@ -442,6 +473,7 @@ EXPORT_SYMBOL_GPL(pm_stay_awake);
*/
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
+ unsigned int cnt, inpr, cec;
ktime_t duration;
ktime_t now;
@@ -468,14 +500,23 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
ws->max_time = duration;
+ ws->last_time = now;
del_timer(&ws->timer);
ws->timer_expires = 0;
+ if (ws->autosleep_enabled)
+ update_prevent_sleep_time(ws, now);
+
/*
* Increment the counter of registered wakeup events and decrement the
* counter of wakeup events in progress simultaneously.
*/
- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
+ cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+ trace_wakeup_source_deactivate(ws->name, cec);
+
+ split_counters(&cnt, &inpr);
+ if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
+ wake_up(&wakeup_count_wait_queue);
}
/**
@@ -536,8 +577,10 @@ static void pm_wakeup_timer_fn(unsigned long data)
spin_lock_irqsave(&ws->lock, flags);
if (ws->active && ws->timer_expires
- && time_after_eq(jiffies, ws->timer_expires))
+ && time_after_eq(jiffies, ws->timer_expires)) {
wakeup_source_deactivate(ws);
+ ws->expire_count++;
+ }
spin_unlock_irqrestore(&ws->lock, flags);
}
@@ -564,9 +607,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
spin_lock_irqsave(&ws->lock, flags);
- ws->event_count++;
- if (!ws->active)
- wakeup_source_activate(ws);
+ wakeup_source_report_event(ws);
if (!msec) {
wakeup_source_deactivate(ws);
@@ -609,24 +650,6 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
EXPORT_SYMBOL_GPL(pm_wakeup_event);
/**
- * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
- */
-static void pm_wakeup_update_hit_counts(void)
-{
- unsigned long flags;
- struct wakeup_source *ws;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
- spin_lock_irqsave(&ws->lock, flags);
- if (ws->active)
- ws->hit_count++;
- spin_unlock_irqrestore(&ws->lock, flags);
- }
- rcu_read_unlock();
-}
-
-/**
* pm_wakeup_pending - Check if power transition in progress should be aborted.
*
* Compare the current number of registered wakeup events with its preserved
@@ -648,32 +671,38 @@ bool pm_wakeup_pending(void)
events_check_enabled = !ret;
}
spin_unlock_irqrestore(&events_lock, flags);
- if (ret)
- pm_wakeup_update_hit_counts();
return ret;
}
/**
* pm_get_wakeup_count - Read the number of registered wakeup events.
* @count: Address to store the value at.
+ * @block: Whether or not to block.
*
- * Store the number of registered wakeup events at the address in @count. Block
- * if the current number of wakeup events being processed is nonzero.
+ * Store the number of registered wakeup events at the address in @count. If
+ * @block is set, block until the current number of wakeup events being
+ * processed is zero.
*
- * Return 'false' if the wait for the number of wakeup events being processed to
- * drop down to zero has been interrupted by a signal (and the current number
- * of wakeup events being processed is still nonzero). Otherwise return 'true'.
+ * Return 'false' if the current number of wakeup events being processed is
+ * nonzero. Otherwise return 'true'.
*/
-bool pm_get_wakeup_count(unsigned int *count)
+bool pm_get_wakeup_count(unsigned int *count, bool block)
{
unsigned int cnt, inpr;
- for (;;) {
- split_counters(&cnt, &inpr);
- if (inpr == 0 || signal_pending(current))
- break;
- pm_wakeup_update_hit_counts();
- schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
+ if (block) {
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait(&wakeup_count_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+ split_counters(&cnt, &inpr);
+ if (inpr == 0 || signal_pending(current))
+ break;
+
+ schedule();
+ }
+ finish_wait(&wakeup_count_wait_queue, &wait);
}
split_counters(&cnt, &inpr);
@@ -703,11 +732,37 @@ bool pm_save_wakeup_count(unsigned int count)
events_check_enabled = true;
}
spin_unlock_irq(&events_lock);
- if (!events_check_enabled)
- pm_wakeup_update_hit_counts();
return events_check_enabled;
}
+#ifdef CONFIG_PM_AUTOSLEEP
+/**
+ * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
+ * @set: Whether to set or to clear the autosleep_enabled flags.
+ */
+void pm_wakep_autosleep_enabled(bool set)
+{
+ struct wakeup_source *ws;
+ ktime_t now = ktime_get();
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ spin_lock_irq(&ws->lock);
+ if (ws->autosleep_enabled != set) {
+ ws->autosleep_enabled = set;
+ if (ws->active) {
+ if (set)
+ ws->start_prevent_time = now;
+ else
+ update_prevent_sleep_time(ws, now);
+ }
+ }
+ spin_unlock_irq(&ws->lock);
+ }
+ rcu_read_unlock();
+}
+#endif /* CONFIG_PM_AUTOSLEEP */
+
static struct dentry *wakeup_sources_stats_dentry;
/**
@@ -723,27 +778,37 @@ static int print_wakeup_source_stats(struct seq_file *m,
ktime_t max_time;
unsigned long active_count;
ktime_t active_time;
+ ktime_t prevent_sleep_time;
int ret;
spin_lock_irqsave(&ws->lock, flags);
total_time = ws->total_time;
max_time = ws->max_time;
+ prevent_sleep_time = ws->prevent_sleep_time;
active_count = ws->active_count;
if (ws->active) {
- active_time = ktime_sub(ktime_get(), ws->last_time);
+ ktime_t now = ktime_get();
+
+ active_time = ktime_sub(now, ws->last_time);
total_time = ktime_add(total_time, active_time);
if (active_time.tv64 > max_time.tv64)
max_time = active_time;
+
+ if (ws->autosleep_enabled)
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(now, ws->start_prevent_time));
} else {
active_time = ktime_set(0, 0);
}
- ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
- "%lld\t\t%lld\t\t%lld\t\t%lld\n",
- ws->name, active_count, ws->event_count, ws->hit_count,
+ ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
+ "%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+ ws->name, active_count, ws->event_count,
+ ws->wakeup_count, ws->expire_count,
ktime_to_ms(active_time), ktime_to_ms(total_time),
- ktime_to_ms(max_time), ktime_to_ms(ws->last_time));
+ ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
+ ktime_to_ms(prevent_sleep_time));
spin_unlock_irqrestore(&ws->lock, flags);
@@ -758,8 +823,9 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
- seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
- "active_since\ttotal_time\tmax_time\tlast_change\n");
+ seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+ "expire_count\tactive_since\ttotal_time\tmax_time\t"
+ "last_change\tprevent_suspend_time\n");
rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 9ef0a5326f17..6be390bd8bd1 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,6 +6,7 @@ config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI)
select LZO_COMPRESS
select LZO_DECOMPRESS
+ select IRQ_DOMAIN if REGMAP_IRQ
bool
config REGMAP_I2C
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 5f6b2478bf17..fa6bf5279d28 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -42,7 +42,7 @@ static int regmap_i2c_gather_write(void *context,
/* If the I2C controller can't do a gather tell the core, it
* will substitute in a linear write for us.
*/
- if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
return -ENOTSUPP;
xfer[0].addr = i2c->addr;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0bcda488f11c..c89aa01fb1de 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev,
map->lock = regmap_lock_mutex;
map->unlock = regmap_unlock_mutex;
}
- map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size += map->format.pad_bytes;
+ map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
+ config->val_bits + config->pad_bits, 8);
map->reg_shift = config->pad_bits % 8;
if (config->reg_stride)
map->reg_stride = config->reg_stride;
@@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev,
ret = regcache_init(map, config);
if (ret < 0)
- goto err_free_workbuf;
+ goto err_debugfs;
/* Add a devres resource for dev_get_regmap() */
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev,
err_cache:
regcache_exit(map);
-err_free_workbuf:
+err_debugfs:
+ regmap_debugfs_exit(map);
kfree(map->work_buf);
err_map:
kfree(map);
@@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
return ret;
}
+EXPORT_SYMBOL_GPL(regmap_reinit_cache);
/**
* regmap_exit(): Free a previously allocated register map
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index ba29b2e73d48..72b5e7280d14 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -42,7 +42,7 @@ struct device *soc_device_to_device(struct soc_device *soc_dev)
return &soc_dev->dev;
}
-static mode_t soc_attribute_mode(struct kobject *kobj,
+static umode_t soc_attribute_mode(struct kobject *kobj,
struct attribute *attr,
int index)
{
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 893f6e0c759f..bc6e89212ad3 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -30,6 +30,7 @@ void bcma_core_disable(struct bcma_device *core, u32 flags)
udelay(10);
bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+ bcma_aread32(core, BCMA_RESET_CTL);
udelay(1);
}
EXPORT_SYMBOL_GPL(bcma_core_disable);
@@ -77,7 +78,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
pr_err("HT force timeout\n");
break;
case BCMA_CLKMODE_DYNAMIC:
- pr_warn("Dynamic clockmode not supported yet!\n");
+ bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
break;
}
}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index a058842f14fd..61ce4054b3c3 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
break;
case 0x4331:
- /* BCM4331 workaround is SPROM-related, we put it in sprom.c */
+ case 43431:
+ /* Ext PA lines must be enabled for tx on BCM4331 */
+ bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
break;
case 43224:
if (bus->chipinfo.rev == 0) {
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 4d38ae179b48..c32ebd537abe 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -24,14 +24,12 @@ u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}
-#if 0
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
-#endif
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
{
@@ -170,13 +168,50 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}
+static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
+{
+ struct bcma_device *core = pc->core;
+ u16 val16, core_index;
+ uint regoff;
+
+ regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
+ core_index = (u16)core->core_index;
+
+ val16 = pcicore_read16(pc, regoff);
+ if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
+ != core_index) {
+ val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
+ (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
+ pcicore_write16(pc, regoff, val16);
+ }
+}
+
+/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
+/* Needs to happen when coming out of 'standby'/'hibernate' */
+static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
+{
+ u16 val16;
+ uint regoff;
+
+ regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);
+
+ val16 = pcicore_read16(pc, regoff);
+
+ if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
+ val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
+ pcicore_write16(pc, regoff, val16);
+ }
+}
+
/**************************************************
* Init.
**************************************************/
static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
+ bcma_core_pci_fixcfg(pc);
bcma_pcicore_serdes_workaround(pc);
+ bcma_core_pci_config_fixup(pc);
}
void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
@@ -197,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
bool enable)
{
- struct pci_dev *pdev = pc->core->bus->host_pci;
+ struct pci_dev *pdev;
u32 coremask, tmp;
int err = 0;
- if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+ if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
/* This bcma device is not on a PCI host-bus. So the IRQs are
* not routed through the PCI core.
* So we must not enable routing through the PCI core. */
goto out;
}
+ pdev = pc->core->bus->host_pci;
+
err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
if (err)
goto out;
@@ -224,3 +261,17 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
+
+void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
+{
+ u32 w;
+
+ w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
+ if (extend)
+ w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
+ else
+ w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
+ bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
+ bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);
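For illustration only (not part of the patch): bcma_core_pci_extend_L1timer() is exported so a driver for another core on the same bus can lengthen the ASPM L1 entry timer while it is active. A hedged usage sketch (the calling functions are invented; the drv_pci field path follows the bcma headers of this era):

/* Invented call sites: extend the PCIe L1 timer while the device is in
 * use, restore the default when it goes idle again. */
static void example_core_up(struct bcma_device *core)
{
	bcma_core_pci_extend_L1timer(&core->bus->drv_pci, true);
}

static void example_core_down(struct bcma_device *core)
{
	bcma_core_pci_extend_L1timer(&core->bus->drv_pci, false);
}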
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index d2097a11c3c7..b9a86edfec39 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -119,7 +119,7 @@ static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap_nocache(addr, sizeof(val));
if (!mmio)
goto out;
@@ -171,7 +171,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
addr |= (off & 0xfc);
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap_nocache(addr, sizeof(val));
if (!mmio)
goto out;
}
@@ -180,7 +180,7 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
if (unlikely(!addr))
goto out;
err = -ENOMEM;
- mmio = ioremap_nocache(addr, len);
+ mmio = ioremap_nocache(addr, sizeof(val));
if (!mmio)
goto out;
@@ -491,8 +491,8 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
* to non-MIPS platform. */
- io_map_base = (unsigned long)ioremap_nocache(BCMA_SOC_PCI_MEM,
- 0x04000000);
+ io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
+ resource_size(&pc_host->mem_resource));
pc_host->pci_controller.io_map_base = io_map_base;
set_io_port_base(pc_host->pci_controller.io_map_base);
/* Give some time to the PCI controller to configure itself with the new
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index e3928d68802b..6c05cf470f96 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -201,6 +201,9 @@ static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
bus->hosttype = BCMA_HOSTTYPE_PCI;
bus->ops = &bcma_host_pci_ops;
+ bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
+ bus->boardinfo.type = bus->host_pci->subsystem_device;
+
/* Register */
err = bcma_bus_register(bus);
if (err)
@@ -222,7 +225,7 @@ err_kfree_bus:
return err;
}
-static void bcma_host_pci_remove(struct pci_dev *dev)
+static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
{
struct bcma_bus *bus = pci_get_drvdata(dev);
@@ -277,7 +280,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
.name = "bcma-pci-bridge",
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
- .remove = bcma_host_pci_remove,
+ .remove = __devexit_p(bcma_host_pci_remove),
.driver.pm = BCMA_PM_OPS,
};
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 3bea7fe25b20..5ed0718fc660 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -19,7 +19,14 @@ struct bcma_device_id_name {
u16 id;
const char *name;
};
-struct bcma_device_id_name bcma_device_names[] = {
+
+static const struct bcma_device_id_name bcma_arm_device_names[] = {
+ { BCMA_CORE_ARM_1176, "ARM 1176" },
+ { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
+ { BCMA_CORE_ARM_CM3, "ARM CM3" },
+};
+
+static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_OOB_ROUTER, "OOB Router" },
{ BCMA_CORE_INVALID, "Invalid" },
{ BCMA_CORE_CHIPCOMMON, "ChipCommon" },
@@ -27,7 +34,6 @@ struct bcma_device_id_name bcma_device_names[] = {
{ BCMA_CORE_SRAM, "SRAM" },
{ BCMA_CORE_SDRAM, "SDRAM" },
{ BCMA_CORE_PCI, "PCI" },
- { BCMA_CORE_MIPS, "MIPS" },
{ BCMA_CORE_ETHERNET, "Fast Ethernet" },
{ BCMA_CORE_V90, "V90" },
{ BCMA_CORE_USB11_HOSTDEV, "USB 1.1 Hostdev" },
@@ -44,7 +50,6 @@ struct bcma_device_id_name bcma_device_names[] = {
{ BCMA_CORE_PHY_A, "PHY A" },
{ BCMA_CORE_PHY_B, "PHY B" },
{ BCMA_CORE_PHY_G, "PHY G" },
- { BCMA_CORE_MIPS_3302, "MIPS 3302" },
{ BCMA_CORE_USB11_HOST, "USB 1.1 Host" },
{ BCMA_CORE_USB11_DEV, "USB 1.1 Device" },
{ BCMA_CORE_USB20_HOST, "USB 2.0 Host" },
@@ -58,15 +63,11 @@ struct bcma_device_id_name bcma_device_names[] = {
{ BCMA_CORE_PHY_N, "PHY N" },
{ BCMA_CORE_SRAM_CTL, "SRAM Controller" },
{ BCMA_CORE_MINI_MACPHY, "Mini MACPHY" },
- { BCMA_CORE_ARM_1176, "ARM 1176" },
- { BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
{ BCMA_CORE_PHY_LP, "PHY LP" },
{ BCMA_CORE_PMU, "PMU" },
{ BCMA_CORE_PHY_SSN, "PHY SSN" },
{ BCMA_CORE_SDIO_DEV, "SDIO Device" },
- { BCMA_CORE_ARM_CM3, "ARM CM3" },
{ BCMA_CORE_PHY_HT, "PHY HT" },
- { BCMA_CORE_MIPS_74K, "MIPS 74K" },
{ BCMA_CORE_MAC_GBIT, "GBit MAC" },
{ BCMA_CORE_DDR12_MEM_CTL, "DDR1/DDR2 Memory Controller" },
{ BCMA_CORE_PCIE_RC, "PCIe Root Complex" },
@@ -79,16 +80,41 @@ struct bcma_device_id_name bcma_device_names[] = {
{ BCMA_CORE_SHIM, "SHIM" },
{ BCMA_CORE_DEFAULT, "Default" },
};
-const char *bcma_device_name(struct bcma_device_id *id)
+
+static const struct bcma_device_id_name bcma_mips_device_names[] = {
+ { BCMA_CORE_MIPS, "MIPS" },
+ { BCMA_CORE_MIPS_3302, "MIPS 3302" },
+ { BCMA_CORE_MIPS_74K, "MIPS 74K" },
+};
+
+static const char *bcma_device_name(const struct bcma_device_id *id)
{
- int i;
+ const struct bcma_device_id_name *names;
+ int size, i;
+
+ /* search manufacturer specific names */
+ switch (id->manuf) {
+ case BCMA_MANUF_ARM:
+ names = bcma_arm_device_names;
+ size = ARRAY_SIZE(bcma_arm_device_names);
+ break;
+ case BCMA_MANUF_BCM:
+ names = bcma_bcm_device_names;
+ size = ARRAY_SIZE(bcma_bcm_device_names);
+ break;
+ case BCMA_MANUF_MIPS:
+ names = bcma_mips_device_names;
+ size = ARRAY_SIZE(bcma_mips_device_names);
+ break;
+ default:
+ return "UNKNOWN";
+ }
- if (id->manuf == BCMA_MANUF_BCM) {
- for (i = 0; i < ARRAY_SIZE(bcma_device_names); i++) {
- if (bcma_device_names[i].id == id->id)
- return bcma_device_names[i].name;
- }
+ for (i = 0; i < size; i++) {
+ if (names[i].id == id->id)
+ return names[i].name;
}
+
return "UNKNOWN";
}
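For illustration only (not part of the patch): after the split, the lookup first picks the per-manufacturer table, then does a linear search on the core id, and falls back to "UNKNOWN" for anything unmatched. A hedged sketch of a call site inside scan.c (the function and the id values are invented):

/* Invented call site; real ids come from the EROM scan. */
static void example_print_core(void)
{
	const struct bcma_device_id id = {
		.manuf = BCMA_MANUF_ARM,
		.id = BCMA_CORE_ARM_CM3,
	};

	pr_info("Core found: %s (manuf 0x%03X, id 0x%03X)\n",
		bcma_device_name(&id), id.manuf, id.id);
}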
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 3e2a6002aae6..f16f42d36071 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -181,6 +181,22 @@ static int bcma_sprom_valid(const u16 *sprom)
#define SPEX(_field, _offset, _mask, _shift) \
bus->sprom._field = ((sprom[SPOFF(_offset)] & (_mask)) >> (_shift))
+#define SPEX32(_field, _offset, _mask, _shift) \
+ bus->sprom._field = ((((u32)sprom[SPOFF((_offset)+2)] << 16 | \
+ sprom[SPOFF(_offset)]) & (_mask)) >> (_shift))
+
+#define SPEX_ARRAY8(_field, _offset, _mask, _shift) \
+ do { \
+ SPEX(_field[0], _offset + 0, _mask, _shift); \
+ SPEX(_field[1], _offset + 2, _mask, _shift); \
+ SPEX(_field[2], _offset + 4, _mask, _shift); \
+ SPEX(_field[3], _offset + 6, _mask, _shift); \
+ SPEX(_field[4], _offset + 8, _mask, _shift); \
+ SPEX(_field[5], _offset + 10, _mask, _shift); \
+ SPEX(_field[6], _offset + 12, _mask, _shift); \
+ SPEX(_field[7], _offset + 14, _mask, _shift); \
+ } while (0)
+
static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
{
u16 v, o;
@@ -243,7 +259,8 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, ~0, 0);
SPEX(boardflags2_hi, SSB_SPROM8_BFL2HI, ~0, 0);
- SPEX(country_code, SSB_SPROM8_CCODE, ~0, 0);
+ SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
/* Extract cores power info info */
for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
@@ -298,6 +315,136 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
SSB_SROM8_FEM_TR_ISO_SHIFT);
SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_ANTSWLUT,
SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+ SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A,
+ SSB_SPROM8_ANTAVAIL_A_SHIFT);
+ SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG,
+ SSB_SPROM8_ANTAVAIL_BG_SHIFT);
+ SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0);
+ SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG,
+ SSB_SPROM8_ITSSI_BG_SHIFT);
+ SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0);
+ SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A,
+ SSB_SPROM8_ITSSI_A_SHIFT);
+ SPEX(maxpwr_ah, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AH_MASK, 0);
+ SPEX(maxpwr_al, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AL_MASK,
+ SSB_SPROM8_MAXP_AL_SHIFT);
+ SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0);
+ SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1,
+ SSB_SPROM8_GPIOA_P1_SHIFT);
+ SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0);
+ SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3,
+ SSB_SPROM8_GPIOB_P3_SHIFT);
+ SPEX(tri2g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI2G, 0);
+ SPEX(tri5g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI5G,
+ SSB_SPROM8_TRI5G_SHIFT);
+ SPEX(tri5gl, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GL, 0);
+ SPEX(tri5gh, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GH,
+ SSB_SPROM8_TRI5GH_SHIFT);
+ SPEX(rxpo2g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO2G,
+ SSB_SPROM8_RXPO2G_SHIFT);
+ SPEX(rxpo5g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO5G,
+ SSB_SPROM8_RXPO5G_SHIFT);
+ SPEX(rssismf2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMF2G, 0);
+ SPEX(rssismc2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMC2G,
+ SSB_SPROM8_RSSISMC2G_SHIFT);
+ SPEX(rssisav2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISAV2G,
+ SSB_SPROM8_RSSISAV2G_SHIFT);
+ SPEX(bxa2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_BXA2G,
+ SSB_SPROM8_BXA2G_SHIFT);
+ SPEX(rssismf5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMF5G, 0);
+ SPEX(rssismc5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMC5G,
+ SSB_SPROM8_RSSISMC5G_SHIFT);
+ SPEX(rssisav5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISAV5G,
+ SSB_SPROM8_RSSISAV5G_SHIFT);
+ SPEX(bxa5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_BXA5G,
+ SSB_SPROM8_BXA5G_SHIFT);
+
+ SPEX(pa0b0, SSB_SPROM8_PA0B0, ~0, 0);
+ SPEX(pa0b1, SSB_SPROM8_PA0B1, ~0, 0);
+ SPEX(pa0b2, SSB_SPROM8_PA0B2, ~0, 0);
+ SPEX(pa1b0, SSB_SPROM8_PA1B0, ~0, 0);
+ SPEX(pa1b1, SSB_SPROM8_PA1B1, ~0, 0);
+ SPEX(pa1b2, SSB_SPROM8_PA1B2, ~0, 0);
+ SPEX(pa1lob0, SSB_SPROM8_PA1LOB0, ~0, 0);
+ SPEX(pa1lob1, SSB_SPROM8_PA1LOB1, ~0, 0);
+ SPEX(pa1lob2, SSB_SPROM8_PA1LOB2, ~0, 0);
+ SPEX(pa1hib0, SSB_SPROM8_PA1HIB0, ~0, 0);
+ SPEX(pa1hib1, SSB_SPROM8_PA1HIB1, ~0, 0);
+ SPEX(pa1hib2, SSB_SPROM8_PA1HIB2, ~0, 0);
+ SPEX(cck2gpo, SSB_SPROM8_CCK2GPO, ~0, 0);
+ SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, ~0, 0);
+ SPEX32(ofdm5glpo, SSB_SPROM8_OFDM5GLPO, ~0, 0);
+ SPEX32(ofdm5gpo, SSB_SPROM8_OFDM5GPO, ~0, 0);
+ SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, ~0, 0);
+
+ /* Extract the antenna gain values. */
+ SPEX(antenna_gain.a0, SSB_SPROM8_AGAIN01,
+ SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT);
+ SPEX(antenna_gain.a1, SSB_SPROM8_AGAIN01,
+ SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT);
+ SPEX(antenna_gain.a2, SSB_SPROM8_AGAIN23,
+ SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT);
+ SPEX(antenna_gain.a3, SSB_SPROM8_AGAIN23,
+ SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT);
+
+ SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
+ SSB_SPROM8_LEDDC_ON_SHIFT);
+ SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
+ SSB_SPROM8_LEDDC_OFF_SHIFT);
+
+ SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
+ SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
+ SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
+ SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
+ SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
+ SSB_SPROM8_TXRXC_SWITCH_SHIFT);
+
+ SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);
+
+ SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);
+
+ SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
+ SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
+ SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
+ SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
+ SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
+ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
+ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
+ SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
+ SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
+ SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
+ SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
+ SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
+ SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
+ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
+ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
+ SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
+ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
+ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
+ SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
+ SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);
+
+ SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
+ SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
+ SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
+ SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);
+
+ SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
+ SSB_SPROM8_THERMAL_TRESH_SHIFT);
+ SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
+ SSB_SPROM8_THERMAL_OFFSET_SHIFT);
+ SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
+ SSB_SPROM8_TEMPDELTA_PHYCAL,
+ SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
+ SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
+ SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
+ SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
+ SSB_SPROM8_TEMPDELTA_HYSTERESIS,
+ SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
}
/*
@@ -432,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus)
if (!sprom)
return -ENOMEM;
- if (bus->chipinfo.id == 0x4331)
+ if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
pr_debug("SPROM offset 0x%x\n", offset);
bcma_sprom_read(bus, offset, sprom);
- if (bus->chipinfo.id == 0x4331)
+ if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
err = bcma_sprom_valid(sprom);
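For illustration only (not part of the patch): SPEX32 stitches two consecutive 16-bit SPROM words into one 32-bit field, with the word at _offset as the low half and the word at _offset + 2 as the high half. Expanded by hand for one call from the hunk above (mask ~0 and shift 0 are no-ops here):

/* Hand expansion of SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, ~0, 0);
 * SPOFF() converts the byte offset into an index into the u16 array. */
bus->sprom.ofdm2gpo =
	((((u32)sprom[SPOFF(SSB_SPROM8_OFDM2GPO + 2)] << 16 |
	   sprom[SPOFF(SSB_SPROM8_OFDM2GPO)]) & ~0) >> 0);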
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index cf0e63dd97da..e54e31b02b88 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -65,39 +65,80 @@ struct drbd_atodb_wait {
int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
+void *drbd_md_get_buffer(struct drbd_conf *mdev)
+{
+ int r;
+
+ wait_event(mdev->misc_wait,
+ (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
+ mdev->state.disk <= D_FAILED);
+
+ return r ? NULL : page_address(mdev->md_io_page);
+}
+
+void drbd_md_put_buffer(struct drbd_conf *mdev)
+{
+ if (atomic_dec_and_test(&mdev->md_io_in_use))
+ wake_up(&mdev->misc_wait);
+}
+
+static bool md_io_allowed(struct drbd_conf *mdev)
+{
+ enum drbd_disk_state ds = mdev->state.disk;
+ return ds >= D_NEGOTIATING || ds == D_ATTACHING;
+}
+
+void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ unsigned int *done)
+{
+ long dt = bdev->dc.disk_timeout * HZ / 10;
+ if (dt == 0)
+ dt = MAX_SCHEDULE_TIMEOUT;
+
+ dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
+ if (dt == 0)
+ dev_err(DEV, "meta-data IO operation timed out\n");
+}
+
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
int rw, int size)
{
struct bio *bio;
- struct drbd_md_io md_io;
int ok;
- md_io.mdev = mdev;
- init_completion(&md_io.event);
- md_io.error = 0;
+ mdev->md_io.done = 0;
+ mdev->md_io.error = -ENODEV;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
ok = (bio_add_page(bio, page, size, 0) == size);
if (!ok)
goto out;
- bio->bi_private = &md_io;
+ bio->bi_private = &mdev->md_io;
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
+ ok = 0;
+ goto out;
+ }
+
+ bio_get(bio); /* one bio_put() is in the completion handler */
+ atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_for_completion(&md_io.event);
- ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
+ wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
+ ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
out:
bio_put(bio);
@@ -111,7 +152,7 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
int offset = 0;
struct page *iop = mdev->md_io_page;
- D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
+ D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
@@ -328,8 +369,13 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
return 1;
}
- mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
- buffer = (struct al_transaction *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+ if (!buffer) {
+ dev_err(DEV, "disk failed while waiting for md_io buffer\n");
+ complete(&((struct update_al_work *)w)->event);
+ put_ldev(mdev);
+ return 1;
+ }
buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
@@ -374,7 +420,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
mdev->al_tr_number++;
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
@@ -443,8 +489,9 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
/* lock out all other meta data io for now,
* and make sure the page is mapped.
*/
- mutex_lock(&mdev->md_io_mutex);
- buffer = page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ return 0;
/* Find the valid transaction in the log */
for (i = 0; i <= mx; i++) {
@@ -452,7 +499,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (rv == 0)
continue;
if (rv == -1) {
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 0;
}
cnr = be32_to_cpu(buffer->tr_number);
@@ -478,7 +525,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!found_valid) {
dev_warn(DEV, "No usable activity log found.\n");
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 1;
}
@@ -493,7 +540,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
rv = drbd_al_read_tr(mdev, bdev, buffer, i);
ERR_IF(rv == 0) goto cancel;
if (rv == -1) {
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 0;
}
@@ -534,7 +581,7 @@ cancel:
mdev->al_tr_pos = 0;
/* ok, we are done with it */
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
transactions, active_extents);
@@ -671,16 +718,20 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
else
ext->rs_failed += count;
if (ext->rs_left < ext->rs_failed) {
- dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
- "rs_failed=%d count=%d\n",
+ dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+ "rs_failed=%d count=%d cstate=%s\n",
(unsigned long long)sector,
ext->lce.lc_number, ext->rs_left,
- ext->rs_failed, count);
- dump_stack();
-
- lc_put(mdev->resync, &ext->lce);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return;
+ ext->rs_failed, count,
+ drbd_conn_str(mdev->state.conn));
+
+ /* We don't expect to be able to clear more bits
+ * than have been set when we originally counted
+ * the set bits to cache that value in ext->rs_left.
+ * Whatever the reason (disconnect during resync,
+ * delayed local completion of an application write),
+ * try to fix it up by recounting here. */
+ ext->rs_left = drbd_bm_e_weight(mdev, enr);
}
} else {
/* Normally this element should be in the cache,
@@ -1192,6 +1243,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
put_ldev(mdev);
}
spin_unlock_irq(&mdev->al_lock);
+ wake_up(&mdev->al_wait);
return 0;
}
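For illustration only (not part of the patch): drbd_md_get_buffer()/drbd_md_put_buffer() above replace md_io_mutex with an atomic counter used as a try-lock; atomic_cmpxchg(0, 1) either takes ownership or the caller sleeps on misc_wait until the holder's put (or a disk failure) wakes it. A hedged, generic sketch of the same pattern with invented names:

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/wait.h>

static atomic_t buf_in_use = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(buf_wait);

/* Returns true once the single buffer slot is owned, false if the
 * abort condition became true first (e.g. the disk failed). */
static bool example_acquire(bool (*aborted)(void))
{
	int r;

	wait_event(buf_wait,
		   (r = atomic_cmpxchg(&buf_in_use, 0, 1)) == 0 || aborted());
	return r == 0;
}

static void example_release(void)
{
	if (atomic_dec_and_test(&buf_in_use))
		wake_up(&buf_wait);
}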
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3030201c69d8..fcb956bb4b4c 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -205,7 +205,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
- page_private(page) |= idx;
+ set_page_private(page, idx);
}
static unsigned long bm_page_to_idx(struct page *page)
@@ -886,12 +886,21 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
struct bm_aio_ctx {
struct drbd_conf *mdev;
atomic_t in_flight;
- struct completion done;
+ unsigned int done;
unsigned flags;
#define BM_AIO_COPY_PAGES 1
int error;
+ struct kref kref;
};
+static void bm_aio_ctx_destroy(struct kref *kref)
+{
+ struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
+
+ put_ldev(ctx->mdev);
+ kfree(ctx);
+}
+
/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
@@ -930,20 +939,21 @@ static void bm_async_io_complete(struct bio *bio, int error)
bm_page_unlock_io(mdev, idx);
- /* FIXME give back to page pool */
if (ctx->flags & BM_AIO_COPY_PAGES)
- put_page(bio->bi_io_vec[0].bv_page);
+ mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
bio_put(bio);
- if (atomic_dec_and_test(&ctx->in_flight))
- complete(&ctx->done);
+ if (atomic_dec_and_test(&ctx->in_flight)) {
+ ctx->done = 1;
+ wake_up(&mdev->misc_wait);
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+ }
}
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
- /* we are process context. we always get a bio */
- struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+ struct bio *bio = bio_alloc_drbd(GFP_NOIO);
struct drbd_conf *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
struct page *page;
@@ -966,10 +976,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
- /* FIXME alloc_page is good enough for now, but actually needs
- * to use pre-allocated page pool */
void *src, *dest;
- page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
+ page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
dest = kmap_atomic(page);
src = kmap_atomic(b->bm_pages[page_nr]);
memcpy(dest, src, PAGE_SIZE);
@@ -981,6 +989,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;
+ /* bio_add_page of a single page to an empty bio will always succeed,
+ * according to api. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = bm_async_io_complete;
@@ -999,14 +1009,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
-static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
+static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
- struct bm_aio_ctx ctx = {
- .mdev = mdev,
- .in_flight = ATOMIC_INIT(1),
- .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
- .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
- };
+ struct bm_aio_ctx *ctx;
struct drbd_bitmap *b = mdev->bitmap;
int num_pages, i, count = 0;
unsigned long now;
@@ -1021,7 +1026,27 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
* For lazy writeout, we don't care for ongoing changes to the bitmap,
* as we submit copies of pages anyways.
*/
- if (!ctx.flags)
+
+ ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+ if (!ctx)
+ return -ENOMEM;
+
+ *ctx = (struct bm_aio_ctx) {
+ .mdev = mdev,
+ .in_flight = ATOMIC_INIT(1),
+ .done = 0,
+ .flags = flags,
+ .error = 0,
+ .kref = { ATOMIC_INIT(2) },
+ };
+
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
+ kfree(ctx);
+ return -ENODEV;
+ }
+
+ if (!ctx->flags)
WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
num_pages = b->bm_number_of_pages;
@@ -1046,29 +1071,38 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
continue;
}
}
- atomic_inc(&ctx.in_flight);
- bm_page_io_async(&ctx, i, rw);
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i, rw);
++count;
cond_resched();
}
/*
- * We initialize ctx.in_flight to one to make sure bm_async_io_complete
- * will not complete() early, and decrement / test it here. If there
+ * We initialize ctx->in_flight to one to make sure bm_async_io_complete
+ * will not set ctx->done early, and decrement / test it here. If there
* are still some bios in flight, we need to wait for them here.
+ * If all IO is done already (or nothing had been submitted), there is
+ * no need to wait. Still, we need to put the kref associated with the
+ * "in_flight reached zero, all done" event.
*/
- if (!atomic_dec_and_test(&ctx.in_flight))
- wait_for_completion(&ctx.done);
+ if (!atomic_dec_and_test(&ctx->in_flight))
+ wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ else
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+
dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
rw == WRITE ? "WRITE" : "READ",
count, jiffies - now);
- if (ctx.error) {
+ if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
drbd_chk_io_error(mdev, 1, true);
- err = -EIO; /* ctx.error ? */
+ err = -EIO; /* ctx->error ? */
}
+ if (atomic_read(&ctx->in_flight))
+ err = -EIO; /* Disk failed during IO... */
+
now = jiffies;
if (rw == WRITE) {
drbd_md_flush(mdev);
@@ -1082,6 +1116,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
}
@@ -1091,7 +1126,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
*/
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, READ, 0);
+ return bm_rw(mdev, READ, 0, 0);
}
/**
@@ -1102,7 +1137,7 @@ int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
*/
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, WRITE, 0);
+ return bm_rw(mdev, WRITE, 0, 0);
}
/**
@@ -1112,7 +1147,23 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
*/
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
- return bm_rw(mdev, WRITE, upper_idx);
+ return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
+}
+
+/**
+ * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
+ * @mdev: DRBD device.
+ *
+ * Will only write pages that have changed since last IO.
+ * In contrast to drbd_bm_write(), this will copy the bitmap pages
+ * to temporary writeout pages. It is intended to trigger a full write-out
+ * while still allowing the bitmap to change, for example if a resync or online
+ * verify is aborted due to a failed peer disk, while local IO continues, or
+ * pending resync acks are still being processed.
+ */
+int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
+{
+ return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
@@ -1130,28 +1181,45 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l
*/
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
- struct bm_aio_ctx ctx = {
+ struct bm_aio_ctx *ctx;
+ int err;
+
+ if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
+ dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
+ return 0;
+ }
+
+ ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+ if (!ctx)
+ return -ENOMEM;
+
+ *ctx = (struct bm_aio_ctx) {
.mdev = mdev,
.in_flight = ATOMIC_INIT(1),
- .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+ .done = 0,
.flags = BM_AIO_COPY_PAGES,
+ .error = 0,
+ .kref = { ATOMIC_INIT(2) },
};
- if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
- dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
- return 0;
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
+ kfree(ctx);
+ return -ENODEV;
}
- bm_page_io_async(&ctx, idx, WRITE_SYNC);
- wait_for_completion(&ctx.done);
+ bm_page_io_async(ctx, idx, WRITE_SYNC);
+ wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
- if (ctx.error)
+ if (ctx->error)
drbd_chk_io_error(mdev, 1, true);
/* that should force detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
- return ctx.error;
+ err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+ return err;
}
/* NOTE
@@ -1407,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
first_word = 0;
spin_lock_irq(&b->bm_lock);
}
-
/* last page (respectively only page, for first page == last page) */
last_word = MLPP(el >> LN2_BPL);
- bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+ /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+ * ==> e = 32767, el = 32768, last_page = 2,
+ * and now last_word = 0.
+ * We do not want to touch last_page in this case,
+ * as we did not allocate it, it is not present in bitmap->bm_pages.
+ */
+ if (last_word)
+ bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
/* possibly trailing bits.
* example: (e & 63) == 63, el will be e+1.
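For illustration only (not part of the patch): the bm_aio_ctx kref starts at 2 because two parties may be the last to touch the context, the submitter in bm_rw()/drbd_bm_write_page() and the completion handler of the final bio; whichever puts last runs bm_aio_ctx_destroy(), which drops the ldev reference and frees the context. A reduced sketch of the completion side (essentially bm_async_io_complete() cut down to its lifetime handling):

/* The last bio to finish publishes "done", wakes the waiter and drops
 * the kref that stands for the "all in-flight IO finished" event. */
static void example_complete(struct bm_aio_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&ctx->mdev->misc_wait);
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	}
}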
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8d680562ba73..02f013a073a7 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -712,7 +712,6 @@ struct drbd_request {
struct list_head tl_requests; /* ring list in the transfer log */
struct bio *master_bio; /* master bio pointer */
unsigned long rq_state; /* see comments above _req_mod() */
- int seq_num;
unsigned long start_time;
};
@@ -851,6 +850,7 @@ enum {
NEW_CUR_UUID, /* Create new current UUID when thawing IO */
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
+ STATE_SENT, /* Do not change state/UUIDs while this is set */
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -862,31 +862,30 @@ enum bm_flag {
BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
/* currently locked for bulk operation */
- BM_LOCKED_MASK = 0x7,
+ BM_LOCKED_MASK = 0xf,
/* in detail, that is: */
BM_DONT_CLEAR = 0x1,
BM_DONT_SET = 0x2,
BM_DONT_TEST = 0x4,
+ /* so we can mark it locked for bulk operation,
+ * and still allow all non-bulk operations */
+ BM_IS_LOCKED = 0x8,
+
/* (test bit, count bit) allowed (common case) */
- BM_LOCKED_TEST_ALLOWED = 0x3,
+ BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
/* testing bits, as well as setting new bits allowed, but clearing bits
* would be unexpected. Used during bitmap receive. Setting new bits
* requires sending of "out-of-sync" information, though. */
- BM_LOCKED_SET_ALLOWED = 0x1,
+ BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
- /* clear is not expected while bitmap is locked for bulk operation */
+ /* for drbd_bm_write_copy_pages, everything is allowed,
+ * only concurrent bulk operations are locked out. */
+ BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
-
-/* TODO sort members for performance
- * MAYBE group them further */
-
-/* THINK maybe we actually want to use the default "event/%s" worker threads
- * or similar in linux 2.6, which uses per cpu data and threads.
- */
struct drbd_work_queue {
struct list_head q;
struct semaphore s; /* producers up it, worker down()s it */
@@ -938,8 +937,7 @@ struct drbd_backing_dev {
};
struct drbd_md_io {
- struct drbd_conf *mdev;
- struct completion event;
+ unsigned int done;
int error;
};
@@ -1022,6 +1020,7 @@ struct drbd_conf {
struct drbd_tl_epoch *newest_tle;
struct drbd_tl_epoch *oldest_tle;
struct list_head out_of_sequence_requests;
+ struct list_head barrier_acked_requests;
struct hlist_head *tl_hash;
unsigned int tl_hash_s;
@@ -1056,6 +1055,8 @@ struct drbd_conf {
struct crypto_hash *csums_tfm;
struct crypto_hash *verify_tfm;
+ unsigned long last_reattach_jif;
+ unsigned long last_reconnect_jif;
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread asender;
@@ -1094,7 +1095,8 @@ struct drbd_conf {
wait_queue_head_t ee_wait;
struct page *md_io_page; /* one page buffer for md_io */
struct page *md_io_tmpp; /* for logical_block_size != 512 */
- struct mutex md_io_mutex; /* protects the md_io_buffer */
+ struct drbd_md_io md_io;
+ atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */
spinlock_t al_lock;
wait_queue_head_t al_wait;
struct lru_cache *act_log; /* activity log */
@@ -1228,8 +1230,8 @@ extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int _drbd_send_state(struct drbd_conf *mdev);
-extern int drbd_send_state(struct drbd_conf *mdev);
+extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
enum drbd_packets cmd, struct p_header80 *h,
size_t size, unsigned msg_flags);
@@ -1461,6 +1463,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
unsigned long al_enr);
extern size_t drbd_bm_words(struct drbd_conf *mdev);
@@ -1493,11 +1496,38 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;
-extern struct page *drbd_pp_pool; /* drbd's page pool */
+/* drbd's page pool, used to buffer data received from the peer,
+ * or data requested by the peer.
+ *
+ * This does not have an emergency reserve.
+ *
+ * When allocating from this pool, it first takes pages from the pool.
+ * Only if the pool is depleted will it try to allocate from the system.
+ *
+ * The assumption is that pages taken from this pool will be processed,
+ * and given back, "quickly", and then can be recycled, so we can avoid
+ * frequent calls to alloc_page(), and still will be able to make progress even
+ * under memory pressure.
+ */
+extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;
+/* We also need a standard (emergency-reserve backed) page pool
+ * for meta data IO (activity log, bitmap).
+ * We can keep it global, as long as it is used as "N pages at a time".
+ * 128 should be plenty, currently we probably can get away with as few as 1.
+ */
+#define DRBD_MIN_POOL_PAGES 128
+extern mempool_t *drbd_md_io_page_pool;
+
+/* We also need to make sure we get a bio
+ * when we need it for housekeeping purposes */
+extern struct bio_set *drbd_md_io_bio_set;
+/* to allocate from that set */
+extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
+
extern rwlock_t global_state_lock;
extern struct drbd_conf *drbd_new_device(unsigned int minor);
@@ -1536,8 +1566,12 @@ extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
extern int drbd_resync_finished(struct drbd_conf *mdev);
/* maybe rather drbd_main.c ? */
+extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
+extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
+ struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ unsigned int *done);
extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
@@ -1754,19 +1788,6 @@ static inline struct page *page_chain_next(struct page *page)
#define page_chain_for_each_safe(page, n) \
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (page_count(bvec->bv_page) > 1)
- return 1;
- }
-
- return 0;
-}
-
static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
{
struct page *page = e->pages;
@@ -1777,7 +1798,6 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
return 0;
}
-
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
@@ -2230,7 +2250,7 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* Note: currently we don't support such large bitmaps on 32bit
* arch anyways, but no harm done to be prepared for it here.
*/
- unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+ unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
unsigned long left = *bits_left >> shift;
unsigned long total = 1UL + (mdev->rs_total >> shift);
unsigned long tmp = 1000UL - left * 1000UL/total;
@@ -2306,12 +2326,12 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
case D_OUTDATED:
case D_CONSISTENT:
case D_UP_TO_DATE:
+ case D_FAILED:
/* disk state is stable as well. */
break;
/* no new io accepted during tansitional states */
case D_ATTACHING:
- case D_FAILED:
case D_NEGOTIATING:
case D_UNKNOWN:
case D_MASK:
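For illustration only (not part of the patch): drbd_md_io_page_pool and drbd_md_io_bio_set declared above are a plain page mempool and a bio set with DRBD_MIN_POOL_PAGES as the guaranteed reserve. Their creation lives in drbd_main.c and is not shown in full in this diff; it presumably boils down to something like the following (error unwinding omitted):

static int example_create_md_io_pools(void)
{
	/* emergency-reserve backed pool of single pages (order 0) */
	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (!drbd_md_io_page_pool)
		return -ENOMEM;

	/* bio set so meta-data IO can always get a struct bio */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (!drbd_md_io_bio_set)
		return -ENOMEM;

	return 0;
}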
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 211fc44f84be..920ede2829d6 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -139,6 +139,8 @@ struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
+mempool_t *drbd_md_io_page_pool;
+struct bio_set *drbd_md_io_bio_set;
/* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first.
@@ -159,7 +161,24 @@ static const struct block_device_operations drbd_ops = {
.release = drbd_release,
};
-#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
+static void bio_destructor_drbd(struct bio *bio)
+{
+ bio_free(bio, drbd_md_io_bio_set);
+}
+
+struct bio *bio_alloc_drbd(gfp_t gfp_mask)
+{
+ struct bio *bio;
+
+ if (!drbd_md_io_bio_set)
+ return bio_alloc(gfp_mask, 1);
+
+ bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ if (!bio)
+ return NULL;
+ bio->bi_destructor = bio_destructor_drbd;
+ return bio;
+}
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
@@ -208,6 +227,7 @@ static int tl_init(struct drbd_conf *mdev)
mdev->oldest_tle = b;
mdev->newest_tle = b;
INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+ INIT_LIST_HEAD(&mdev->barrier_acked_requests);
mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;
@@ -246,9 +266,7 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
new->n_writes = 0;
newest_before = mdev->newest_tle;
- /* never send a barrier number == 0, because that is special-cased
- * when using TCQ for our write ordering code */
- new->br_number = (newest_before->br_number+1) ?: 1;
+ new->br_number = newest_before->br_number+1;
if (mdev->newest_tle != new) {
mdev->newest_tle->next = new;
mdev->newest_tle = new;
@@ -311,7 +329,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
These have been list_move'd to the out_of_sequence_requests list in
_req_mod(, barrier_acked) above.
*/
- list_del_init(&b->requests);
+ list_splice_init(&b->requests, &mdev->barrier_acked_requests);
nob = b->next;
if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
@@ -411,6 +429,23 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
b = tmp;
list_splice(&carry_reads, &b->requests);
}
+
+ /* Actions operating on the disk state, also want to work on
+ requests that got barrier acked. */
+ switch (what) {
+ case fail_frozen_disk_io:
+ case restart_frozen_disk_io:
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ _req_mod(req, what);
+ }
+
+ case connection_lost_while_pending:
+ case resend:
+ break;
+ default:
+ dev_err(DEV, "what = %d in _tl_restart()\n", what);
+ }
}
@@ -458,6 +493,38 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
}
/**
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
+ * @mdev: DRBD device.
+ */
+void tl_abort_disk_io(struct drbd_conf *mdev)
+{
+ struct drbd_tl_epoch *b;
+ struct list_head *le, *tle;
+ struct drbd_request *req;
+
+ spin_lock_irq(&mdev->req_lock);
+ b = mdev->oldest_tle;
+ while (b) {
+ list_for_each_safe(le, tle, &b->requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
+ _req_mod(req, abort_disk_io);
+ }
+ b = b->next;
+ }
+
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
+ _req_mod(req, abort_disk_io);
+ }
+
+ spin_unlock_irq(&mdev->req_lock);
+}
+
+/**
* cl_wide_st_chg() - true if the state change is a cluster wide one
* @mdev: DRBD device.
* @os: old (current) state.
@@ -470,7 +537,7 @@ static int cl_wide_st_chg(struct drbd_conf *mdev,
((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
(os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
- (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
+ (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
@@ -509,8 +576,16 @@ static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
union drbd_state,
union drbd_state);
+enum sanitize_state_warnings {
+ NO_WARNING,
+ ABORTED_ONLINE_VERIFY,
+ ABORTED_RESYNC,
+ CONNECTION_LOST_NEGOTIATING,
+ IMPLICITLY_UPGRADED_DISK,
+ IMPLICITLY_UPGRADED_PDSK,
+};
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, const char **warn_sync_abort);
+ union drbd_state ns, enum sanitize_state_warnings *warn);
int drbd_send_state_req(struct drbd_conf *,
union drbd_state, union drbd_state);
@@ -785,6 +860,13 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
rv = SS_IN_TRANSIENT_STATE;
+ /* While establishing a connection only allow cstate to change.
+ Delay/refuse role changes, detach attach etc... */
+ if (test_bit(STATE_SENT, &mdev->flags) &&
+ !(os.conn == C_WF_REPORT_PARAMS ||
+ (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+ rv = SS_IN_TRANSIENT_STATE;
+
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
@@ -803,6 +885,21 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+ static const char *msg_table[] = {
+ [NO_WARNING] = "",
+ [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+ [ABORTED_RESYNC] = "Resync aborted.",
+ [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+ [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+ [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+ };
+
+ if (warn != NO_WARNING)
+ dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
/**
* sanitize_state() - Resolves implicitly necessary additional changes to a state transition
* @mdev: DRBD device.
@@ -814,11 +911,14 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
* to D_UNKNOWN. This rule and many more along those lines are in this function.
*/
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, const char **warn_sync_abort)
+ union drbd_state ns, enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
+ if (warn)
+ *warn = NO_WARNING;
+
fp = FP_DONT_CARE;
if (get_ldev(mdev)) {
fp = mdev->ldev->dc.fencing;
@@ -833,18 +933,13 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
* If you try to go into some Sync* state, that shall fail (elsewhere). */
if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
- ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
+ ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
ns.conn = os.conn;
/* we cannot fail (again) if we already detached */
if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
ns.disk = D_DISKLESS;
- /* if we are only D_ATTACHING yet,
- * we can (and should) go directly to D_DISKLESS. */
- if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
- ns.disk = D_DISKLESS;
-
/* After C_DISCONNECTING only C_STANDALONE may follow */
if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
ns.conn = os.conn;
@@ -863,10 +958,9 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* Abort resync if a disk fails/detaches */
if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
(ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
- if (warn_sync_abort)
- *warn_sync_abort =
- os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
- "Online-verify" : "Resync";
+ if (warn)
+ *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
+ ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
ns.conn = C_CONNECTED;
}
@@ -877,7 +971,8 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.disk = mdev->new_state_tmp.disk;
ns.pdsk = mdev->new_state_tmp.pdsk;
} else {
- dev_alert(DEV, "Connection lost while negotiating, no data!\n");
+ if (warn)
+ *warn = CONNECTION_LOST_NEGOTIATING;
ns.disk = D_DISKLESS;
ns.pdsk = D_UNKNOWN;
}
@@ -959,16 +1054,16 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.disk = disk_max;
if (ns.disk < disk_min) {
- dev_warn(DEV, "Implicitly set disk from %s to %s\n",
- drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_DISK;
ns.disk = disk_min;
}
if (ns.pdsk > pdsk_max)
ns.pdsk = pdsk_max;
if (ns.pdsk < pdsk_min) {
- dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
- drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_PDSK;
ns.pdsk = pdsk_min;
}
@@ -1045,12 +1140,12 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
{
union drbd_state os;
enum drbd_state_rv rv = SS_SUCCESS;
- const char *warn_sync_abort = NULL;
+ enum sanitize_state_warnings ssw;
struct after_state_chg_work *ascw;
os = mdev->state;
- ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
+ ns = sanitize_state(mdev, os, ns, &ssw);
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
@@ -1076,8 +1171,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
- if (warn_sync_abort)
- dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
+ print_sanitize_warnings(mdev, ssw);
{
char *pbp, pb[300];
@@ -1243,7 +1337,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
drbd_thread_stop_nowait(&mdev->receiver);
/* Upon network failure, we need to restart the receiver. */
- if (os.conn > C_TEAR_DOWN &&
+ if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
drbd_thread_restart_nowait(&mdev->receiver);
@@ -1251,6 +1345,15 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
drbd_resume_al(mdev);
+ /* remember last connect and attach times so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
+ mdev->last_reconnect_jif = jiffies;
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
+ mdev->last_reattach_jif = jiffies;
+
ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
if (ascw) {
ascw->os = os;
@@ -1354,12 +1457,16 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Here we have the actions that are performed after a
state change. This function might sleep */
+ if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+
nsm.i = -1;
if (ns.susp_nod) {
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
what = resend;
- if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
what = restart_frozen_disk_io;
if (what != nothing)
@@ -1408,7 +1515,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Do not change the order of the if above and the two below... */
if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
}
/* No point in queuing send_bitmap if we don't have a connection
* anymore, so check also the _current_ state, not only the new state
@@ -1441,11 +1548,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
}
if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+ if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
drbd_uuid_new_current(mdev);
drbd_send_uuids(mdev);
}
-
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
/* We may still be Primary ourselves.
@@ -1473,14 +1580,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
drbd_send_sizes(mdev, 0, 0); /* to start sync... */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
}
/* We want to pause/continue resync, tell peer. */
if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp)))
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
/* In case one of the isp bits got set, suspend other devices. */
if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
@@ -1490,10 +1597,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Make sure the peer gets informed about eventual state
changes (ISP bits) while we were in WFReportParams. */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
/* We are in the progress to start a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
@@ -1513,33 +1620,38 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* first half of local IO error, failure to attach,
* or administrative detach */
if (os.disk != D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh;
- int was_io_error;
+ enum drbd_io_error_p eh = EP_PASS_ON;
+ int was_io_error = 0;
/* corresponding get_ldev was in __drbd_set_state, to serialize
- * our cleanup here with the transition to D_DISKLESS,
- * so it is safe to dreference ldev here. */
- eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
- /* current state still has to be D_FAILED,
- * there is only one way out: to D_DISKLESS,
- * and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
-
- if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I am detaching my disk\n");
- else
- dev_err(DEV, "Sending state for detaching disk failed\n");
-
- drbd_rs_cancel_all(mdev);
-
- /* In case we want to get something to stable storage still,
- * this may be the last chance.
- * Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
+ * our cleanup here with the transition to D_DISKLESS.
+	 * But it is still not safe to dereference ldev here, since
+	 * we might come from a failed Attach before ldev was set. */
+ if (mdev->ldev) {
+ eh = mdev->ldev->dc.on_io_error;
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+ /* Immediately allow completion of all application IO, that waits
+			/* Immediately allow completion of all application IO that waits
+ tl_abort_disk_io(mdev);
+
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ drbd_rs_cancel_all(mdev);
+
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ }
put_ldev(mdev);
if (was_io_error && eh == EP_CALL_HELPER)
@@ -1561,16 +1673,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);
- if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I'm now diskless.\n");
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
/* corresponding get_ldev in __drbd_set_state
* this may finally trigger drbd_ldev_destroy. */
put_ldev(mdev);
}
	/* Notify peer that I had a local IO error, and did not detach. */
- if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
- drbd_send_state(mdev);
+ if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
@@ -1588,7 +1701,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* sync target done with resync. Explicitly notify peer, even though
* it should (at least for non-empty resyncs) already know itself. */
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
+
+ /* Wake up role changes, that were delayed because of connection establishing */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &mdev->flags);
+ wake_up(&mdev->state_wait);
+ }
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
@@ -1598,8 +1717,8 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* No harm done if some bits change during this phase.
*/
if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
- "write from resync_finished", BM_LOCKED_SET_ALLOWED);
+ drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
put_ldev(mdev);
}
@@ -2057,7 +2176,11 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+ uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ if (uuid && uuid != UUID_JUST_CREATED)
+ uuid = uuid + UUID_NEW_BM_OFFSET;
+ else
+ get_random_bytes(&uuid, sizeof(u64));
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
@@ -2089,6 +2212,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
+ /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
+ if (mdev->agreed_pro_version <= 94)
+ max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+
p.d_size = cpu_to_be64(d_size);
p.u_size = cpu_to_be64(u_size);
p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
@@ -2102,10 +2229,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
}
/**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
* @mdev: DRBD device.
*/
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
{
struct socket *sock;
struct p_state p;
@@ -2131,6 +2258,37 @@ int drbd_send_state(struct drbd_conf *mdev)
return ok;
}
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediary state in the order it occurred.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+ struct socket *sock;
+ struct p_state p;
+ int ok = 0;
+
+ mutex_lock(&mdev->data.mutex);
+
+ p.state = cpu_to_be32(state.i);
+ sock = mdev->data.socket;
+
+ if (likely(sock != NULL)) {
+ ok = _drbd_send_cmd(mdev, sock, P_STATE,
+ (struct p_header80 *)&p, sizeof(p), 0);
+ }
+
+ mutex_unlock(&mdev->data.mutex);
+
+ return ok;
+}
+
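Aside: a minimal userspace sketch of the ordering problem the comment above describes, assuming a plain array in place of DRBD's work queue (all names below are invented for illustration, not driver code). Sending the snapshot captured at change time keeps every intermediate state; re-reading the live state at send time would not.

    #include <stdio.h>

    /* Illustrative only: a fixed-size array stands in for DRBD's work queue. */
    struct state { int conn; int disk; };

    static struct state live_state;          /* stand-in for mdev->state        */
    static struct state queued[8];           /* snapshots taken at change time  */
    static int n_queued;

    static void change_state(struct state ns)
    {
        live_state = ns;
        queued[n_queued++] = ns;             /* remember exactly this state     */
    }

    static void send_state(struct state s)   /* stand-in for drbd_send_state()  */
    {
        printf("send conn=%d disk=%d\n", s.conn, s.disk);
    }

    int main(void)
    {
        change_state((struct state){ .conn = 1, .disk = 2 });
        change_state((struct state){ .conn = 2, .disk = 2 }); /* before the worker ran */

        /* Worker runs later: each snapshot goes out in order.  Reading
         * live_state here instead would send conn=2 twice and skip conn=1. */
        for (int i = 0; i < n_queued; i++)
            send_state(queued[i]);
        printf("live state is conn=%d disk=%d\n", live_state.conn, live_state.disk);
        return 0;
    }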
int drbd_send_state_req(struct drbd_conf *mdev,
union drbd_state mask, union drbd_state val)
{
@@ -2615,7 +2773,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
if (!_drbd_no_send_page(mdev, bvec->bv_page,
bvec->bv_offset, bvec->bv_len,
i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2629,7 +2787,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
if (!_drbd_send_page(mdev, bvec->bv_page,
bvec->bv_offset, bvec->bv_len,
i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2695,8 +2853,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
p.sector = cpu_to_be64(req->sector);
p.block_id = (unsigned long)req;
- p.seq_num = cpu_to_be32(req->seq_num =
- atomic_add_return(1, &mdev->packet_seq));
+ p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
@@ -2987,8 +3144,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
atomic_set(&mdev->ap_in_flight, 0);
+ atomic_set(&mdev->md_io_in_use, 0);
- mutex_init(&mdev->md_io_mutex);
mutex_init(&mdev->data.mutex);
mutex_init(&mdev->meta.mutex);
sema_init(&mdev->data.work.s, 0);
@@ -3126,6 +3283,10 @@ static void drbd_destroy_mempools(void)
/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+ if (drbd_md_io_bio_set)
+ bioset_free(drbd_md_io_bio_set);
+ if (drbd_md_io_page_pool)
+ mempool_destroy(drbd_md_io_page_pool);
if (drbd_ee_mempool)
mempool_destroy(drbd_ee_mempool);
if (drbd_request_mempool)
@@ -3139,6 +3300,8 @@ static void drbd_destroy_mempools(void)
if (drbd_al_ext_cache)
kmem_cache_destroy(drbd_al_ext_cache);
+ drbd_md_io_bio_set = NULL;
+ drbd_md_io_page_pool = NULL;
drbd_ee_mempool = NULL;
drbd_request_mempool = NULL;
drbd_ee_cache = NULL;
@@ -3162,6 +3325,8 @@ static int drbd_create_mempools(void)
drbd_bm_ext_cache = NULL;
drbd_al_ext_cache = NULL;
drbd_pp_pool = NULL;
+ drbd_md_io_page_pool = NULL;
+ drbd_md_io_bio_set = NULL;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -3185,6 +3350,16 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
+#ifdef COMPAT_HAVE_BIOSET_CREATE
+ drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
+ if (drbd_md_io_bio_set == NULL)
+ goto Enomem;
+#endif
+
+ drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
+ if (drbd_md_io_page_pool == NULL)
+ goto Enomem;
+
drbd_request_mempool = mempool_create(number,
mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
if (drbd_request_mempool == NULL)
@@ -3262,6 +3437,8 @@ static void drbd_delete_device(unsigned int minor)
if (!mdev)
return;
+ del_timer_sync(&mdev->request_timer);
+
/* paranoia asserts */
if (mdev->open_cnt != 0)
dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
@@ -3666,8 +3843,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!get_ldev_if_state(mdev, D_FAILED))
return;
- mutex_lock(&mdev->md_io_mutex);
- buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
+
memset(buffer, 0, 512);
buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -3698,7 +3877,8 @@ void drbd_md_sync(struct drbd_conf *mdev)
* since we updated it on metadata. */
mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
+out:
put_ldev(mdev);
}
@@ -3718,8 +3898,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!get_ldev_if_state(mdev, D_ATTACHING))
return ERR_IO_MD_DISK;
- mutex_lock(&mdev->md_io_mutex);
- buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
@@ -3780,7 +3961,8 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
mdev->sync_conf.al_extents = 127;
err:
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
+ out:
put_ldev(mdev);
return rv;
@@ -4183,12 +4365,11 @@ const char *drbd_buildtag(void)
static char buildtag[38] = "\0uilt-in";
if (buildtag[0] == 0) {
-#ifdef CONFIG_MODULES
- if (THIS_MODULE != NULL)
- sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
- else
+#ifdef MODULE
+ sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
+#else
+ buildtag[0] = 'b';
#endif
- buildtag[0] = 'b';
}
return buildtag;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 946166e13953..6d4de6a72e80 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -289,7 +289,7 @@ static int _try_outdate_peer_async(void *data)
*/
spin_lock_irq(&mdev->req_lock);
ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS) {
+ if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
ns.pdsk = nps;
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
@@ -432,7 +432,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
/* if this was forced, we should consider sync */
if (forced)
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
drbd_md_sync(mdev);
@@ -845,9 +845,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
- if (mdev->agreed_pro_version < 94)
- peer = mdev->peer_max_bio_size;
- else if (mdev->agreed_pro_version == 94)
+ if (mdev->agreed_pro_version < 94) {
+ peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
+ } else if (mdev->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
else /* drbd 8.3.8 onwards */
peer = DRBD_MAX_BIO_SIZE;
@@ -1032,7 +1033,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) nbc->dc.disk_size);
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1046,7 +1047,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
- retcode = ERR_MD_DISK_TO_SMALL;
+ retcode = ERR_MD_DISK_TOO_SMALL;
dev_warn(DEV, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
@@ -1057,7 +1058,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* (we may currently be R_PRIMARY with no local disk...) */
if (drbd_get_max_capacity(nbc) <
drbd_get_capacity(mdev->this_bdev)) {
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1138,7 +1139,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
@@ -1336,17 +1337,34 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
{
enum drbd_ret_code retcode;
int ret;
+ struct detach dt = {};
+
+ if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
+ reply->ret_code = ERR_MANDATORY_TAG;
+ goto out;
+ }
+
+ if (dt.detach_force) {
+ drbd_force_state(mdev, NS(disk, D_FAILED));
+ reply->ret_code = SS_SUCCESS;
+ goto out;
+ }
+
drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+ drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+ drbd_md_put_buffer(mdev);
/* D_FAILED will transition to DISKLESS. */
ret = wait_event_interruptible(mdev->misc_wait,
mdev->state.disk != D_FAILED);
drbd_resume_io(mdev);
+
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
retcode = ERR_INTR;
reply->ret_code = retcode;
+out:
return 0;
}
@@ -1711,7 +1729,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
if (rs.no_resync && mdev->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
- goto fail;
+ goto fail_ldev;
}
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
@@ -1738,6 +1756,10 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
fail:
reply->ret_code = retcode;
return 0;
+
+ fail_ldev:
+ put_ldev(mdev);
+ goto fail;
}
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
@@ -1941,6 +1963,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -1959,6 +1982,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
}
+ drbd_resume_io(mdev);
reply->ret_code = retcode;
return 0;
@@ -1980,6 +2004,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -1998,6 +2023,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
} else
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
}
+ drbd_resume_io(mdev);
reply->ret_code = retcode;
return 0;
@@ -2170,11 +2196,13 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
/* w_make_ov_request expects position to be aligned */
mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+ drbd_resume_io(mdev);
return 0;
}
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 2959cdfb77f5..869bada2ed06 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -52,7 +52,7 @@ void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
if (unlikely(v >= 1000000)) {
/* cool: > GiByte/s */
seq_printf(seq, "%ld,", v / 1000000);
- v /= 1000000;
+ v %= 1000000;
seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
} else if (likely(v >= 1000))
seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
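Aside: the hunk above keeps the remainder (v %= 1000000) instead of dividing a second time. A small standalone C sketch of the corrected grouping, outside the kernel's seq_file API, shows the difference on a concrete value:

    #include <stdio.h>

    /* Same grouping logic as the fixed function, minus the seq_file plumbing. */
    static void print_grouped(long v)
    {
        if (v >= 1000000) {
            printf("%ld,", v / 1000000);
            v %= 1000000;           /* keep the remainder; the old code divided again */
            printf("%03ld,%03ld", v / 1000, v % 1000);
        } else if (v >= 1000) {
            printf("%ld,%03ld", v / 1000, v % 1000);
        } else {
            printf("%ld", v);
        }
        printf("\n");
    }

    int main(void)
    {
        print_grouped(1234567);   /* prints 1,234,567; the old code printed 1,000,001 */
        print_grouped(4321);      /* prints 4,321 */
        return 0;
    }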
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 436f519bed1c..ea4836e0ae98 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -466,6 +466,7 @@ static int drbd_accept(struct drbd_conf *mdev, const char **what,
goto out;
}
(*newsock)->ops = sock->ops;
+ __module_get((*newsock)->ops->owner);
out:
return err;
@@ -750,6 +751,7 @@ static int drbd_connect(struct drbd_conf *mdev)
{
struct socket *s, *sock, *msock;
int try, h, ok;
+ enum drbd_state_rv rv;
D_ASSERT(!mdev->data.socket);
@@ -888,25 +890,32 @@ retry:
}
}
- if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
- return 0;
-
sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
atomic_set(&mdev->packet_seq, 0);
mdev->peer_seq = 0;
- drbd_thread_start(&mdev->asender);
-
if (drbd_send_protocol(mdev) == -1)
return -1;
+ set_bit(STATE_SENT, &mdev->flags);
drbd_send_sync_param(mdev, &mdev->sync_conf);
drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags);
+
+ spin_lock_irq(&mdev->req_lock);
+ rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
+ if (mdev->state.conn != C_WF_REPORT_PARAMS)
+ clear_bit(STATE_SENT, &mdev->flags);
+ spin_unlock_irq(&mdev->req_lock);
+
+ if (rv < SS_SUCCESS)
+ return 0;
+
+ drbd_thread_start(&mdev->asender);
mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
return 1;
@@ -957,7 +966,7 @@ static void drbd_flush(struct drbd_conf *mdev)
rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
NULL);
if (rv) {
- dev_err(DEV, "local disk flush failed with status %d\n", rv);
+ dev_info(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0
* if (rv == -EOPNOTSUPP) */
@@ -1001,13 +1010,14 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
if (epoch_size != 0 &&
atomic_read(&epoch->active) == 0 &&
- test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
+ (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
spin_unlock(&mdev->epoch_lock);
drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
spin_lock(&mdev->epoch_lock);
}
- dec_unacked(mdev);
+ if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
+ dec_unacked(mdev);
if (mdev->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
@@ -1096,7 +1106,11 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the
- * request in more than one bio. */
+ * request in more than one bio.
+ *
+ * Plain bio_alloc is good enough here; this is not a DRBD-internally
+ * generated bio, but a bio allocated on behalf of the peer.
+ */
next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages);
if (!bio) {
@@ -1583,6 +1597,24 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
return ok;
}
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+{
+
+ struct drbd_epoch_entry *rs_e;
+ bool rv = 0;
+
+ spin_lock_irq(&mdev->req_lock);
+ list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
+ if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+ rv = 1;
+ break;
+ }
+ }
+ spin_unlock_irq(&mdev->req_lock);
+
+ return rv;
+}
+
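Aside: overlapping_resync_write() relies on an overlaps() helper that is not part of this hunk. As a stated assumption, the sketch below uses the standard half-open interval test (the kernel macro may phrase the same check in sectors rather than plain offsets):

    #include <stdio.h>
    #include <stdbool.h>

    /* Assumed semantics: two [start, start+len) ranges overlap iff each one
     * starts before the other one ends. */
    static bool ranges_overlap(unsigned long long s1, unsigned long long l1,
                               unsigned long long s2, unsigned long long l2)
    {
        return s1 < s2 + l2 && s2 < s1 + l1;
    }

    int main(void)
    {
        /* Application write [4096, 8192) vs. resync write [6144, 10240):
         * they overlap, so receive_Data would wait for the resync write. */
        printf("%d\n", ranges_overlap(4096, 4096, 6144, 4096)); /* 1 */
        printf("%d\n", ranges_overlap(0, 4096, 8192, 4096));    /* 0 */
        return 0;
    }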
/* Called from receive_Data.
* Synchronize packets on sock with packets on msock.
*
@@ -1826,6 +1858,9 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
list_add(&e->w.list, &mdev->active_ee);
spin_unlock_irq(&mdev->req_lock);
+ if (mdev->state.conn == C_SYNC_TARGET)
+ wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
+
switch (mdev->net_conf->wire_protocol) {
case DRBD_PROT_C:
inc_unacked(mdev);
@@ -2420,7 +2455,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
- dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
+ dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
return -1;
@@ -2806,10 +2841,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
if (apv >= 88) {
if (apv == 88) {
- if (data_size > SHARED_SECRET_MAX) {
- dev_err(DEV, "verify-alg too long, "
- "peer wants %u, accepting only %u byte\n",
- data_size, SHARED_SECRET_MAX);
+ if (data_size > SHARED_SECRET_MAX || data_size == 0) {
+ dev_err(DEV, "verify-alg of wrong size, "
+ "peer wants %u, accepting only up to %u byte\n",
+ data_size, SHARED_SECRET_MAX);
return false;
}
@@ -3168,9 +3203,20 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
os = ns = mdev->state;
spin_unlock_irq(&mdev->req_lock);
- /* peer says his disk is uptodate, while we think it is inconsistent,
- * and this happens while we think we have a sync going on. */
- if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
+ /* If some other part of the code (asender thread, timeout)
+ * already decided to close the connection again,
+ * we must not "re-establish" it here. */
+ if (os.conn <= C_TEAR_DOWN)
+ return false;
+
+ /* If this is the "end of sync" confirmation, usually the peer disk
+ * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
+ * set) resync started in PausedSyncT, or if the timing of pause-/
+ * unpause-sync events has been "just right", the peer disk may
+ * transition from D_CONSISTENT to D_UP_TO_DATE as well.
+ */
+ if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
+ real_peer_disk == D_UP_TO_DATE &&
os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
/* If we are (becoming) SyncSource, but peer is still in sync
* preparation, ignore its uptodate-ness to avoid flapping, it
@@ -3288,7 +3334,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
/* Nowadays only used when forcing a node into primary role and
setting its disk to UpToDate with that */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
}
@@ -3776,6 +3822,13 @@ static void drbd_disconnect(struct drbd_conf *mdev)
if (mdev->state.conn == C_STANDALONE)
return;
+ /* We are about to start the cleanup after connection loss.
+ * Make sure drbd_make_request knows about that.
+ * Usually we should be in some network failure state already,
+ * but just in case we are not, we fix it up here.
+ */
+ drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+
/* asender does not clean up anything. it must not interfere, either */
drbd_thread_stop(&mdev->asender);
drbd_free_sock(mdev);
@@ -3803,8 +3856,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
- del_timer(&mdev->request_timer);
-
/* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -4433,7 +4484,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
if (mdev->state.conn == C_AHEAD &&
atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
mdev->start_resync_timer.expires = jiffies + HZ;
add_timer(&mdev->start_resync_timer);
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 4a0f314086e5..8e93a6ac9bb6 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
const int rw = bio_data_dir(bio);
int cpu;
cpu = part_stat_lock();
+ part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -214,8 +215,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned long s = req->rq_state;
struct drbd_conf *mdev = req->mdev;
- /* only WRITES may end up here without a master bio (on barrier ack) */
- int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
+ int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -230,7 +230,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
return;
if (s & RQ_NET_PENDING)
return;
- if (s & RQ_LOCAL_PENDING)
+ if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
return;
if (req->master_bio) {
@@ -277,6 +277,9 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
req->master_bio = NULL;
}
+ if (s & RQ_LOCAL_PENDING)
+ return;
+
if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
/* this is disconnected (local only) operation,
* or protocol C P_WRITE_ACK,
@@ -429,7 +432,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case completed_ok:
- if (bio_data_dir(req->master_bio) == WRITE)
+ if (req->rq_state & RQ_WRITE)
mdev->writ_cnt += req->size>>9;
else
mdev->read_cnt += req->size>>9;
@@ -438,7 +441,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
+ break;
+
+ case abort_disk_io:
+ req->rq_state |= RQ_LOCAL_ABORTED;
+ if (req->rq_state & RQ_WRITE)
+ _req_may_be_done_not_susp(req, m);
+ else
+ goto goto_queue_for_net_read;
break;
case write_completed_with_error:
@@ -447,7 +457,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
__drbd_chk_io_error(mdev, false);
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
break;
case read_ahead_completed_with_error:
@@ -455,7 +464,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
break;
case read_completed_with_error:
@@ -464,10 +472,16 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+ if (req->rq_state & RQ_LOCAL_ABORTED) {
+ _req_may_be_done(req, m);
+ break;
+ }
__drbd_chk_io_error(mdev, false);
- put_ldev(mdev);
+
+ goto_queue_for_net_read:
+
+ D_ASSERT(!(req->rq_state & RQ_NET_MASK));
/* no point in retrying if there is no good remote data,
* or we have no connection. */
@@ -556,10 +570,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
drbd_queue_work(&mdev->data.work, &req->w);
break;
- case oos_handed_to_network:
- /* actually the same */
+ case read_retry_remote_canceled:
case send_canceled:
- /* treat it the same */
case send_failed:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
@@ -589,17 +601,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
}
req->rq_state &= ~RQ_NET_QUEUED;
req->rq_state |= RQ_NET_SENT;
- /* because _drbd_send_zc_bio could sleep, and may want to
- * dereference the bio even after the "write_acked_by_peer" and
- * "completed_ok" events came in, once we return from
- * _drbd_send_zc_bio (drbd_send_dblock), we have to check
- * whether it is done already, and end it. */
_req_may_be_done_not_susp(req, m);
break;
- case read_retry_remote_canceled:
+ case oos_handed_to_network:
+ /* Was not set PENDING, no longer QUEUED, so is now DONE
+ * as far as this connection is concerned. */
req->rq_state &= ~RQ_NET_QUEUED;
- /* fall through, in case we raced with drbd_disconnect */
+ req->rq_state |= RQ_NET_DONE;
+ _req_may_be_done_not_susp(req, m);
+ break;
+
case connection_lost_while_pending:
/* transfer log cleanup after connection loss */
/* assert something? */
@@ -616,8 +628,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case write_acked_by_peer_and_sis:
- req->rq_state |= RQ_NET_SIS;
case conflict_discarded_by_peer:
/* for discarded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
@@ -628,18 +638,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
(unsigned long long)req->sector, req->size);
req->rq_state |= RQ_NET_DONE;
/* fall through */
+ case write_acked_by_peer_and_sis:
case write_acked_by_peer:
+ if (what == write_acked_by_peer_and_sis)
+ req->rq_state |= RQ_NET_SIS;
/* protocol C; successfully written on peer.
- * Nothing to do here.
+ * Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
- * for volatile write-back caches on lower level devices.
- *
- * A barrier request is expected to have forced all prior
- * requests onto stable storage, so completion of a barrier
- * request could set NET_DONE right here, and not wait for the
- * P_BARRIER_ACK, but that is an unnecessary optimization. */
+ * for volatile write-back caches on lower level devices. */
- /* this makes it effectively the same as for: */
case recv_acked_by_peer:
/* protocol B; pretends to be successfully written on peer.
* see also notes above in handed_over_to_network about
@@ -763,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+ int congested = 0;
+
+ /* If I don't even have good local storage, we can not reasonably try
+ * to pull ahead of the peer. We also need the local reference to make
+ * sure mdev->act_log is there.
+ * Note: caller has to make sure that net_conf is there.
+ */
+ if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ return;
+
+ if (mdev->net_conf->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ dev_info(DEV, "Congestion-fill threshold reached\n");
+ congested = 1;
+ }
+
+ if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ dev_info(DEV, "Congestion-extents threshold reached\n");
+ congested = 1;
+ }
+
+ if (congested) {
+ queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+ if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+ else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+ }
+ put_ldev(mdev);
+}
+
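Aside: a hedged userspace model of the decision maybe_pull_ahead() factors out of the request path -- two independent thresholds, either of which marks the link as congested. The threshold values below are invented for illustration, not DRBD defaults.

    #include <stdio.h>

    enum on_congestion { OC_BLOCK, OC_PULL_AHEAD, OC_DISCONNECT };

    /* 0 = keep replicating, 1 = pull ahead, 2 = disconnect. */
    static int decide(int ap_in_flight, int cong_fill,
                      int al_used, int cong_extents, enum on_congestion policy)
    {
        int congested = 0;

        if (cong_fill && ap_in_flight >= cong_fill)
            congested = 1;                   /* too much data in flight        */
        if (al_used >= cong_extents)
            congested = 1;                   /* too many active (hot) extents  */

        if (!congested)
            return 0;
        return policy == OC_PULL_AHEAD ? 1 : 2;
    }

    int main(void)
    {
        printf("%d\n", decide(500, 1000, 40, 127, OC_PULL_AHEAD));  /* 0 */
        printf("%d\n", decide(1500, 1000, 40, 127, OC_PULL_AHEAD)); /* 1 */
        return 0;
    }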
static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
@@ -773,6 +814,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
int local, remote, send_oos = 0;
int err = -EIO;
int ret = 0;
+ union drbd_state s;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -834,8 +876,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
drbd_al_begin_io(mdev, sector);
}
- remote = remote && drbd_should_do_remote(mdev->state);
- send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+ s = mdev->state;
+ remote = remote && drbd_should_do_remote(s);
+ send_oos = rw == WRITE && drbd_should_send_oos(s);
D_ASSERT(!(remote && send_oos));
if (!(local || remote) && !is_susp(mdev->state)) {
@@ -867,7 +910,7 @@ allocate_barrier:
if (is_susp(mdev->state)) {
/* If we got suspended, use the retry mechanism of
- generic_make_request() to restart processing of this
+ drbd_make_request() to restart processing of this
bio. In the next call to drbd_make_request
we sleep in inc_ap_bio() */
ret = 1;
@@ -968,29 +1011,8 @@ allocate_barrier:
_req_mod(req, queue_for_send_oos);
if (remote &&
- mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
- int congested = 0;
-
- if (mdev->net_conf->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
- dev_info(DEV, "Congestion-fill threshold reached\n");
- congested = 1;
- }
-
- if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
- dev_info(DEV, "Congestion-extents threshold reached\n");
- congested = 1;
- }
-
- if (congested) {
- queue_barrier(mdev); /* last barrier, after mirrored writes */
-
- if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
- _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
- else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
- }
- }
+ mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+ maybe_pull_ahead(mdev);
spin_unlock_irq(&mdev->req_lock);
kfree(b); /* if someone else has beaten us to it... */
@@ -1091,7 +1113,6 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
*/
D_ASSERT(bio->bi_size > 0);
D_ASSERT((bio->bi_size & 0x1ff) == 0);
- D_ASSERT(bio->bi_idx == 0);
/* to make some things easier, force alignment of requests within the
* granularity of our hash tables */
@@ -1099,8 +1120,9 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
if (likely(s_enr == e_enr)) {
- inc_ap_bio(mdev, 1);
- drbd_make_request_common(mdev, bio, start_time);
+ do {
+ inc_ap_bio(mdev, 1);
+ } while (drbd_make_request_common(mdev, bio, start_time));
return;
}
@@ -1196,36 +1218,66 @@ void request_timer_fn(unsigned long data)
struct drbd_conf *mdev = (struct drbd_conf *) data;
struct drbd_request *req; /* oldest request */
struct list_head *le;
- unsigned long et = 0; /* effective timeout = ko_count * timeout */
+ unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
+ unsigned long now;
if (get_net_conf(mdev)) {
- et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+ if (mdev->state.conn >= C_WF_REPORT_PARAMS)
+ ent = mdev->net_conf->timeout*HZ/10
+ * mdev->net_conf->ko_count;
put_net_conf(mdev);
}
- if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+ if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
+ dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+ put_ldev(mdev);
+ }
+ et = min_not_zero(dt, ent);
+
+ if (!et)
return; /* Recurring timer stopped */
+ now = jiffies;
+
spin_lock_irq(&mdev->req_lock);
le = &mdev->oldest_tle->requests;
if (list_empty(le)) {
spin_unlock_irq(&mdev->req_lock);
- mod_timer(&mdev->request_timer, jiffies + et);
+ mod_timer(&mdev->request_timer, now + et);
return;
}
le = le->prev;
req = list_entry(le, struct drbd_request, tl_requests);
- if (time_is_before_eq_jiffies(req->start_time + et)) {
- if (req->rq_state & RQ_NET_PENDING) {
- dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
- _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
- } else {
- dev_warn(DEV, "Local backing block device frozen?\n");
- mod_timer(&mdev->request_timer, jiffies + et);
- }
- } else {
- mod_timer(&mdev->request_timer, req->start_time + et);
- }
+ /* The request is considered timed out, if
+ * - we have some effective timeout from the configuration,
+ * with above state restrictions applied,
+ * - the oldest request is waiting for a response from the network
+ * resp. the local disk,
+ * - the oldest request is in fact older than the effective timeout,
+ * - the connection was established (resp. disk was attached)
+ * for longer than the timeout already.
+ * Note that for 32bit jiffies and very stable connections/disks,
+ * we may have a wrap-around, which is caught by
+ * !time_in_range(now, last_..._jif, last_..._jif + timeout).
+ *
+ * Side effect: once per 32bit wrap-around interval, which means every
+ * ~198 days with 250 HZ, we have a window where the timeout would need
+ * to expire twice (worst case) to become effective. Good enough.
+ */
+ if (ent && req->rq_state & RQ_NET_PENDING &&
+ time_after(now, req->start_time + ent) &&
+ !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+ dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+ _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+ }
+ if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+ time_after(now, req->start_time + dt) &&
+ !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
+ dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
+ __drbd_chk_io_error(mdev, 1);
+ }
+ nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
spin_unlock_irq(&mdev->req_lock);
+ mod_timer(&mdev->request_timer, nt);
}
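Aside: a small standalone sketch of the timeout arithmetic used above -- the effective timeout is the smaller non-zero of the network and disk timeouts, jiffies-style comparisons tolerate wrap-around, and the next timer expiry is pushed out from "now" once the oldest request is already overdue. Names and values are made up; only the arithmetic mirrors the hunk.

    #include <stdio.h>

    /* min_not_zero(): a timeout configured as 0 means "disabled". */
    static unsigned long min_not_zero_ul(unsigned long a, unsigned long b)
    {
        if (!a) return b;
        if (!b) return a;
        return a < b ? a : b;
    }

    /* time_after(a, b) for a wrapping unsigned counter, as jiffies comparisons do. */
    static int after(unsigned long a, unsigned long b)
    {
        return (long)(b - a) < 0;
    }

    int main(void)
    {
        unsigned long ent = 0;       /* network timeout disabled                  */
        unsigned long dt  = 6000;    /* disk timeout, in "jiffies"                */
        unsigned long et  = min_not_zero_ul(dt, ent);

        unsigned long now = 100000, start = 90000;

        /* Re-arm relative to the request while it is not yet overdue, otherwise
         * relative to now -- the same rounding as the nt computation above. */
        unsigned long nt = (after(now, start + et) ? now : start) + et;

        printf("effective timeout %lu, next check at %lu\n", et, nt);
        return 0;
    }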
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 68a234a5fdc5..3d2111919486 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -105,6 +105,7 @@ enum drbd_req_event {
read_completed_with_error,
read_ahead_completed_with_error,
write_completed_with_error,
+ abort_disk_io,
completed_ok,
resend,
fail_frozen_disk_io,
@@ -118,18 +119,21 @@ enum drbd_req_event {
* same time, so we should hold the request lock anyways.
*/
enum drbd_req_state_bits {
- /* 210
- * 000: no local possible
- * 001: to be submitted
+ /* 3210
+ * 0000: no local possible
+ * 0001: to be submitted
* UNUSED, we could map: 011: submitted, completion still pending
- * 110: completed ok
- * 010: completed with error
+ * 0110: completed ok
+ * 0010: completed with error
+ * 1001: Aborted (before completion)
+ * 1x10: Aborted and completed -> free
*/
__RQ_LOCAL_PENDING,
__RQ_LOCAL_COMPLETED,
__RQ_LOCAL_OK,
+ __RQ_LOCAL_ABORTED,
- /* 76543
+ /* 87654
* 00000: no network possible
* 00001: to be send
* 00011: to be send, on worker queue
@@ -199,8 +203,9 @@ enum drbd_req_state_bits {
#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK (1UL << __RQ_LOCAL_OK)
+#define RQ_LOCAL_ABORTED (1UL << __RQ_LOCAL_ABORTED)
-#define RQ_LOCAL_MASK ((RQ_LOCAL_OK << 1)-1) /* 0x07 */
+#define RQ_LOCAL_MASK ((RQ_LOCAL_ABORTED << 1)-1)
#define RQ_NET_PENDING (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED (1UL << __RQ_NET_QUEUED)
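Aside: the widened RQ_LOCAL_* bit field above can be modelled in a few lines of standalone C; the sketch below rebuilds the masks from the bit positions and checks the "aborted and completed -> free" combination from the comment (illustration only, not driver code):

    #include <stdio.h>

    /* Bit positions as in the enum above; masks built the same way the driver
     * builds RQ_LOCAL_* from __RQ_LOCAL_*. */
    enum { BIT_PENDING, BIT_COMPLETED, BIT_OK, BIT_ABORTED };

    #define RQ_LOCAL_PENDING   (1UL << BIT_PENDING)
    #define RQ_LOCAL_COMPLETED (1UL << BIT_COMPLETED)
    #define RQ_LOCAL_OK        (1UL << BIT_OK)
    #define RQ_LOCAL_ABORTED   (1UL << BIT_ABORTED)
    #define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1) - 1)   /* now 0x0f */

    int main(void)
    {
        unsigned long s = RQ_LOCAL_ABORTED | RQ_LOCAL_COMPLETED;  /* "1x10" */

        printf("mask = 0x%02lx\n", RQ_LOCAL_MASK);
        /* "Aborted and completed -> free": both bits set, PENDING cleared. */
        printf("may free: %d\n",
               (s & RQ_LOCAL_ABORTED) && (s & RQ_LOCAL_COMPLETED) &&
               !(s & RQ_LOCAL_PENDING));
        return 0;
    }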
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d3e6f6213ba..620c70ff2231 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -70,11 +70,29 @@ rwlock_t global_state_lock;
void drbd_md_io_complete(struct bio *bio, int error)
{
struct drbd_md_io *md_io;
+ struct drbd_conf *mdev;
md_io = (struct drbd_md_io *)bio->bi_private;
+ mdev = container_of(md_io, struct drbd_conf, md_io);
+
md_io->error = error;
- complete(&md_io->event);
+ /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
+ * to timeout on the lower level device, and eventually detach from it.
+ * If this io completion runs after that timeout expired, this
+ * drbd_md_put_buffer() may allow us to finally try and re-attach.
+ * During normal operation, this only puts that extra reference
+ * down to 1 again.
+ * Make sure we first drop the reference, and only then signal
+ * completion, or we may (in drbd_al_read_log()) cycle so fast into the
+ * next drbd_md_sync_page_io(), that we trigger the
+ * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+ */
+ drbd_md_put_buffer(mdev);
+ md_io->done = 1;
+ wake_up(&mdev->misc_wait);
+ bio_put(bio);
+ put_ldev(mdev);
}
/* reads on behalf of the partner,
@@ -226,6 +244,7 @@ void drbd_endio_pri(struct bio *bio, int error)
spin_lock_irqsave(&mdev->req_lock, flags);
__req_mod(req, what, &m);
spin_unlock_irqrestore(&mdev->req_lock, flags);
+ put_ldev(mdev);
if (m.bio)
complete_master_bio(mdev, &m);
@@ -290,7 +309,7 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
crypto_hash_update(&desc, &sg, sg.length);
}
@@ -728,7 +747,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
return 1;
}
@@ -1519,14 +1538,14 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
}
drbd_state_lock(mdev);
-
+ write_lock_irq(&global_state_lock);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ write_unlock_irq(&global_state_lock);
drbd_state_unlock(mdev);
return;
}
- write_lock_irq(&global_state_lock);
- ns = mdev->state;
+ ns.i = mdev->state.i;
ns.aftr_isp = !_drbd_may_sync_now(mdev);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b0b00d70c166..553f43a90953 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -551,7 +551,7 @@ static void floppy_ready(void);
static void floppy_start(void);
static void process_fd_request(void);
static void recalibrate_floppy(void);
-static void floppy_shutdown(unsigned long);
+static void floppy_shutdown(struct work_struct *);
static int floppy_request_regions(int);
static void floppy_release_regions(int);
@@ -588,6 +588,8 @@ static int buffer_max = -1;
static struct floppy_fdc_state fdc_state[N_FDC];
static int fdc; /* current fdc */
+static struct workqueue_struct *floppy_wq;
+
static struct floppy_struct *_floppy = floppy_type;
static unsigned char current_drive;
static long current_count_sectors;
@@ -629,16 +631,15 @@ static inline void set_debugt(void) { }
static inline void debugt(const char *func, const char *msg) { }
#endif /* DEBUGT */
-typedef void (*timeout_fn)(unsigned long);
-static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
static const char *timeout_message;
static void is_alive(const char *func, const char *message)
{
/* this routine checks whether the floppy driver is "alive" */
if (test_bit(0, &fdc_busy) && command_status < 2 &&
- !timer_pending(&fd_timeout)) {
+ !delayed_work_pending(&fd_timeout)) {
DPRINT("%s: timeout handler died. %s\n", func, message);
}
}
@@ -666,15 +667,19 @@ static int output_log_pos;
static void __reschedule_timeout(int drive, const char *message)
{
+ unsigned long delay;
+
if (drive == current_reqD)
drive = current_drive;
- del_timer(&fd_timeout);
+ __cancel_delayed_work(&fd_timeout);
+
if (drive < 0 || drive >= N_DRIVE) {
- fd_timeout.expires = jiffies + 20UL * HZ;
+ delay = 20UL * HZ;
drive = 0;
} else
- fd_timeout.expires = jiffies + UDP->timeout;
- add_timer(&fd_timeout);
+ delay = UDP->timeout;
+
+ queue_delayed_work(floppy_wq, &fd_timeout, delay);
if (UDP->flags & FD_DEBUG)
DPRINT("reschedule timeout %s\n", message);
timeout_message = message;
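Aside: a minimal, hedged sketch of the pattern this floppy rework switches to -- a delayed work item on a private ordered workqueue instead of a raw kernel timer. It uses only core workqueue calls (alloc_ordered_workqueue, queue_delayed_work, cancel_delayed_work_sync); the module and all names are invented for illustration and are not part of the floppy driver.

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct workqueue_struct *demo_wq;

    static void demo_timeout(struct work_struct *work)
    {
        pr_info("demo: timeout work ran\n");
    }
    static DECLARE_DELAYED_WORK(demo_timeout_work, demo_timeout);

    static int __init demo_init(void)
    {
        demo_wq = alloc_ordered_workqueue("demo_wq", 0);
        if (!demo_wq)
            return -ENOMEM;
        /* Equivalent of (re)arming the old timer: run once, 20 seconds from now. */
        queue_delayed_work(demo_wq, &demo_timeout_work, 20UL * HZ);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cancel_delayed_work_sync(&demo_timeout_work);
        destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");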
@@ -872,7 +877,7 @@ static int lock_fdc(int drive, bool interruptible)
command_status = FD_COMMAND_NONE;
- __reschedule_timeout(drive, "lock fdc");
+ reschedule_timeout(drive, "lock fdc");
set_fdc(drive);
return 0;
}
@@ -880,23 +885,15 @@ static int lock_fdc(int drive, bool interruptible)
/* unlocks the driver */
static void unlock_fdc(void)
{
- unsigned long flags;
-
- raw_cmd = NULL;
if (!test_bit(0, &fdc_busy))
DPRINT("FDC access conflict!\n");
- if (do_floppy)
- DPRINT("device interrupt still active at FDC release: %pf!\n",
- do_floppy);
+ raw_cmd = NULL;
command_status = FD_COMMAND_NONE;
- spin_lock_irqsave(&floppy_lock, flags);
- del_timer(&fd_timeout);
+ __cancel_delayed_work(&fd_timeout);
+ do_floppy = NULL;
cont = NULL;
clear_bit(0, &fdc_busy);
- if (current_req || set_next_request())
- do_fd_request(current_req->q);
- spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
}
@@ -968,26 +965,24 @@ static DECLARE_WORK(floppy_work, NULL);
static void schedule_bh(void (*handler)(void))
{
+ WARN_ON(work_pending(&floppy_work));
+
PREPARE_WORK(&floppy_work, (work_func_t)handler);
- schedule_work(&floppy_work);
+ queue_work(floppy_wq, &floppy_work);
}
-static DEFINE_TIMER(fd_timer, NULL, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timer, NULL);
static void cancel_activity(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&floppy_lock, flags);
do_floppy = NULL;
- PREPARE_WORK(&floppy_work, (work_func_t)empty);
- del_timer(&fd_timer);
- spin_unlock_irqrestore(&floppy_lock, flags);
+ cancel_delayed_work_sync(&fd_timer);
+ cancel_work_sync(&floppy_work);
}
/* this function makes sure that the disk stays in the drive during the
* transfer */
-static void fd_watchdog(void)
+static void fd_watchdog(struct work_struct *arg)
{
debug_dcl(DP->flags, "calling disk change from watchdog\n");
@@ -997,21 +992,20 @@ static void fd_watchdog(void)
cont->done(0);
reset_fdc();
} else {
- del_timer(&fd_timer);
- fd_timer.function = (timeout_fn)fd_watchdog;
- fd_timer.expires = jiffies + HZ / 10;
- add_timer(&fd_timer);
+ cancel_delayed_work(&fd_timer);
+ PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+ queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
}
}
static void main_command_interrupt(void)
{
- del_timer(&fd_timer);
+ cancel_delayed_work(&fd_timer);
cont->interrupt();
}
/* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+static int fd_wait_for_completion(unsigned long expires, work_func_t function)
{
if (FDCS->reset) {
reset_fdc(); /* do the reset during sleep to win time
@@ -1020,11 +1014,10 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
return 1;
}
- if (time_before(jiffies, delay)) {
- del_timer(&fd_timer);
- fd_timer.function = function;
- fd_timer.expires = delay;
- add_timer(&fd_timer);
+ if (time_before(jiffies, expires)) {
+ cancel_delayed_work(&fd_timer);
+ PREPARE_DELAYED_WORK(&fd_timer, function);
+ queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
return 1;
}
return 0;
@@ -1342,7 +1335,7 @@ static int fdc_dtr(void)
*/
FDCS->dtr = raw_cmd->rate & 3;
return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
- (timeout_fn)floppy_ready);
+ (work_func_t)floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
@@ -1447,7 +1440,7 @@ static void setup_rw_floppy(void)
int flags;
int dflags;
unsigned long ready_date;
- timeout_fn function;
+ work_func_t function;
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1461,9 +1454,9 @@ static void setup_rw_floppy(void)
*/
if (time_after(ready_date, jiffies + DP->select_delay)) {
ready_date -= DP->select_delay;
- function = (timeout_fn)floppy_start;
+ function = (work_func_t)floppy_start;
} else
- function = (timeout_fn)setup_rw_floppy;
+ function = (work_func_t)setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
@@ -1493,7 +1486,7 @@ static void setup_rw_floppy(void)
inr = result();
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
- fd_watchdog();
+ fd_watchdog(NULL);
}
static int blind_seek;
@@ -1802,20 +1795,22 @@ static void show_floppy(void)
pr_info("do_floppy=%pf\n", do_floppy);
if (work_pending(&floppy_work))
pr_info("floppy_work.func=%pf\n", floppy_work.func);
- if (timer_pending(&fd_timer))
- pr_info("fd_timer.function=%pf\n", fd_timer.function);
- if (timer_pending(&fd_timeout)) {
- pr_info("timer_function=%pf\n", fd_timeout.function);
- pr_info("expires=%lu\n", fd_timeout.expires - jiffies);
- pr_info("now=%lu\n", jiffies);
- }
+ if (delayed_work_pending(&fd_timer))
+ pr_info("delayed work.function=%p expires=%ld\n",
+ fd_timer.work.func,
+ fd_timer.timer.expires - jiffies);
+ if (delayed_work_pending(&fd_timeout))
+ pr_info("timer_function=%p expires=%ld\n",
+ fd_timeout.work.func,
+ fd_timeout.timer.expires - jiffies);
+
pr_info("cont=%p\n", cont);
pr_info("current_req=%p\n", current_req);
pr_info("command_status=%d\n", command_status);
pr_info("\n");
}
-static void floppy_shutdown(unsigned long data)
+static void floppy_shutdown(struct work_struct *arg)
{
unsigned long flags;
@@ -1868,7 +1863,7 @@ static int start_motor(void (*function)(void))
/* wait_for_completion also schedules reset if needed. */
return fd_wait_for_completion(DRS->select_date + DP->select_delay,
- (timeout_fn)function);
+ (work_func_t)function);
}
static void floppy_ready(void)
@@ -2821,7 +2816,6 @@ do_request:
spin_lock_irq(&floppy_lock);
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
-
if (!pending) {
do_floppy = NULL;
unlock_fdc();
@@ -2898,13 +2892,15 @@ static void do_fd_request(struct request_queue *q)
current_req->cmd_flags))
return;
- if (test_bit(0, &fdc_busy)) {
+ if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
return;
}
- lock_fdc(MAXTIMEOUT, false);
+ command_status = FD_COMMAND_NONE;
+ __reschedule_timeout(MAXTIMEOUT, "fd_request");
+ set_fdc(0);
process_fd_request();
is_alive(__func__, "");
}
@@ -3612,9 +3608,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
- if (UDRS->fd_ref < 0)
- UDRS->fd_ref = 0;
- else if (!UDRS->fd_ref--) {
+ if (!UDRS->fd_ref--) {
DPRINT("floppy_release with fd_ref == 0");
UDRS->fd_ref = 0;
}
@@ -3650,13 +3644,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
set_bit(FD_VERIFY_BIT, &UDRS->flags);
}
- if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
- goto out2;
-
- if (mode & FMODE_EXCL)
- UDRS->fd_ref = -1;
- else
- UDRS->fd_ref++;
+ UDRS->fd_ref++;
opened_bdev[drive] = bdev;
@@ -3719,10 +3707,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&floppy_mutex);
return 0;
out:
- if (UDRS->fd_ref < 0)
- UDRS->fd_ref = 0;
- else
- UDRS->fd_ref--;
+ UDRS->fd_ref--;
+
if (!UDRS->fd_ref)
opened_bdev[drive] = NULL;
out2:
@@ -4159,10 +4145,16 @@ static int __init floppy_init(void)
goto out_put_disk;
}
+ floppy_wq = alloc_ordered_workqueue("floppy", 0);
+ if (!floppy_wq) {
+ err = -ENOMEM;
+ goto out_put_disk;
+ }
+
disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!disks[dr]->queue) {
err = -ENOMEM;
- goto out_put_disk;
+ goto out_destroy_workq;
}
blk_queue_max_hw_sectors(disks[dr]->queue, 64);
@@ -4213,7 +4205,7 @@ static int __init floppy_init(void)
use_virtual_dma = can_use_virtual_dma & 1;
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
err = -ENODEV;
goto out_unreg_region;
}
@@ -4224,7 +4216,7 @@ static int __init floppy_init(void)
fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
err = -EBUSY;
goto out_unreg_region;
}
@@ -4281,13 +4273,13 @@ static int __init floppy_init(void)
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
}
fdc = 0;
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
current_drive = 0;
initialized = true;
if (have_no_fdc) {
DPRINT("no floppy controllers found\n");
err = have_no_fdc;
- goto out_flush_work;
+ goto out_release_dma;
}
for (drive = 0; drive < N_DRIVE; drive++) {
@@ -4302,7 +4294,7 @@ static int __init floppy_init(void)
err = platform_device_register(&floppy_device[drive]);
if (err)
- goto out_flush_work;
+ goto out_release_dma;
err = device_create_file(&floppy_device[drive].dev,
&dev_attr_cmos);
@@ -4320,13 +4312,14 @@ static int __init floppy_init(void)
out_unreg_platform_dev:
platform_device_unregister(&floppy_device[drive]);
-out_flush_work:
- flush_work_sync(&floppy_work);
+out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
platform_driver_unregister(&floppy_driver);
+out_destroy_workq:
+ destroy_workqueue(floppy_wq);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
@@ -4397,7 +4390,7 @@ static int floppy_grab_irq_and_dma(void)
* We might have scheduled a free_irq(), wait it to
* drain first:
*/
- flush_work_sync(&floppy_work);
+ flush_workqueue(floppy_wq);
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",
@@ -4488,9 +4481,9 @@ static void floppy_release_irq_and_dma(void)
pr_info("motor off timer %d still active\n", drive);
#endif
- if (timer_pending(&fd_timeout))
+ if (delayed_work_pending(&fd_timeout))
pr_info("floppy timer still active:%s\n", timeout_message);
- if (timer_pending(&fd_timer))
+ if (delayed_work_pending(&fd_timer))
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
@@ -4560,8 +4553,9 @@ static void __exit floppy_module_exit(void)
put_disk(disks[drive]);
}
- del_timer_sync(&fd_timeout);
- del_timer_sync(&fd_timer);
+ cancel_delayed_work_sync(&fd_timeout);
+ cancel_delayed_work_sync(&fd_timer);
+ destroy_workqueue(floppy_wq);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bbca966f8f66..3bba65510d23 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
struct gendisk *disk;
int err;
+ err = -ENOMEM;
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
- if (!lo) {
- err = -ENOMEM;
+ if (!lo)
goto out;
- }
- err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
- if (err < 0)
+ if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
goto out_free_dev;
if (i >= 0) {
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 304000c3d433..a8fddeb3d638 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
+#include <linux/debugfs.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
* allocated in mtip_init().
*/
static int mtip_major;
+static struct dentry *dfs_parent;
static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);
@@ -294,18 +296,16 @@ static int hba_reset_nosleep(struct driver_data *dd)
*/
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
- unsigned long flags = 0;
-
atomic_set(&port->commands[tag].active, 1);
- spin_lock_irqsave(&port->cmd_issue_lock, flags);
+ spin_lock(&port->cmd_issue_lock);
writel((1 << MTIP_TAG_BIT(tag)),
port->s_active[MTIP_TAG_INDEX(tag)]);
writel((1 << MTIP_TAG_BIT(tag)),
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
- spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+ spin_unlock(&port->cmd_issue_lock);
/* Set the command's timeout value.*/
port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -436,8 +436,7 @@ static void mtip_init_port(struct mtip_port *port)
writel(0xFFFFFFFF, port->completed[i]);
/* Clear any pending interrupts for this port */
- writel(readl(port->dd->mmio + PORT_IRQ_STAT),
- port->dd->mmio + PORT_IRQ_STAT);
+ writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
/* Clear any pending interrupts on the HBA. */
writel(readl(port->dd->mmio + HOST_IRQ_STAT),
@@ -782,13 +781,24 @@ static void mtip_handle_tfe(struct driver_data *dd)
/* Stop the timer to prevent command timeouts. */
del_timer(&port->cmd_timer);
+ set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+
+ if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
+ test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+ cmd = &port->commands[MTIP_TAG_INTERNAL];
+ dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
+
+ atomic_inc(&cmd->active); /* active > 1 indicates error */
+ if (cmd->comp_data && cmd->comp_func) {
+ cmd->comp_func(port, MTIP_TAG_INTERNAL,
+ cmd->comp_data, PORT_IRQ_TF_ERR);
+ }
+ goto handle_tfe_exit;
+ }
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
- /* Set eh_active */
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
/* Loop through all the groups */
for (group = 0; group < dd->slot_groups; group++) {
completed = readl(port->completed[group]);
@@ -940,6 +950,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
+handle_tfe_exit:
/* clear eh_active */
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
@@ -961,6 +972,8 @@ static inline void mtip_process_sdbf(struct driver_data *dd)
/* walk all bits in all slot groups */
for (group = 0; group < dd->slot_groups; group++) {
completed = readl(port->completed[group]);
+ if (!completed)
+ continue;
/* clear completed status register in the hardware.*/
writel(completed, port->completed[group]);
@@ -1329,22 +1342,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
rv = -EAGAIN;
}
-
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL)) {
- dev_warn(&port->dd->pdev->dev,
- "Retiring internal command but CI is 1.\n");
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
- hba_reset_nosleep(port->dd);
- rv = -ENXIO;
- } else {
- mtip_restart_port(port);
- rv = -EAGAIN;
- }
- goto exec_ic_exit;
- }
-
} else {
/* Spin for <timeout> checking if command still outstanding */
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -1361,21 +1358,25 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rv = -ENXIO;
goto exec_ic_exit;
}
+ if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
+ atomic_inc(&int_cmd->active); /* error */
+ break;
+ }
}
+ }
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ if (atomic_read(&int_cmd->active) > 1) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command [%02X] failed\n", fis->command);
+ rv = -EIO;
+ }
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL)) {
- dev_err(&port->dd->pdev->dev,
- "Internal command did not complete [atomic]\n");
+ rv = -ENXIO;
+ if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ mtip_restart_port(port);
rv = -EAGAIN;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
- hba_reset_nosleep(port->dd);
- rv = -ENXIO;
- } else {
- mtip_restart_port(port);
- rv = -EAGAIN;
- }
}
}
exec_ic_exit:
@@ -1893,13 +1894,33 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
void __user *user_buffer)
{
struct host_to_dev_fis fis;
- struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+ struct host_to_dev_fis *reply;
+ u8 *buf = NULL;
+ dma_addr_t dma_addr = 0;
+ int rv = 0, xfer_sz = command[3];
+
+ if (xfer_sz) {
+ if (!user_buffer)
+ return -EFAULT;
+
+ buf = dmam_alloc_coherent(&port->dd->pdev->dev,
+ ATA_SECT_SIZE * xfer_sz,
+ &dma_addr,
+ GFP_KERNEL);
+ if (!buf) {
+ dev_err(&port->dd->pdev->dev,
+ "Memory allocation failed (%d bytes)\n",
+ ATA_SECT_SIZE * xfer_sz);
+ return -ENOMEM;
+ }
+ memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
+ }
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
- fis.type = 0x27;
- fis.opts = 1 << 7;
- fis.command = command[0];
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
fis.features = command[2];
fis.sect_count = command[3];
if (fis.command == ATA_CMD_SMART) {
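For context, a minimal sketch (with illustrative names and size) of the managed coherent DMA buffer pattern the reworked exec_drive_command() uses above: dmam_alloc_coherent() ties the allocation to the device, and dmam_free_coherent() releases it early on the exit path, though it would also be released automatically on driver detach.

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

static int example_dma_buffer(struct pci_dev *pdev, size_t len)
{
	dma_addr_t dma_addr;
	void *buf;

	buf = dmam_alloc_coherent(&pdev->dev, len, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, len);

	/* hand dma_addr to the controller, read/write buf from the CPU */

	dmam_free_coherent(&pdev->dev, len, buf, dma_addr);
	return 0;
}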
@@ -1908,6 +1929,11 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
fis.cyl_hi = 0xC2;
}
+ if (xfer_sz)
+ reply = (port->rxfis + RX_FIS_PIO_SETUP);
+ else
+ reply = (port->rxfis + RX_FIS_D2H_REG);
+
dbg_printk(MTIP_DRV_NAME
" %s: User Command: cmd %x, sect %x, "
"feat %x, sectcnt %x\n",
@@ -1917,43 +1943,46 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
command[2],
command[3]);
- memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
-
/* Execute the command. */
if (mtip_exec_internal_command(port,
&fis,
5,
- port->sector_buffer_dma,
- (command[3] != 0) ? ATA_SECT_SIZE : 0,
+ (xfer_sz ? dma_addr : 0),
+ (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
0,
GFP_KERNEL,
MTIP_IOCTL_COMMAND_TIMEOUT_MS)
< 0) {
- return -1;
+ rv = -EFAULT;
+ goto exit_drive_command;
}
/* Collect the completion status. */
command[0] = reply->command; /* Status*/
command[1] = reply->features; /* Error*/
- command[2] = command[3];
+ command[2] = reply->sect_count;
dbg_printk(MTIP_DRV_NAME
" %s: Completion Status: stat %x, "
- "err %x, cmd %x\n",
+ "err %x, nsect %x\n",
__func__,
command[0],
command[1],
command[2]);
- if (user_buffer && command[3]) {
+ if (xfer_sz) {
if (copy_to_user(user_buffer,
- port->sector_buffer,
+ buf,
ATA_SECT_SIZE * command[3])) {
- return -EFAULT;
+ rv = -EFAULT;
+ goto exit_drive_command;
}
}
-
- return 0;
+exit_drive_command:
+ if (buf)
+ dmam_free_coherent(&port->dd->pdev->dev,
+ ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
+ return rv;
}
/*
@@ -2003,6 +2032,32 @@ static unsigned int implicit_sector(unsigned char command,
return rv;
}
+static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+{
+ switch (fis->command) {
+ case ATA_CMD_DOWNLOAD_MICRO:
+ *timeout = 120000; /* 2 minutes */
+ break;
+ case ATA_CMD_SEC_ERASE_UNIT:
+ case 0xFC:
+ *timeout = 240000; /* 4 minutes */
+ break;
+ case ATA_CMD_STANDBYNOW1:
+ *timeout = 10000; /* 10 seconds */
+ break;
+ case 0xF7:
+ case 0xFA:
+ *timeout = 60000; /* 60 seconds */
+ break;
+ case ATA_CMD_SMART:
+ *timeout = 15000; /* 15 seconds */
+ break;
+ default:
+ *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ break;
+ }
+}
+
/*
* Executes a taskfile
* See ide_taskfile_ioctl() for derivation
@@ -2023,7 +2078,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
unsigned int taskin = 0;
unsigned int taskout = 0;
u8 nsect = 0;
- unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ unsigned int timeout;
unsigned int force_single_sector;
unsigned int transfer_size;
unsigned long task_file_data;
@@ -2153,32 +2208,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
fis.lba_hi,
fis.device);
- switch (fis.command) {
- case ATA_CMD_DOWNLOAD_MICRO:
- /* Change timeout for Download Microcode to 2 minutes */
- timeout = 120000;
- break;
- case ATA_CMD_SEC_ERASE_UNIT:
- /* Change timeout for Security Erase Unit to 4 minutes.*/
- timeout = 240000;
- break;
- case ATA_CMD_STANDBYNOW1:
- /* Change timeout for standby immediate to 10 seconds.*/
- timeout = 10000;
- break;
- case 0xF7:
- case 0xFA:
- /* Change timeout for vendor unique command to 10 secs */
- timeout = 10000;
- break;
- case ATA_CMD_SMART:
- /* Change timeout for vendor unique command to 15 secs */
- timeout = 15000;
- break;
- default:
- timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
- break;
- }
+ mtip_set_timeout(&fis, &timeout);
/* Determine the correct transfer size.*/
if (force_single_sector)
@@ -2295,13 +2325,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
{
switch (cmd) {
case HDIO_GET_IDENTITY:
- if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
- dev_warn(&dd->pdev->dev,
- "Unable to read identity\n");
- return -EIO;
- }
-
+ {
+ if (copy_to_user((void __user *)arg, dd->port->identify,
+ sizeof(u16) * ATA_ID_WORDS))
+ return -EFAULT;
break;
+ }
case HDIO_DRIVE_CMD:
{
u8 drive_command[4];
@@ -2519,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
}
/*
- * Sysfs register/status dump.
+ * Sysfs status dump.
*
* @dev Pointer to the device structure, passed by the kernel.
* @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2528,72 +2557,138 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
* return value
* The size, in bytes, of the data copied into buf.
*/
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u32 group_allocated;
struct driver_data *dd = dev_to_disk(dev)->private_data;
int size = 0;
+
+ if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "thermal_shutdown\n");
+ else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "write_protect\n");
+ else
+ size += sprintf(buf, "%s", "online\n");
+
+ return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
+{
+ struct driver_data *dd = (struct driver_data *)f->private_data;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+ u32 group_allocated;
+ int size = *offset;
int n;
- size += sprintf(&buf[size], "S ACTive:\n");
+ if (!len || size)
+ return 0;
+
+ if (size < 0)
+ return -EINVAL;
+
+ size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
readl(dd->port->s_active[n]));
- size += sprintf(&buf[size], "Command Issue:\n");
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
readl(dd->port->cmd_issue[n]));
- size += sprintf(&buf[size], "Allocated:\n");
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "H/ Completed : [ 0x");
+
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
+ readl(dd->port->completed[n]));
- for (n = 0; n < dd->slot_groups; n++) {
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
+ readl(dd->port->mmio + PORT_IRQ_STAT));
+ size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
+ readl(dd->mmio + HOST_IRQ_STAT));
+ size += sprintf(&buf[size], "\n");
+
+ size += sprintf(&buf[size], "L/ Allocated : [ 0x");
+
+ for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
group_allocated =
dd->port->allocated[n/2] >> (32*(n&1));
else
group_allocated = dd->port->allocated[n];
- size += sprintf(&buf[size], "0x%08x\n",
- group_allocated);
+ size += sprintf(&buf[size], "%08X ", group_allocated);
}
+ size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Completed:\n");
+ size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
- readl(dd->port->completed[n]));
+ for (n = dd->slot_groups-1; n >= 0; n--) {
+ if (sizeof(long) > sizeof(u32))
+ group_allocated =
+ dd->port->cmds_to_issue[n/2] >> (32*(n&1));
+ else
+ group_allocated = dd->port->cmds_to_issue[n];
+ size += sprintf(&buf[size], "%08X ", group_allocated);
+ }
+ size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
- readl(dd->port->mmio + PORT_IRQ_STAT));
- size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
- readl(dd->mmio + HOST_IRQ_STAT));
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
- return size;
+ return *offset;
}
-static ssize_t mtip_hw_show_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
{
- struct driver_data *dd = dev_to_disk(dev)->private_data;
- int size = 0;
+ struct driver_data *dd = (struct driver_data *)f->private_data;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+ int size = *offset;
- if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
- size += sprintf(buf, "%s", "thermal_shutdown\n");
- else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
- size += sprintf(buf, "%s", "write_protect\n");
- else
- size += sprintf(buf, "%s", "online\n");
+ if (!len || size)
+ return 0;
- return size;
+ if (size < 0)
+ return -EINVAL;
+
+ size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
+ dd->port->flags);
+ size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
+ dd->dd_flag);
+
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
+
+ return *offset;
}
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+static const struct file_operations mtip_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_registers,
+ .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_flags,
+ .llseek = no_llseek,
+};
/*
* Create the sysfs related attributes.
@@ -2610,9 +2705,6 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;
- if (sysfs_create_file(kobj, &dev_attr_registers.attr))
- dev_warn(&dd->pdev->dev,
- "Error creating 'registers' sysfs entry\n");
if (sysfs_create_file(kobj, &dev_attr_status.attr))
dev_warn(&dd->pdev->dev,
"Error creating 'status' sysfs entry\n");
@@ -2634,12 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;
- sysfs_remove_file(kobj, &dev_attr_registers.attr);
sysfs_remove_file(kobj, &dev_attr_status.attr);
return 0;
}
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+ if (!dfs_parent)
+ return -1;
+
+ dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+ if (IS_ERR_OR_NULL(dd->dfs_node)) {
+ dev_warn(&dd->pdev->dev,
+ "Error creating node %s under debugfs\n",
+ dd->disk->disk_name);
+ dd->dfs_node = NULL;
+ return -1;
+ }
+
+ debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+ &mtip_flags_fops);
+ debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+ &mtip_regs_fops);
+
+ return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+ debugfs_remove_recursive(dd->dfs_node);
+}
+
+
/*
* Perform any init/resume time hardware setup
*
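For reference, a minimal sketch of a debugfs file node like the "flags" and "registers" entries created above, assuming the common simple_open()/simple_read_from_buffer() helpers instead of the open-coded copy_to_user() bookkeeping; the "example_" names are illustrative.

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>

static ssize_t example_read(struct file *f, char __user *ubuf,
			    size_t len, loff_t *offset)
{
	char buf[64];
	int size;

	/* format a small report into a bounce buffer */
	size = scnprintf(buf, sizeof(buf), "flags : [ %08lX ]\n", 0UL);

	/* handles partial reads and the *offset update */
	return simple_read_from_buffer(ubuf, len, offset, buf, size);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,	/* file->private_data = inode->i_private */
	.read	= example_read,
	.llseek	= no_llseek,
};

/* debugfs_create_file("example", S_IRUGO, parent_dentry, NULL, &example_fops); */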
@@ -3634,7 +3753,10 @@ skip_create_disk:
set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
blk_queue_physical_block_size(dd->queue, 4096);
+ blk_queue_max_hw_sectors(dd->queue, 0xffff);
+ blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
+
/*
* write back cache is not supported in the device. FUA depends on
* write back cache support, hence setting flush support to zero.
@@ -3662,6 +3784,7 @@ skip_create_disk:
mtip_hw_sysfs_init(dd, kobj);
kobject_put(kobj);
}
+ mtip_hw_debugfs_init(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3687,6 +3810,8 @@ start_service_thread:
return rv;
kthread_run_error:
+ mtip_hw_debugfs_exit(dd);
+
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
@@ -3737,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
kobject_put(kobj);
}
}
+ mtip_hw_debugfs_exit(dd);
/*
* Delete our gendisk structure. This also removes the device
@@ -4084,10 +4210,20 @@ static int __init mtip_init(void)
}
mtip_major = error;
+ if (!dfs_parent) {
+ dfs_parent = debugfs_create_dir("rssd", NULL);
+ if (IS_ERR_OR_NULL(dfs_parent)) {
+ printk(KERN_WARNING "Error creating debugfs parent\n");
+ dfs_parent = NULL;
+ }
+ }
+
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
- if (error)
+ if (error) {
+ debugfs_remove(dfs_parent);
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+ }
return error;
}
@@ -4104,6 +4240,8 @@ static int __init mtip_init(void)
*/
static void __exit mtip_exit(void)
{
+ debugfs_remove_recursive(dfs_parent);
+
/* Release the allocated major block device number. */
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4ef58336310a..f51fc23d17bb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -26,7 +26,6 @@
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/genhd.h>
-#include <linux/version.h>
/* Offset of Subsystem Device ID in pci configuration space */
#define PCI_SUBSYSTEM_DEVICEID 0x2E
@@ -111,35 +110,39 @@
#define dbg_printk(format, arg...)
#endif
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
#define __force_bit2int (unsigned int __force)
-/* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */
-#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */
-#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */
-#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcde */
-#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+enum {
+ /* below are bit numbers in 'flags' defined in mtip_port */
+ MTIP_PF_IC_ACTIVE_BIT = 0, /* pio/ioctl */
+ MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
+ MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
+ MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */
+ MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
(1 << MTIP_PF_EH_ACTIVE_BIT) | \
(1 << MTIP_PF_SE_ACTIVE_BIT) | \
- (1 << MTIP_PF_DM_ACTIVE_BIT))
-
-#define MTIP_PF_SVC_THD_ACTIVE_BIT 4
-#define MTIP_PF_ISSUE_CMDS_BIT 5
-#define MTIP_PF_REBUILD_BIT 6
-#define MTIP_PF_SVC_THD_STOP_BIT 8
-
-/* below are bit numbers in 'dd_flag' defined in driver_data */
-#define MTIP_DDF_REMOVE_PENDING_BIT 1
-#define MTIP_DDF_OVER_TEMP_BIT 2
-#define MTIP_DDF_WRITE_PROTECT_BIT 3
-#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+ (1 << MTIP_PF_DM_ACTIVE_BIT)),
+
+ MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
+ MTIP_PF_ISSUE_CMDS_BIT = 5,
+ MTIP_PF_REBUILD_BIT = 6,
+ MTIP_PF_SVC_THD_STOP_BIT = 8,
+
+ /* below are bit numbers in 'dd_flag' defined in driver_data */
+ MTIP_DDF_REMOVE_PENDING_BIT = 1,
+ MTIP_DDF_OVER_TEMP_BIT = 2,
+ MTIP_DDF_WRITE_PROTECT_BIT = 3,
+ MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
(1 << MTIP_DDF_OVER_TEMP_BIT) | \
- (1 << MTIP_DDF_WRITE_PROTECT_BIT))
+ (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
-#define MTIP_DDF_CLEANUP_BIT 5
-#define MTIP_DDF_RESUME_BIT 6
-#define MTIP_DDF_INIT_DONE_BIT 7
-#define MTIP_DDF_REBUILD_FAILED_BIT 8
+ MTIP_DDF_CLEANUP_BIT = 5,
+ MTIP_DDF_RESUME_BIT = 6,
+ MTIP_DDF_INIT_DONE_BIT = 7,
+ MTIP_DDF_REBUILD_FAILED_BIT = 8,
+};
__packed struct smart_attr{
u8 attr_id;
@@ -445,6 +448,8 @@ struct driver_data {
unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+ struct dentry *dfs_node;
};
#endif
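As a usage note on the enum conversion above: the *_BIT values are bit numbers intended for the atomic bitops on the unsigned long flags words, while the *_PAUSE_IO / *_STOP_IO entries are masks for plain bitwise tests. A minimal sketch, with an illustrative flags pointer standing in for mtip_port.flags:

#include <linux/bitops.h>
#include <linux/kernel.h>

static bool example_flag_usage(unsigned long *port_flags)
{
	/* bit numbers go through the atomic set/clear/test helpers */
	set_bit(MTIP_PF_EH_ACTIVE_BIT, port_flags);

	if (test_bit(MTIP_PF_SE_ACTIVE_BIT, port_flags))
		pr_debug("secure erase in progress\n");

	clear_bit(MTIP_PF_EH_ACTIVE_BIT, port_flags);

	/* masks are ordinary bitwise tests against the whole word */
	return (*port_flags & MTIP_PF_PAUSE_IO) != 0;
}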
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 013c7a549fb6..65665c9c42c6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -141,7 +141,7 @@ struct rbd_request {
struct rbd_snap {
struct device dev;
const char *name;
- size_t size;
+ u64 size;
struct list_head node;
u64 id;
};
@@ -175,8 +175,7 @@ struct rbd_device {
/* protects updating the header */
struct rw_semaphore header_rwsem;
char snap_name[RBD_MAX_SNAP_NAME_LEN];
- u32 cur_snap; /* index+1 of current snapshot within snap context
- 0 - for the head */
+ u64 snap_id; /* current snapshot id */
int read_only;
struct list_head node;
@@ -241,7 +240,7 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
put_device(&rbd_dev->dev);
}
-static int __rbd_update_snaps(struct rbd_device *rbd_dev);
+static int __rbd_refresh_header(struct rbd_device *rbd_dev);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -450,7 +449,9 @@ static void rbd_client_release(struct kref *kref)
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
dout("rbd_release_client %p\n", rbdc);
+ spin_lock(&rbd_client_list_lock);
list_del(&rbdc->node);
+ spin_unlock(&rbd_client_list_lock);
ceph_destroy_client(rbdc->client);
kfree(rbdc->rbd_opts);
@@ -463,9 +464,7 @@ static void rbd_client_release(struct kref *kref)
*/
static void rbd_put_client(struct rbd_device *rbd_dev)
{
- spin_lock(&rbd_client_list_lock);
kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
- spin_unlock(&rbd_client_list_lock);
rbd_dev->rbd_client = NULL;
}
@@ -487,16 +486,18 @@ static void rbd_coll_release(struct kref *kref)
*/
static int rbd_header_from_disk(struct rbd_image_header *header,
struct rbd_image_header_ondisk *ondisk,
- int allocated_snaps,
+ u32 allocated_snaps,
gfp_t gfp_flags)
{
- int i;
- u32 snap_count;
+ u32 i, snap_count;
if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
return -ENXIO;
snap_count = le32_to_cpu(ondisk->snap_count);
+ if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context))
+ / sizeof (*ondisk))
+ return -EINVAL;
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
snap_count * sizeof (*ondisk),
gfp_flags);
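The new check above is an integer-overflow guard: snap_count comes from on-disk data, so the header-plus-array size has to be validated before the 32-bit multiply-and-add can wrap. A generic sketch of the same pattern, with illustrative names:

#include <linux/kernel.h>
#include <linux/slab.h>

static void *example_alloc_header_array(u32 n, size_t hdr, size_t elem,
					gfp_t gfp)
{
	/* reject counts that would wrap hdr + n * elem */
	if (n > (UINT_MAX - hdr) / elem)
		return NULL;

	return kmalloc(hdr + n * elem, gfp);
}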
@@ -506,11 +507,11 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
if (snap_count) {
header->snap_names = kmalloc(header->snap_names_len,
- GFP_KERNEL);
+ gfp_flags);
if (!header->snap_names)
goto err_snapc;
header->snap_sizes = kmalloc(snap_count * sizeof(u64),
- GFP_KERNEL);
+ gfp_flags);
if (!header->snap_sizes)
goto err_names;
} else {
@@ -552,21 +553,6 @@ err_snapc:
return -ENOMEM;
}
-static int snap_index(struct rbd_image_header *header, int snap_num)
-{
- return header->total_snaps - snap_num;
-}
-
-static u64 cur_snap_id(struct rbd_device *rbd_dev)
-{
- struct rbd_image_header *header = &rbd_dev->header;
-
- if (!rbd_dev->cur_snap)
- return 0;
-
- return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
-}
-
static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
u64 *seq, u64 *size)
{
@@ -605,7 +591,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
snapc->seq = header->snap_seq;
else
snapc->seq = 0;
- dev->cur_snap = 0;
+ dev->snap_id = CEPH_NOSNAP;
dev->read_only = 0;
if (size)
*size = header->image_size;
@@ -613,8 +599,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
if (ret < 0)
goto done;
-
- dev->cur_snap = header->total_snaps - ret;
+ dev->snap_id = snapc->seq;
dev->read_only = 1;
}
@@ -935,7 +920,6 @@ static int rbd_do_request(struct request *rq,
layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_preferred = cpu_to_le32(-1);
layout->fl_pg_pool = cpu_to_le32(dev->poolid);
ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
req, ops);
@@ -1168,7 +1152,7 @@ static int rbd_req_read(struct request *rq,
int coll_index)
{
return rbd_do_op(rq, rbd_dev, NULL,
- (snapid ? snapid : CEPH_NOSNAP),
+ snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
2,
@@ -1187,7 +1171,7 @@ static int rbd_req_sync_read(struct rbd_device *dev,
u64 *ver)
{
return rbd_req_sync_op(dev, NULL,
- (snapid ? snapid : CEPH_NOSNAP),
+ snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
NULL,
@@ -1238,7 +1222,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
notify_id, (int)opcode);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(dev);
+ rc = __rbd_refresh_header(dev);
mutex_unlock(&ctl_mutex);
if (rc)
pr_warning(RBD_DRV_NAME "%d got notification but failed to "
@@ -1521,7 +1505,7 @@ static void rbd_rq_fn(struct request_queue *q)
coll, cur_seg);
else
rbd_req_read(rq, rbd_dev,
- cur_snap_id(rbd_dev),
+ rbd_dev->snap_id,
ofs,
op_size, bio,
coll, cur_seg);
@@ -1592,7 +1576,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
{
ssize_t rc;
struct rbd_image_header_ondisk *dh;
- int snap_count = 0;
+ u32 snap_count = 0;
u64 ver;
size_t len;
@@ -1656,7 +1640,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
struct ceph_mon_client *monc;
/* we should create a snapshot only if we're pointing at the head */
- if (dev->cur_snap)
+ if (dev->snap_id != CEPH_NOSNAP)
return -EINVAL;
monc = &dev->rbd_client->client->monc;
@@ -1683,7 +1667,9 @@ static int rbd_header_add_snap(struct rbd_device *dev,
if (ret < 0)
return ret;
- dev->header.snapc->seq = new_snapid;
+ down_write(&dev->header_rwsem);
+ dev->header.snapc->seq = new_snapid;
+ up_write(&dev->header_rwsem);
return 0;
bad:
@@ -1703,7 +1689,7 @@ static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int __rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_refresh_header(struct rbd_device *rbd_dev)
{
int ret;
struct rbd_image_header h;
@@ -1890,7 +1876,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(rbd_dev);
+ rc = __rbd_refresh_header(rbd_dev);
if (rc < 0)
ret = rc;
@@ -1949,7 +1935,7 @@ static ssize_t rbd_snap_size_show(struct device *dev,
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%zd\n", snap->size);
+ return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
}
static ssize_t rbd_snap_id_show(struct device *dev,
@@ -1958,7 +1944,7 @@ static ssize_t rbd_snap_id_show(struct device *dev,
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%llu\n", (unsigned long long) snap->id);
+ return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
}
static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
@@ -2173,7 +2159,7 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
rbd_dev->header.obj_version);
if (ret == -ERANGE) {
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(rbd_dev);
+ rc = __rbd_refresh_header(rbd_dev);
mutex_unlock(&ctl_mutex);
if (rc < 0)
return rc;
@@ -2558,7 +2544,7 @@ static ssize_t rbd_snap_add(struct device *dev,
if (ret < 0)
goto err_unlock;
- ret = __rbd_update_snaps(rbd_dev);
+ ret = __rbd_refresh_header(rbd_dev);
if (ret < 0)
goto err_unlock;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index aa2712060bfb..9a72277a31df 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
}
}
+struct mm_plug_cb {
+ struct blk_plug_cb cb;
+ struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+ struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+ spin_lock_irq(&mmcb->card->lock);
+ activate(mmcb->card);
+ spin_unlock_irq(&mmcb->card->lock);
+ kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+ struct blk_plug *plug = current->plug;
+ struct mm_plug_cb *mmcb;
+
+ if (!plug)
+ return 0;
+
+ list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+ if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+ return 1;
+ }
+ /* Not currently on the callback list */
+ mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+ if (!mmcb)
+ return 0;
+
+ mmcb->card = card;
+ mmcb->cb.callback = mm_unplug;
+ list_add(&mmcb->cb.list, &plug->cb_list);
+ return 1;
+}
+
static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
+ if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+ activate(card);
spin_unlock_irq(&card->lock);
return;
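For context, a minimal sketch of the plugging machinery mm_check_plugged() hooks into above: a submitter opens a plug on its task, queues several bios, and any blk_plug_cb callbacks the driver registered (mm_unplug() here) run when the plug is flushed by blk_finish_plug() or a reschedule. The bio array and count are illustrative.

#include <linux/blkdev.h>
#include <linux/bio.h>

static void example_plugged_submit(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* current->plug = &plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* registered callbacks fire here */
}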
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 773cf27dc23f..9ad3b5ec1dc1 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 4e86393a09cf..e4fb3374dcd2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
return free;
}
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
+ if (info->shadow[id].req.u.rw.id != id)
+ return -EINVAL;
+ if (info->shadow[id].request == NULL)
+ return -EINVAL;
info->shadow[id].req.u.rw.id = info->shadow_free;
info->shadow[id].request = NULL;
info->shadow_free = id;
+ return 0;
}
+static const char *op_name(int op)
+{
+ static const char *const names[] = {
+ [BLKIF_OP_READ] = "read",
+ [BLKIF_OP_WRITE] = "write",
+ [BLKIF_OP_WRITE_BARRIER] = "barrier",
+ [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+ [BLKIF_OP_DISCARD] = "discard" };
+
+ if (op < 0 || op >= ARRAY_SIZE(names))
+ return "unknown";
+
+ if (!names[op])
+ return "reserved";
+
+ return names[op];
+}
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
unsigned int end = minor + nr;
@@ -526,6 +548,14 @@ static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
return 0;
}
+static char *encode_disk_name(char *ptr, unsigned int n)
+{
+ if (n >= 26)
+ ptr = encode_disk_name(ptr, n / 26 - 1);
+ *ptr = 'a' + n % 26;
+ return ptr + 1;
+}
+
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info,
u16 vdisk_info, u16 sector_size)
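The encode_disk_name() helper added above produces the same base-26 sequence as spreadsheet columns: 0 -> xvda, 25 -> xvdz, 26 -> xvdaa, 702 -> xvdaaa, which is what lets minors beyond "xvdz" be named without the old special cases. A stand-alone illustration (ordinary user-space C, not part of the patch):

#include <stdio.h>

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

int main(void)
{
	unsigned int samples[] = { 0, 25, 26, 701, 702 };
	char name[16];
	int i;

	for (i = 0; i < 5; i++) {
		*encode_disk_name(name, samples[i]) = '\0';
		printf("xvd%s\n", name);	/* xvda, xvdz, xvdaa, xvdzz, xvdaaa */
	}
	return 0;
}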
@@ -536,6 +566,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
unsigned int offset;
int minor;
int nr_parts;
+ char *ptr;
BUG_ON(info->gd != NULL);
BUG_ON(info->rq != NULL);
@@ -560,7 +591,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
"emulated IDE disks,\n\t choose an xvd device name"
"from xvde on\n", info->vdevice);
}
- err = -ENODEV;
+ if (minor >> MINORBITS) {
+ pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
+ info->vdevice, minor);
+ return -ENODEV;
+ }
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
@@ -574,23 +609,14 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (gd == NULL)
goto release;
- if (nr_minors > 1) {
- if (offset < 26)
- sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
- else
- sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
- 'a' + ((offset / 26)-1), 'a' + (offset % 26));
- } else {
- if (offset < 26)
- sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
- 'a' + offset,
- minor & (nr_parts - 1));
- else
- sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
- 'a' + ((offset / 26) - 1),
- 'a' + (offset % 26),
- minor & (nr_parts - 1));
- }
+ strcpy(gd->disk_name, DEV_NAME);
+ ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
+ BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
+ if (nr_minors > 1)
+ *ptr = 0;
+ else
+ snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
+ "%d", minor & (nr_parts - 1));
gd->major = XENVBD_MAJOR;
gd->first_minor = minor;
@@ -742,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
+ /*
+ * The backend has messed up and given us an id that we would
+ * never have given to it (we stamp it up to BLK_RING_SIZE -
+ * look in get_id_from_freelist.
+ */
+ if (id >= BLK_RING_SIZE) {
+ WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+ info->gd->disk_name, op_name(bret->operation), id);
+ /* We can't safely get the 'struct request' as
+ * the id is busted. */
+ continue;
+ }
req = info->shadow[id].request;
if (bret->operation != BLKIF_OP_DISCARD)
blkif_completion(&info->shadow[id]);
- add_id_to_freelist(info, id);
+ if (add_id_to_freelist(info, id)) {
+ WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+ info->gd->disk_name, op_name(bret->operation), id);
+ continue;
+ }
error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
- printk(KERN_WARNING "blkfront: %s: discard op failed\n",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
@@ -767,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
- printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : "flush disk cache",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
- printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : "flush disk cache",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(error)) {
@@ -1496,7 +1534,9 @@ module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
- return xenbus_unregister_driver(&blkfront_driver);
+ xenbus_unregister_driver(&blkfront_driver);
+ unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+ kfree(minors);
}
module_exit(xlblk_exit);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 2812b152d6e9..10308cd8a7ed 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3002) },
+ { USB_DEVICE(0x0CF3, 0xE019) },
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0489, 0xE03D) },
@@ -77,10 +78,14 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
+ { USB_DEVICE(0x0930, 0x0219) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C) },
+
{ } /* Terminating entry */
};
@@ -98,6 +103,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */
};
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 90bda50dc446..27068d149380 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -67,6 +67,7 @@ struct btmrvl_adapter {
u8 wakeup_tries;
wait_queue_head_t cmd_wait_q;
u8 cmd_complete;
+ bool is_suspended;
};
struct btmrvl_private {
@@ -135,12 +136,14 @@ int btmrvl_remove_card(struct btmrvl_private *priv);
void btmrvl_interrupt(struct btmrvl_private *priv);
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
int btmrvl_enable_ps(struct btmrvl_private *priv);
int btmrvl_prepare_command(struct btmrvl_private *priv);
+int btmrvl_enable_hs(struct btmrvl_private *priv);
#ifdef CONFIG_DEBUG_FS
void btmrvl_debugfs_init(struct hci_dev *hdev);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index d1209adc882d..dc304def8400 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -44,23 +44,33 @@ void btmrvl_interrupt(struct btmrvl_private *priv)
}
EXPORT_SYMBOL_GPL(btmrvl_interrupt);
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
struct hci_ev_cmd_complete *ec;
- u16 opcode, ocf;
+ u16 opcode, ocf, ogf;
if (hdr->evt == HCI_EV_CMD_COMPLETE) {
ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
opcode = __le16_to_cpu(ec->opcode);
ocf = hci_opcode_ocf(opcode);
+ ogf = hci_opcode_ogf(opcode);
+
if (ocf == BT_CMD_MODULE_CFG_REQ &&
priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
}
+
+ if (ogf == OGF) {
+ BT_DBG("vendor event skipped: ogf 0x%4.4x", ogf);
+ kfree_skb(skb);
+ return false;
+ }
}
+
+ return true;
}
EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);
@@ -200,6 +210,36 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
}
EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
+int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
+{
+ struct sk_buff *skb;
+ struct btmrvl_cmd *cmd;
+
+ skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!skb) {
+ BT_ERR("No free skb");
+ return -ENOMEM;
+ }
+
+ cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
+ cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
+ BT_CMD_HOST_SLEEP_CONFIG));
+ cmd->length = 2;
+ cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+ cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
+
+ bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+
+ skb->dev = (void *) priv->btmrvl_dev.hcidev;
+ skb_queue_head(&priv->adapter->tx_queue, skb);
+
+ BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
+ cmd->data[1]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
+
int btmrvl_enable_ps(struct btmrvl_private *priv)
{
struct sk_buff *skb;
@@ -232,7 +272,7 @@ int btmrvl_enable_ps(struct btmrvl_private *priv)
}
EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
-static int btmrvl_enable_hs(struct btmrvl_private *priv)
+int btmrvl_enable_hs(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
@@ -268,35 +308,15 @@ static int btmrvl_enable_hs(struct btmrvl_private *priv)
return ret;
}
+EXPORT_SYMBOL_GPL(btmrvl_enable_hs);
int btmrvl_prepare_command(struct btmrvl_private *priv)
{
- struct sk_buff *skb = NULL;
- struct btmrvl_cmd *cmd;
int ret = 0;
if (priv->btmrvl_dev.hscfgcmd) {
priv->btmrvl_dev.hscfgcmd = 0;
-
- skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
- if (skb == NULL) {
- BT_ERR("No free skb");
- return -ENOMEM;
- }
-
- cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
- cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_CONFIG));
- cmd->length = 2;
- cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
- cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
-
- bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
- skb->dev = (void *) priv->btmrvl_dev.hcidev;
- skb_queue_head(&priv->adapter->tx_queue, skb);
-
- BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x",
- cmd->data[0], cmd->data[1]);
+ btmrvl_send_hscfg_cmd(priv);
}
if (priv->btmrvl_dev.pscmd) {
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 27b74b0d547b..0cd61d9f07cd 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -339,9 +339,7 @@ static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card)
done:
kfree(tmphlprbuf);
- if (fw_helper)
- release_firmware(fw_helper);
-
+ release_firmware(fw_helper);
return ret;
}
@@ -484,10 +482,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
done:
kfree(tmpfwbuf);
-
- if (fw_firmware)
- release_firmware(fw_firmware);
-
+ release_firmware(fw_firmware);
return ret;
}
@@ -567,10 +562,12 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
skb_put(skb, buf_len);
skb_pull(skb, SDIO_HEADER_LEN);
- if (type == HCI_EVENT_PKT)
- btmrvl_check_evtpkt(priv, skb);
+ if (type == HCI_EVENT_PKT) {
+ if (btmrvl_check_evtpkt(priv, skb))
+ hci_recv_frame(skb);
+ } else
+ hci_recv_frame(skb);
- hci_recv_frame(skb);
hdev->stat.byte_rx += buf_len;
break;
@@ -1013,6 +1010,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
+ priv->btmrvl_dev.gpio_gap = 0xffff;
+ btmrvl_send_hscfg_cmd(priv);
+
return 0;
disable_host_int:
@@ -1048,11 +1048,111 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
}
}
+static int btmrvl_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmrvl_sdio_card *card;
+ struct btmrvl_private *priv;
+ mmc_pm_flag_t pm_flags;
+ struct hci_dev *hcidev;
+
+ if (func) {
+ pm_flags = sdio_get_host_pm_caps(func);
+ BT_DBG("%s: suspend: PM flags = 0x%x", sdio_func_id(func),
+ pm_flags);
+ if (!(pm_flags & MMC_PM_KEEP_POWER)) {
+ BT_ERR("%s: cannot remain alive while suspended",
+ sdio_func_id(func));
+ return -ENOSYS;
+ }
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("card or priv structure is not valid");
+ return 0;
+ }
+ } else {
+ BT_ERR("sdio_func is not specified");
+ return 0;
+ }
+
+ priv = card->priv;
+
+ if (priv->adapter->hs_state != HS_ACTIVATED) {
+ if (btmrvl_enable_hs(priv)) {
+ BT_ERR("HS not actived, suspend failed!");
+ return -EBUSY;
+ }
+ }
+ hcidev = priv->btmrvl_dev.hcidev;
+ BT_DBG("%s: SDIO suspend", hcidev->name);
+ hci_suspend_dev(hcidev);
+ skb_queue_purge(&priv->adapter->tx_queue);
+
+ priv->adapter->is_suspended = true;
+
+ /* We will keep the power when hs enabled successfully */
+ if (priv->adapter->hs_state == HS_ACTIVATED) {
+ BT_DBG("suspend with MMC_PM_KEEP_POWER");
+ return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ } else {
+ BT_DBG("suspend without MMC_PM_KEEP_POWER");
+ return 0;
+ }
+}
+
+static int btmrvl_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmrvl_sdio_card *card;
+ struct btmrvl_private *priv;
+ mmc_pm_flag_t pm_flags;
+ struct hci_dev *hcidev;
+
+ if (func) {
+ pm_flags = sdio_get_host_pm_caps(func);
+ BT_DBG("%s: resume: PM flags = 0x%x", sdio_func_id(func),
+ pm_flags);
+ card = sdio_get_drvdata(func);
+ if (!card || !card->priv) {
+ BT_ERR("card or priv structure is not valid");
+ return 0;
+ }
+ } else {
+ BT_ERR("sdio_func is not specified");
+ return 0;
+ }
+ priv = card->priv;
+
+ if (!priv->adapter->is_suspended) {
+ BT_DBG("device already resumed");
+ return 0;
+ }
+
+ priv->adapter->is_suspended = false;
+ hcidev = priv->btmrvl_dev.hcidev;
+ BT_DBG("%s: SDIO resume", hcidev->name);
+ hci_resume_dev(hcidev);
+ priv->hw_wakeup_firmware(priv);
+ priv->adapter->hs_state = HS_DEACTIVATED;
+ BT_DBG("%s: HS DEACTIVATED in resume!", hcidev->name);
+
+ return 0;
+}
+
+static const struct dev_pm_ops btmrvl_sdio_pm_ops = {
+ .suspend = btmrvl_sdio_suspend,
+ .resume = btmrvl_sdio_resume,
+};
+
static struct sdio_driver bt_mrvl_sdio = {
.name = "btmrvl_sdio",
.id_table = btmrvl_sdio_ids,
.probe = btmrvl_sdio_probe,
.remove = btmrvl_sdio_remove,
+ .drv = {
+ .owner = THIS_MODULE,
+ .pm = &btmrvl_sdio_pm_ops,
+ }
};
static int __init btmrvl_sdio_init_module(void)
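For reference, a minimal sketch of hooking dev_pm_ops into an SDIO driver as done for bt_mrvl_sdio above; the "example_" callbacks are illustrative, and a real suspend handler would quiesce the device first (as btmrvl_sdio_suspend() does) before asking the host to keep it powered.

#include <linux/pm.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/pm.h>

static int example_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

	/* device is quiesced; keep it powered across suspend */
	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}

static int example_resume(struct device *dev)
{
	/* re-wake firmware, resume the upper layers */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};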
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 461c68bc4dd7..83ebb241bfcc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -125,6 +125,7 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -139,10 +140,14 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -855,6 +860,7 @@ static void btusb_work(struct work_struct *work)
{
struct btusb_data *data = container_of(work, struct btusb_data, work);
struct hci_dev *hdev = data->hdev;
+ int new_alts;
int err;
if (hdev->conn_hash.sco_num > 0) {
@@ -868,11 +874,19 @@ static void btusb_work(struct work_struct *work)
set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
}
- if (data->isoc_altsetting != 2) {
+
+ if (hdev->voice_setting & 0x0020) {
+ static const int alts[3] = { 2, 4, 5 };
+ new_alts = alts[hdev->conn_hash.sco_num - 1];
+ } else {
+ new_alts = hdev->conn_hash.sco_num;
+ }
+
+ if (data->isoc_altsetting != new_alts) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
- if (__set_isoc_interface(hdev, 2) < 0)
+ if (__set_isoc_interface(hdev, new_alts) < 0)
return;
}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 98a8c05d4f23..e564579a6115 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -388,7 +388,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
hdev->close = hci_uart_close;
hdev->flush = hci_uart_flush;
hdev->send = hci_uart_send_frame;
- hdev->parent = hu->tty->dev;
+ SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 158bfe507da7..3f72595a6017 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -252,8 +252,9 @@ static int vhci_open(struct inode *inode, struct file *file)
}
file->private_data = data;
+ nonseekable_open(inode, file);
- return nonseekable_open(inode, file);
+ return 0;
}
static int vhci_release(struct inode *inode, struct file *file)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 17e05d1076b3..a0df182f6f7d 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -958,7 +958,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
if (set_memory_uc((unsigned long)table, 1 << page_order))
printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
- bridge->gatt_table = (void *)table;
+ bridge->gatt_table = (u32 __iomem *)table;
#else
bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
@@ -1010,7 +1010,6 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
case LVL2_APER_SIZE:
/* The generic routines can't deal with 2 level gatt's */
return -EINVAL;
- break;
default:
page_order = 0;
break;
@@ -1077,7 +1076,6 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
case LVL2_APER_SIZE:
/* The generic routines can't deal with 2 level gatt's */
return -EINVAL;
- break;
default:
num_entries = 0;
break;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 962e75dc4781..0a4185279417 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -898,6 +898,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
@@ -907,6 +908,11 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
+ ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
+ ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
{ }
};
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 7ea18a5fe71c..8e2d9140f300 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -96,6 +96,7 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
+#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -211,6 +212,7 @@
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
@@ -235,6 +237,19 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 7f025fb620de..1237e7575c3f 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1179,6 +1179,20 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
writel(addr | pte_flags, intel_private.gtt + entry);
}
+static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ u32 pte_flags;
+
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+
+ writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
+}
+
static void gen6_cleanup(void)
{
}
@@ -1205,12 +1219,16 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
u32 reg_addr;
+ int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
- intel_private.registers = ioremap(reg_addr, 128 * 4096);
+ if (INTEL_GTT_GEN >= 7)
+ size = MB(2);
+
+ intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
@@ -1354,6 +1372,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
+static const struct intel_gtt_driver valleyview_gtt_driver = {
+ .gen = 7,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = valleyview_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1460,6 +1487,22 @@ static const struct intel_gtt_driver_description {
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
+ "ValleyView", &valleyview_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
+ "Haswell", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV,
+ "Haswell", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index ffa888cd1c88..192000377737 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -158,7 +158,6 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
break;
case LVL2_APER_SIZE:
return -EINVAL;
- break;
default:
num_entries = 0;
break;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b2402eb076c7..f45dad39a18b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -63,7 +63,7 @@ config HW_RANDOM_AMD
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
depends on HW_RANDOM && HAVE_CLK
- default HW_RANDOM
+ default (HW_RANDOM && ARCH_AT91)
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Atmel AT91 devices.
@@ -250,3 +250,16 @@ config UML_RANDOM
(check your distro, or download from
http://sourceforge.net/projects/gkernel/). rngd periodically reads
/dev/hwrng and injects the entropy into /dev/random.
+
+config HW_RANDOM_PSERIES
+ tristate "pSeries HW Random Number Generator support"
+ depends on HW_RANDOM && PPC64 && IBMVIO
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on POWER7+ machines and above
+
+ To compile this driver as a module, choose M here: the
+ module will be called pseries-rng.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b2ff5265a996..d901dfa30321 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
+obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index f518b99f53f5..731c9046cf7b 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
u32 *data = buf;
/* data ready? */
- if (readl(trng->base + TRNG_ODATA) & 1) {
+ if (readl(trng->base + TRNG_ISR) & 1) {
*data = readl(trng->base + TRNG_ODATA);
+ /*
+ ensure data ready is only set again AFTER the next data
+ word is ready in case it got set between checking ISR
+ and reading ODATA, so we don't risk re-reading the
+ same word
+ */
+ readl(trng->base + TRNG_ISR);
return 4;
} else
return 0;
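For context, the atmel fix above matters because of the hwrng ->read contract: the callback returns the number of bytes written into the buffer, or 0 when no data is ready, so the driver has to consult the real data-ready flag (ISR) rather than the data register itself. A minimal sketch of that contract, with illustrative register offsets that are not the real TRNG layout:

#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/types.h>

static void __iomem *example_base;

static int example_rng_read(struct hwrng *rng, void *buf, size_t max,
			    bool wait)
{
	u32 *data = buf;

	if (!(readl(example_base + 0x1c) & 1))	/* data-ready flag */
		return 0;			/* nothing yet; core retries */

	*data = readl(example_base + 0x50);	/* one 32-bit word */
	return 4;
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
};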
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index a07a5caa599c..1412565c01af 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -115,22 +115,12 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto err_region;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- ret = -EBUSY;
- goto err_region;
- }
-
- dev_set_drvdata(&pdev->dev, res);
- rng_base = ioremap(res->start, resource_size(res));
+ rng_base = devm_request_and_ioremap(&pdev->dev, res);
if (!rng_base) {
ret = -ENOMEM;
goto err_ioremap;
}
+ dev_set_drvdata(&pdev->dev, res);
ret = hwrng_register(&omap_rng_ops);
if (ret)
@@ -145,11 +135,8 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
return 0;
err_register:
- iounmap(rng_base);
rng_base = NULL;
err_ioremap:
- release_mem_region(res->start, resource_size(res));
-err_region:
if (cpu_is_omap24xx()) {
clk_disable(rng_ick);
clk_put(rng_ick);
@@ -159,20 +146,15 @@ err_region:
static int __exit omap_rng_remove(struct platform_device *pdev)
{
- struct resource *res = dev_get_drvdata(&pdev->dev);
-
hwrng_unregister(&omap_rng_ops);
omap_rng_write_reg(RNG_MASK_REG, 0x0);
- iounmap(rng_base);
-
if (cpu_is_omap24xx()) {
clk_disable(rng_ick);
clk_put(rng_ick);
}
- release_mem_region(res->start, resource_size(res));
rng_base = NULL;
return 0;
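For reference, a minimal sketch of the managed ioremap pattern the omap-rng patch switches to: devm_request_and_ioremap() claims the memory region and maps it, and both are released automatically on driver detach, which is why the explicit iounmap()/release_mem_region() calls disappear from the error and remove paths. "example_probe" is illustrative.

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/errno.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -EADDRNOTAVAIL;

	/* use base; no manual cleanup needed in remove() */
	return 0;
}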
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
new file mode 100644
index 000000000000..5f1197929f0c
--- /dev/null
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2010 Michael Neuling IBM Corporation
+ *
+ * Driver for the pseries hardware RNG for POWER7+ and above
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/hw_random.h>
+#include <asm/vio.h>
+
+#define MODULE_NAME "pseries-rng"
+
+static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) {
+ printk(KERN_ERR "pseries rng hcall error\n");
+ return 0;
+ }
+ return 8;
+}
+
+/**
+ * pseries_rng_get_desired_dma - Return desired DMA allocation for CMO operations
+ *
+ * This is a required callback for a driver to operate in a CMO environment,
+ * but this device does not make use of DMA allocations, so it simply returns 0.
+ *
+ * Return value:
+ * Number of bytes of IO data the driver will need to perform well -> 0
+ */
+static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
+{
+ return 0;
+};
+
+static struct hwrng pseries_rng = {
+ .name = MODULE_NAME,
+ .data_read = pseries_rng_data_read,
+};
+
+static int __init pseries_rng_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
+{
+ return hwrng_register(&pseries_rng);
+}
+
+static int __exit pseries_rng_remove(struct vio_dev *dev)
+{
+ hwrng_unregister(&pseries_rng);
+ return 0;
+}
+
+static struct vio_device_id pseries_rng_driver_ids[] = {
+ { "ibm,random-v1", "ibm,random"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
+
+static struct vio_driver pseries_rng_driver = {
+ .name = MODULE_NAME,
+ .probe = pseries_rng_probe,
+ .remove = pseries_rng_remove,
+ .get_desired_dma = pseries_rng_get_desired_dma,
+ .id_table = pseries_rng_driver_ids
+};
+
+static int __init rng_init(void)
+{
+ printk(KERN_INFO "Registering IBM pSeries RNG driver\n");
+ return vio_register_driver(&pseries_rng_driver);
+}
+
+module_init(rng_init);
+
+static void __exit rng_exit(void)
+{
+ vio_unregister_driver(&pseries_rng_driver);
+}
+module_exit(rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Neuling <mikey@neuling.org>");
+MODULE_DESCRIPTION("H/W RNG driver for IBM pSeries processors");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 165e1febae53..4864407e3fc4 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -12,6 +12,7 @@ config HAVE_MACH_CLKDEV
config COMMON_CLK
bool
select HAVE_CLK_PREPARE
+ select CLKDEV_LOOKUP
---help---
The common clock framework is a single definition of struct
clk, useful across many platforms, as well as an
@@ -22,17 +23,6 @@ config COMMON_CLK
menu "Common Clock Framework"
depends on COMMON_CLK
-config COMMON_CLK_DISABLE_UNUSED
- bool "Disabled unused clocks at boot"
- depends on COMMON_CLK
- ---help---
- Traverses the entire clock tree and disables any clocks that are
- enabled in hardware but have not been enabled by any device drivers.
- This saves power and keeps the software model of the clock in line
- with reality.
-
- If in doubt, say "N".
-
config COMMON_CLK_DEBUG
bool "DebugFS representation of clock tree"
depends on COMMON_CLK
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 1f736bc11c4b..b9a5158a30b1 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
- clk-mux.o clk-divider.o
+ clk-mux.o clk-divider.o clk-fixed-factor.o
+# SoCs specific
+obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index d5ac6a75ea57..8ea11b444528 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -45,7 +45,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
-EXPORT_SYMBOL_GPL(clk_divider_recalc_rate);
/*
* The reverse of DIV_ROUND_UP: The maximum number which
@@ -68,8 +67,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_ONE_BASED)
maxdiv--;
- if (!best_parent_rate) {
- parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+ if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
bestdiv = DIV_ROUND_UP(parent_rate, rate);
bestdiv = bestdiv == 0 ? 1 : bestdiv;
bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
@@ -109,24 +108,18 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
int div;
div = clk_divider_bestdiv(hw, rate, prate);
- if (prate)
- return *prate / div;
- else {
- unsigned long r;
- r = __clk_get_rate(__clk_get_parent(hw->clk));
- return r / div;
- }
+ return *prate / div;
}
-EXPORT_SYMBOL_GPL(clk_divider_round_rate);
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
unsigned int div;
unsigned long flags = 0;
u32 val;
- div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate;
+ div = parent_rate / rate;
if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
div--;
@@ -147,15 +140,26 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
return 0;
}
-EXPORT_SYMBOL_GPL(clk_divider_set_rate);
-struct clk_ops clk_divider_ops = {
+const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
.round_rate = clk_divider_round_rate,
.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -163,38 +167,34 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
{
struct clk_divider *div;
struct clk *clk;
+ struct clk_init_data init;
+ /* allocate the divider */
div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
-
if (!div) {
pr_err("%s: could not allocate divider clk\n", __func__);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_divider assignments */
div->reg = reg;
div->shift = shift;
div->width = width;
div->flags = clk_divider_flags;
div->lock = lock;
+ div->hw.init = &init;
- if (parent_name) {
- div->parent[0] = kstrdup(parent_name, GFP_KERNEL);
- if (!div->parent[0])
- goto out;
- }
-
- clk = clk_register(dev, name,
- &clk_divider_ops, &div->hw,
- div->parent,
- (parent_name ? 1 : 0),
- flags);
- if (clk)
- return clk;
+ /* register the clock */
+ clk = clk_register(dev, &div->hw);
-out:
- kfree(div->parent[0]);
- kfree(div);
+ if (IS_ERR(clk))
+ kfree(div);
- return NULL;
+ return clk;
}
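
For reference, a caller of the reworked clk_register_divider() might look like the sketch below; ccm_base, the 0x10 offset, the bitfield and foo_ccm_lock are made-up names for illustration. Note that failures are now reported with ERR_PTR() instead of NULL:

static DEFINE_SPINLOCK(foo_ccm_lock);	/* hypothetical shared register lock */

static void __init foo_register_divider(void __iomem *ccm_base)
{
	struct clk *clk;

	/* divider field in bits [5:3] of a hypothetical register at offset 0x10 */
	clk = clk_register_divider(NULL, "per_div", "osc", 0,
				   ccm_base + 0x10, 3, 3, 0, &foo_ccm_lock);
	if (IS_ERR(clk))
		pr_err("per_div registration failed: %ld\n", PTR_ERR(clk));
}
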
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
new file mode 100644
index 000000000000..c8c003e217ad
--- /dev/null
+++ b/drivers/clk/clk-fixed-factor.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Standard functionality for the common clock API.
+ */
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+/*
+ * DOC: basic fixed multiplier and divider clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is fixed. clk->rate = parent->rate / div * mult
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
+static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+ return parent_rate * fix->mult / fix->div;
+}
+
+static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+ if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ unsigned long best_parent;
+
+ best_parent = (rate / fix->mult) * fix->div;
+ *prate = __clk_round_rate(__clk_get_parent(hw->clk),
+ best_parent);
+ }
+
+ return (*prate / fix->div) * fix->mult;
+}
+
+static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+struct clk_ops clk_fixed_factor_ops = {
+ .round_rate = clk_factor_round_rate,
+ .set_rate = clk_factor_set_rate,
+ .recalc_rate = clk_factor_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
+
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ struct clk_fixed_factor *fix;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ fix = kmalloc(sizeof(*fix), GFP_KERNEL);
+ if (!fix) {
+ pr_err("%s: could not allocate fixed factor clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_fixed_factor assignments */
+ fix->mult = mult;
+ fix->div = div;
+ fix->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(dev, &fix->hw);
+
+ if (IS_ERR(clk))
+ kfree(fix);
+
+ return clk;
+}
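
As a usage sketch (the clock names are hypothetical), a fixed divide-by-two bus clock hanging off a PLL would be registered as:

	/* "ahb" always runs at pll_rate * 1 / 2 */
	clk = clk_register_fixed_factor(NULL, "ahb", "pll", 0, 1, 2);

With a 480 MHz parent, clk_factor_recalc_rate() then reports 240 MHz, and CLK_SET_RATE_PARENT can be passed in flags if rate requests should be forwarded to the PLL.
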
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 90c79fb5d1bd..cbd246229786 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -32,51 +32,50 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
{
return to_clk_fixed_rate(hw)->fixed_rate;
}
-EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate);
-struct clk_ops clk_fixed_rate_ops = {
+const struct clk_ops clk_fixed_rate_ops = {
.recalc_rate = clk_fixed_rate_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
+/**
+ * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate)
{
struct clk_fixed_rate *fixed;
- char **parent_names = NULL;
- u8 len;
+ struct clk *clk;
+ struct clk_init_data init;
+ /* allocate fixed-rate clock */
fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
-
if (!fixed) {
pr_err("%s: could not allocate fixed clk\n", __func__);
return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_fixed_rate_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_fixed_rate assignments */
fixed->fixed_rate = fixed_rate;
+ fixed->hw.init = &init;
- if (parent_name) {
- parent_names = kmalloc(sizeof(char *), GFP_KERNEL);
-
- if (! parent_names)
- goto out;
+ /* register the clock */
+ clk = clk_register(dev, &fixed->hw);
- len = sizeof(char) * strlen(parent_name);
-
- parent_names[0] = kmalloc(len, GFP_KERNEL);
-
- if (!parent_names[0])
- goto out;
-
- strncpy(parent_names[0], parent_name, len);
- }
+ if (IS_ERR(clk))
+ kfree(fixed);
-out:
- return clk_register(dev, name,
- &clk_fixed_rate_ops, &fixed->hw,
- parent_names,
- (parent_name ? 1 : 0),
- flags);
+ return clk;
}
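
A minimal sketch of registering a root fixed-rate clock with the reworked API (the clock name is hypothetical; passing a NULL parent_name simply yields num_parents == 0):

	struct clk *osc;

	osc = clk_register_fixed_rate(NULL, "osc24m", NULL, 0, 24000000);
	if (IS_ERR(osc))
		pr_err("osc24m registration failed: %ld\n", PTR_ERR(osc));
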
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index b5902e2ef2fd..578465e04be6 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -28,32 +28,38 @@
#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-static void clk_gate_set_bit(struct clk_gate *gate)
+/*
+ * It works on the following logic:
+ *
+ * For enabling clock, enable = 1
+ * set2dis = 1 -> clear bit -> set = 0
+ * set2dis = 0 -> set bit -> set = 1
+ *
+ * For disabling clock, enable = 0
+ * set2dis = 1 -> set bit -> set = 1
+ * set2dis = 0 -> clear bit -> set = 0
+ *
+ * So, result is always: enable xor set2dis.
+ */
+static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
- u32 reg;
+ struct clk_gate *gate = to_clk_gate(hw);
+ int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
unsigned long flags = 0;
+ u32 reg;
+
+ set ^= enable;
if (gate->lock)
spin_lock_irqsave(gate->lock, flags);
reg = readl(gate->reg);
- reg |= BIT(gate->bit_idx);
- writel(reg, gate->reg);
-
- if (gate->lock)
- spin_unlock_irqrestore(gate->lock, flags);
-}
-
-static void clk_gate_clear_bit(struct clk_gate *gate)
-{
- u32 reg;
- unsigned long flags = 0;
- if (gate->lock)
- spin_lock_irqsave(gate->lock, flags);
+ if (set)
+ reg |= BIT(gate->bit_idx);
+ else
+ reg &= ~BIT(gate->bit_idx);
- reg = readl(gate->reg);
- reg &= ~BIT(gate->bit_idx);
writel(reg, gate->reg);
if (gate->lock)
@@ -62,27 +68,15 @@ static void clk_gate_clear_bit(struct clk_gate *gate)
static int clk_gate_enable(struct clk_hw *hw)
{
- struct clk_gate *gate = to_clk_gate(hw);
-
- if (gate->flags & CLK_GATE_SET_TO_DISABLE)
- clk_gate_clear_bit(gate);
- else
- clk_gate_set_bit(gate);
+ clk_gate_endisable(hw, 1);
return 0;
}
-EXPORT_SYMBOL_GPL(clk_gate_enable);
static void clk_gate_disable(struct clk_hw *hw)
{
- struct clk_gate *gate = to_clk_gate(hw);
-
- if (gate->flags & CLK_GATE_SET_TO_DISABLE)
- clk_gate_set_bit(gate);
- else
- clk_gate_clear_bit(gate);
+ clk_gate_endisable(hw, 0);
}
-EXPORT_SYMBOL_GPL(clk_gate_disable);
static int clk_gate_is_enabled(struct clk_hw *hw)
{
@@ -99,15 +93,25 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
return reg ? 1 : 0;
}
-EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
-struct clk_ops clk_gate_ops = {
+const struct clk_ops clk_gate_ops = {
.enable = clk_gate_enable,
.disable = clk_gate_disable,
.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
+/**
+ * clk_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -115,36 +119,32 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
{
struct clk_gate *gate;
struct clk *clk;
+ struct clk_init_data init;
+ /* allocate the gate */
gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-
if (!gate) {
pr_err("%s: could not allocate gated clk\n", __func__);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_gate_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_gate assignments */
gate->reg = reg;
gate->bit_idx = bit_idx;
gate->flags = clk_gate_flags;
gate->lock = lock;
+ gate->hw.init = &init;
- if (parent_name) {
- gate->parent[0] = kstrdup(parent_name, GFP_KERNEL);
- if (!gate->parent[0])
- goto out;
- }
+ clk = clk_register(dev, &gate->hw);
+
+ if (IS_ERR(clk))
+ kfree(gate);
- clk = clk_register(dev, name,
- &clk_gate_ops, &gate->hw,
- gate->parent,
- (parent_name ? 1 : 0),
- flags);
- if (clk)
- return clk;
-out:
- kfree(gate->parent[0]);
- kfree(gate);
-
- return NULL;
+ return clk;
}
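
The clk_gate_endisable() helper above collapses the old set/clear pair into one XOR: with CLK_GATE_SET_TO_DISABLE the bit is written as the inverse of the requested enable state, otherwise it mirrors it. A hedged usage sketch (ssp_reg and foo_lock are hypothetical), following the same pattern the i.MX23/28 code further down uses for its power gates:

	/* bit 31 of a hypothetical SSP register gates the clock, set-to-enable */
	clk = clk_register_gate(NULL, "ssp_gate", "ssp_div", 0,
				ssp_reg, 31, 0, &foo_lock);

	/* an active-low gate would pass CLK_GATE_SET_TO_DISABLE instead of 0 */
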
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index c71ad1f41a97..fd36a8ea73d9 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -55,7 +55,6 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
return val;
}
-EXPORT_SYMBOL_GPL(clk_mux_get_parent);
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
@@ -82,35 +81,47 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-EXPORT_SYMBOL_GPL(clk_mux_set_parent);
-struct clk_ops clk_mux_ops = {
+const struct clk_ops clk_mux_ops = {
.get_parent = clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
struct clk *clk_register_mux(struct device *dev, const char *name,
- char **parent_names, u8 num_parents, unsigned long flags,
+ const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock)
{
struct clk_mux *mux;
+ struct clk *clk;
+ struct clk_init_data init;
- mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL);
-
+ /* allocate the mux */
+ mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
if (!mux) {
pr_err("%s: could not allocate mux clk\n", __func__);
return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_mux_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
/* struct clk_mux assignments */
mux->reg = reg;
mux->shift = shift;
mux->width = width;
mux->flags = clk_mux_flags;
mux->lock = lock;
+ mux->hw.init = &init;
+
+ clk = clk_register(dev, &mux->hw);
+
+ if (IS_ERR(clk))
+ kfree(mux);
- return clk_register(dev, name, &clk_mux_ops, &mux->hw,
- parent_names, num_parents, flags);
+ return clk;
}
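
With parent_names now const-qualified, a typical caller sketch (clkseq_reg, foo_lock and the bitfield are illustrative only):

	static const char *ssp_parents[] = { "ref_io", "ref_xtal" };

	clk = clk_register_mux(NULL, "ssp_sel", ssp_parents,
			       ARRAY_SIZE(ssp_parents), 0,
			       clkseq_reg, 5, 1, 0, &foo_lock);
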
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9cf6f59e3e19..9a1eb0cfa95f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -194,9 +194,8 @@ static int __init clk_debug_init(void)
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DEBUG */
+#endif
-#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
@@ -246,9 +245,6 @@ static int clk_disable_unused(void)
return 0;
}
late_initcall(clk_disable_unused);
-#else
-static inline int clk_disable_unused(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
/*** helper functions ***/
@@ -287,7 +283,7 @@ unsigned long __clk_get_rate(struct clk *clk)
unsigned long ret;
if (!clk) {
- ret = -EINVAL;
+ ret = 0;
goto out;
}
@@ -297,7 +293,7 @@ unsigned long __clk_get_rate(struct clk *clk)
goto out;
if (!clk->parent)
- ret = -ENODEV;
+ ret = 0;
out:
return ret;
@@ -562,7 +558,7 @@ EXPORT_SYMBOL_GPL(clk_enable);
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk. Does not query the hardware. If
- * clk is NULL then returns -EINVAL.
+ * clk is NULL then returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
@@ -584,18 +580,22 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
*/
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
- unsigned long unused;
+ unsigned long parent_rate = 0;
if (!clk)
return -EINVAL;
- if (!clk->ops->round_rate)
- return clk->rate;
+ if (!clk->ops->round_rate) {
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ return __clk_round_rate(clk->parent, rate);
+ else
+ return clk->rate;
+ }
- if (clk->flags & CLK_SET_RATE_PARENT)
- return clk->ops->round_rate(clk->hw, rate, &unused);
- else
- return clk->ops->round_rate(clk->hw, rate, NULL);
+ if (clk->parent)
+ parent_rate = clk->parent->rate;
+
+ return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}
/**
@@ -765,25 +765,41 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
struct clk *top = clk;
- unsigned long best_parent_rate = clk->parent->rate;
+ unsigned long best_parent_rate = 0;
unsigned long new_rate;
- if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
- clk->new_rate = clk->rate;
+ /* sanity */
+ if (IS_ERR_OR_NULL(clk))
+ return NULL;
+
+ /* save parent rate, if it exists */
+ if (clk->parent)
+ best_parent_rate = clk->parent->rate;
+
+ /* never propagate up to the parent */
+ if (!(clk->flags & CLK_SET_RATE_PARENT)) {
+ if (!clk->ops->round_rate) {
+ clk->new_rate = clk->rate;
+ return NULL;
+ }
+ new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+ goto out;
+ }
+
+ /* need clk->parent from here on out */
+ if (!clk->parent) {
+ pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
return NULL;
}
- if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
+ if (!clk->ops->round_rate) {
top = clk_calc_new_rates(clk->parent, rate);
- new_rate = clk->new_rate = clk->parent->new_rate;
+ new_rate = clk->parent->new_rate;
goto out;
}
- if (clk->flags & CLK_SET_RATE_PARENT)
- new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
- else
- new_rate = clk->ops->round_rate(clk->hw, rate, NULL);
+ new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
if (best_parent_rate != clk->parent->rate) {
top = clk_calc_new_rates(clk->parent, best_parent_rate);
@@ -834,18 +850,21 @@ static void clk_change_rate(struct clk *clk)
{
struct clk *child;
unsigned long old_rate;
+ unsigned long best_parent_rate = 0;
struct hlist_node *tmp;
old_rate = clk->rate;
+ if (clk->parent)
+ best_parent_rate = clk->parent->rate;
+
if (clk->ops->set_rate)
- clk->ops->set_rate(clk->hw, clk->new_rate);
+ clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
if (clk->ops->recalc_rate)
- clk->rate = clk->ops->recalc_rate(clk->hw,
- clk->parent->rate);
+ clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
else
- clk->rate = clk->parent->rate;
+ clk->rate = best_parent_rate;
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
@@ -859,38 +878,19 @@ static void clk_change_rate(struct clk *clk)
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
- * In the simplest case clk_set_rate will only change the rate of clk.
+ * In the simplest case clk_set_rate will only adjust the rate of clk.
*
- * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
- * will fail; only when the clk is disabled will it be able to change
- * its rate.
+ * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
+ * propagate up to clk's parent; whether or not this happens depends on the
+ * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
+ * after calling .round_rate then upstream parent propagation is ignored. If
+ * *parent_rate comes back with a new rate for clk's parent then we propagate
+ * up to clk's parent and set its rate. Upward propagation will continue
+ * until either a clk does not support the CLK_SET_RATE_PARENT flag or
+ * .round_rate stops requesting changes to clk's parent_rate.
*
- * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
- * recursively propagate up to clk's parent; whether or not this happens
- * depends on the outcome of clk's .round_rate implementation. If
- * *parent_rate is 0 after calling .round_rate then upstream parent
- * propagation is ignored. If *parent_rate comes back with a new rate
- * for clk's parent then we propagate up to clk's parent and set it's
- * rate. Upward propagation will continue until either a clk does not
- * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
- * changes to clk's parent_rate. If there is a failure during upstream
- * propagation then clk_set_rate will unwind and restore each clk's rate
- * that had been successfully changed. Afterwards a rate change abort
- * notification will be propagated downstream, starting from the clk
- * that failed.
- *
- * At the end of all of the rate setting, clk_set_rate internally calls
- * __clk_recalc_rates and propagates the rate changes downstream,
- * starting from the highest clk whose rate was changed. This has the
- * added benefit of propagating post-rate change notifiers.
- *
- * Note that while post-rate change and rate change abort notifications
- * are guaranteed to be sent to a clk only once per call to
- * clk_set_rate, pre-change notifications will be sent for every clk
- * whose rate is changed. Stacking pre-change notifications is noisy
- * for the drivers subscribed to them, but this allows drivers to react
- * to intermediate clk rate changes up until the point where the final
- * rate is achieved at the end of upstream propagation.
+ * Rate changes are accomplished via tree traversal that also recalculates the
+ * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
*
* Returns 0 on success, -EERROR otherwise.
*/
@@ -906,6 +906,11 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (rate == clk->rate)
goto out;
+ if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(clk, rate);
if (!top) {
@@ -997,7 +1002,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
if (!clk->parents)
clk->parents =
- kmalloc((sizeof(struct clk*) * clk->num_parents),
+ kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
if (!clk->parents)
@@ -1062,21 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
old_parent = clk->parent;
- /* find index of new parent clock using cached parent ptrs */
- for (i = 0; i < clk->num_parents; i++)
- if (clk->parents[i] == parent)
- break;
+ if (!clk->parents)
+ clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+ GFP_KERNEL);
/*
- * find index of new parent clock using string name comparison
- * also try to cache the parent to avoid future calls to __clk_lookup
+ * find index of new parent clock using cached parent ptrs,
+ * or if not yet cached, use string name comparison and cache
+ * them now to avoid future calls to __clk_lookup.
*/
- if (i == clk->num_parents)
- for (i = 0; i < clk->num_parents; i++)
- if (!strcmp(clk->parent_names[i], parent->name)) {
+ for (i = 0; i < clk->num_parents; i++) {
+ if (clk->parents && clk->parents[i] == parent)
+ break;
+ else if (!strcmp(clk->parent_names[i], parent->name)) {
+ if (clk->parents)
clk->parents[i] = __clk_lookup(parent->name);
- break;
- }
+ break;
+ }
+ }
if (i == clk->num_parents) {
pr_debug("%s: clock %s is not a possible parent of clock %s\n",
@@ -1175,40 +1183,41 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
*
* Initializes the lists in struct clk, queries the hardware for the
* parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- * .name
- * .ops
- * .hw
- * .parent_names
- * .num_parents
- * .flags
- *
- * Essentially, everything that would normally be passed into clk_register is
- * assumed to be initialized already in __clk_init. The other members may be
- * populated, but are optional.
- *
- * __clk_init is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized. It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations.
*/
-void __clk_init(struct device *dev, struct clk *clk)
+int __clk_init(struct device *dev, struct clk *clk)
{
- int i;
+ int i, ret = 0;
struct clk *orphan;
struct hlist_node *tmp, *tmp2;
if (!clk)
- return;
+ return -EINVAL;
mutex_lock(&prepare_lock);
/* check to see if a clock with this name is already registered */
- if (__clk_lookup(clk->name))
+ if (__clk_lookup(clk->name)) {
+ pr_debug("%s: clk %s already initialized\n",
+ __func__, clk->name);
+ ret = -EEXIST;
goto out;
+ }
+
+ /* check that clk_ops are sane. See Documentation/clk.txt */
+ if (clk->ops->set_rate &&
+ !(clk->ops->round_rate && clk->ops->recalc_rate)) {
+ pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+ __func__, clk->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (clk->ops->set_parent && !clk->ops->get_parent) {
+ pr_warning("%s: %s must implement .get_parent & .set_parent\n",
+ __func__, clk->name);
+ ret = -EINVAL;
+ goto out;
+ }
/* throw a WARN if any entries in parent_names are NULL */
for (i = 0; i < clk->num_parents; i++)
@@ -1302,48 +1311,130 @@ void __clk_init(struct device *dev, struct clk *clk)
out:
mutex_unlock(&prepare_lock);
- return;
+ return ret;
}
/**
+ * __clk_register - register a clock and return a cookie.
+ *
+ * Same as clk_register, except that the .clk field inside hw shall point to a
+ * preallocated (generally statically allocated) struct clk. None of the fields
+ * of the struct clk need to be initialized.
+ *
+ * The data pointed to by the .init and .clk fields shall NOT be marked as init
+ * data.
+ *
+ * __clk_register is only exposed via clk-private.h and is intended for use with
+ * very large numbers of clocks that need to be statically initialized. It is
+ * a layering violation to include clk-private.h from any code which implements
+ * a clock's .ops; as such any statically initialized clock data MUST be in a
+ * separate C file from the logic that implements its operations. Returns the
+ * registered struct clk on success, or an ERR_PTR() on failure.
+ */
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = hw->clk;
+ clk->name = hw->init->name;
+ clk->ops = hw->init->ops;
+ clk->hw = hw;
+ clk->flags = hw->init->flags;
+ clk->parent_names = hw->init->parent_names;
+ clk->num_parents = hw->init->num_parents;
+
+ ret = __clk_init(dev, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(__clk_register);
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
* @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
* cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API.
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error pointer; drivers must test the return value with IS_ERR() after
+ * calling clk_register.
*/
-struct clk *clk_register(struct device *dev, const char *name,
- const struct clk_ops *ops, struct clk_hw *hw,
- char **parent_names, u8 num_parents, unsigned long flags)
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
+ int i, ret;
struct clk *clk;
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
- if (!clk)
- return NULL;
+ if (!clk) {
+ pr_err("%s: could not allocate clk\n", __func__);
+ ret = -ENOMEM;
+ goto fail_out;
+ }
- clk->name = name;
- clk->ops = ops;
+ clk->name = kstrdup(hw->init->name, GFP_KERNEL);
+ if (!clk->name) {
+ pr_err("%s: could not allocate clk->name\n", __func__);
+ ret = -ENOMEM;
+ goto fail_name;
+ }
+ clk->ops = hw->init->ops;
clk->hw = hw;
- clk->flags = flags;
- clk->parent_names = parent_names;
- clk->num_parents = num_parents;
+ clk->flags = hw->init->flags;
+ clk->num_parents = hw->init->num_parents;
hw->clk = clk;
- __clk_init(dev, clk);
+ /* allocate local copy in case parent_names is __initdata */
+ clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
+ GFP_KERNEL);
- return clk;
+ if (!clk->parent_names) {
+ pr_err("%s: could not allocate clk->parent_names\n", __func__);
+ ret = -ENOMEM;
+ goto fail_parent_names;
+ }
+
+
+ /* copy each string name in case parent_names is __initdata */
+ for (i = 0; i < clk->num_parents; i++) {
+ clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
+ GFP_KERNEL);
+ if (!clk->parent_names[i]) {
+ pr_err("%s: could not copy parent_names\n", __func__);
+ ret = -ENOMEM;
+ goto fail_parent_names_copy;
+ }
+ }
+
+ ret = __clk_init(dev, clk);
+ if (!ret)
+ return clk;
+
+fail_parent_names_copy:
+ while (--i >= 0)
+ kfree(clk->parent_names[i]);
+ kfree(clk->parent_names);
+fail_parent_names:
+ kfree(clk->name);
+fail_name:
+ kfree(clk);
+fail_out:
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
+/**
+ * clk_unregister - unregister a currently registered clock
+ * @clk: clock to unregister
+ *
+ * Currently unimplemented.
+ */
+void clk_unregister(struct clk *clk) {}
+EXPORT_SYMBOL_GPL(clk_unregister);
+
/*** clk rate change notifiers ***/
/**
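
Taken together, the reworked registration flow for a custom clock type is: fill a struct clk_init_data (it may live on the stack), point hw.init at it, and hand the clk_hw to clk_register(), which copies the name and parent names so __initdata sources stay safe. A hedged sketch; foo_clk, foo_recalc_rate and the register argument are hypothetical:

/* assumes linux/clk-provider.h, linux/slab.h and linux/err.h */
struct foo_clk {
	struct clk_hw hw;
	void __iomem *reg;
};

static const struct clk_ops foo_clk_ops = {
	.recalc_rate = foo_recalc_rate,		/* hypothetical callback */
};

struct clk *foo_clk_register(struct device *dev, const char *name,
			     const char *parent_name, void __iomem *reg)
{
	struct foo_clk *foo;
	struct clk_init_data init;
	struct clk *clk;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &foo_clk_ops;
	init.flags = 0;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	foo->reg = reg;
	foo->hw.init = &init;

	clk = clk_register(dev, &foo->hw);
	if (IS_ERR(clk))
		kfree(foo);

	return clk;
}
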
diff --git a/drivers/clk/mxs/Makefile b/drivers/clk/mxs/Makefile
new file mode 100644
index 000000000000..7bedeec08524
--- /dev/null
+++ b/drivers/clk/mxs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for mxs specific clk
+#
+
+obj-y += clk.o clk-pll.o clk-ref.o clk-div.o clk-frac.o
+
+obj-$(CONFIG_SOC_IMX23) += clk-imx23.o
+obj-$(CONFIG_SOC_IMX28) += clk-imx28.o
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
new file mode 100644
index 000000000000..90e1da93877e
--- /dev/null
+++ b/drivers/clk/mxs/clk-div.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_div - mxs integer divider clock
+ * @divider: the parent class
+ * @ops: pointer to clk_ops of parent class
+ * @reg: register address
+ * @busy: busy bit shift
+ *
+ * The mxs divider clock is a subclass of the basic clk_divider with an
+ * additional busy bit.
+ */
+struct clk_div {
+ struct clk_divider divider;
+ const struct clk_ops *ops;
+ void __iomem *reg;
+ u8 busy;
+};
+
+static inline struct clk_div *to_clk_div(struct clk_hw *hw)
+{
+ struct clk_divider *divider = container_of(hw, struct clk_divider, hw);
+
+ return container_of(divider, struct clk_div, divider);
+}
+
+static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_div *div = to_clk_div(hw);
+
+ return div->ops->recalc_rate(&div->divider.hw, parent_rate);
+}
+
+static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_div *div = to_clk_div(hw);
+
+ return div->ops->round_rate(&div->divider.hw, rate, prate);
+}
+
+static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_div *div = to_clk_div(hw);
+ int ret;
+
+ ret = div->ops->set_rate(&div->divider.hw, rate, parent_rate);
+ if (!ret)
+ ret = mxs_clk_wait(div->reg, div->busy);
+
+ return ret;
+}
+
+static struct clk_ops clk_div_ops = {
+ .recalc_rate = clk_div_recalc_rate,
+ .round_rate = clk_div_round_rate,
+ .set_rate = clk_div_set_rate,
+};
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+ struct clk_div *div;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_div_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ div->reg = reg;
+ div->busy = busy;
+
+ div->divider.reg = reg;
+ div->divider.shift = shift;
+ div->divider.width = width;
+ div->divider.flags = CLK_DIVIDER_ONE_BASED;
+ div->divider.lock = &mxs_lock;
+ div->divider.hw.init = &init;
+ div->ops = &clk_divider_ops;
+
+ clk = clk_register(NULL, &div->divider.hw);
+ if (IS_ERR(clk))
+ kfree(div);
+
+ return clk;
+}
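
mxs_clk_wait() is declared in the local clk.h and implemented outside this diff; conceptually it just polls the busy bit until it clears. A purely hypothetical sketch, assuming a jiffies-based timeout:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/types.h>

int mxs_clk_wait(void __iomem *reg, u8 shift)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	/* wait for the busy bit to clear, or give up after 10 ms */
	while (readl_relaxed(reg) & (1 << shift))
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

	return 0;
}
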
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
new file mode 100644
index 000000000000..e6aa6b567d68
--- /dev/null
+++ b/drivers/clk/mxs/clk-frac.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_frac - mxs fractional divider clock
+ * @hw: clk_hw for the fractional divider clock
+ * @reg: register address
+ * @shift: the divider bit shift
+ * @width: the divider bit width
+ * @busy: busy bit shift
+ *
+ * The clock is an adjustable fractional divider with a busy bit that must be
+ * waited on after the divider is adjusted.
+ */
+struct clk_frac {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u8 width;
+ u8 busy;
+};
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ u32 div;
+
+ div = readl_relaxed(frac->reg) >> frac->shift;
+ div &= (1 << frac->width) - 1;
+
+ return (parent_rate >> frac->width) * div;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long parent_rate = *prate;
+ u32 div;
+ u64 tmp;
+
+ if (rate > parent_rate)
+ return -EINVAL;
+
+ tmp = rate;
+ tmp <<= frac->width;
+ do_div(tmp, parent_rate);
+ div = tmp;
+
+ if (!div)
+ return -EINVAL;
+
+ return (parent_rate >> frac->width) * div;
+}
+
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long flags;
+ u32 div, val;
+ u64 tmp;
+
+ if (rate > parent_rate)
+ return -EINVAL;
+
+ tmp = rate;
+ tmp <<= frac->width;
+ do_div(tmp, parent_rate);
+ div = tmp;
+
+ if (!div)
+ return -EINVAL;
+
+ spin_lock_irqsave(&mxs_lock, flags);
+
+ val = readl_relaxed(frac->reg);
+ val &= ~(((1 << frac->width) - 1) << frac->shift);
+ val |= div << frac->shift;
+ writel_relaxed(val, frac->reg);
+
+ spin_unlock_irqrestore(&mxs_lock, flags);
+
+ return mxs_clk_wait(frac->reg, frac->busy);
+}
+
+static struct clk_ops clk_frac_ops = {
+ .recalc_rate = clk_frac_recalc_rate,
+ .round_rate = clk_frac_round_rate,
+ .set_rate = clk_frac_set_rate,
+};
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+ struct clk_frac *frac;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+ if (!frac)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_frac_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ frac->reg = reg;
+ frac->shift = shift;
+ frac->width = width;
+ frac->busy = busy;
+ frac->hw.init = &init;
+
+ clk = clk_register(NULL, &frac->hw);
+ if (IS_ERR(clk))
+ kfree(frac);
+
+ return clk;
+}
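
Worked example of the fractional divider math above: with a 480000000 Hz parent and width = 16, a request for 240000000 Hz gives div = (240000000 << 16) / 480000000 = 32768, and the rate reported back is (480000000 >> 16) * 32768 = 7324 * 32768 = 239992832 Hz; the truncation in parent_rate >> width is why round_rate and recalc_rate return slightly less than the requested rate.
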
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
new file mode 100644
index 000000000000..db2391c054ee
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx23.h>
+#include "clk.h"
+
+#define DIGCTRL MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
+#define CLKCTRL MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
+#define PLLCTRL0 (CLKCTRL + 0x0000)
+#define CPU (CLKCTRL + 0x0020)
+#define HBUS (CLKCTRL + 0x0030)
+#define XBUS (CLKCTRL + 0x0040)
+#define XTAL (CLKCTRL + 0x0050)
+#define PIX (CLKCTRL + 0x0060)
+#define SSP (CLKCTRL + 0x0070)
+#define GPMI (CLKCTRL + 0x0080)
+#define SPDIF (CLKCTRL + 0x0090)
+#define EMI (CLKCTRL + 0x00a0)
+#define SAIF (CLKCTRL + 0x00c0)
+#define TV (CLKCTRL + 0x00d0)
+#define ETM (CLKCTRL + 0x00e0)
+#define FRAC (CLKCTRL + 0x00f0)
+#define CLKSEQ (CLKCTRL + 0x0110)
+
+#define BP_CPU_INTERRUPT_WAIT 12
+#define BP_CLKSEQ_BYPASS_SAIF 0
+#define BP_CLKSEQ_BYPASS_SSP 5
+#define BP_SAIF_DIV_FRAC_EN 16
+#define BP_FRAC_IOFRAC 24
+
+static void __init clk_misc_init(void)
+{
+ u32 val;
+
+ /* Gate off cpu clock in WFI for power saving */
+ __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+ /* Clear BYPASS for SAIF */
+ __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ);
+
+ /* SAIF has to use frac div for functional operation */
+ val = readl_relaxed(SAIF);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF);
+
+ /*
+ * Source the ssp clock from ref_io rather than ref_xtal,
+ * as ref_xtal only provides 24 MHz at maximum.
+ */
+ __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ);
+
+ /*
+ * 480 MHz seems too high to be ssp clock source directly,
+ * so set frac to get a 288 MHz ref_io.
+ */
+ __mxs_clrl(0x3f << BP_FRAC_IOFRAC, FRAC);
+ __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
+}
+
+static struct clk_lookup uart_lookups[] = {
+ { .dev_id = "duart", },
+ { .dev_id = "mxs-auart.0", },
+ { .dev_id = "mxs-auart.1", },
+ { .dev_id = "8006c000.serial", },
+ { .dev_id = "8006e000.serial", },
+ { .dev_id = "80070000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] = {
+ { .dev_id = "imx23-dma-apbh", },
+ { .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] = {
+ { .dev_id = "duart", .con_id = "apb_pclk"},
+ { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
+ { .dev_id = "imx23-dma-apbx", },
+ { .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp_lookups[] = {
+ { .dev_id = "imx23-mmc.0", },
+ { .dev_id = "imx23-mmc.1", },
+ { .dev_id = "80010000.ssp", },
+ { .dev_id = "80034000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] = {
+ { .dev_id = "imx23-fb", },
+ { .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] = {
+ { .dev_id = "imx23-gpmi-nand", },
+ { .dev_id = "8000c000.gpmi", },
+};
+
+static const char *sel_pll[] __initconst = { "pll", "ref_xtal", };
+static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_io[] __initconst = { "ref_io", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+
+enum imx23_clk {
+ ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel,
+ lcdif_sel, gpmi_sel, ssp_sel, emi_sel, cpu, etm_sel, cpu_pll,
+ cpu_xtal, hbus, xbus, lcdif_div, ssp_div, gpmi_div, emi_pll,
+ emi_xtal, etm_div, saif_div, clk32k_div, rtc, adc, spdif_div,
+ clk32k, dri, pwm, filt, uart, ssp, gpmi, spdif, emi, saif,
+ lcdif, etm, usb, usb_pwr,
+ clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx23_clk clks_init_on[] __initdata = {
+ cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx23_clocks_init(void)
+{
+ int i;
+
+ clk_misc_init();
+
+ clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+ clks[pll] = mxs_clk_pll("pll", "ref_xtal", PLLCTRL0, 16, 480000000);
+ clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll", FRAC, 0);
+ clks[ref_emi] = mxs_clk_ref("ref_emi", "pll", FRAC, 1);
+ clks[ref_pix] = mxs_clk_ref("ref_pix", "pll", FRAC, 2);
+ clks[ref_io] = mxs_clk_ref("ref_io", "pll", FRAC, 3);
+ clks[saif_sel] = mxs_clk_mux("saif_sel", CLKSEQ, 0, 1, sel_pll, ARRAY_SIZE(sel_pll));
+ clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 1, 1, sel_pix, ARRAY_SIZE(sel_pix));
+ clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 4, 1, sel_io, ARRAY_SIZE(sel_io));
+ clks[ssp_sel] = mxs_clk_mux("ssp_sel", CLKSEQ, 5, 1, sel_io, ARRAY_SIZE(sel_io));
+ clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 6, 1, emi_sels, ARRAY_SIZE(emi_sels));
+ clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 7, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+ clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+ clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+ clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+ clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 29);
+ clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+ clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", PIX, 0, 12, 29);
+ clks[ssp_div] = mxs_clk_div("ssp_div", "ssp_sel", SSP, 0, 9, 29);
+ clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+ clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+ clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+ clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 6, 29);
+ clks[saif_div] = mxs_clk_frac("saif_div", "saif_sel", SAIF, 0, 16, 29);
+ clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+ clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+ clks[adc] = mxs_clk_fixed_factor("adc", "clk32k", 1, 16);
+ clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll", 1, 4);
+ clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+ clks[dri] = mxs_clk_gate("dri", "ref_xtal", XTAL, 28);
+ clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+ clks[filt] = mxs_clk_gate("filt", "ref_xtal", XTAL, 30);
+ clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+ clks[ssp] = mxs_clk_gate("ssp", "ssp_div", SSP, 31);
+ clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+ clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+ clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+ clks[saif] = mxs_clk_gate("saif", "saif_div", SAIF, 31);
+ clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", PIX, 31);
+ clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+ clks[usb] = mxs_clk_gate("usb", "usb_pwr", DIGCTRL, 2);
+ clks[usb_pwr] = clk_register_gate(NULL, "usb_pwr", "pll", 0, PLLCTRL0, 18, 0, &mxs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ if (IS_ERR(clks[i])) {
+ pr_err("i.MX23 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ return PTR_ERR(clks[i]);
+ }
+
+ clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+ clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+ clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+ clk_register_clkdevs(clks[ssp], ssp_lookups, ARRAY_SIZE(ssp_lookups));
+ clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+ clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clks[clks_init_on[i]]);
+
+ mxs_timer_init(MX23_INT_TIMER0);
+
+ return 0;
+}
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
new file mode 100644
index 000000000000..7fad6c8c13d2
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx28.h>
+#include "clk.h"
+
+#define CLKCTRL MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
+#define PLL0CTRL0 (CLKCTRL + 0x0000)
+#define PLL1CTRL0 (CLKCTRL + 0x0020)
+#define PLL2CTRL0 (CLKCTRL + 0x0040)
+#define CPU (CLKCTRL + 0x0050)
+#define HBUS (CLKCTRL + 0x0060)
+#define XBUS (CLKCTRL + 0x0070)
+#define XTAL (CLKCTRL + 0x0080)
+#define SSP0 (CLKCTRL + 0x0090)
+#define SSP1 (CLKCTRL + 0x00a0)
+#define SSP2 (CLKCTRL + 0x00b0)
+#define SSP3 (CLKCTRL + 0x00c0)
+#define GPMI (CLKCTRL + 0x00d0)
+#define SPDIF (CLKCTRL + 0x00e0)
+#define EMI (CLKCTRL + 0x00f0)
+#define SAIF0 (CLKCTRL + 0x0100)
+#define SAIF1 (CLKCTRL + 0x0110)
+#define LCDIF (CLKCTRL + 0x0120)
+#define ETM (CLKCTRL + 0x0130)
+#define ENET (CLKCTRL + 0x0140)
+#define FLEXCAN (CLKCTRL + 0x0160)
+#define FRAC0 (CLKCTRL + 0x01b0)
+#define FRAC1 (CLKCTRL + 0x01c0)
+#define CLKSEQ (CLKCTRL + 0x01d0)
+
+#define BP_CPU_INTERRUPT_WAIT 12
+#define BP_SAIF_DIV_FRAC_EN 16
+#define BP_ENET_DIV_TIME 21
+#define BP_ENET_SLEEP 31
+#define BP_CLKSEQ_BYPASS_SAIF0 0
+#define BP_CLKSEQ_BYPASS_SSP0 3
+#define BP_FRAC0_IO1FRAC 16
+#define BP_FRAC0_IO0FRAC 24
+
+#define DIGCTRL MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
+#define BP_SAIF_CLKMUX 10
+
+/*
+ * HW_SAIF_CLKMUX_SEL:
+ * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
+ * clock pins selected for SAIF1 input clocks.
+ * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
+ * SAIF0 clock inputs selected for SAIF1 input clocks.
+ * EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
+ * clocks.
+ * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
+ * clocks.
+ */
+int mxs_saif_clkmux_select(unsigned int clkmux)
+{
+ if (clkmux > 0x3)
+ return -EINVAL;
+
+ __mxs_clrl(0x3 << BP_SAIF_CLKMUX, DIGCTRL);
+ __mxs_setl(clkmux << BP_SAIF_CLKMUX, DIGCTRL);
+
+ return 0;
+}
+
+static void __init clk_misc_init(void)
+{
+ u32 val;
+
+ /* Gate off cpu clock in WFI for power saving */
+ __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+ /* 0 is a bad default value for a divider */
+ __mxs_setl(1 << BP_ENET_DIV_TIME, ENET);
+
+ /* Clear BYPASS for SAIF */
+ __mxs_clrl(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ);
+
+ /* SAIF has to use frac div for functional operation */
+ val = readl_relaxed(SAIF0);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF0);
+
+ val = readl_relaxed(SAIF1);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF1);
+
+ /* Extra fec clock setting */
+ val = readl_relaxed(ENET);
+ val &= ~(1 << BP_ENET_SLEEP);
+ writel_relaxed(val, ENET);
+
+ /*
+ * Source the ssp clock from ref_io rather than ref_xtal,
+ * as ref_xtal only provides 24 MHz at maximum.
+ */
+ __mxs_clrl(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ);
+
+ /*
+ * 480 MHz seems too high to be ssp clock source directly,
+ * so set frac0 to get a 288 MHz ref_io0.
+ */
+ val = readl_relaxed(FRAC0);
+ val &= ~(0x3f << BP_FRAC0_IO0FRAC);
+ val |= 30 << BP_FRAC0_IO0FRAC;
+ writel_relaxed(val, FRAC0);
+}
+
+static struct clk_lookup uart_lookups[] = {
+ { .dev_id = "duart", },
+ { .dev_id = "mxs-auart.0", },
+ { .dev_id = "mxs-auart.1", },
+ { .dev_id = "mxs-auart.2", },
+ { .dev_id = "mxs-auart.3", },
+ { .dev_id = "mxs-auart.4", },
+ { .dev_id = "8006a000.serial", },
+ { .dev_id = "8006c000.serial", },
+ { .dev_id = "8006e000.serial", },
+ { .dev_id = "80070000.serial", },
+ { .dev_id = "80072000.serial", },
+ { .dev_id = "80074000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] = {
+ { .dev_id = "imx28-dma-apbh", },
+ { .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] = {
+ { .dev_id = "duart", .con_id = "apb_pclk"},
+ { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
+ { .dev_id = "imx28-dma-apbx", },
+ { .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp0_lookups[] = {
+ { .dev_id = "imx28-mmc.0", },
+ { .dev_id = "80010000.ssp", },
+};
+
+static struct clk_lookup ssp1_lookups[] = {
+ { .dev_id = "imx28-mmc.1", },
+ { .dev_id = "80012000.ssp", },
+};
+
+static struct clk_lookup ssp2_lookups[] = {
+ { .dev_id = "imx28-mmc.2", },
+ { .dev_id = "80014000.ssp", },
+};
+
+static struct clk_lookup ssp3_lookups[] = {
+ { .dev_id = "imx28-mmc.3", },
+ { .dev_id = "80016000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] = {
+ { .dev_id = "imx28-fb", },
+ { .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] = {
+ { .dev_id = "imx28-gpmi-nand", },
+ { .dev_id = "8000c000.gpmi", },
+};
+
+static struct clk_lookup fec_lookups[] = {
+ { .dev_id = "imx28-fec.0", },
+ { .dev_id = "imx28-fec.1", },
+ { .dev_id = "800f0000.ethernet", },
+ { .dev_id = "800f4000.ethernet", },
+};
+
+static struct clk_lookup can0_lookups[] = {
+ { .dev_id = "flexcan.0", },
+ { .dev_id = "80032000.can", },
+};
+
+static struct clk_lookup can1_lookups[] = {
+ { .dev_id = "flexcan.1", },
+ { .dev_id = "80034000.can", },
+};
+
+static struct clk_lookup saif0_lookups[] = {
+ { .dev_id = "mxs-saif.0", },
+ { .dev_id = "80042000.saif", },
+};
+
+static struct clk_lookup saif1_lookups[] = {
+ { .dev_id = "mxs-saif.1", },
+ { .dev_id = "80046000.saif", },
+};
+
+static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_io0[] __initconst = { "ref_io0", "ref_xtal", };
+static const char *sel_io1[] __initconst = { "ref_io1", "ref_xtal", };
+static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", };
+static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", };
+
+enum imx28_clk {
+ ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1,
+ ref_pix, ref_hsadc, ref_gpmi, saif0_sel, saif1_sel, gpmi_sel,
+ ssp0_sel, ssp1_sel, ssp2_sel, ssp3_sel, emi_sel, etm_sel,
+ lcdif_sel, cpu, ptp_sel, cpu_pll, cpu_xtal, hbus, xbus,
+ ssp0_div, ssp1_div, ssp2_div, ssp3_div, gpmi_div, emi_pll,
+ emi_xtal, lcdif_div, etm_div, ptp, saif0_div, saif1_div,
+ clk32k_div, rtc, lradc, spdif_div, clk32k, pwm, uart, ssp0,
+ ssp1, ssp2, ssp3, gpmi, spdif, emi, saif0, saif1, lcdif, etm,
+ fec, can0, can1, usb0, usb1, usb0_pwr, usb1_pwr, enet_out,
+ clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx28_clk clks_init_on[] __initdata = {
+ cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx28_clocks_init(void)
+{
+ int i;
+
+ clk_misc_init();
+
+ clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+ clks[pll0] = mxs_clk_pll("pll0", "ref_xtal", PLL0CTRL0, 17, 480000000);
+ clks[pll1] = mxs_clk_pll("pll1", "ref_xtal", PLL1CTRL0, 17, 480000000);
+ clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
+ clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
+ clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
+ clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2);
+ clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3);
+ clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
+ clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
+ clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
+ clks[saif0_sel] = mxs_clk_mux("saif0_sel", CLKSEQ, 0, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+ clks[saif1_sel] = mxs_clk_mux("saif1_sel", CLKSEQ, 1, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+ clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 2, 1, sel_gpmi, ARRAY_SIZE(sel_gpmi));
+ clks[ssp0_sel] = mxs_clk_mux("ssp0_sel", CLKSEQ, 3, 1, sel_io0, ARRAY_SIZE(sel_io0));
+ clks[ssp1_sel] = mxs_clk_mux("ssp1_sel", CLKSEQ, 4, 1, sel_io0, ARRAY_SIZE(sel_io0));
+ clks[ssp2_sel] = mxs_clk_mux("ssp2_sel", CLKSEQ, 5, 1, sel_io1, ARRAY_SIZE(sel_io1));
+ clks[ssp3_sel] = mxs_clk_mux("ssp3_sel", CLKSEQ, 6, 1, sel_io1, ARRAY_SIZE(sel_io1));
+ clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 7, 1, emi_sels, ARRAY_SIZE(emi_sels));
+ clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+ clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 14, 1, sel_pix, ARRAY_SIZE(sel_pix));
+ clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+ clks[ptp_sel] = mxs_clk_mux("ptp_sel", ENET, 19, 1, ptp_sels, ARRAY_SIZE(ptp_sels));
+ clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+ clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+ clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 31);
+ clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+ clks[ssp0_div] = mxs_clk_div("ssp0_div", "ssp0_sel", SSP0, 0, 9, 29);
+ clks[ssp1_div] = mxs_clk_div("ssp1_div", "ssp1_sel", SSP1, 0, 9, 29);
+ clks[ssp2_div] = mxs_clk_div("ssp2_div", "ssp2_sel", SSP2, 0, 9, 29);
+ clks[ssp3_div] = mxs_clk_div("ssp3_div", "ssp3_sel", SSP3, 0, 9, 29);
+ clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+ clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+ clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+ clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", LCDIF, 0, 13, 29);
+ clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 7, 29);
+ clks[ptp] = mxs_clk_div("ptp", "ptp_sel", ENET, 21, 6, 27);
+ clks[saif0_div] = mxs_clk_frac("saif0_div", "saif0_sel", SAIF0, 0, 16, 29);
+ clks[saif1_div] = mxs_clk_frac("saif1_div", "saif1_sel", SAIF1, 0, 16, 29);
+ clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+ clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+ clks[lradc] = mxs_clk_fixed_factor("lradc", "clk32k", 1, 16);
+ clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll0", 1, 4);
+ clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+ clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+ clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+ clks[ssp0] = mxs_clk_gate("ssp0", "ssp0_div", SSP0, 31);
+ clks[ssp1] = mxs_clk_gate("ssp1", "ssp1_div", SSP1, 31);
+ clks[ssp2] = mxs_clk_gate("ssp2", "ssp2_div", SSP2, 31);
+ clks[ssp3] = mxs_clk_gate("ssp3", "ssp3_div", SSP3, 31);
+ clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+ clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+ clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+ clks[saif0] = mxs_clk_gate("saif0", "saif0_div", SAIF0, 31);
+ clks[saif1] = mxs_clk_gate("saif1", "saif1_div", SAIF1, 31);
+ clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", LCDIF, 31);
+ clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+ clks[fec] = mxs_clk_gate("fec", "hbus", ENET, 30);
+ clks[can0] = mxs_clk_gate("can0", "ref_xtal", FLEXCAN, 30);
+ clks[can1] = mxs_clk_gate("can1", "ref_xtal", FLEXCAN, 28);
+ clks[usb0] = mxs_clk_gate("usb0", "usb0_pwr", DIGCTRL, 2);
+ clks[usb1] = mxs_clk_gate("usb1", "usb1_pwr", DIGCTRL, 16);
+ clks[usb0_pwr] = clk_register_gate(NULL, "usb0_pwr", "pll0", 0, PLL0CTRL0, 18, 0, &mxs_lock);
+ clks[usb1_pwr] = clk_register_gate(NULL, "usb1_pwr", "pll1", 0, PLL1CTRL0, 18, 0, &mxs_lock);
+ clks[enet_out] = clk_register_gate(NULL, "enet_out", "pll2", 0, ENET, 18, 0, &mxs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ if (IS_ERR(clks[i])) {
+ pr_err("i.MX28 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ return PTR_ERR(clks[i]);
+ }
+
+ clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdev(clks[enet_out], NULL, "enet_out");
+ clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+ clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+ clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+ clk_register_clkdevs(clks[ssp0], ssp0_lookups, ARRAY_SIZE(ssp0_lookups));
+ clk_register_clkdevs(clks[ssp1], ssp1_lookups, ARRAY_SIZE(ssp1_lookups));
+ clk_register_clkdevs(clks[ssp2], ssp2_lookups, ARRAY_SIZE(ssp2_lookups));
+ clk_register_clkdevs(clks[ssp3], ssp3_lookups, ARRAY_SIZE(ssp3_lookups));
+ clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+ clk_register_clkdevs(clks[saif0], saif0_lookups, ARRAY_SIZE(saif0_lookups));
+ clk_register_clkdevs(clks[saif1], saif1_lookups, ARRAY_SIZE(saif1_lookups));
+ clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+ clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups));
+ clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups));
+ clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups));
+
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clks[clks_init_on[i]]);
+
+ mxs_timer_init(MX28_INT_TIMER0);
+
+ return 0;
+}
diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c
new file mode 100644
index 000000000000..fadae41833ec
--- /dev/null
+++ b/drivers/clk/mxs/clk-pll.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_pll - mxs pll clock
+ * @hw: clk_hw for the pll
+ * @base: base address of the pll
+ * @power: the shift of power bit
+ * @rate: the clock rate of the pll
+ *
+ * The mxs pll is a fixed rate clock with power and gate control,
+ * and the shift of gate bit is always 31.
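+ *
+ * The power and gate bits are written through the register's set/clear
+ * shadow addresses (base + SET and base + CLR, defined in clk.h), so the
+ * routines below can flip a single bit without a read-modify-write.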
+ */
+struct clk_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+ u8 power;
+ unsigned long rate;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << pll->power, pll->base + SET);
+
+ udelay(10);
+
+ return 0;
+}
+
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << pll->power, pll->base + CLR);
+}
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << 31, pll->base + CLR);
+
+ return 0;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << 31, pll->base + SET);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ return pll->rate;
+}
+
+static const struct clk_ops clk_pll_ops = {
+ .prepare = clk_pll_prepare,
+ .unprepare = clk_pll_unprepare,
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+};
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+ void __iomem *base, u8 power, unsigned long rate)
+{
+ struct clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_pll_ops;
+ init.flags = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ pll->base = base;
+ pll->rate = rate;
+ pll->power = power;
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
new file mode 100644
index 000000000000..4adeed6c2f94
--- /dev/null
+++ b/drivers/clk/mxs/clk-ref.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_ref - mxs reference clock
+ * @hw: clk_hw for the reference clock
+ * @reg: register address
+ * @idx: the index of the reference clock within the same register
+ *
+ * The mxs reference clock sources from pll. Every 4 reference clocks share
+ * one register space, and @idx is used to identify them. Each reference
+ * clock has a gate control and a fractional divider. The rate is calculated
+ * as pll rate * (18 / FRAC), where FRAC = 18 ~ 35.
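+ *
+ * For example, with the 480 MHz pll0 registered by mx28_clocks_init(),
+ * FRAC = 18 passes the rate through, FRAC = 30 gives 480 * 18 / 30 = 288 MHz
+ * and FRAC = 35 gives roughly 246.9 MHz, which is also the range that
+ * clk_ref_round_rate() clamps requests into.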
+ */
+struct clk_ref {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 idx;
+};
+
+#define to_clk_ref(_hw) container_of(_hw, struct clk_ref, hw)
+
+static int clk_ref_enable(struct clk_hw *hw)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+
+ writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR);
+
+ return 0;
+}
+
+static void clk_ref_disable(struct clk_hw *hw)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+
+ writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET);
+}
+
+static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+ u64 tmp = parent_rate;
+ u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f;
+
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ u64 tmp = parent_rate;
+ u8 frac;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 18)
+ frac = 18;
+ else if (frac > 35)
+ frac = 35;
+
+ tmp = parent_rate;
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+ unsigned long flags;
+ u64 tmp = parent_rate;
+ u32 val;
+ u8 frac, shift = ref->idx * 8;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 18)
+ frac = 18;
+ else if (frac > 35)
+ frac = 35;
+
+ spin_lock_irqsave(&mxs_lock, flags);
+
+ val = readl_relaxed(ref->reg);
+ val &= ~(0x3f << shift);
+ val |= frac << shift;
+ writel_relaxed(val, ref->reg);
+
+ spin_unlock_irqrestore(&mxs_lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ref_ops = {
+ .enable = clk_ref_enable,
+ .disable = clk_ref_disable,
+ .recalc_rate = clk_ref_recalc_rate,
+ .round_rate = clk_ref_round_rate,
+ .set_rate = clk_ref_set_rate,
+};
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx)
+{
+ struct clk_ref *ref;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_ref_ops;
+ init.flags = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ ref->reg = reg;
+ ref->idx = idx;
+ ref->hw.init = &init;
+
+ clk = clk_register(NULL, &ref->hw);
+ if (IS_ERR(clk))
+ kfree(ref);
+
+ return clk;
+}
diff --git a/drivers/clk/mxs/clk.c b/drivers/clk/mxs/clk.c
new file mode 100644
index 000000000000..b24d56067c80
--- /dev/null
+++ b/drivers/clk/mxs/clk.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+DEFINE_SPINLOCK(mxs_lock);
+
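+/*
+ * Poll @reg until the bit at @shift clears, giving up after 10 ms. This is
+ * meant for clocks that expose a busy bit, cf. the @busy argument taken by
+ * mxs_clk_div() and mxs_clk_frac() in clk.h.
+ */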
+int mxs_clk_wait(void __iomem *reg, u8 shift)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+ while (readl_relaxed(reg) & (1 << shift))
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
new file mode 100644
index 000000000000..81421e28e69c
--- /dev/null
+++ b/drivers/clk/mxs/clk.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __MXS_CLK_H
+#define __MXS_CLK_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#define SET 0x4
+#define CLR 0x8
+
+extern spinlock_t mxs_lock;
+
+int mxs_clk_wait(void __iomem *reg, u8 shift);
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+ void __iomem *base, u8 power, unsigned long rate);
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx);
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+static inline struct clk *mxs_clk_fixed(const char *name, int rate)
+{
+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *mxs_clk_gate(const char *name,
+ const char *parent_name, void __iomem *reg, u8 shift)
+{
+ return clk_register_gate(NULL, name, parent_name, CLK_SET_RATE_PARENT,
+ reg, shift, CLK_GATE_SET_TO_DISABLE,
+ &mxs_lock);
+}
+
+static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
+ u8 shift, u8 width, const char **parent_names, int num_parents)
+{
+ return clk_register_mux(NULL, name, parent_names, num_parents,
+ CLK_SET_RATE_PARENT, reg, shift, width,
+ 0, &mxs_lock);
+}
+
+static inline struct clk *mxs_clk_fixed_factor(const char *name,
+ const char *parent_name, unsigned int mult, unsigned int div)
+{
+ return clk_register_fixed_factor(NULL, name, parent_name,
+ CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif /* __MXS_CLK_H */
diff --git a/drivers/clk/spear/Makefile b/drivers/clk/spear/Makefile
new file mode 100644
index 000000000000..cdb425d3b8ee
--- /dev/null
+++ b/drivers/clk/spear/Makefile
@@ -0,0 +1,10 @@
+#
+# SPEAr Clock specific Makefile
+#
+
+obj-y += clk.o clk-aux-synth.o clk-frac-synth.o clk-gpt-synth.o clk-vco-pll.o
+
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx_clock.o
+obj-$(CONFIG_ARCH_SPEAR6XX) += spear6xx_clock.o
+obj-$(CONFIG_MACH_SPEAR1310) += spear1310_clock.o
+obj-$(CONFIG_MACH_SPEAR1340) += spear1340_clock.o
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
new file mode 100644
index 000000000000..6756e7c3bc07
--- /dev/null
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Auxiliary Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-aux-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: Auxiliary Synthesizer clock
+ *
+ * Aux synth gives rate for different values of eq, x and y
+ *
+ * Fout from the synthesizer is given by one of two equations:
+ * Fout1 = (Fin * X/Y)/2 EQ1
+ * Fout2 = Fin * X/Y EQ2
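+ *
+ * For example, SPEAr1310's aux_rtbl entry {xscale = 4, yscale = 21, eq = 0}
+ * selects EQ1, so a 500 MHz VCO1div2 parent yields (500 MHz * 4 / 21) / 2,
+ * i.e. roughly 48 MHz, matching the rate table comment.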
+ */
+
+#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
+
+static struct aux_clk_masks default_aux_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = AUX_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = AUX_XSCALE_MASK,
+ .xscale_sel_shift = AUX_XSCALE_SHIFT,
+ .yscale_sel_mask = AUX_YSCALE_MASK,
+ .yscale_sel_shift = AUX_YSCALE_SHIFT,
+ .enable_bit = AUX_SYNT_ENB,
+};
+
+static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ struct aux_rate_tbl *rtbl = aux->rtbl;
+ u8 eq = rtbl[index].eq ? 1 : 2;
+
+ return (((prate / 10000) * rtbl[index].xscale) /
+ (rtbl[index].yscale * eq)) * 10000;
+}
+
+static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
+ aux->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ unsigned int num = 1, den = 1, val, eqn;
+ unsigned long flags = 0;
+
+ if (aux->lock)
+ spin_lock_irqsave(aux->lock, flags);
+
+ val = readl_relaxed(aux->reg);
+
+ if (aux->lock)
+ spin_unlock_irqrestore(aux->lock, flags);
+
+ eqn = (val >> aux->masks->eq_sel_shift) & aux->masks->eq_sel_mask;
+ if (eqn == aux->masks->eq1_mask)
+ den = 2;
+
+ /* calculate numerator */
+ num = (val >> aux->masks->xscale_sel_shift) &
+ aux->masks->xscale_sel_mask;
+
+ /* calculate denominator */
+ den *= (val >> aux->masks->yscale_sel_shift) &
+ aux->masks->yscale_sel_mask;
+
+ if (!den)
+ return 0;
+
+ return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of aux */
+static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ struct aux_rate_tbl *rtbl = aux->rtbl;
+ unsigned long val, flags = 0;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, aux_calc_rate, aux->rtbl_cnt,
+ &i);
+
+ if (aux->lock)
+ spin_lock_irqsave(aux->lock, flags);
+
+ val = readl_relaxed(aux->reg) &
+ ~(aux->masks->eq_sel_mask << aux->masks->eq_sel_shift);
+ val |= (rtbl[i].eq & aux->masks->eq_sel_mask) <<
+ aux->masks->eq_sel_shift;
+ val &= ~(aux->masks->xscale_sel_mask << aux->masks->xscale_sel_shift);
+ val |= (rtbl[i].xscale & aux->masks->xscale_sel_mask) <<
+ aux->masks->xscale_sel_shift;
+ val &= ~(aux->masks->yscale_sel_mask << aux->masks->yscale_sel_shift);
+ val |= (rtbl[i].yscale & aux->masks->yscale_sel_mask) <<
+ aux->masks->yscale_sel_shift;
+ writel_relaxed(val, aux->reg);
+
+ if (aux->lock)
+ spin_unlock_irqrestore(aux->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_aux_ops = {
+ .recalc_rate = clk_aux_recalc_rate,
+ .round_rate = clk_aux_round_rate,
+ .set_rate = clk_aux_set_rate,
+};
+
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+ const char *parent_name, unsigned long flags, void __iomem *reg,
+ struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
+{
+ struct clk_aux *aux;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ if (!aux_name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ if (!aux) {
+ pr_err("could not allocate aux clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_aux assignments */
+ if (!masks)
+ aux->masks = &default_aux_masks;
+ else
+ aux->masks = masks;
+
+ aux->reg = reg;
+ aux->rtbl = rtbl;
+ aux->rtbl_cnt = rtbl_cnt;
+ aux->lock = lock;
+ aux->hw.init = &init;
+
+ init.name = aux_name;
+ init.ops = &clk_aux_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &aux->hw);
+ if (IS_ERR_OR_NULL(clk))
+ goto free_aux;
+
+ if (gate_name) {
+ struct clk *tgate_clk;
+
+ tgate_clk = clk_register_gate(NULL, gate_name, aux_name, 0, reg,
+ aux->masks->enable_bit, 0, lock);
+ if (IS_ERR_OR_NULL(tgate_clk))
+ goto free_aux;
+
+ if (gate_clk)
+ *gate_clk = tgate_clk;
+ }
+
+ return clk;
+
+free_aux:
+ kfree(aux);
+ pr_err("clk register failed\n");
+
+ return NULL;
+}
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
new file mode 100644
index 000000000000..958aa3ad1d60
--- /dev/null
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Fractional Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-frac-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define DIV_FACTOR_MASK 0x1FFFF
+
+/*
+ * DOC: Fractional Synthesizer clock
+ *
+ * Fout from the synthesizer is given by the equation below:
+ *
+ * Fout = Fin / (2 * div), where div is the division factor
+ * div is 17 bits:
+ * 0-13 (fractional part)
+ * 14-16 (integer part)
+ * i.e. div reads as (bits 16-14).(bits 13-0) in binary
+ *
+ * Fout = Fin/(2 * div)
+ * Fout = ((Fin / 10000)/(2 * div)) * 10000
+ * Fout = (2^14 * (Fin / 10000)/(2^14 * (2 * div))) * 10000
+ * Fout = (((Fin / 10000) << 14)/(2 * (div << 14))) * 10000
+ *
+ * div << 14 simply 17 bit value written at register.
+ * Max error due to scaling down by 10000 is 10 KHz
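+ *
+ * For example, the SPEAr1310 clcd table entry div = 0x14000 encodes 5.0
+ * (5 << 14), so a 250 MHz vco1div4 parent gives 250 / (2 * 5) = 25 MHz,
+ * matching the rate table comment.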
+ */
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ struct frac_rate_tbl *rtbl = frac->rtbl;
+
+ prate /= 10000;
+ prate <<= 14;
+ prate /= (2 * rtbl[index].div);
+ prate *= 10000;
+
+ return prate;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
+ frac->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long flags = 0;
+ unsigned int div = 1, val;
+
+ if (frac->lock)
+ spin_lock_irqsave(frac->lock, flags);
+
+ val = readl_relaxed(frac->reg);
+
+ if (frac->lock)
+ spin_unlock_irqrestore(frac->lock, flags);
+
+ div = val & DIV_FACTOR_MASK;
+
+ if (!div)
+ return 0;
+
+ parent_rate = parent_rate / 10000;
+
+ parent_rate = (parent_rate << 14) / (2 * div);
+ return parent_rate * 10000;
+}
+
+/* Configures new clock rate of frac */
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ struct frac_rate_tbl *rtbl = frac->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, frac_calc_rate, frac->rtbl_cnt,
+ &i);
+
+ if (frac->lock)
+ spin_lock_irqsave(frac->lock, flags);
+
+ val = readl_relaxed(frac->reg) & ~DIV_FACTOR_MASK;
+ val |= rtbl[i].div & DIV_FACTOR_MASK;
+ writel_relaxed(val, frac->reg);
+
+ if (frac->lock)
+ spin_unlock_irqrestore(frac->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_frac_ops = {
+ .recalc_rate = clk_frac_recalc_rate,
+ .round_rate = clk_frac_round_rate,
+ .set_rate = clk_frac_set_rate,
+};
+
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct clk_frac *frac;
+ struct clk *clk;
+
+ if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+ if (!frac) {
+ pr_err("could not allocate frac clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_frac assignments */
+ frac->reg = reg;
+ frac->rtbl = rtbl;
+ frac->rtbl_cnt = rtbl_cnt;
+ frac->lock = lock;
+ frac->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_frac_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &frac->hw);
+ if (!IS_ERR_OR_NULL(clk))
+ return clk;
+
+ pr_err("clk register failed\n");
+ kfree(frac);
+
+ return NULL;
+}
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
new file mode 100644
index 000000000000..1afc18c4effc
--- /dev/null
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * General Purpose Timer Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-gpt-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define GPT_MSCALE_MASK 0xFFF
+#define GPT_NSCALE_SHIFT 12
+#define GPT_NSCALE_MASK 0xF
+
+/*
+ * DOC: General Purpose Timer Synthesizer clock
+ *
+ * Calculates gpt synth clk rate for different values of mscale and nscale
+ *
+ * Fout = Fin / ((2 ^ (N+1)) * (M+1))
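+ *
+ * For instance (illustrative values only), a 24 MHz parent with nscale = 1
+ * and mscale = 2 gives 24 MHz / ((2 ^ 2) * 3) = 2 MHz.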
+ */
+
+#define to_clk_gpt(_hw) container_of(_hw, struct clk_gpt, hw)
+
+static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ struct gpt_rate_tbl *rtbl = gpt->rtbl;
+
+ prate /= ((1 << (rtbl[index].nscale + 1)) * (rtbl[index].mscale + 1));
+
+ return prate;
+}
+
+static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
+ gpt->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ unsigned long flags = 0;
+ unsigned int div = 1, val;
+
+ if (gpt->lock)
+ spin_lock_irqsave(gpt->lock, flags);
+
+ val = readl_relaxed(gpt->reg);
+
+ if (gpt->lock)
+ spin_unlock_irqrestore(gpt->lock, flags);
+
+ div += val & GPT_MSCALE_MASK;
+ div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);
+
+ if (!div)
+ return 0;
+
+ return parent_rate / div;
+}
+
+/* Configures new clock rate of gpt */
+static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ struct gpt_rate_tbl *rtbl = gpt->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, gpt_calc_rate, gpt->rtbl_cnt,
+ &i);
+
+ if (gpt->lock)
+ spin_lock_irqsave(gpt->lock, flags);
+
+ val = readl(gpt->reg) & ~GPT_MSCALE_MASK;
+ val &= ~(GPT_NSCALE_MASK << GPT_NSCALE_SHIFT);
+
+ val |= rtbl[i].mscale & GPT_MSCALE_MASK;
+ val |= (rtbl[i].nscale & GPT_NSCALE_MASK) << GPT_NSCALE_SHIFT;
+
+ writel_relaxed(val, gpt->reg);
+
+ if (gpt->lock)
+ spin_unlock_irqrestore(gpt->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_gpt_ops = {
+ .recalc_rate = clk_gpt_recalc_rate,
+ .round_rate = clk_gpt_round_rate,
+ .set_rate = clk_gpt_set_rate,
+};
+
+struct clk *clk_register_gpt(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl,
+ u8 rtbl_cnt, spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct clk_gpt *gpt;
+ struct clk *clk;
+
+ if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
+ if (!gpt) {
+ pr_err("could not allocate gpt clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_gpt assignments */
+ gpt->reg = reg;
+ gpt->rtbl = rtbl;
+ gpt->rtbl_cnt = rtbl_cnt;
+ gpt->lock = lock;
+ gpt->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_gpt_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &gpt->hw);
+ if (!IS_ERR_OR_NULL(clk))
+ return clk;
+
+ pr_err("clk register failed\n");
+ kfree(gpt);
+
+ return NULL;
+}
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
new file mode 100644
index 000000000000..5f1b6badeb15
--- /dev/null
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * VCO-PLL clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-vco-pll: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: VCO-PLL clock
+ *
+ * VCO and PLL rate are derived from following equations:
+ *
+ * In normal mode
+ * vco = (2 * M[15:8] * Fin)/N
+ *
+ * In Dithered mode
+ * vco = (2 * M[15:0] * Fin)/(256 * N)
+ *
+ * pll_rate = vco / 2^p
+ *
+ * vco and pll are very closely bound to each other: vco programs the mode,
+ * m and n values, pll programs the p value, and both share common
+ * enable/disable logic.
+ *
+ * clk_register_vco_pll() registers instances of both vco & pll.
+ * CLK_SET_RATE_PARENT flag is forced for pll, as it will always pass its
+ * set_rate to vco. A single rate table exists for both the clocks, which
+ * configures m, n and p.
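+ *
+ * For example, SPEAr1310's normal mode entry m = 0x7D, n = 0x06 with a
+ * 24 MHz oscillator gives vco = (2 * 125 * 24 MHz) / 6 = 1000 MHz, and
+ * p = 1 then divides that down to a 500 MHz pll, matching the rate table
+ * comments.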
+ */
+
+/* PLL_CTR register masks */
+#define PLL_MODE_NORMAL 0
+#define PLL_MODE_FRACTION 1
+#define PLL_MODE_DITH_DSM 2
+#define PLL_MODE_DITH_SSM 3
+#define PLL_MODE_MASK 3
+#define PLL_MODE_SHIFT 3
+#define PLL_ENABLE 2
+
+#define PLL_LOCK_SHIFT 0
+#define PLL_LOCK_MASK 1
+
+/* PLL FRQ register masks */
+#define PLL_NORM_FDBK_M_MASK 0xFF
+#define PLL_NORM_FDBK_M_SHIFT 24
+#define PLL_DITH_FDBK_M_MASK 0xFFFF
+#define PLL_DITH_FDBK_M_SHIFT 16
+#define PLL_DIV_P_MASK 0x7
+#define PLL_DIV_P_SHIFT 8
+#define PLL_DIV_N_MASK 0xFF
+#define PLL_DIV_N_SHIFT 0
+
+#define to_clk_vco(_hw) container_of(_hw, struct clk_vco, hw)
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+/* Calculates pll clk rate for specific value of mode, m, n and p */
+static unsigned long pll_calc_rate(struct pll_rate_tbl *rtbl,
+ unsigned long prate, int index, unsigned long *pll_rate)
+{
+ unsigned long rate = prate;
+ unsigned int mode;
+
+ mode = rtbl[index].mode ? 256 : 1;
+ rate = (((2 * rate / 10000) * rtbl[index].m) / (mode * rtbl[index].n));
+
+ if (pll_rate)
+ *pll_rate = (rate / (1 << rtbl[index].p)) * 10000;
+
+ return rate * 10000;
+}
+
+static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate, int *index)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ unsigned long prev_rate, vco_prev_rate, rate = 0;
+ unsigned long vco_parent_rate =
+ __clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk)));
+
+ if (!prate) {
+ pr_err("%s: prate is must for pll clk\n", __func__);
+ return -EINVAL;
+ }
+
+ for (*index = 0; *index < pll->vco->rtbl_cnt; (*index)++) {
+ prev_rate = rate;
+ vco_prev_rate = *prate;
+ *prate = pll_calc_rate(pll->vco->rtbl, vco_parent_rate, *index,
+ &rate);
+ if (drate < rate) {
+ /* previous clock was best */
+ if (*index) {
+ rate = prev_rate;
+ *prate = vco_prev_rate;
+ (*index)--;
+ }
+ break;
+ }
+ }
+
+ return rate;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ int unused;
+
+ return clk_pll_round_rate_index(hw, drate, prate, &unused);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
+ parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ unsigned int p;
+
+ if (pll->vco->lock)
+ spin_lock_irqsave(pll->vco->lock, flags);
+
+ p = readl_relaxed(pll->vco->cfg_reg);
+
+ if (pll->vco->lock)
+ spin_unlock_irqrestore(pll->vco->lock, flags);
+
+ p = (p >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
+
+ return parent_rate / (1 << p);
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct pll_rate_tbl *rtbl = pll->vco->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_pll_round_rate_index(hw, drate, NULL, &i);
+
+ if (pll->vco->lock)
+ spin_lock_irqsave(pll->vco->lock, flags);
+
+ val = readl_relaxed(pll->vco->cfg_reg);
+ val &= ~(PLL_DIV_P_MASK << PLL_DIV_P_SHIFT);
+ val |= (rtbl[i].p & PLL_DIV_P_MASK) << PLL_DIV_P_SHIFT;
+ writel_relaxed(val, pll->vco->cfg_reg);
+
+ if (pll->vco->lock)
+ spin_unlock_irqrestore(pll->vco->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_pll_ops = {
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+static inline unsigned long vco_calc_rate(struct clk_hw *hw,
+ unsigned long prate, int index)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+
+ return pll_calc_rate(vco->rtbl, prate, index, NULL);
+}
+
+static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
+ vco->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ unsigned long flags = 0;
+ unsigned int num = 2, den = 0, val, mode = 0;
+
+ if (vco->lock)
+ spin_lock_irqsave(vco->lock, flags);
+
+ mode = (readl_relaxed(vco->mode_reg) >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+
+ val = readl_relaxed(vco->cfg_reg);
+
+ if (vco->lock)
+ spin_unlock_irqrestore(vco->lock, flags);
+
+ den = (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;
+
+ /* calculate numerator & denominator */
+ if (!mode) {
+ /* Normal mode */
+ num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
+ } else {
+ /* Dithered mode */
+ num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
+ den *= 256;
+ }
+
+ if (!den) {
+ WARN(1, "%s: denominator can't be zero\n", __func__);
+ return 0;
+ }
+
+ return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of vco */
+static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ struct pll_rate_tbl *rtbl = vco->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, vco_calc_rate, vco->rtbl_cnt,
+ &i);
+
+ if (vco->lock)
+ spin_lock_irqsave(vco->lock, flags);
+
+ val = readl_relaxed(vco->mode_reg);
+ val &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+ val |= (rtbl[i].mode & PLL_MODE_MASK) << PLL_MODE_SHIFT;
+ writel_relaxed(val, vco->mode_reg);
+
+ val = readl_relaxed(vco->cfg_reg);
+ val &= ~(PLL_DIV_N_MASK << PLL_DIV_N_SHIFT);
+ val |= (rtbl[i].n & PLL_DIV_N_MASK) << PLL_DIV_N_SHIFT;
+
+ val &= ~(PLL_DITH_FDBK_M_MASK << PLL_DITH_FDBK_M_SHIFT);
+ if (rtbl[i].mode)
+ val |= (rtbl[i].m & PLL_DITH_FDBK_M_MASK) <<
+ PLL_DITH_FDBK_M_SHIFT;
+ else
+ val |= (rtbl[i].m & PLL_NORM_FDBK_M_MASK) <<
+ PLL_NORM_FDBK_M_SHIFT;
+
+ writel_relaxed(val, vco->cfg_reg);
+
+ if (vco->lock)
+ spin_unlock_irqrestore(vco->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_vco_ops = {
+ .recalc_rate = clk_vco_recalc_rate,
+ .round_rate = clk_vco_round_rate,
+ .set_rate = clk_vco_set_rate,
+};
+
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+ const char *vco_gate_name, const char *parent_name,
+ unsigned long flags, void __iomem *mode_reg,
+ void __iomem *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+ spinlock_t *lock, struct clk **pll_clk,
+ struct clk **vco_gate_clk)
+{
+ struct clk_vco *vco;
+ struct clk_pll *pll;
+ struct clk *vco_clk, *tpll_clk, *tvco_gate_clk;
+ struct clk_init_data vco_init, pll_init;
+ const char **vco_parent_name;
+
+ if (!vco_name || !pll_name || !parent_name || !mode_reg || !cfg_reg ||
+ !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ vco = kzalloc(sizeof(*vco), GFP_KERNEL);
+ if (!vco) {
+ pr_err("could not allocate vco clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("could not allocate pll clk\n");
+ goto free_vco;
+ }
+
+ /* struct clk_vco assignments */
+ vco->mode_reg = mode_reg;
+ vco->cfg_reg = cfg_reg;
+ vco->rtbl = rtbl;
+ vco->rtbl_cnt = rtbl_cnt;
+ vco->lock = lock;
+ vco->hw.init = &vco_init;
+
+ pll->vco = vco;
+ pll->hw.init = &pll_init;
+
+ if (vco_gate_name) {
+ tvco_gate_clk = clk_register_gate(NULL, vco_gate_name,
+ parent_name, 0, mode_reg, PLL_ENABLE, 0, lock);
+ if (IS_ERR_OR_NULL(tvco_gate_clk))
+ goto free_pll;
+
+ if (vco_gate_clk)
+ *vco_gate_clk = tvco_gate_clk;
+ vco_parent_name = &vco_gate_name;
+ } else {
+ vco_parent_name = &parent_name;
+ }
+
+ vco_init.name = vco_name;
+ vco_init.ops = &clk_vco_ops;
+ vco_init.flags = flags;
+ vco_init.parent_names = vco_parent_name;
+ vco_init.num_parents = 1;
+
+ pll_init.name = pll_name;
+ pll_init.ops = &clk_pll_ops;
+ pll_init.flags = CLK_SET_RATE_PARENT;
+ pll_init.parent_names = &vco_name;
+ pll_init.num_parents = 1;
+
+ vco_clk = clk_register(NULL, &vco->hw);
+ if (IS_ERR_OR_NULL(vco_clk))
+ goto free_pll;
+
+ tpll_clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR_OR_NULL(tpll_clk))
+ goto free_pll;
+
+ if (pll_clk)
+ *pll_clk = tpll_clk;
+
+ return vco_clk;
+
+free_pll:
+ kfree(pll);
+free_vco:
+ kfree(vco);
+
+ pr_err("Failed to register vco pll clock\n");
+
+ return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
new file mode 100644
index 000000000000..7cd63788d546
--- /dev/null
+++ b/drivers/clk/spear/clk.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * SPEAr clk - Common routines
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/types.h>
+#include "clk.h"
+
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+ int *index)
+{
+ unsigned long prev_rate, rate = 0;
+
+ for (*index = 0; *index < rtbl_cnt; (*index)++) {
+ prev_rate = rate;
+ rate = calc_rate(hw, parent_rate, *index);
+ if (drate < rate) {
+ /* previous clock was best */
+ if (*index) {
+ rate = prev_rate;
+ (*index)--;
+ }
+ break;
+ }
+ }
+
+ return rate;
+}
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
new file mode 100644
index 000000000000..931737677dfa
--- /dev/null
+++ b/drivers/clk/spear/clk.h
@@ -0,0 +1,134 @@
+/*
+ * Clock framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __SPEAR_CLK_H
+#define __SPEAR_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+/* Auxiliary Synth clk */
+/* Default masks */
+#define AUX_EQ_SEL_SHIFT 30
+#define AUX_EQ_SEL_MASK 1
+#define AUX_EQ1_SEL 0
+#define AUX_EQ2_SEL 1
+#define AUX_XSCALE_SHIFT 16
+#define AUX_XSCALE_MASK 0xFFF
+#define AUX_YSCALE_SHIFT 0
+#define AUX_YSCALE_MASK 0xFFF
+#define AUX_SYNT_ENB 31
+
+struct aux_clk_masks {
+ u32 eq_sel_mask;
+ u32 eq_sel_shift;
+ u32 eq1_mask;
+ u32 eq2_mask;
+ u32 xscale_sel_mask;
+ u32 xscale_sel_shift;
+ u32 yscale_sel_mask;
+ u32 yscale_sel_shift;
+ u32 enable_bit;
+};
+
+struct aux_rate_tbl {
+ u16 xscale;
+ u16 yscale;
+ u8 eq;
+};
+
+struct clk_aux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct aux_clk_masks *masks;
+ struct aux_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* Fractional Synth clk */
+struct frac_rate_tbl {
+ u32 div;
+};
+
+struct clk_frac {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct frac_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* GPT clk */
+struct gpt_rate_tbl {
+ u16 mscale;
+ u16 nscale;
+};
+
+struct clk_gpt {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct gpt_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* VCO-PLL clk */
+struct pll_rate_tbl {
+ u8 mode;
+ u16 m;
+ u8 n;
+ u8 p;
+};
+
+struct clk_vco {
+ struct clk_hw hw;
+ void __iomem *mode_reg;
+ void __iomem *cfg_reg;
+ struct pll_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+struct clk_pll {
+ struct clk_hw hw;
+ struct clk_vco *vco;
+ const char *parent[1];
+ spinlock_t *lock;
+};
+
+typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
+ int index);
+
+/* clk register routines */
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+ const char *parent_name, unsigned long flags, void __iomem *reg,
+ struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_gpt(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl,
+ u8 rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+ const char *vco_gate_name, const char *parent_name,
+ unsigned long flags, void __iomem *mode_reg,
+ void __iomem *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+ spinlock_t *lock, struct clk **pll_clk,
+ struct clk **vco_gate_clk);
+
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+ int *index);
+
+#endif /* __SPEAR_CLK_H */
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
new file mode 100644
index 000000000000..0fcec2aae19c
--- /dev/null
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -0,0 +1,1104 @@
+/*
+ * arch/arm/mach-spear13xx/spear1310_clock.c
+ *
+ * SPEAr1310 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* PLL related registers and bit values */
+#define SPEAR1310_PLL_CFG (VA_MISC_BASE + 0x210)
+ /* PLL_CFG bit values */
+ #define SPEAR1310_CLCD_SYNT_CLK_MASK 1
+ #define SPEAR1310_CLCD_SYNT_CLK_SHIFT 31
+ #define SPEAR1310_RAS_SYNT2_3_CLK_MASK 2
+ #define SPEAR1310_RAS_SYNT2_3_CLK_SHIFT 29
+ #define SPEAR1310_RAS_SYNT_CLK_MASK 2
+ #define SPEAR1310_RAS_SYNT0_1_CLK_SHIFT 27
+ #define SPEAR1310_PLL_CLK_MASK 2
+ #define SPEAR1310_PLL3_CLK_SHIFT 24
+ #define SPEAR1310_PLL2_CLK_SHIFT 22
+ #define SPEAR1310_PLL1_CLK_SHIFT 20
+
+#define SPEAR1310_PLL1_CTR (VA_MISC_BASE + 0x214)
+#define SPEAR1310_PLL1_FRQ (VA_MISC_BASE + 0x218)
+#define SPEAR1310_PLL2_CTR (VA_MISC_BASE + 0x220)
+#define SPEAR1310_PLL2_FRQ (VA_MISC_BASE + 0x224)
+#define SPEAR1310_PLL3_CTR (VA_MISC_BASE + 0x22C)
+#define SPEAR1310_PLL3_FRQ (VA_MISC_BASE + 0x230)
+#define SPEAR1310_PLL4_CTR (VA_MISC_BASE + 0x238)
+#define SPEAR1310_PLL4_FRQ (VA_MISC_BASE + 0x23C)
+#define SPEAR1310_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
+ /* PERIP_CLK_CFG bit values */
+ #define SPEAR1310_GPT_OSC24_VAL 0
+ #define SPEAR1310_GPT_APB_VAL 1
+ #define SPEAR1310_GPT_CLK_MASK 1
+ #define SPEAR1310_GPT3_CLK_SHIFT 11
+ #define SPEAR1310_GPT2_CLK_SHIFT 10
+ #define SPEAR1310_GPT1_CLK_SHIFT 9
+ #define SPEAR1310_GPT0_CLK_SHIFT 8
+ #define SPEAR1310_UART_CLK_PLL5_VAL 0
+ #define SPEAR1310_UART_CLK_OSC24_VAL 1
+ #define SPEAR1310_UART_CLK_SYNT_VAL 2
+ #define SPEAR1310_UART_CLK_MASK 2
+ #define SPEAR1310_UART_CLK_SHIFT 4
+
+ #define SPEAR1310_AUX_CLK_PLL5_VAL 0
+ #define SPEAR1310_AUX_CLK_SYNT_VAL 1
+ #define SPEAR1310_CLCD_CLK_MASK 2
+ #define SPEAR1310_CLCD_CLK_SHIFT 2
+ #define SPEAR1310_C3_CLK_MASK 1
+ #define SPEAR1310_C3_CLK_SHIFT 1
+
+#define SPEAR1310_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
+ #define SPEAR1310_GMAC_PHY_IF_SEL_MASK 3
+ #define SPEAR1310_GMAC_PHY_IF_SEL_SHIFT 4
+ #define SPEAR1310_GMAC_PHY_CLK_MASK 1
+ #define SPEAR1310_GMAC_PHY_CLK_SHIFT 3
+ #define SPEAR1310_GMAC_PHY_INPUT_CLK_MASK 2
+ #define SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT 1
+
+#define SPEAR1310_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
+ /* I2S_CLK_CFG register mask */
+ #define SPEAR1310_I2S_SCLK_X_MASK 0x1F
+ #define SPEAR1310_I2S_SCLK_X_SHIFT 27
+ #define SPEAR1310_I2S_SCLK_Y_MASK 0x1F
+ #define SPEAR1310_I2S_SCLK_Y_SHIFT 22
+ #define SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT 21
+ #define SPEAR1310_I2S_SCLK_SYNTH_ENB 20
+ #define SPEAR1310_I2S_PRS1_CLK_X_MASK 0xFF
+ #define SPEAR1310_I2S_PRS1_CLK_X_SHIFT 12
+ #define SPEAR1310_I2S_PRS1_CLK_Y_MASK 0xFF
+ #define SPEAR1310_I2S_PRS1_CLK_Y_SHIFT 4
+ #define SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT 3
+ #define SPEAR1310_I2S_REF_SEL_MASK 1
+ #define SPEAR1310_I2S_REF_SHIFT 2
+ #define SPEAR1310_I2S_SRC_CLK_MASK 2
+ #define SPEAR1310_I2S_SRC_CLK_SHIFT 0
+
+#define SPEAR1310_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
+#define SPEAR1310_UART_CLK_SYNT (VA_MISC_BASE + 0x254)
+#define SPEAR1310_GMAC_CLK_SYNT (VA_MISC_BASE + 0x258)
+#define SPEAR1310_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x25C)
+#define SPEAR1310_CFXD_CLK_SYNT (VA_MISC_BASE + 0x260)
+#define SPEAR1310_ADC_CLK_SYNT (VA_MISC_BASE + 0x264)
+#define SPEAR1310_AMBA_CLK_SYNT (VA_MISC_BASE + 0x268)
+#define SPEAR1310_CLCD_CLK_SYNT (VA_MISC_BASE + 0x270)
+#define SPEAR1310_RAS_CLK_SYNT0 (VA_MISC_BASE + 0x280)
+#define SPEAR1310_RAS_CLK_SYNT1 (VA_MISC_BASE + 0x288)
+#define SPEAR1310_RAS_CLK_SYNT2 (VA_MISC_BASE + 0x290)
+#define SPEAR1310_RAS_CLK_SYNT3 (VA_MISC_BASE + 0x298)
+ /* Check Fractional synthesizer reg masks */
+
+#define SPEAR1310_PERIP1_CLK_ENB (VA_MISC_BASE + 0x300)
+ /* PERIP1_CLK_ENB register masks */
+ #define SPEAR1310_RTC_CLK_ENB 31
+ #define SPEAR1310_ADC_CLK_ENB 30
+ #define SPEAR1310_C3_CLK_ENB 29
+ #define SPEAR1310_JPEG_CLK_ENB 28
+ #define SPEAR1310_CLCD_CLK_ENB 27
+ #define SPEAR1310_DMA_CLK_ENB 25
+ #define SPEAR1310_GPIO1_CLK_ENB 24
+ #define SPEAR1310_GPIO0_CLK_ENB 23
+ #define SPEAR1310_GPT1_CLK_ENB 22
+ #define SPEAR1310_GPT0_CLK_ENB 21
+ #define SPEAR1310_I2S0_CLK_ENB 20
+ #define SPEAR1310_I2S1_CLK_ENB 19
+ #define SPEAR1310_I2C0_CLK_ENB 18
+ #define SPEAR1310_SSP_CLK_ENB 17
+ #define SPEAR1310_UART_CLK_ENB 15
+ #define SPEAR1310_PCIE_SATA_2_CLK_ENB 14
+ #define SPEAR1310_PCIE_SATA_1_CLK_ENB 13
+ #define SPEAR1310_PCIE_SATA_0_CLK_ENB 12
+ #define SPEAR1310_UOC_CLK_ENB 11
+ #define SPEAR1310_UHC1_CLK_ENB 10
+ #define SPEAR1310_UHC0_CLK_ENB 9
+ #define SPEAR1310_GMAC_CLK_ENB 8
+ #define SPEAR1310_CFXD_CLK_ENB 7
+ #define SPEAR1310_SDHCI_CLK_ENB 6
+ #define SPEAR1310_SMI_CLK_ENB 5
+ #define SPEAR1310_FSMC_CLK_ENB 4
+ #define SPEAR1310_SYSRAM0_CLK_ENB 3
+ #define SPEAR1310_SYSRAM1_CLK_ENB 2
+ #define SPEAR1310_SYSROM_CLK_ENB 1
+ #define SPEAR1310_BUS_CLK_ENB 0
+
+#define SPEAR1310_PERIP2_CLK_ENB (VA_MISC_BASE + 0x304)
+ /* PERIP2_CLK_ENB register masks */
+ #define SPEAR1310_THSENS_CLK_ENB 8
+ #define SPEAR1310_I2S_REF_PAD_CLK_ENB 7
+ #define SPEAR1310_ACP_CLK_ENB 6
+ #define SPEAR1310_GPT3_CLK_ENB 5
+ #define SPEAR1310_GPT2_CLK_ENB 4
+ #define SPEAR1310_KBD_CLK_ENB 3
+ #define SPEAR1310_CPU_DBG_CLK_ENB 2
+ #define SPEAR1310_DDR_CORE_CLK_ENB 1
+ #define SPEAR1310_DDR_CTRL_CLK_ENB 0
+
+#define SPEAR1310_RAS_CLK_ENB (VA_MISC_BASE + 0x310)
+ /* RAS_CLK_ENB register masks */
+ #define SPEAR1310_SYNT3_CLK_ENB 17
+ #define SPEAR1310_SYNT2_CLK_ENB 16
+ #define SPEAR1310_SYNT1_CLK_ENB 15
+ #define SPEAR1310_SYNT0_CLK_ENB 14
+ #define SPEAR1310_PCLK3_CLK_ENB 13
+ #define SPEAR1310_PCLK2_CLK_ENB 12
+ #define SPEAR1310_PCLK1_CLK_ENB 11
+ #define SPEAR1310_PCLK0_CLK_ENB 10
+ #define SPEAR1310_PLL3_CLK_ENB 9
+ #define SPEAR1310_PLL2_CLK_ENB 8
+ #define SPEAR1310_C125M_PAD_CLK_ENB 7
+ #define SPEAR1310_C30M_CLK_ENB 6
+ #define SPEAR1310_C48M_CLK_ENB 5
+ #define SPEAR1310_OSC_25M_CLK_ENB 4
+ #define SPEAR1310_OSC_32K_CLK_ENB 3
+ #define SPEAR1310_OSC_24M_CLK_ENB 2
+ #define SPEAR1310_PCLK_CLK_ENB 1
+ #define SPEAR1310_ACLK_CLK_ENB 0
+
+/* RAS Area Control Register */
+#define SPEAR1310_RAS_CTRL_REG0 (VA_SPEAR1310_RAS_BASE + 0x000)
+ #define SPEAR1310_SSP1_CLK_MASK 3
+ #define SPEAR1310_SSP1_CLK_SHIFT 26
+ #define SPEAR1310_TDM_CLK_MASK 1
+ #define SPEAR1310_TDM2_CLK_SHIFT 24
+ #define SPEAR1310_TDM1_CLK_SHIFT 23
+ #define SPEAR1310_I2C_CLK_MASK 1
+ #define SPEAR1310_I2C7_CLK_SHIFT 22
+ #define SPEAR1310_I2C6_CLK_SHIFT 21
+ #define SPEAR1310_I2C5_CLK_SHIFT 20
+ #define SPEAR1310_I2C4_CLK_SHIFT 19
+ #define SPEAR1310_I2C3_CLK_SHIFT 18
+ #define SPEAR1310_I2C2_CLK_SHIFT 17
+ #define SPEAR1310_I2C1_CLK_SHIFT 16
+ #define SPEAR1310_GPT64_CLK_MASK 1
+ #define SPEAR1310_GPT64_CLK_SHIFT 15
+ #define SPEAR1310_RAS_UART_CLK_MASK 1
+ #define SPEAR1310_UART5_CLK_SHIFT 14
+ #define SPEAR1310_UART4_CLK_SHIFT 13
+ #define SPEAR1310_UART3_CLK_SHIFT 12
+ #define SPEAR1310_UART2_CLK_SHIFT 11
+ #define SPEAR1310_UART1_CLK_SHIFT 10
+ #define SPEAR1310_PCI_CLK_MASK 1
+ #define SPEAR1310_PCI_CLK_SHIFT 0
+
+#define SPEAR1310_RAS_CTRL_REG1 (VA_SPEAR1310_RAS_BASE + 0x004)
+ #define SPEAR1310_PHY_CLK_MASK 0x3
+ #define SPEAR1310_RMII_PHY_CLK_SHIFT 0
+ #define SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT 2
+
+#define SPEAR1310_RAS_SW_CLK_CTRL (VA_SPEAR1310_RAS_BASE + 0x0148)
+ #define SPEAR1310_CAN1_CLK_ENB 25
+ #define SPEAR1310_CAN0_CLK_ENB 24
+ #define SPEAR1310_GPT64_CLK_ENB 23
+ #define SPEAR1310_SSP1_CLK_ENB 22
+ #define SPEAR1310_I2C7_CLK_ENB 21
+ #define SPEAR1310_I2C6_CLK_ENB 20
+ #define SPEAR1310_I2C5_CLK_ENB 19
+ #define SPEAR1310_I2C4_CLK_ENB 18
+ #define SPEAR1310_I2C3_CLK_ENB 17
+ #define SPEAR1310_I2C2_CLK_ENB 16
+ #define SPEAR1310_I2C1_CLK_ENB 15
+ #define SPEAR1310_UART5_CLK_ENB 14
+ #define SPEAR1310_UART4_CLK_ENB 13
+ #define SPEAR1310_UART3_CLK_ENB 12
+ #define SPEAR1310_UART2_CLK_ENB 11
+ #define SPEAR1310_UART1_CLK_ENB 10
+ #define SPEAR1310_RS485_1_CLK_ENB 9
+ #define SPEAR1310_RS485_0_CLK_ENB 8
+ #define SPEAR1310_TDM2_CLK_ENB 7
+ #define SPEAR1310_TDM1_CLK_ENB 6
+ #define SPEAR1310_PCI_CLK_ENB 5
+ #define SPEAR1310_GMII_CLK_ENB 4
+ #define SPEAR1310_MII2_CLK_ENB 3
+ #define SPEAR1310_MII1_CLK_ENB 2
+ #define SPEAR1310_MII0_CLK_ENB 1
+ #define SPEAR1310_ESRAM_CLK_ENB 0
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ /* PCLK 24MHz */
+ {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+ {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For VCO1div2 = 500 MHz */
+ {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+ /* For gmac phy input clk */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+ {.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+ {.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+ {.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+ {.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+};
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_X_MASK,
+ .xscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1310_I2S_SCLK_X_MASK,
+ .xscale_sel_shift = SPEAR1310_I2S_SCLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1310_I2S_SCLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1310_I2S_SCLK_Y_SHIFT,
+ .enable_bit = SPEAR1310_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz */
+};
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+ /* For i2s_ref_clk = 12.288MHz */
+ {.xscale = 1, .yscale = 4, .eq = 0}, /* 1.53 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 0}, /* 3.07 MHz */
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+ /* For ahb = 166.67 MHz */
+ {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+ {.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+ {.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+ /* For vco1div4 = 250 MHz */
+ {.div = 0x14000}, /* 25 MHz */
+ {.div = 0x0A000}, /* 50 MHz */
+ {.div = 0x05000}, /* 100 MHz */
+ {.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "uart_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
+ "osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
+ "i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll2_clk", };
+static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
+ "ras_pll2_clk", "ras_syn0_clk", };
+static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
+ "ras_pll2_clk", "ras_syn0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_syn3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_syn1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk",
+ "ras_plclk0_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", };
+
+void __init spear1310_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+ 25000000);
+ clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+ 125000000);
+ clk_register_clkdev(clk, "gmii_pad_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+ CLK_IS_ROOT, 12288000);
+ clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_RTC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 or 25 MHz osc clk */
+ /* vco-pll */
+ clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco1_mclk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
+ 0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco2_mclk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
+ 0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco3_mclk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
+ 0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco3_clk", NULL);
+ clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+ clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+ 0, SPEAR1310_PLL4_CTR, SPEAR1310_PLL4_FRQ, pll4_rtbl,
+ ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco4_clk", NULL);
+ clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll5_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+ 25000000);
+ clk_register_clkdev(clk, "pll6_clk", NULL);
+
+ /* vco div n clocks */
+ clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+ /* peripherals */
+ clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+ 128);
+ clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_thermal");
+
+ /* clock derived from pll4 clk */
+ clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 2);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+ clk = clk_register_fixed_factor(NULL, "ahb_clk", "pll1_clk", 0, 1,
+ 6);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "apb_clk", "pll1_clk", 0, 1,
+ 12);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ /* gpt clocks */
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt0_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* others */
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_UART_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+ clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
+ "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+ clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_CFXD_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2800000.cf");
+ clk_register_clkdev(clk, NULL, "arasan_xd");
+
+ clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_C3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
+ ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "c3_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c3");
+
+ /* gmac */
+ clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
+ ARRAY_SIZE(gmac_phy_input_parents), 0,
+ SPEAR1310_GMAC_CLK_CFG,
+ SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
+ SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "phy_input_mclk", NULL);
+
+ clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+ 0, SPEAR1310_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+ ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "phy_syn_clk", NULL);
+ clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
+ ARRAY_SIZE(gmac_phy_parents), 0,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
+ SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+ /* clcd */
+ clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
+ ARRAY_SIZE(clcd_synth_parents), 0,
+ SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
+ SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
+
+ clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
+ SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
+ ARRAY_SIZE(clcd_rtbl), &_lock);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
+ ARRAY_SIZE(clcd_pixel_parents), 0,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
+ SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "clcd_clk", NULL);
+
+ /* i2s */
+ clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
+ ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
+ SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
+ SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+ ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+ clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
+ SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk",
+ "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
+ &i2s_sclk_masks, i2s_sclk_rtbl,
+ ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
+
+ /* clock derived from ahb clk */
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2C0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_DMA_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea800000.dma");
+ clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_JPEG_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2000000.jpeg");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GMAC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_FSMC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SMI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+ clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UOC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "uoc");
+
+ clk = clk_register_gate(NULL, "pcie_sata_0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_0_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.0");
+ clk_register_clkdev(clk, NULL, "ahci.0");
+
+ clk = clk_register_gate(NULL, "pcie_sata_1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_1_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.1");
+ clk_register_clkdev(clk, NULL, "ahci.1");
+
+ clk = clk_register_gate(NULL, "pcie_sata_2_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_2_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.2");
+ clk_register_clkdev(clk, NULL, "ahci.2");
+
+ clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+ clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
+ 0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
+ ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "adc_syn_clk", NULL);
+ clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "adc_clk");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SSP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+ clk = clk_register_gate(NULL, "i2s0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0180000.i2s");
+
+ clk = clk_register_gate(NULL, "i2s1_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0200000.i2s");
+
+ clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_KBD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+ /* RAS clks */
+ clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+ ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+ SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+ ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+ SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
+
+ clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_syn0_clk", NULL);
+
+ clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_syn1_clk", NULL);
+
+ clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_syn2_clk", NULL);
+
+ clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_syn3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_24m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_25m_clk", "osc_25m_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_25M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_25m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_32k_clk", "osc_32k_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_32K_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_32k_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_pad_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_tx125_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_30m_fixed_clk", "pll5_clk", 0,
+ 30000000);
+ clk = clk_register_gate(NULL, "ras_30m_clk", "ras_30m_fixed_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C30M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_30m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_48m_fixed_clk", "pll5_clk", 0,
+ 48000000);
+ clk = clk_register_gate(NULL, "ras_48m_clk", "ras_48m_fixed_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C48M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_48m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_ACLK_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PCLK_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, CLK_IS_ROOT,
+ 50000000);
+
+ clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, CLK_IS_ROOT,
+ 50000000);
+
+ clk = clk_register_gate(NULL, "can0_clk", "apb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+ clk = clk_register_gate(NULL, "can1_clk", "apb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+ clk = clk_register_gate(NULL, "ras_smii0_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c400000.eth");
+
+ clk = clk_register_gate(NULL, "ras_smii1_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c500000.eth");
+
+ clk = clk_register_gate(NULL, "ras_smii2_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c600000.eth");
+
+ clk = clk_register_gate(NULL, "ras_rgmii_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_GMII_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c700000.eth");
+
+ clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
+ smii_rgmii_phy_parents,
+ ARRAY_SIZE(smii_rgmii_phy_parents), 0,
+ SPEAR1310_RAS_CTRL_REG1,
+ SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
+ SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.1");
+ clk_register_clkdev(clk, NULL, "stmmacphy.2");
+ clk_register_clkdev(clk, NULL, "stmmacphy.4");
+
+ clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
+ ARRAY_SIZE(rmii_phy_parents), 0,
+ SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
+ SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.3");
+
+ clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart1_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c800000.serial");
+
+ clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart2_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c900000.serial");
+
+ clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart3_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5ca00000.serial");
+
+ clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart4_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cb00000.serial");
+
+ clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart5_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cc00000.serial");
+
+ clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c1_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cd00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c2_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5ce00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c3_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cf00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c4_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d000000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c5_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d100000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c6_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d200000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c7_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d300000.i2c");
+
+ clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
+ ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ssp1_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d400000.spi");
+
+ clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
+ ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "pci_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "pci");
+
+ clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
+ ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "tdm1_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
+
+ clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
+ ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "tdm2_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
+}
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
new file mode 100644
index 000000000000..2352cee7f645
--- /dev/null
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -0,0 +1,961 @@
+/*
+ * drivers/clk/spear/spear1340_clock.c
+ *
+ * SPEAr1340 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* Clock Configuration Registers */
+#define SPEAR1340_SYS_CLK_CTRL (VA_MISC_BASE + 0x200)
+ #define SPEAR1340_HCLK_SRC_SEL_SHIFT 27
+ #define SPEAR1340_HCLK_SRC_SEL_MASK 1
+ #define SPEAR1340_SCLK_SRC_SEL_SHIFT 23
+ #define SPEAR1340_SCLK_SRC_SEL_MASK 3
+
+/* PLL related registers and bit values */
+#define SPEAR1340_PLL_CFG (VA_MISC_BASE + 0x210)
+ /* PLL_CFG bit values */
+ #define SPEAR1340_CLCD_SYNT_CLK_MASK 1
+ #define SPEAR1340_CLCD_SYNT_CLK_SHIFT 31
+ #define SPEAR1340_GEN_SYNT2_3_CLK_SHIFT 29
+ #define SPEAR1340_GEN_SYNT_CLK_MASK 2
+ #define SPEAR1340_GEN_SYNT0_1_CLK_SHIFT 27
+ #define SPEAR1340_PLL_CLK_MASK 2
+ #define SPEAR1340_PLL3_CLK_SHIFT 24
+ #define SPEAR1340_PLL2_CLK_SHIFT 22
+ #define SPEAR1340_PLL1_CLK_SHIFT 20
+
+#define SPEAR1340_PLL1_CTR (VA_MISC_BASE + 0x214)
+#define SPEAR1340_PLL1_FRQ (VA_MISC_BASE + 0x218)
+#define SPEAR1340_PLL2_CTR (VA_MISC_BASE + 0x220)
+#define SPEAR1340_PLL2_FRQ (VA_MISC_BASE + 0x224)
+#define SPEAR1340_PLL3_CTR (VA_MISC_BASE + 0x22C)
+#define SPEAR1340_PLL3_FRQ (VA_MISC_BASE + 0x230)
+#define SPEAR1340_PLL4_CTR (VA_MISC_BASE + 0x238)
+#define SPEAR1340_PLL4_FRQ (VA_MISC_BASE + 0x23C)
+#define SPEAR1340_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
+ /* PERIP_CLK_CFG bit values */
+ #define SPEAR1340_SPDIF_CLK_MASK 1
+ #define SPEAR1340_SPDIF_OUT_CLK_SHIFT 15
+ #define SPEAR1340_SPDIF_IN_CLK_SHIFT 14
+ #define SPEAR1340_GPT3_CLK_SHIFT 13
+ #define SPEAR1340_GPT2_CLK_SHIFT 12
+ #define SPEAR1340_GPT_CLK_MASK 1
+ #define SPEAR1340_GPT1_CLK_SHIFT 9
+ #define SPEAR1340_GPT0_CLK_SHIFT 8
+ #define SPEAR1340_UART_CLK_MASK 2
+ #define SPEAR1340_UART1_CLK_SHIFT 6
+ #define SPEAR1340_UART0_CLK_SHIFT 4
+ #define SPEAR1340_CLCD_CLK_MASK 2
+ #define SPEAR1340_CLCD_CLK_SHIFT 2
+ #define SPEAR1340_C3_CLK_MASK 1
+ #define SPEAR1340_C3_CLK_SHIFT 1
+
+#define SPEAR1340_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
+ #define SPEAR1340_GMAC_PHY_CLK_MASK 1
+ #define SPEAR1340_GMAC_PHY_CLK_SHIFT 2
+ #define SPEAR1340_GMAC_PHY_INPUT_CLK_MASK 2
+ #define SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT 0
+
+#define SPEAR1340_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
+ /* I2S_CLK_CFG register mask */
+ #define SPEAR1340_I2S_SCLK_X_MASK 0x1F
+ #define SPEAR1340_I2S_SCLK_X_SHIFT 27
+ #define SPEAR1340_I2S_SCLK_Y_MASK 0x1F
+ #define SPEAR1340_I2S_SCLK_Y_SHIFT 22
+ #define SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT 21
+ #define SPEAR1340_I2S_SCLK_SYNTH_ENB 20
+ #define SPEAR1340_I2S_PRS1_CLK_X_MASK 0xFF
+ #define SPEAR1340_I2S_PRS1_CLK_X_SHIFT 12
+ #define SPEAR1340_I2S_PRS1_CLK_Y_MASK 0xFF
+ #define SPEAR1340_I2S_PRS1_CLK_Y_SHIFT 4
+ #define SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT 3
+ #define SPEAR1340_I2S_REF_SEL_MASK 1
+ #define SPEAR1340_I2S_REF_SHIFT 2
+ #define SPEAR1340_I2S_SRC_CLK_MASK 2
+ #define SPEAR1340_I2S_SRC_CLK_SHIFT 0
+
+#define SPEAR1340_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
+#define SPEAR1340_UART0_CLK_SYNT (VA_MISC_BASE + 0x254)
+#define SPEAR1340_UART1_CLK_SYNT (VA_MISC_BASE + 0x258)
+#define SPEAR1340_GMAC_CLK_SYNT (VA_MISC_BASE + 0x25C)
+#define SPEAR1340_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x260)
+#define SPEAR1340_CFXD_CLK_SYNT (VA_MISC_BASE + 0x264)
+#define SPEAR1340_ADC_CLK_SYNT (VA_MISC_BASE + 0x270)
+#define SPEAR1340_AMBA_CLK_SYNT (VA_MISC_BASE + 0x274)
+#define SPEAR1340_CLCD_CLK_SYNT (VA_MISC_BASE + 0x27C)
+#define SPEAR1340_SYS_CLK_SYNT (VA_MISC_BASE + 0x284)
+#define SPEAR1340_GEN_CLK_SYNT0 (VA_MISC_BASE + 0x28C)
+#define SPEAR1340_GEN_CLK_SYNT1 (VA_MISC_BASE + 0x294)
+#define SPEAR1340_GEN_CLK_SYNT2 (VA_MISC_BASE + 0x29C)
+#define SPEAR1340_GEN_CLK_SYNT3 (VA_MISC_BASE + 0x304)
+#define SPEAR1340_PERIP1_CLK_ENB (VA_MISC_BASE + 0x30C)
+ #define SPEAR1340_RTC_CLK_ENB 31
+ #define SPEAR1340_ADC_CLK_ENB 30
+ #define SPEAR1340_C3_CLK_ENB 29
+ #define SPEAR1340_CLCD_CLK_ENB 27
+ #define SPEAR1340_DMA_CLK_ENB 25
+ #define SPEAR1340_GPIO1_CLK_ENB 24
+ #define SPEAR1340_GPIO0_CLK_ENB 23
+ #define SPEAR1340_GPT1_CLK_ENB 22
+ #define SPEAR1340_GPT0_CLK_ENB 21
+ #define SPEAR1340_I2S_PLAY_CLK_ENB 20
+ #define SPEAR1340_I2S_REC_CLK_ENB 19
+ #define SPEAR1340_I2C0_CLK_ENB 18
+ #define SPEAR1340_SSP_CLK_ENB 17
+ #define SPEAR1340_UART0_CLK_ENB 15
+ #define SPEAR1340_PCIE_SATA_CLK_ENB 12
+ #define SPEAR1340_UOC_CLK_ENB 11
+ #define SPEAR1340_UHC1_CLK_ENB 10
+ #define SPEAR1340_UHC0_CLK_ENB 9
+ #define SPEAR1340_GMAC_CLK_ENB 8
+ #define SPEAR1340_CFXD_CLK_ENB 7
+ #define SPEAR1340_SDHCI_CLK_ENB 6
+ #define SPEAR1340_SMI_CLK_ENB 5
+ #define SPEAR1340_FSMC_CLK_ENB 4
+ #define SPEAR1340_SYSRAM0_CLK_ENB 3
+ #define SPEAR1340_SYSRAM1_CLK_ENB 2
+ #define SPEAR1340_SYSROM_CLK_ENB 1
+ #define SPEAR1340_BUS_CLK_ENB 0
+
+#define SPEAR1340_PERIP2_CLK_ENB (VA_MISC_BASE + 0x310)
+ #define SPEAR1340_THSENS_CLK_ENB 8
+ #define SPEAR1340_I2S_REF_PAD_CLK_ENB 7
+ #define SPEAR1340_ACP_CLK_ENB 6
+ #define SPEAR1340_GPT3_CLK_ENB 5
+ #define SPEAR1340_GPT2_CLK_ENB 4
+ #define SPEAR1340_KBD_CLK_ENB 3
+ #define SPEAR1340_CPU_DBG_CLK_ENB 2
+ #define SPEAR1340_DDR_CORE_CLK_ENB 1
+ #define SPEAR1340_DDR_CTRL_CLK_ENB 0
+
+#define SPEAR1340_PERIP3_CLK_ENB (VA_MISC_BASE + 0x314)
+ #define SPEAR1340_PLGPIO_CLK_ENB 18
+ #define SPEAR1340_VIDEO_DEC_CLK_ENB 16
+ #define SPEAR1340_VIDEO_ENC_CLK_ENB 15
+ #define SPEAR1340_SPDIF_OUT_CLK_ENB 13
+ #define SPEAR1340_SPDIF_IN_CLK_ENB 12
+ #define SPEAR1340_VIDEO_IN_CLK_ENB 11
+ #define SPEAR1340_CAM0_CLK_ENB 10
+ #define SPEAR1340_CAM1_CLK_ENB 9
+ #define SPEAR1340_CAM2_CLK_ENB 8
+ #define SPEAR1340_CAM3_CLK_ENB 7
+ #define SPEAR1340_MALI_CLK_ENB 6
+ #define SPEAR1340_CEC0_CLK_ENB 5
+ #define SPEAR1340_CEC1_CLK_ENB 4
+ #define SPEAR1340_PWM_CLK_ENB 3
+ #define SPEAR1340_I2C1_CLK_ENB 2
+ #define SPEAR1340_UART1_CLK_ENB 1
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ /* PCLK 24 MHz */
+ {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+ {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+ {.mode = 0, .m = 0x96, .n = 0x06, .p = 0x0}, /* vco 1200, pll 1200 MHz */
+};
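+
+ /*
+  * For mode 0 these m/n/p values follow the VCO/PLL relations used by the
+  * common SPEAr vco-pll code in this series: vco = (2 * m * Fin) / n and
+  * pll = vco / 2^p. Worked example for the first entry with Fin = 24 MHz:
+  * vco = (2 * 0x83 * 24) / 4 = 1572 MHz, pll = 1572 / 2^5 = 49.125 MHz.
+  */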
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1000 MHz */
+};
+
+/*
+ * All entries below generate 166 MHz for
+ * different values of vco1div2
+ */
+static struct frac_rate_tbl amba_synth_rtbl[] = {
+ {.div = 0x06062}, /* for vco1div2 = 500 MHz */
+ {.div = 0x04D1B}, /* for vco1div2 = 400 MHz */
+ {.div = 0x04000}, /* for vco1div2 = 332 MHz */
+ {.div = 0x03031}, /* for vco1div2 = 250 MHz */
+ {.div = 0x0268D}, /* for vco1div2 = 200 MHz */
+};
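+
+ /*
+  * Quick check with the frac-synth relation Fout = Fin / (2 * div), div in
+  * x.14 fixed point: 0x06062 ~= 1.506, so 500 / (2 * 1.506) ~= 166 MHz;
+  * 0x04D1B ~= 1.205, so 400 / (2 * 1.205) ~= 166 MHz. Each entry indeed
+  * yields ~166 MHz from its listed vco1div2 rate.
+  */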
+
+/*
+ * Synthesizer clock derived from vco1div2. This clock is one of the
+ * possible clocks to feed the cpu directly.
+ * We can program this synthesizer to make the cpu run at different clock
+ * frequencies.
+ * The following table provides configuration values to let the cpu run at
+ * 200, 250, 332, 400 or 500 MHz, considering different possibilities of
+ * the input (vco1div2) clock.
+ *
+ * --------------------------------------------------------------------
+ * vco1div2 (MHz) fout (MHz) cpuclk = fout/2 (MHz) div
+ * --------------------------------------------------------------------
+ * 400 200 100 0x04000
+ * 400 250 125 0x03333
+ * 400 332 166 0x0268D
+ * 400 400 200 0x02000
+ * --------------------------------------------------------------------
+ * 500 200 100 0x05000
+ * 500 250 125 0x04000
+ * 500 332 166 0x03031
+ * 500 400 200 0x02800
+ * 500 500 250 0x02000
+ * --------------------------------------------------------------------
+ * 664 200 100 0x06a38
+ * 664 250 125 0x054FD
+ * 664 332 166 0x04000
+ * 664 400 200 0x0351E
+ * 664 500 250 0x02A7E
+ * --------------------------------------------------------------------
+ * 800 200 100 0x08000
+ * 800 250 125 0x06666
+ * 800 332 166 0x04D18
+ * 800 400 200 0x04000
+ * 800 500 250 0x03333
+ * --------------------------------------------------------------------
+ * sys rate configuration table is in descending order of divisor.
+ */
+static struct frac_rate_tbl sys_synth_rtbl[] = {
+ {.div = 0x08000},
+ {.div = 0x06a38},
+ {.div = 0x06666},
+ {.div = 0x054FD},
+ {.div = 0x05000},
+ {.div = 0x04D18},
+ {.div = 0x04000},
+ {.div = 0x0351E},
+ {.div = 0x03333},
+ {.div = 0x03031},
+ {.div = 0x02A7E},
+ {.div = 0x02800},
+ {.div = 0x0268D},
+ {.div = 0x02000},
+};
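+
+ /*
+  * Example row from the table above: vco1div2 = 664 MHz with div = 0x04000
+  * (= 1.0 in x.14 fixed point) gives fout = 664 / (2 * 1.0) = 332 MHz and
+  * cpuclk = fout / 2 = 166 MHz.
+  */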
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For VCO1div2 = 500 MHz */
+ {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
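+
+ /*
+  * Entries with eq = 1 use the second aux-synth equation,
+  * Fout = Fin * xscale / yscale: e.g. 500 MHz * 1 / 3 ~= 166 MHz and
+  * 500 MHz * 1 / 2 = 250 MHz, as noted above; eq = 0 entries use
+  * Fout = Fin * xscale / (2 * yscale).
+  */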
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+ /* For gmac phy input clk */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+ {.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+ {.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+ {.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x07BA0}, /* 65 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+ {.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0360D}, /* 148 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x035E0}, /* 148.5 MHz, for vco1div4 = 250 MHz */
+};
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_X_MASK,
+ .xscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1340_I2S_SCLK_X_MASK,
+ .xscale_sel_shift = SPEAR1340_I2S_SCLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1340_I2S_SCLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1340_I2S_SCLK_Y_SHIFT,
+ .enable_bit = SPEAR1340_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 1, .yscale = 12, .eq = 0}, /* 2.048 MHz, smp freq = 8 kHz */
+ {.xscale = 11, .yscale = 96, .eq = 0}, /* 2.816 MHz, smp freq = 11 kHz */
+ {.xscale = 1, .yscale = 6, .eq = 0}, /* 4.096 MHz, smp freq = 16 kHz */
+ {.xscale = 11, .yscale = 48, .eq = 0}, /* 5.632 MHz, smp freq = 22 kHz */
+
+ /*
+ * with parent clk = 49.152 MHz, the generated freq is 8.192 MHz, smp freq = 32 kHz
+ * with parent clk = 12.288 MHz, the generated freq is 2.048 MHz, smp freq = 8 kHz
+ */
+ {.xscale = 1, .yscale = 3, .eq = 0},
+
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 17, .yscale = 37, .eq = 0}, /* 11.289 MHz, smp freq = 44.1 kHz */
+ {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz, smp freq = 48 kHz */
+};
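+
+ /*
+  * The "smp freq" comments above assume a 256 * Fs master clock: e.g.
+  * 49.152 MHz * 1 / (2 * 12) = 2.048 MHz, and 2.048 MHz / 256 = 8 kHz.
+  */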
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+ /* For sclk = ref_clk * x / (2 * y) */
+ {.xscale = 1, .yscale = 4, .eq = 0},
+ {.xscale = 1, .yscale = 2, .eq = 0},
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+ /* For ahb = 166.67 MHz */
+ {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+ {.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+ {.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+ /* For vco1div4 = 250 MHz */
+ {.div = 0x1624E}, /* 22.5792 MHz */
+ {.div = 0x14585}, /* 24.576 MHz */
+ {.div = 0x14000}, /* 25 MHz */
+ {.div = 0x0B127}, /* 45.1584 MHz */
+ {.div = 0x0A000}, /* 50 MHz */
+ {.div = 0x061A8}, /* 81.92 MHz */
+ {.div = 0x05000}, /* 100 MHz */
+ {.div = 0x02800}, /* 200 MHz */
+ {.div = 0x02620}, /* 210 MHz */
+ {.div = 0x02460}, /* 220 MHz */
+ {.div = 0x022C0}, /* 230 MHz */
+ {.div = 0x02160}, /* 240 MHz */
+ {.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *sys_parents[] = { "pll1_clk", "pll1_clk", "pll1_clk",
+ "pll1_clk", "sys_synth_clk", "sys_synth_clk", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_syn_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
+ "uart0_syn_gclk", };
+static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
+ "uart1_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
+ "osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
+ "i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_syn2_clk", };
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_syn3_clk", };
+
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll2_clk", };
+
+void __init spear1340_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+ 25000000);
+ clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+ 125000000);
+ clk_register_clkdev(clk, "gmii_pad_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+ CLK_IS_ROOT, 12288000);
+ clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_RTC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 or 25 MHz osc clk */
+ /* vco-pll */
+ clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco1_mclk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
+ SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco2_mclk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
+ SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco3_mclk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
+ SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco3_clk", NULL);
+ clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+ clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+ 0, SPEAR1340_PLL4_CTR, SPEAR1340_PLL4_FRQ, pll4_rtbl,
+ ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco4_clk", NULL);
+ clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll5_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+ 25000000);
+ clk_register_clkdev(clk, "pll6_clk", NULL);
+
+ /* vco div n clocks */
+ clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+ /* peripherals */
+ clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+ 128);
+ clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_thermal");
+
+ /* clock derived from pll4 clk */
+ clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_frac("sys_syn_clk", "vco1div2_clk", 0,
+ SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
+ ARRAY_SIZE(sys_synth_rtbl), &_lock);
+ clk_register_clkdev(clk, "sys_syn_clk", NULL);
+
+ clk = clk_register_frac("amba_syn_clk", "vco1div2_clk", 0,
+ SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
+ ARRAY_SIZE(amba_synth_rtbl), &_lock);
+ clk_register_clkdev(clk, "amba_syn_clk", NULL);
+
+ clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
+ ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+ SPEAR1340_SCLK_SRC_SEL_SHIFT,
+ SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "sys_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mclk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "cpu_div3_clk", "cpu_clk", 0, 1,
+ 3);
+ clk_register_clkdev(clk, "cpu_div3_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+ clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
+ ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+ SPEAR1340_HCLK_SRC_SEL_SHIFT,
+ SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ /* gpt clocks */
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt0_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* others */
+ clk = clk_register_aux("uart0_syn_clk", "uart0_syn_gclk",
+ "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart0_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+ clk = clk_register_aux("uart1_syn_clk", "uart1_syn_gclk",
+ "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart1_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
+ ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart1_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b4100000.serial");
+
+ clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
+ "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+ clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1340_CFXD_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2800000.cf");
+ clk_register_clkdev(clk, NULL, "arasan_xd");
+
+ clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk", 0,
+ SPEAR1340_C3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
+ ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "c3_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c3");
+
+ /* gmac */
+ clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
+ ARRAY_SIZE(gmac_phy_input_parents), 0,
+ SPEAR1340_GMAC_CLK_CFG,
+ SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
+ SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "phy_input_mclk", NULL);
+
+ clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+ 0, SPEAR1340_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+ ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "phy_syn_clk", NULL);
+ clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
+ ARRAY_SIZE(gmac_phy_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
+ SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+ /* clcd */
+ clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
+ ARRAY_SIZE(clcd_synth_parents), 0,
+ SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
+ SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
+
+ clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
+ SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
+ ARRAY_SIZE(clcd_rtbl), &_lock);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
+ ARRAY_SIZE(clcd_pixel_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
+ SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "clcd_clk", NULL);
+
+ /* i2s */
+ clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
+ ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
+ SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
+ SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+ ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+ clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
+ SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", "i2s_ref_mclk",
+ 0, SPEAR1340_I2S_CLK_CFG, &i2s_sclk_masks,
+ i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock,
+ &clk1);
+ clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
+
+ /* clock derived from ahb clk */
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+ clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b4000000.i2c");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_DMA_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea800000.dma");
+ clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GMAC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_FSMC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SMI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+ clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UOC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "uoc");
+
+ clk = clk_register_gate(NULL, "pcie_sata_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_PCIE_SATA_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie");
+ clk_register_clkdev(clk, NULL, "ahci");
+
+ clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+ clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
+ 0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
+ ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "adc_syn_clk", NULL);
+ clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "adc_clk");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "ssp_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SSP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+ clk = clk_register_gate(NULL, "i2s_play_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_PLAY_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2400000.i2s");
+
+ clk = clk_register_gate(NULL, "i2s_rec_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_REC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2000000.i2s");
+
+ clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_KBD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+ /* RAS clks */
+ clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+ ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+ SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+ ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+ SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
+
+ clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_syn0_clk", NULL);
+
+ clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_syn1_clk", NULL);
+
+ clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_syn2_clk", NULL);
+
+ clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_syn3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "mali_clk", "gen_syn3_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "mali");
+
+ clk = clk_register_gate(NULL, "cec0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_cec.0");
+
+ clk = clk_register_gate(NULL, "cec1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_cec.1");
+
+ clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
+ ARRAY_SIZE(spdif_out_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
+ SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "spdif_out_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "spdif-out");
+
+ clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
+ ARRAY_SIZE(spdif_in_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
+ SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "spdif_in_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spdif-in");
+
+ clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "acp_clk");
+
+ clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "plgpio");
+
+ clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "video_dec");
+
+ clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "video_enc");
+
+ clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_vip");
+
+ clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.0");
+
+ clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.1");
+
+ clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.2");
+
+ clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.3");
+
+ clk = clk_register_gate(NULL, "pwm_clk", "pwm_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "pwm");
+}
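
For reference, a minimal consumer-side sketch (not part of the patch itself): a peripheral driver resolves one of the clkdev lookups registered above through the generic clk API. The device name "e0280000.i2c" is the lookup created for i2c0_clk; the probe function and its name are illustrative only.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_i2c_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* resolved via the clk_register_clkdev(clk, NULL, "e0280000.i2c") lookup above */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* ungates the bit programmed in SPEAR1340_PERIP1_CLK_ENB */
	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... device setup, e.g. bus timing derived from clk_get_rate(clk) ... */
	return 0;
}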
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
new file mode 100644
index 000000000000..c3157454bb3f
--- /dev/null
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -0,0 +1,604 @@
+/*
+ * SPEAr3xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+#define PLL1_CTR (MISC_BASE + 0x008)
+#define PLL1_FRQ (MISC_BASE + 0x00C)
+#define PLL2_CTR (MISC_BASE + 0x014)
+#define PLL2_FRQ (MISC_BASE + 0x018)
+#define PLL_CLK_CFG (MISC_BASE + 0x020)
+ /* PLL_CLK_CFG register masks */
+ #define MCTR_CLK_SHIFT 28
+ #define MCTR_CLK_MASK 3
+
+#define CORE_CLK_CFG (MISC_BASE + 0x024)
+ /* CORE CLK CFG register masks */
+ #define GEN_SYNTH2_3_CLK_SHIFT 18
+ #define GEN_SYNTH2_3_CLK_MASK 1
+
+ #define HCLK_RATIO_SHIFT 10
+ #define HCLK_RATIO_MASK 2
+ #define PCLK_RATIO_SHIFT 8
+ #define PCLK_RATIO_MASK 2
+
+#define PERIP_CLK_CFG (MISC_BASE + 0x028)
+ /* PERIP_CLK_CFG register masks */
+ #define UART_CLK_SHIFT 4
+ #define UART_CLK_MASK 1
+ #define FIRDA_CLK_SHIFT 5
+ #define FIRDA_CLK_MASK 2
+ #define GPT0_CLK_SHIFT 8
+ #define GPT1_CLK_SHIFT 11
+ #define GPT2_CLK_SHIFT 12
+ #define GPT_CLK_MASK 1
+
+#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
+ /* PERIP1_CLK_ENB register masks */
+ #define UART_CLK_ENB 3
+ #define SSP_CLK_ENB 5
+ #define I2C_CLK_ENB 7
+ #define JPEG_CLK_ENB 8
+ #define FIRDA_CLK_ENB 10
+ #define GPT1_CLK_ENB 11
+ #define GPT2_CLK_ENB 12
+ #define ADC_CLK_ENB 15
+ #define RTC_CLK_ENB 17
+ #define GPIO_CLK_ENB 18
+ #define DMA_CLK_ENB 19
+ #define SMI_CLK_ENB 21
+ #define GMAC_CLK_ENB 23
+ #define USBD_CLK_ENB 24
+ #define USBH_CLK_ENB 25
+ #define C3_CLK_ENB 31
+
+#define RAS_CLK_ENB (MISC_BASE + 0x034)
+ #define RAS_AHB_CLK_ENB 0
+ #define RAS_PLL1_CLK_ENB 1
+ #define RAS_APB_CLK_ENB 2
+ #define RAS_32K_CLK_ENB 3
+ #define RAS_24M_CLK_ENB 4
+ #define RAS_48M_CLK_ENB 5
+ #define RAS_PLL2_CLK_ENB 7
+ #define RAS_SYNT0_CLK_ENB 8
+ #define RAS_SYNT1_CLK_ENB 9
+ #define RAS_SYNT2_CLK_ENB 10
+ #define RAS_SYNT3_CLK_ENB 11
+
+#define PRSC0_CLK_CFG (MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG (MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG (MISC_BASE + 0x04C)
+#define AMEM_CLK_CFG (MISC_BASE + 0x050)
+ #define AMEM_CLK_ENB 0
+
+#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
+#define UART_CLK_SYNT (MISC_BASE + 0x064)
+#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
+#define GEN0_CLK_SYNT (MISC_BASE + 0x06C)
+#define GEN1_CLK_SYNT (MISC_BASE + 0x070)
+#define GEN2_CLK_SYNT (MISC_BASE + 0x074)
+#define GEN3_CLK_SYNT (MISC_BASE + 0x078)
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ {.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */
+ {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For PLL1 = 332 MHz */
+ {.xscale = 2, .yscale = 27, .eq = 0}, /* 12.296 MHz */
+ {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+ /* For pll1 = 332 MHz */
+ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+ {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+/* clock parents */
+static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk",
+};
+static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", };
+static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
+static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+ "pll2_clk", };
+
+#ifdef CONFIG_MACH_SPEAR300
+static void __init spear300_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
+ 1, 1);
+ clk_register_clkdev(clk, NULL, "60000000.clcd");
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "94000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "sdhci_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+ clk = clk_register_fixed_factor(NULL, "gpio1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a9000000.gpio");
+
+ clk = clk_register_fixed_factor(NULL, "kbd_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a0000000.kbd");
+}
+#endif
+
+/* clock initialization for spear 310 machines */
+#ifdef CONFIG_MACH_SPEAR310
+static void __init spear310_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "emi", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "44000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "tdm_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "tdm");
+
+ clk = clk_register_fixed_factor(NULL, "uart1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2000000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart2_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2080000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart3_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2100000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart4_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2180000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart5_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2200000.serial");
+}
+#endif
+
+/* clock initialization for spear 320 machines */
+#ifdef CONFIG_MACH_SPEAR320
+ #define SMII_PCLK_SHIFT 18
+ #define SMII_PCLK_MASK 2
+ #define SMII_PCLK_VAL_PAD 0x0
+ #define SMII_PCLK_VAL_PLL2 0x1
+ #define SMII_PCLK_VAL_SYNTH0 0x2
+ #define SDHCI_PCLK_SHIFT 15
+ #define SDHCI_PCLK_MASK 1
+ #define SDHCI_PCLK_VAL_48M 0x0
+ #define SDHCI_PCLK_VAL_SYNTH3 0x1
+ #define I2S_REF_PCLK_SHIFT 8
+ #define I2S_REF_PCLK_MASK 1
+ #define I2S_REF_PCLK_SYNTH_VAL 0x1
+ #define I2S_REF_PCLK_PLL2_VAL 0x0
+ #define UART1_PCLK_SHIFT 6
+ #define UART1_PCLK_MASK 1
+ #define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0
+ #define SPEAR320_UARTX_PCLK_VAL_APB 0x1
+
+static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", };
+static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", };
+static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
+ "ras_syn0_gclk", };
+static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
+
+static void __init spear320_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL,
+ CLK_IS_ROOT, 125000000);
+ clk_register_clkdev(clk, "smii_125m_pad", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
+ 1, 1);
+ clk_register_clkdev(clk, NULL, "90000000.clcd");
+
+ clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "emi", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "4c000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "i2c1_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a7000000.i2c");
+
+ clk = clk_register_fixed_factor(NULL, "pwm_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "pwm", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "ssp1_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a5000000.spi");
+
+ clk = clk_register_fixed_factor(NULL, "ssp2_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a6000000.spi");
+
+ clk = clk_register_fixed_factor(NULL, "can0_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+ clk = clk_register_fixed_factor(NULL, "can1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+ clk = clk_register_fixed_factor(NULL, "i2s_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "i2s");
+
+ clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR320_CONTROL_REG,
+ I2S_REF_PCLK_SHIFT, I2S_REF_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "i2s_sclk", "i2s_ref_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "i2s_sclk", NULL);
+
+ clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_RS485_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9300000.serial");
+
+ clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
+ ARRAY_SIZE(sdhci_parents), 0, SPEAR320_CONTROL_REG,
+ SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+ clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
+ ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
+ SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "smii_pclk");
+
+ clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
+ clk_register_clkdev(clk, NULL, "smii");
+
+ clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_CONTROL_REG,
+ UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "a3000000.serial");
+
+ clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a4000000.serial");
+
+ clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART3_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9100000.serial");
+
+ clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART4_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9200000.serial");
+
+ clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART5_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "60000000.serial");
+
+ clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART6_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "60100000.serial");
+}
+#endif
+
+void __init spear3xx_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 MHz osc clk */
+ clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll3_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "fc880000.wdt");
+
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL,
+ "osc_24m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+ "osc_24m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+ HCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+ UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
+ UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0", "uart0_mclk", 0, PERIP1_CLK_ENB,
+ UART_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+ clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0,
+ FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_syn_clk", NULL);
+ clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
+ ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+ FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "firda_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
+ PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "firda");
+
+ /* gpt clocks */
+ clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
+ ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
+ GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
+ ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
+ GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
+ PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
+ ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+ GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
+ PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ /* general synths clocks */
+ clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk",
+ 0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "gen0_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen0_syn_gclk", NULL);
+
+ clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk",
+ 0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "gen1_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
+ ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
+ GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
+
+ clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
+ "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen2_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen2_syn_gclk", NULL);
+
+ clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk",
+ "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen3_syn_gclk", NULL);
+
+ /* clock derived from pll3 clk */
+ clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBH_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "usbh_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "usbh.1_clk", "usbh_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "designware_udc");
+
+ /* clock derived from ahb clk */
+ clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+ 1);
+ clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+ ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+ MCTR_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+ PCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "amem_clk", "ahb_clk", 0, AMEM_CLK_CFG,
+ AMEM_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "amem_clk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ C3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "c3_clk");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ DMA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ GMAC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "e0800000.eth");
+
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ I2C_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0180000.i2c");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ JPEG_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "jpeg");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ SMI_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ ADC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "adc");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0100000.spi");
+
+ /* RAS clk enable */
+ clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0, RAS_CLK_ENB,
+ RAS_AHB_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
+ RAS_APB_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
+ RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_32k_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_24m_clk", "osc_24m_clk", 0,
+ RAS_CLK_ENB, RAS_24M_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_24m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll1_clk", "pll1_clk", 0,
+ RAS_CLK_ENB, RAS_PLL1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+ RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
+ RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn0_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn1_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn2_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn3_gclk", NULL);
+
+ if (of_machine_is_compatible("st,spear300"))
+ spear300_clk_init();
+ else if (of_machine_is_compatible("st,spear310"))
+ spear310_clk_init();
+ else if (of_machine_is_compatible("st,spear320"))
+ spear320_clk_init();
+}
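
A worked check of the pll_rtbl entries above, assuming the SPEAr VCO/PLL relation vco = 2 * m * Fin / n with a 2^p post-divider (the rate comments in the table are consistent with this): m = 0x53 (83), n = 0x0C (12), Fin = 24 MHz gives vco = 2 * 83 * 24 / 12 = 332 MHz and pll = 332 / 2^1 = 166 MHz. A minimal sketch of that arithmetic, with illustrative names that are not part of the patch:

static unsigned long example_vco_rate(unsigned long fin, unsigned int m,
				      unsigned int n)
{
	/* assumed relation: vco = 2 * m * fin / n */
	return (2UL * m * fin) / n;
}

static unsigned long example_pll_rate(unsigned long fin, unsigned int m,
				      unsigned int n, unsigned int p)
{
	/* pll output is the vco divided by 2^p */
	return example_vco_rate(fin, m, n) >> p;
}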
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
new file mode 100644
index 000000000000..a98d0866f541
--- /dev/null
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -0,0 +1,340 @@
+/*
+ * SPEAr6xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+#define PLL1_CTR (MISC_BASE + 0x008)
+#define PLL1_FRQ (MISC_BASE + 0x00C)
+#define PLL2_CTR (MISC_BASE + 0x014)
+#define PLL2_FRQ (MISC_BASE + 0x018)
+#define PLL_CLK_CFG (MISC_BASE + 0x020)
+ /* PLL_CLK_CFG register masks */
+ #define MCTR_CLK_SHIFT 28
+ #define MCTR_CLK_MASK 3
+
+#define CORE_CLK_CFG (MISC_BASE + 0x024)
+ /* CORE CLK CFG register masks */
+ #define HCLK_RATIO_SHIFT 10
+ #define HCLK_RATIO_MASK 2
+ #define PCLK_RATIO_SHIFT 8
+ #define PCLK_RATIO_MASK 2
+
+#define PERIP_CLK_CFG (MISC_BASE + 0x028)
+ /* PERIP_CLK_CFG register masks */
+ #define CLCD_CLK_SHIFT 2
+ #define CLCD_CLK_MASK 2
+ #define UART_CLK_SHIFT 4
+ #define UART_CLK_MASK 1
+ #define FIRDA_CLK_SHIFT 5
+ #define FIRDA_CLK_MASK 2
+ #define GPT0_CLK_SHIFT 8
+ #define GPT1_CLK_SHIFT 10
+ #define GPT2_CLK_SHIFT 11
+ #define GPT3_CLK_SHIFT 12
+ #define GPT_CLK_MASK 1
+
+#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
+ /* PERIP1_CLK_ENB register masks */
+ #define UART0_CLK_ENB 3
+ #define UART1_CLK_ENB 4
+ #define SSP0_CLK_ENB 5
+ #define SSP1_CLK_ENB 6
+ #define I2C_CLK_ENB 7
+ #define JPEG_CLK_ENB 8
+ #define FSMC_CLK_ENB 9
+ #define FIRDA_CLK_ENB 10
+ #define GPT2_CLK_ENB 11
+ #define GPT3_CLK_ENB 12
+ #define GPIO2_CLK_ENB 13
+ #define SSP2_CLK_ENB 14
+ #define ADC_CLK_ENB 15
+ #define GPT1_CLK_ENB 11
+ #define RTC_CLK_ENB 17
+ #define GPIO1_CLK_ENB 18
+ #define DMA_CLK_ENB 19
+ #define SMI_CLK_ENB 21
+ #define CLCD_CLK_ENB 22
+ #define GMAC_CLK_ENB 23
+ #define USBD_CLK_ENB 24
+ #define USBH0_CLK_ENB 25
+ #define USBH1_CLK_ENB 26
+
+#define PRSC0_CLK_CFG (MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG (MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG (MISC_BASE + 0x04C)
+
+#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
+#define UART_CLK_SYNT (MISC_BASE + 0x064)
+
+/* vco rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ {.mode = 0, .m = 0x53, .n = 0x0F, .p = 0x1}, /* vco 332 & pll 166 MHz */
+ {.mode = 0, .m = 0x85, .n = 0x0F, .p = 0x1}, /* vco 532 & pll 266 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x0F, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For PLL1 = 332 MHz */
+ {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+static const char *clcd_parents[] = { "pll3_clk", "clcd_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", };
+static const char *uart_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *gpt0_1_parents[] = { "pll3_clk", "gpt0_1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
+static const char *gpt3_parents[] = { "pll3_clk", "gpt3_syn_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+ "pll2_clk", };
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+ /* For pll1 = 332 MHz */
+ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+ {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+void __init spear6xx_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, CLK_IS_ROOT,
+ 30000000);
+ clk_register_clkdev(clk, "osc_30m_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc_spear", "osc_32k_clk", 0,
+ PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "rtc-spear");
+
+ /* clock derived from 30 MHz osc clk */
+ clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll3_clk", NULL);
+
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
+ 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+ &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_30m_clk",
+ 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+ &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_30m_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "wdt");
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+ HCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+ UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
+ UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "uart_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
+ UART0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+ clk = clk_register_gate(NULL, "uart1", "uart_mclk", 0, PERIP1_CLK_ENB,
+ UART1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0080000.serial");
+
+ clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk",
+ 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_syn_clk", NULL);
+ clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
+ ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+ FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "firda_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
+ PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "firda");
+
+ clk = clk_register_aux("clcd_syn_clk", "clcd_syn_gclk", "pll1_clk",
+ 0, CLCD_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
+ ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
+ CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
+ PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "clcd");
+
+ /* gpt clocks */
+ clk = clk_register_gpt("gpt0_1_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
+ ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+ GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
+ ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+ GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
+ PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
+ ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+ GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
+ PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_gpt("gpt3_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
+ ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
+ GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
+ PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* clock derived from pll3 clk */
+ clk = clk_register_gate(NULL, "usbh0_clk", "pll3_clk", 0,
+ PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "usbh.0_clk");
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "pll3_clk", 0,
+ PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "usbh.1_clk");
+
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "designware_udc");
+
+ /* clock derived from ahb clk */
+ clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+ 1);
+ clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+ ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+ MCTR_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+ PCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ DMA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ FSMC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d1800000.flash");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ GMAC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "e0800000.ethernet");
+
+ clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ I2C_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0200000.i2c");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ JPEG_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "jpeg");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ SMI_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ ADC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "adc");
+
+ clk = clk_register_fixed_factor(NULL, "gpio0_clk", "apb_clk", 0, 1, 1);
+ clk_register_clkdev(clk, NULL, "f0100000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d8100000.gpio");
+
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.0");
+
+ clk = clk_register_gate(NULL, "ssp1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.1");
+
+ clk = clk_register_gate(NULL, "ssp2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.2");
+}
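
The aux_rtbl comments in both SPEAr clock files line up with the auxiliary synthesizer relation rate = parent * xscale / yscale when eq is 1, and half that when eq is 0; this relation is assumed here from the table comments, e.g. 332 MHz * 2 / (2 * 8) = 41.5 MHz. A small sketch of that arithmetic, with illustrative names that are not part of the patch:

static unsigned long example_aux_rate(unsigned long parent, unsigned int xscale,
				      unsigned int yscale, unsigned int eq)
{
	/* assumed relation: rate = parent * x / y, halved when eq == 0 */
	unsigned long rate = parent * xscale / yscale;

	return eq ? rate : rate / 2;
}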
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d32653..dd3e661a124d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
+obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644
index 000000000000..372051d1bba8
--- /dev/null
+++ b/drivers/clocksource/em_sti.c
@@ -0,0 +1,406 @@
+/*
+ * Emma Mobile Timer Support - STI
+ *
+ * Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
+
+struct em_sti_priv {
+ void __iomem *base;
+ struct clk *clk;
+ struct platform_device *pdev;
+ unsigned int active[USER_NR];
+ unsigned long rate;
+ raw_spinlock_t lock;
+ struct clock_event_device ced;
+ struct clocksource cs;
+};
+
+#define STI_CONTROL 0x00
+#define STI_COMPA_H 0x10
+#define STI_COMPA_L 0x14
+#define STI_COMPB_H 0x18
+#define STI_COMPB_L 0x1c
+#define STI_COUNT_H 0x20
+#define STI_COUNT_L 0x24
+#define STI_COUNT_RAW_H 0x28
+#define STI_COUNT_RAW_L 0x2c
+#define STI_SET_H 0x30
+#define STI_SET_L 0x34
+#define STI_INTSTATUS 0x40
+#define STI_INTRAWSTATUS 0x44
+#define STI_INTENSET 0x48
+#define STI_INTENCLR 0x4c
+#define STI_INTFFCLR 0x50
+
+static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
+{
+ return ioread32(p->base + offs);
+}
+
+static inline void em_sti_write(struct em_sti_priv *p, int offs,
+ unsigned long value)
+{
+ iowrite32(value, p->base + offs);
+}
+
+static int em_sti_enable(struct em_sti_priv *p)
+{
+ int ret;
+
+ /* enable clock */
+ ret = clk_enable(p->clk);
+ if (ret) {
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
+ return ret;
+ }
+
+ /* configure channel, periodic mode and maximum timeout */
+ p->rate = clk_get_rate(p->clk);
+
+ /* reset the counter */
+ em_sti_write(p, STI_SET_H, 0x40000000);
+ em_sti_write(p, STI_SET_L, 0x00000000);
+
+ /* mask and clear pending interrupts */
+ em_sti_write(p, STI_INTENCLR, 3);
+ em_sti_write(p, STI_INTFFCLR, 3);
+
+ /* enable updates of counter registers */
+ em_sti_write(p, STI_CONTROL, 1);
+
+ return 0;
+}
+
+static void em_sti_disable(struct em_sti_priv *p)
+{
+ /* mask interrupts */
+ em_sti_write(p, STI_INTENCLR, 3);
+
+ /* stop clock */
+ clk_disable(p->clk);
+}
+
+static cycle_t em_sti_count(struct em_sti_priv *p)
+{
+ cycle_t ticks;
+ unsigned long flags;
+
+ /* the STI hardware buffers the 48-bit count, but to
+ * break it out into two 32-bit accesses the registers
+ * must be accessed in a certain order.
+ * Always read STI_COUNT_H before STI_COUNT_L.
+ */
+ raw_spin_lock_irqsave(&p->lock, flags);
+ ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+ ticks |= em_sti_read(p, STI_COUNT_L);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return ticks;
+}
+
+static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+
+ /* mask compare A interrupt */
+ em_sti_write(p, STI_INTENCLR, 1);
+
+ /* update compare A value */
+ em_sti_write(p, STI_COMPA_H, next >> 32);
+ em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
+
+ /* clear compare A interrupt source */
+ em_sti_write(p, STI_INTFFCLR, 1);
+
+ /* unmask compare A interrupt */
+ em_sti_write(p, STI_INTENSET, 1);
+
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return next;
+}
+
+static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
+{
+ struct em_sti_priv *p = dev_id;
+
+ p->ced.event_handler(&p->ced);
+ return IRQ_HANDLED;
+}
+
+static int em_sti_start(struct em_sti_priv *p, unsigned int user)
+{
+ unsigned long flags;
+ int used_before;
+ int ret = 0;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+ used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+ if (!used_before)
+ ret = em_sti_enable(p);
+
+ if (!ret)
+ p->active[user] = 1;
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return ret;
+}
+
+static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
+{
+ unsigned long flags;
+ int used_before, used_after;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+ used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+ p->active[user] = 0;
+ used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+
+ if (used_before && !used_after)
+ em_sti_disable(p);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
+{
+ return container_of(cs, struct em_sti_priv, cs);
+}
+
+static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+{
+ return em_sti_count(cs_to_em_sti(cs));
+}
+
+static int em_sti_clocksource_enable(struct clocksource *cs)
+{
+ int ret;
+ struct em_sti_priv *p = cs_to_em_sti(cs);
+
+ ret = em_sti_start(p, USER_CLOCKSOURCE);
+ if (!ret)
+ __clocksource_updatefreq_hz(cs, p->rate);
+ return ret;
+}
+
+static void em_sti_clocksource_disable(struct clocksource *cs)
+{
+ em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
+}
+
+static void em_sti_clocksource_resume(struct clocksource *cs)
+{
+ em_sti_clocksource_enable(cs);
+}
+
+static int em_sti_register_clocksource(struct em_sti_priv *p)
+{
+ struct clocksource *cs = &p->cs;
+
+ memset(cs, 0, sizeof(*cs));
+ cs->name = dev_name(&p->pdev->dev);
+ cs->rating = 200;
+ cs->read = em_sti_clocksource_read;
+ cs->enable = em_sti_clocksource_enable;
+ cs->disable = em_sti_clocksource_disable;
+ cs->suspend = em_sti_clocksource_disable;
+ cs->resume = em_sti_clocksource_resume;
+ cs->mask = CLOCKSOURCE_MASK(48);
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ dev_info(&p->pdev->dev, "used as clock source\n");
+
+ /* Register with dummy 1 Hz value, gets updated in ->enable() */
+ clocksource_register_hz(cs, 1);
+ return 0;
+}
+
+static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
+{
+ return container_of(ced, struct em_sti_priv, ced);
+}
+
+static void em_sti_clock_event_mode(enum clock_event_mode mode,
+ struct clock_event_device *ced)
+{
+ struct em_sti_priv *p = ced_to_em_sti(ced);
+
+ /* deal with old setting first */
+ switch (ced->mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ em_sti_stop(p, USER_CLOCKEVENT);
+ break;
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+ em_sti_start(p, USER_CLOCKEVENT);
+ clockevents_config(&p->ced, p->rate);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ em_sti_stop(p, USER_CLOCKEVENT);
+ break;
+ default:
+ break;
+ }
+}
+
+static int em_sti_clock_event_next(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct em_sti_priv *p = ced_to_em_sti(ced);
+ cycle_t next;
+ int safe;
+
+ next = em_sti_set_next(p, em_sti_count(p) + delta);
+ safe = em_sti_count(p) < (next - 1);
+
+ return !safe;
+}
+
+static void em_sti_register_clockevent(struct em_sti_priv *p)
+{
+ struct clock_event_device *ced = &p->ced;
+
+ memset(ced, 0, sizeof(*ced));
+ ced->name = dev_name(&p->pdev->dev);
+ ced->features = CLOCK_EVT_FEAT_ONESHOT;
+ ced->rating = 200;
+ ced->cpumask = cpumask_of(0);
+ ced->set_next_event = em_sti_clock_event_next;
+ ced->set_mode = em_sti_clock_event_mode;
+
+ dev_info(&p->pdev->dev, "used for clock events\n");
+
+ /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+ clockevents_config_and_register(ced, 1, 2, 0xffffffff);
+}
+
+static int __devinit em_sti_probe(struct platform_device *pdev)
+{
+ struct em_sti_priv *p;
+ struct resource *res;
+ int irq, ret;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ p->pdev = pdev;
+ platform_set_drvdata(pdev, p);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ /* map memory, let base point to the STI instance */
+ p->base = ioremap_nocache(res->start, resource_size(res));
+ if (p->base == NULL) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto err0;
+ }
+
+ /* get hold of clock */
+ p->clk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(p->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ if (request_irq(irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p)) {
+ dev_err(&pdev->dev, "failed to request low IRQ\n");
+ ret = -ENOENT;
+ goto err2;
+ }
+
+ raw_spin_lock_init(&p->lock);
+ em_sti_register_clockevent(p);
+ em_sti_register_clocksource(p);
+ return 0;
+
+err2:
+ clk_put(p->clk);
+err1:
+ iounmap(p->base);
+err0:
+ kfree(p);
+ return ret;
+}
+
+static int __devexit em_sti_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+ { .compatible = "renesas,em-sti", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
+
+static struct platform_driver em_sti_device_driver = {
+ .probe = em_sti_probe,
+ .remove = __devexit_p(em_sti_remove),
+ .driver = {
+ .name = "em_sti",
+ .of_match_table = em_sti_dt_ids,
+ }
+};
+
+module_platform_driver(em_sti_device_driver);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 32fe9ef5cc5c..98b06baafcc6 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -48,13 +48,13 @@ struct sh_cmt_priv {
unsigned long next_match_value;
unsigned long max_match_value;
unsigned long rate;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
unsigned long total_cycles;
};
-static DEFINE_SPINLOCK(sh_cmt_lock);
+static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_cmt_lock, flags);
+ raw_spin_lock_irqsave(&sh_cmt_lock, flags);
value = sh_cmt_read(p, CMSTR);
if (start)
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_cmt_write(p, CMSTR, value);
- spin_unlock_irqrestore(&sh_cmt_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
__sh_cmt_set_next(p, delta);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
__sh_cmt_set_next(p, p->max_match_value);
out:
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
return ret;
}
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
unsigned long flags;
unsigned long f;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
p->flags &= ~flag;
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
__sh_cmt_set_next(p, p->max_match_value);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
unsigned long value;
int has_wrapped;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
value = p->total_cycles;
raw = sh_cmt_get_counter(p, &has_wrapped);
if (unlikely(has_wrapped))
raw += p->match_value + 1;
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
return value + raw;
}
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
p->max_match_value = (1 << p->width) - 1;
p->match_value = p->max_match_value;
- spin_lock_init(&p->lock);
+ raw_spin_lock_init(&p->lock);
if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index a2172f690418..d9b76ca64a61 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -43,7 +43,7 @@ struct sh_mtu2_priv {
struct clock_event_device ced;
};
-static DEFINE_SPINLOCK(sh_mtu2_lock);
+static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_mtu2_lock, flags);
+ raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
value = sh_mtu2_read(p, TSTR);
if (start)
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_mtu2_write(p, TSTR, value);
- spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
}
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 97f54b634be4..c1b51d49d106 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -45,7 +45,7 @@ struct sh_tmu_priv {
struct clocksource cs;
};
-static DEFINE_SPINLOCK(sh_tmu_lock);
+static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_tmu_lock, flags);
+ raw_spin_lock_irqsave(&sh_tmu_lock, flags);
value = sh_tmu_read(p, TSTR);
if (start)
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_tmu_write(p, TSTR, value);
- spin_unlock_irqrestore(&sh_tmu_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}
static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
sh_tmu_enable(p);
- /* TODO: calculate good shift from rate and counter bit width */
-
- ced->shift = 32;
- ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
- ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
- ced->min_delta_ns = 5000;
+ clockevents_config(ced, p->rate);
if (periodic) {
p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
ced->set_mode = sh_tmu_clock_event_mode;
dev_info(&p->pdev->dev, "used for clock events\n");
- clockevents_register_device(ced);
+
+ clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 371f13cc38eb..1092a770482e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -297,4 +297,32 @@ config CRYPTO_DEV_TEGRA_AES
To compile this driver as a module, choose M here: the module
will be called tegra-aes.
+config CRYPTO_DEV_NX
+ tristate "Support for Power7+ in-Nest cryptographic accleration"
+ depends on PPC64 && IBMVIO
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_CCM
+ select CRYPTO_GCM
+ select CRYPTO_AUTHENC
+ select CRYPTO_XCBC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ help
+ Support for Power7+ in-Nest cryptographic acceleration. This
+ module supports acceleration for AES and SHA2 algorithms. If you
+ choose 'M' here, this module will be called nx-crypto.
+
+config CRYPTO_DEV_UX500
+ tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
+ depends on ARCH_U8500
+ select CRYPTO_ALGAPI
+ help
+ Driver for ST-Ericsson UX500 crypto engine.
+
+if CRYPTO_DEV_UX500
+ source "drivers/crypto/ux500/Kconfig"
+endif # if CRYPTO_DEV_UX500
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f3e64eadd7af..01390325d72d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
+obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
\ No newline at end of file
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 13f8e1a14988..802e85102c32 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1244,9 +1244,9 @@ err_start_dev:
iounmap(core_dev->dev->ce_base);
err_iomap:
free_irq(core_dev->irq, dev);
+err_request_irq:
irq_dispose_mapping(core_dev->irq);
tasklet_kill(&core_dev->tasklet);
-err_request_irq:
crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
crypto4xx_destroy_gdr(core_dev->dev);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 534a36469d57..4eec389184d3 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2267,8 +2267,11 @@ static void __exit caam_algapi_exit(void)
int i, err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node)
- return;
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return;
+ }
pdev = of_find_device_by_node(dev_node);
if (!pdev)
@@ -2350,8 +2353,11 @@ static int __init caam_algapi_init(void)
int i = 0, err = 0;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
- if (!dev_node)
- return -ENODEV;
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
pdev = of_find_device_by_node(dev_node);
if (!pdev)
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index c5f61c55d923..77557ebcd337 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -98,6 +98,12 @@ static int caam_probe(struct platform_device *pdev)
rspec = 0;
for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
rspec++;
+ if (!rspec) {
+ /* for backward compatibility with device trees */
+ for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
+ rspec++;
+ }
+
ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
if (ctrlpriv->jrdev == NULL) {
iounmap(&topregs->ctrl);
@@ -111,6 +117,13 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->total_jobrs++;
ring++;
}
+ if (!ring) {
+ for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
+ caam_jr_probe(pdev, np, ring);
+ ctrlpriv->total_jobrs++;
+ ring++;
+ }
+ }
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
@@ -226,6 +239,9 @@ static struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
},
+ {
+ .compatible = "fsl,sec4.0",
+ },
{},
};
MODULE_DEVICE_TABLE(of, caam_match);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e6ecc5f23943..1cc6b3f3e262 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -16,6 +16,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
@@ -79,6 +80,7 @@ struct crypto_priv {
void __iomem *reg;
void __iomem *sram;
int irq;
+ struct clk *clk;
struct task_struct *queue_th;
/* the lock protects queue and eng_st */
@@ -1053,6 +1055,12 @@ static int mv_probe(struct platform_device *pdev)
if (ret)
goto err_thread;
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ cp->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(cp->clk))
+ clk_prepare_enable(cp->clk);
+
writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
@@ -1118,6 +1126,12 @@ static int mv_remove(struct platform_device *pdev)
memset(cp->sram, 0, cp->sram_size);
iounmap(cp->sram);
iounmap(cp->reg);
+
+ if (!IS_ERR(cp->clk)) {
+ clk_disable_unprepare(cp->clk);
+ clk_put(cp->clk);
+ }
+
kfree(cp);
cpg = NULL;
return 0;
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
new file mode 100644
index 000000000000..411ce59c80d1
--- /dev/null
+++ b/drivers/crypto/nx/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_CRYPTO_DEV_NX) += nx-crypto.o
+nx-crypto-objs := nx.o \
+ nx_debugfs.o \
+ nx-aes-cbc.o \
+ nx-aes-ecb.o \
+ nx-aes-gcm.o \
+ nx-aes-ccm.o \
+ nx-aes-ctr.o \
+ nx-aes-xcbc.o \
+ nx-sha256.o \
+ nx-sha512.o
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
new file mode 100644
index 000000000000..69ed796ee327
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -0,0 +1,141 @@
+/**
+ * AES CBC routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int cbc_aes_nx_set_key(struct crypto_tfm *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ case AES_KEYSIZE_192:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+ break;
+ case AES_KEYSIZE_256:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_CBC;
+ memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len);
+
+ return 0;
+}
+
+static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes,
+ int enc)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ int rc;
+
+ if (nbytes > nx_ctx->ap->databytelen)
+ return -EINVAL;
+
+ if (enc)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
+ csbcpb->cpb.aes_cbc.iv);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+out:
+ return rc;
+}
+
+static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1);
+}
+
+static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0);
+}
+
+struct crypto_alg nx_cbc_aes_alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(nx_cbc_aes_alg.cra_list),
+ .cra_init = nx_crypto_ctx_aes_cbc_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_nx_set_key,
+ .encrypt = cbc_aes_nx_encrypt,
+ .decrypt = cbc_aes_nx_decrypt,
+ }
+};
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
new file mode 100644
index 000000000000..7aeac678b9c0
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -0,0 +1,468 @@
+/**
+ * AES CCM routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
+ memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);
+
+ csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
+ memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);
+
+ return 0;
+
+}
+
+static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+
+ if (key_len < 3)
+ return -EINVAL;
+
+ key_len -= 3;
+
+ memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);
+
+ return ccm_aes_nx_set_key(tfm, in_key, key_len);
+}
+
+static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ crypto_aead_crt(tfm)->authsize = authsize;
+
+ return 0;
+}
+
+static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ crypto_aead_crt(tfm)->authsize = authsize;
+
+ return 0;
+}
+
+/* taken from crypto/ccm.c */
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
+/* taken from crypto/ccm.c */
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (1 > iv[0] || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* based on code from crypto/ccm.c */
+static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
+ unsigned int cryptlen, u8 *b0)
+{
+ unsigned int l, lp, m = authsize;
+ int rc;
+
+ memcpy(b0, iv, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (assoclen)
+ *b0 |= 64;
+
+ rc = set_msg_len(b0 + 16 - l, cryptlen, l);
+
+ return rc;
+}
+
+static int generate_pat(u8 *iv,
+ struct aead_request *req,
+ struct nx_crypto_ctx *nx_ctx,
+ unsigned int authsize,
+ unsigned int nbytes,
+ u8 *out)
+{
+ struct nx_sg *nx_insg = nx_ctx->in_sg;
+ struct nx_sg *nx_outsg = nx_ctx->out_sg;
+ unsigned int iauth_len = 0;
+ struct vio_pfo_op *op = NULL;
+ u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
+ int rc;
+
+ /* zero the ctr value */
+ memset(iv + 15 - iv[0], 0, iv[0] + 1);
+
+ if (!req->assoclen) {
+ b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
+ } else if (req->assoclen <= 14) {
+ /* if associated data is 14 bytes or less, we do 1 CCM
+ * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
+ * which is fed in through the source buffers here */
+ b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
+ b1 = nx_ctx->priv.ccm.iauth_tag;
+ iauth_len = req->assoclen;
+
+ nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
+ nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
+ nx_ctx->ap->sglen);
+
+ /* inlen should be negative, indicating to phyp that it's a
+ * pointer to an sg list */
+ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
+ sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
+ sizeof(struct nx_sg);
+
+ NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
+
+ op = &nx_ctx->op;
+ result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
+ } else if (req->assoclen <= 65280) {
+ /* if associated data is less than (2^16 - 2^8), we construct
+ * B1 differently and feed in the associated data to a CCA
+ * operation */
+ b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+ b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+ iauth_len = 14;
+
+ /* remaining assoc data must have scatterlist built for it */
+ nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen,
+ req->assoc, iauth_len,
+ req->assoclen - iauth_len);
+ nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
+ sizeof(struct nx_sg);
+
+ op = &nx_ctx->op_aead;
+ result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
+ } else {
+ /* associated data of more than 65280 bytes is not supported,
+ * so report the error and bail out before b0 is used */
+ pr_err("associated data len is %u bytes (returning -EINVAL)\n",
+ req->assoclen);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+ if (rc)
+ goto done;
+
+ if (b1) {
+ memset(b1, 0, 16);
+ *(u16 *)b1 = (u16)req->assoclen;
+
+ scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+ iauth_len, SCATTERWALK_FROM_SG);
+
+ rc = nx_hcall_sync(nx_ctx, op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto done;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+
+ memcpy(out, result, AES_BLOCK_SIZE);
+ }
+done:
+ return rc;
+}
+
+static int ccm_nx_decrypt(struct aead_request *req,
+ struct blkcipher_desc *desc)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned int nbytes = req->cryptlen;
+ unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+ struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
+ int rc = -1;
+
+ if (nbytes > nx_ctx->ap->databytelen)
+ return -EINVAL;
+
+ nbytes -= authsize;
+
+ /* copy out the auth tag to compare with later */
+ scatterwalk_map_and_copy(priv->oauth_tag,
+ req->src, nbytes, authsize,
+ SCATTERWALK_FROM_SG);
+
+ rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+ csbcpb->cpb.aes_ccm.in_pat_or_b0);
+ if (rc)
+ goto out;
+
+ rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
+ csbcpb->cpb.aes_ccm.iv_or_ctr);
+ if (rc)
+ goto out;
+
+ NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+ NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
+ authsize) ? -EBADMSG : 0;
+out:
+ return rc;
+}
+
+static int ccm_nx_encrypt(struct aead_request *req,
+ struct blkcipher_desc *desc)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned int nbytes = req->cryptlen;
+ unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+ int rc = -1;
+
+ if (nbytes > nx_ctx->ap->databytelen)
+ return -EINVAL;
+
+ rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+ csbcpb->cpb.aes_ccm.in_pat_or_b0);
+ if (rc)
+ goto out;
+
+ rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
+ csbcpb->cpb.aes_ccm.iv_or_ctr);
+ if (rc)
+ goto out;
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ /* copy out the auth tag */
+ scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
+ req->dst, nbytes, authsize,
+ SCATTERWALK_TO_SG);
+out:
+ return rc;
+}
+
+static int ccm4309_aes_nx_encrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct blkcipher_desc desc;
+ u8 *iv = nx_ctx->priv.ccm.iv;
+
+ iv[0] = 3;
+ memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+ memcpy(iv + 4, req->iv, 8);
+
+ desc.info = iv;
+ desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
+
+ return ccm_nx_encrypt(req, &desc);
+}
+
+static int ccm_aes_nx_encrypt(struct aead_request *req)
+{
+ struct blkcipher_desc desc;
+ int rc;
+
+ desc.info = req->iv;
+ desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
+
+ rc = crypto_ccm_check_iv(desc.info);
+ if (rc)
+ return rc;
+
+ return ccm_nx_encrypt(req, &desc);
+}
+
+static int ccm4309_aes_nx_decrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct blkcipher_desc desc;
+ u8 *iv = nx_ctx->priv.ccm.iv;
+
+ iv[0] = 3;
+ memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+ memcpy(iv + 4, req->iv, 8);
+
+ desc.info = iv;
+ desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
+
+ return ccm_nx_decrypt(req, &desc);
+}
+
+static int ccm_aes_nx_decrypt(struct aead_request *req)
+{
+ struct blkcipher_desc desc;
+ int rc;
+
+ desc.info = req->iv;
+ desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
+
+ rc = crypto_ccm_check_iv(desc.info);
+ if (rc)
+ return rc;
+
+ return ccm_nx_decrypt(req, &desc);
+}
+
+/* tell the block cipher walk routines that this is a stream cipher by
+ * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
+ * during encrypt/decrypt doesn't solve this problem, because it calls
+ * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
+ * but instead uses this tfm->blocksize. */
+struct crypto_alg nx_ccm_aes_alg = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(nx_ccm_aes_alg.cra_list),
+ .cra_init = nx_crypto_ctx_aes_ccm_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ .cra_aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm_aes_nx_set_key,
+ .setauthsize = ccm_aes_nx_setauthsize,
+ .encrypt = ccm_aes_nx_encrypt,
+ .decrypt = ccm_aes_nx_decrypt,
+ }
+};
+
+struct crypto_alg nx_ccm4309_aes_alg = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_type = &crypto_nivaead_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(nx_ccm4309_aes_alg.cra_list),
+ .cra_init = nx_crypto_ctx_aes_ccm_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ .cra_aead = {
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm4309_aes_nx_set_key,
+ .setauthsize = ccm4309_aes_nx_setauthsize,
+ .encrypt = ccm4309_aes_nx_encrypt,
+ .decrypt = ccm4309_aes_nx_decrypt,
+ .geniv = "seqiv",
+ }
+};
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
new file mode 100644
index 000000000000..52d4eb05e8f7
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -0,0 +1,178 @@
+/**
+ * AES CTR routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ctr_aes_nx_set_key(struct crypto_tfm *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ case AES_KEYSIZE_192:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+ break;
+ case AES_KEYSIZE_256:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR;
+ memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len);
+
+ return 0;
+}
+
+static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+
+ if (key_len < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ memcpy(nx_ctx->priv.ctr.iv,
+ in_key + key_len - CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_NONCE_SIZE);
+
+ key_len -= CTR_RFC3686_NONCE_SIZE;
+
+ return ctr_aes_nx_set_key(tfm, in_key, key_len);
+}
+
+static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ int rc;
+
+ if (nbytes > nx_ctx->ap->databytelen)
+ return -EINVAL;
+
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
+ csbcpb->cpb.aes_ctr.iv);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+out:
+ return rc;
+}
+
+static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ u8 *iv = nx_ctx->priv.ctr.iv;
+
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE,
+ desc->info, CTR_RFC3686_IV_SIZE);
+ iv[15] = 1;
+
+ desc->info = nx_ctx->priv.ctr.iv;
+
+ return ctr_aes_nx_crypt(desc, dst, src, nbytes);
+}
+
+struct crypto_alg nx_ctr_aes_alg = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(nx_ctr_aes_alg.cra_list),
+ .cra_init = nx_crypto_ctx_aes_ctr_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_aes_nx_set_key,
+ .encrypt = ctr_aes_nx_crypt,
+ .decrypt = ctr_aes_nx_crypt,
+ }
+};
+
+struct crypto_alg nx_ctr3686_aes_alg = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(nx_ctr3686_aes_alg.cra_list),
+ .cra_init = nx_crypto_ctx_aes_ctr_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .geniv = "seqiv",
+ .setkey = ctr3686_aes_nx_set_key,
+ .encrypt = ctr3686_aes_nx_crypt,
+ .decrypt = ctr3686_aes_nx_crypt,
+ }
+};
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
new file mode 100644
index 000000000000..7b77bc2d1df4
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -0,0 +1,139 @@
+/**
+ * AES ECB routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int ecb_aes_nx_set_key(struct crypto_tfm *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ case AES_KEYSIZE_192:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+ break;
+ case AES_KEYSIZE_256:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+ memcpy(csbcpb->cpb.aes_ecb.key, in_key, key_len);
+
+ return 0;
+}
+
+static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes,
+ int enc)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ int rc;
+
+ if (nbytes > nx_ctx->ap->databytelen)
+ return -EINVAL;
+
+ if (enc)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+out:
+ return rc;
+}
+
+static int ecb_aes_nx_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return ecb_aes_nx_crypt(desc, dst, src, nbytes, 1);
+}
+
+static int ecb_aes_nx_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return ecb_aes_nx_crypt(desc, dst, src, nbytes, 0);
+}
+
+struct crypto_alg nx_ecb_aes_alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(nx_ecb_aes_alg.cra_list),
+ .cra_init = nx_crypto_ctx_aes_ecb_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ .cra_blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_nx_set_key,
+ .encrypt = ecb_aes_nx_encrypt,
+ .decrypt = ecb_aes_nx_decrypt,
+ }
+};
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
new file mode 100644
index 000000000000..9ab1c7341dac
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -0,0 +1,353 @@
+/**
+ * AES GCM routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ case AES_KEYSIZE_192:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
+ NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
+ break;
+ case AES_KEYSIZE_256:
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
+ NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+ memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);
+
+ csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
+ memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);
+
+ return 0;
+}
+
+static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+ char *nonce = nx_ctx->priv.gcm.nonce;
+ int rc;
+
+ if (key_len < 4)
+ return -EINVAL;
+
+ key_len -= 4;
+
+ rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
+ if (rc)
+ goto out;
+
+ memcpy(nonce, in_key + key_len, 4);
+out:
+ return rc;
+}
+
+static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ if (authsize > crypto_aead_alg(tfm)->maxauthsize)
+ return -EINVAL;
+
+ crypto_aead_crt(tfm)->authsize = authsize;
+
+ return 0;
+}
+
+static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ crypto_aead_crt(tfm)->authsize = authsize;
+
+ return 0;
+}
+
+static int nx_gca(struct nx_crypto_ctx *nx_ctx,
+ struct aead_request *req,
+ u8 *out)
+{
+ struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
+ int rc = -EINVAL;
+ struct scatter_walk walk;
+ struct nx_sg *nx_sg = nx_ctx->in_sg;
+
+ if (req->assoclen > nx_ctx->ap->databytelen)
+ goto out;
+
+ if (req->assoclen <= AES_BLOCK_SIZE) {
+ scatterwalk_start(&walk, req->assoc);
+ scatterwalk_copychunks(out, &walk, req->assoclen,
+ SCATTERWALK_FROM_SG);
+ scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
+
+ rc = 0;
+ goto out;
+ }
+
+ nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
+ req->assoclen);
+ nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg);
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+
+ memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
+out:
+ return rc;
+}
+
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct blkcipher_desc desc;
+ unsigned int nbytes = req->cryptlen;
+ int rc = -EINVAL;
+
+ if (nbytes > nx_ctx->ap->databytelen)
+ goto out;
+
+ desc.info = nx_ctx->priv.gcm.iv;
+ /* initialize the counter */
+ *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+
+ /* For scenarios where the input message is zero length, AES CTR mode
+ * may be used. Set the source data to be a single block (16B) of all
+ * zeros, and set the input IV value to be the same as the GMAC IV
+ * value. - nx_wb 4.8.1.3 */
+ if (nbytes == 0) {
+ char src[AES_BLOCK_SIZE] = {};
+ struct scatterlist sg;
+
+ desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
+ if (IS_ERR(desc.tfm)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
+ NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
+ NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);
+
+ sg_init_one(&sg, src, AES_BLOCK_SIZE);
+ if (enc)
+ crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
+ AES_BLOCK_SIZE);
+ else
+ crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
+ AES_BLOCK_SIZE);
+ crypto_free_blkcipher(desc.tfm);
+
+ rc = 0;
+ goto out;
+ }
+
+ desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
+
+ csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
+
+ if (req->assoclen) {
+ rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
+ if (rc)
+ goto out;
+ }
+
+ if (enc)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ else
+ nbytes -= AES_BLOCK_SIZE;
+
+ csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
+
+ rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
+ csbcpb->cpb.aes_gcm.iv_or_cnt);
+ if (rc)
+ goto out;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ if (enc) {
+ /* copy out the auth tag */
+ scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
+ req->dst, nbytes,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)),
+ SCATTERWALK_TO_SG);
+ } else if (req->assoclen) {
+ u8 *itag = nx_ctx->priv.gcm.iauth_tag;
+ u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
+
+ scatterwalk_map_and_copy(itag, req->dst, nbytes,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)),
+ SCATTERWALK_FROM_SG);
+ rc = memcmp(itag, otag,
+ crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
+ -EBADMSG : 0;
+ }
+out:
+ return rc;
+}
+
+static int gcm_aes_nx_encrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ char *iv = nx_ctx->priv.gcm.iv;
+
+ memcpy(iv, req->iv, 12);
+
+ return gcm_aes_nx_crypt(req, 1);
+}
+
+static int gcm_aes_nx_decrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ char *iv = nx_ctx->priv.gcm.iv;
+
+ memcpy(iv, req->iv, 12);
+
+ return gcm_aes_nx_crypt(req, 0);
+}
+
+static int gcm4106_aes_nx_encrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ char *iv = nx_ctx->priv.gcm.iv;
+ char *nonce = nx_ctx->priv.gcm.nonce;
+
+ memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+ memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
+
+ return gcm_aes_nx_crypt(req, 1);
+}
+
+static int gcm4106_aes_nx_decrypt(struct aead_request *req)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ char *iv = nx_ctx->priv.gcm.iv;
+ char *nonce = nx_ctx->priv.gcm.nonce;
+
+ memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+ memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
+
+ return gcm_aes_nx_crypt(req, 0);
+}
+
+/* tell the block cipher walk routines that this is a stream cipher by
+ * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
+ * during encrypt/decrypt doesn't solve this problem, because it calls
+ * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
+ * but instead uses this tfm->blocksize. */
+struct crypto_alg nx_gcm_aes_alg = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(nx_gcm_aes_alg.cra_list),
+ .cra_init = nx_crypto_ctx_aes_gcm_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ .cra_aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = gcm_aes_nx_set_key,
+ .setauthsize = gcm_aes_nx_setauthsize,
+ .encrypt = gcm_aes_nx_encrypt,
+ .decrypt = gcm_aes_nx_decrypt,
+ }
+};
+
+struct crypto_alg nx_gcm4106_aes_alg = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_type = &crypto_nivaead_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(nx_gcm4106_aes_alg.cra_list),
+ .cra_init = nx_crypto_ctx_aes_gcm_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ .cra_aead = {
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .geniv = "seqiv",
+ .setkey = gcm4106_aes_nx_set_key,
+ .setauthsize = gcm4106_aes_nx_setauthsize,
+ .encrypt = gcm4106_aes_nx_encrypt,
+ .decrypt = gcm4106_aes_nx_decrypt,
+ }
+};
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
new file mode 100644
index 000000000000..93923e4628c0
--- /dev/null
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -0,0 +1,236 @@
+/**
+ * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+struct xcbc_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+static int nx_xcbc_set_key(struct crypto_shash *desc,
+ const u8 *in_key,
+ unsigned int key_len)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
+
+ return 0;
+}
+
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *out_sg;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ memset(sctx, 0, sizeof *sctx);
+
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
+
+ memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
+ memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
+
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ AES_BLOCK_SIZE, nx_ctx->ap->sglen);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ return 0;
+}
+
+static int nx_xcbc_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *in_sg;
+ u32 to_process, leftover;
+ int rc = 0;
+
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* we've hit the nx chip previously and we're updating again,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
+ }
+
+ /* 2 cases for total data len:
+ * 1: <= AES_BLOCK_SIZE: copy into state, return 0
+ * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
+ */
+ if (len + sctx->count <= AES_BLOCK_SIZE) {
+ memcpy(sctx->buffer + sctx->count, data, len);
+ sctx->count += len;
+ goto out;
+ }
+
+ /* to_process: the AES_BLOCK_SIZE data chunk to process in this
+ * update */
+ to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
+ leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);
+
+ /* the hardware will not accept a 0 byte operation for this algorithm
+ * and the operation MUST be finalized to be correct. So if we happen
+ * to get an update that falls on a block sized boundary, we must
+ * save off the last block to finalize with later. */
+ if (!leftover) {
+ to_process -= AES_BLOCK_SIZE;
+ leftover = AES_BLOCK_SIZE;
+ }
+
+ if (sctx->count) {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
+ sctx->count, nx_ctx->ap->sglen);
+ in_sg = nx_build_sg_list(in_sg, (u8 *)data,
+ to_process - sctx->count,
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ sizeof(struct nx_sg);
+ } else {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ sizeof(struct nx_sg);
+ }
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+ /* copy the leftover back into the state struct */
+ memcpy(sctx->buffer, data + len - leftover, leftover);
+ sctx->count = leftover;
+
+ /* everything after the first update is continuation */
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+out:
+ return rc;
+}
+
+static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+{
+ struct xcbc_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ int rc = 0;
+
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* we've hit the nx chip previously, now we're finalizing,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
+ } else if (sctx->count == 0) {
+ /* we've never seen an update, so this is a 0 byte op. The
+ * hardware cannot handle a 0 byte op, so just copy out the
+ * known 0 byte result. This is cheaper than allocating a
+ * software context to do a 0 byte op */
+ u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
+ 0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 };
+ memcpy(out, data, sizeof(data));
+ goto out;
+ }
+
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
+ sctx->count, nx_ctx->ap->sglen);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+ nx_ctx->ap->sglen);
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (!nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+ memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
+out:
+ return rc;
+}
+
+struct shash_alg nx_shash_aes_xcbc_alg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .init = nx_xcbc_init,
+ .update = nx_xcbc_update,
+ .final = nx_xcbc_final,
+ .setkey = nx_xcbc_set_key,
+ .descsize = sizeof(struct xcbc_state),
+ .statesize = sizeof(struct xcbc_state),
+ .base = {
+ .cra_name = "xcbc(aes)",
+ .cra_driver_name = "xcbc-aes-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_init = nx_crypto_ctx_aes_xcbc_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+};
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
new file mode 100644
index 000000000000..9767315f8c0b
--- /dev/null
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -0,0 +1,246 @@
+/**
+ * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/module.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int nx_sha256_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_sg *out_sg;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+ memset(sctx, 0, sizeof *sctx);
+
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
+
+ NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ return 0;
+}
+
+static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg;
+ u64 to_process, leftover;
+ int rc = 0;
+
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* we've hit the nx chip previously and we're updating again,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest,
+ csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+ }
+
+ /* 2 cases for total data len:
+ * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
+ * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
+ */
+ if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+ memcpy(sctx->buf + sctx->count, data, len);
+ sctx->count += len;
+ goto out;
+ }
+
+ /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this
+ * update */
+ to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1);
+ leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1);
+
+ if (sctx->count) {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
+ sctx->count, nx_ctx->ap->sglen);
+ in_sg = nx_build_sg_list(in_sg, (u8 *)data,
+ to_process - sctx->count,
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ sizeof(struct nx_sg);
+ } else {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
+ to_process, nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ sizeof(struct nx_sg);
+ }
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha256_ops));
+
+ /* copy the leftover back into the state struct */
+ memcpy(sctx->buf, data + len - leftover, leftover);
+ sctx->count = leftover;
+
+ csbcpb->cpb.sha256.message_bit_length += (u64)
+ (csbcpb->cpb.sha256.spbc * 8);
+
+ /* everything after the first update is continuation */
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+out:
+ return rc;
+}
+
+static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ int rc;
+
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* we've hit the nx chip previously, now we're finalizing,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest,
+ csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+ }
+
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
+
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
+ sctx->count, nx_ctx->ap->sglen);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (!nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha256_ops));
+
+ atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+ &(nx_ctx->stats->sha256_bytes));
+ memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+out:
+ return rc;
+}
+
+static int nx_sha256_export(struct shash_desc *desc, void *out)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct sha256_state *octx = out;
+
+ octx->count = sctx->count +
+ (csbcpb->cpb.sha256.message_bit_length / 8);
+ memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+
+ /* if no data has been processed yet, we need to export SHA256's
+ * initial data, in case this context gets imported into a software
+ * context */
+ if (csbcpb->cpb.sha256.message_bit_length)
+ memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
+ SHA256_DIGEST_SIZE);
+ else {
+ octx->state[0] = SHA256_H0;
+ octx->state[1] = SHA256_H1;
+ octx->state[2] = SHA256_H2;
+ octx->state[3] = SHA256_H3;
+ octx->state[4] = SHA256_H4;
+ octx->state[5] = SHA256_H5;
+ octx->state[6] = SHA256_H6;
+ octx->state[7] = SHA256_H7;
+ }
+
+ return 0;
+}
+
+static int nx_sha256_import(struct shash_desc *desc, const void *in)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ const struct sha256_state *ictx = in;
+
+ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+
+ sctx->count = ictx->count & 0x3f;
+ csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;
+
+ if (csbcpb->cpb.sha256.message_bit_length) {
+ memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
+ SHA256_DIGEST_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ }
+
+ return 0;
+}
+
+struct shash_alg nx_shash_sha256_alg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .init = nx_sha256_init,
+ .update = nx_sha256_update,
+ .final = nx_sha256_final,
+ .export = nx_sha256_export,
+ .import = nx_sha256_import,
+ .descsize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_init = nx_crypto_ctx_sha_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+};
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
new file mode 100644
index 000000000000..3177b8c3d5f1
--- /dev/null
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -0,0 +1,265 @@
+/**
+ * SHA-512 routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/module.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+static int nx_sha512_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_sg *out_sg;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+ memset(sctx, 0, sizeof *sctx);
+
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
+
+ NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+ SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ return 0;
+}
+
+static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg;
+ u64 to_process, leftover, spbc_bits;
+ int rc = 0;
+
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* we've hit the nx chip previously and we're updating again,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest,
+ csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+ }
+
+ /* 2 cases for total data len:
+ * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
+ * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
+ */
+ if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+ memcpy(sctx->buf + sctx->count[0], data, len);
+ sctx->count[0] += len;
+ goto out;
+ }
+
+ /* to_process: the SHA512_BLOCK_SIZE data chunk to process in this
+ * update */
+ to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1);
+ leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1);
+
+ if (sctx->count[0]) {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
+ sctx->count[0], nx_ctx->ap->sglen);
+ in_sg = nx_build_sg_list(in_sg, (u8 *)data,
+ to_process - sctx->count[0],
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ sizeof(struct nx_sg);
+ } else {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
+ to_process, nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ sizeof(struct nx_sg);
+ }
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha512_ops));
+
+ /* copy the leftover back into the state struct */
+ memcpy(sctx->buf, data + len - leftover, leftover);
+ sctx->count[0] = leftover;
+
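+	/* 128-bit add: bump the running bit count by spbc_bits and carry
+	 * into the high word if the low word wrapped */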
+ spbc_bits = csbcpb->cpb.sha512.spbc * 8;
+ csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
+ if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
+ csbcpb->cpb.sha512.message_bit_length_hi++;
+
+ /* everything after the first update is continuation */
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+out:
+ return rc;
+}
+
+static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ u64 count0;
+ int rc;
+
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /* we've hit the nx chip previously, now we're finalizing,
+ * so copy over the partial digest */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest,
+ csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+ }
+
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ count0 = sctx->count[0] * 8;
+
+ csbcpb->cpb.sha512.message_bit_length_lo += count0;
+ if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
+ csbcpb->cpb.sha512.message_bit_length_hi++;
+
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
+ nx_ctx->ap->sglen);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (!nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha512_ops));
+ atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+ &(nx_ctx->stats->sha512_bytes));
+
+ memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+out:
+ return rc;
+}
+
+static int nx_sha512_export(struct shash_desc *desc, void *out)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct sha512_state *octx = out;
+
+ /* move message_bit_length (128 bits) into count and convert its value
+ * to bytes */
+ octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
+ ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
+ octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
+
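+	/* add in any bytes still buffered in sctx->buf, again carrying into
+	 * count[1] on overflow */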
+ octx->count[0] += sctx->count[0];
+ if (octx->count[0] < sctx->count[0])
+ octx->count[1]++;
+
+ memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+
+ /* if no data has been processed yet, we need to export SHA512's
+ * initial data, in case this context gets imported into a software
+ * context */
+ if (csbcpb->cpb.sha512.message_bit_length_hi ||
+ csbcpb->cpb.sha512.message_bit_length_lo)
+ memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
+ SHA512_DIGEST_SIZE);
+ else {
+ octx->state[0] = SHA512_H0;
+ octx->state[1] = SHA512_H1;
+ octx->state[2] = SHA512_H2;
+ octx->state[3] = SHA512_H3;
+ octx->state[4] = SHA512_H4;
+ octx->state[5] = SHA512_H5;
+ octx->state[6] = SHA512_H6;
+ octx->state[7] = SHA512_H7;
+ }
+
+ return 0;
+}
+
+static int nx_sha512_import(struct shash_desc *desc, const void *in)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ const struct sha512_state *ictx = in;
+
+ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+ sctx->count[0] = ictx->count[0] & 0x3f;
+ csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f)
+ << 3;
+ csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
+ ictx->count[0] >> 61;
+
+ if (csbcpb->cpb.sha512.message_bit_length_hi ||
+ csbcpb->cpb.sha512.message_bit_length_lo) {
+ memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
+ SHA512_DIGEST_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ }
+
+ return 0;
+}
+
+struct shash_alg nx_shash_sha512_alg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .init = nx_sha512_init,
+ .update = nx_sha512_update,
+ .final = nx_sha512_final,
+ .export = nx_sha512_export,
+ .import = nx_sha512_import,
+ .descsize = sizeof(struct sha512_state),
+ .statesize = sizeof(struct sha512_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+ .cra_init = nx_crypto_ctx_sha_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
new file mode 100644
index 000000000000..d7f179cc2e98
--- /dev/null
+++ b/drivers/crypto/nx/nx.c
@@ -0,0 +1,716 @@
+/**
+ * Routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/hash.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <asm/pSeries_reconfig.h>
+#include <asm/abs_addr.h>
+#include <asm/hvcall.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+
+/**
+ * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
+ *
+ * @nx_ctx: the crypto context handle
+ * @op: PFO operation struct to pass in
+ * @may_sleep: flag indicating the request can sleep
+ *
+ * Make the hcall, retrying while the hardware is busy. If we cannot yield
+ * the thread, limit the number of retries to 10 here.
+ */
+int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
+ struct vio_pfo_op *op,
+ u32 may_sleep)
+{
+ int rc, retries = 10;
+ struct vio_dev *viodev = nx_driver.viodev;
+
+ atomic_inc(&(nx_ctx->stats->sync_ops));
+
+ do {
+ rc = vio_h_cop_sync(viodev, op);
+ } while ((rc == -EBUSY && !may_sleep && retries--) ||
+ (rc == -EBUSY && may_sleep && cond_resched()));
+
+ if (rc) {
+ dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
+ "hcall rc: %ld\n", rc, op->hcall_err);
+ atomic_inc(&(nx_ctx->stats->errors));
+ atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
+ atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
+ }
+
+ return rc;
+}
+
+/**
+ * nx_build_sg_list - build an NX scatter list describing a single buffer
+ *
+ * @sg_head: pointer to the first scatter list element to build
+ * @start_addr: pointer to the linear buffer
+ * @len: length of the data at @start_addr
+ * @sgmax: the largest number of scatter list elements we're allowed to create
+ *
+ * This function will start writing nx_sg elements at @sg_head and keep
+ * writing them until all of the data from @start_addr is described or
+ * until sgmax elements have been written. Scatter list elements will be
+ * created such that none of the elements describes a buffer that crosses a 4K
+ * boundary.
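+ *
+ * For example, an 8KB buffer whose absolute address starts in the middle of
+ * a 4K page is described by three elements: the tail of the first page, one
+ * full page, and the head of the last page.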
+ */
+struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
+ u8 *start_addr,
+ unsigned int len,
+ u32 sgmax)
+{
+ unsigned int sg_len = 0;
+ struct nx_sg *sg;
+ u64 sg_addr = (u64)start_addr;
+ u64 end_addr;
+
+ /* determine the start and end for this address range - slightly
+ * different if this is in VMALLOC_REGION */
+ if (is_vmalloc_addr(start_addr))
+ sg_addr = phys_to_abs(page_to_phys(vmalloc_to_page(start_addr)))
+ + offset_in_page(sg_addr);
+ else
+ sg_addr = virt_to_abs(sg_addr);
+
+ end_addr = sg_addr + len;
+
+ /* each iteration will write one struct nx_sg element and add the
+ * length of data described by that element to sg_len. Once @len bytes
+ * have been described (or @sgmax elements have been written), the
+ * loop ends. min_t is used to ensure @end_addr falls on the same page
+ * as sg_addr, if not, we need to create another nx_sg element for the
+ * data on the next page */
+ for (sg = sg_head; sg_len < len; sg++) {
+ sg->addr = sg_addr;
+ sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr);
+ sg->len = sg_addr - sg->addr;
+ sg_len += sg->len;
+
+ if ((sg - sg_head) == sgmax) {
+ pr_err("nx: scatter/gather list overflow, pid: %d\n",
+ current->pid);
+ return NULL;
+ }
+ }
+
+ /* return the moved sg_head pointer */
+ return sg;
+}
+
+/**
+ * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
+ *
+ * @nx_dst: pointer to the first nx_sg element to write
+ * @sglen: max number of nx_sg entries we're allowed to write
+ * @sg_src: pointer to the source linux scatterlist to walk
+ * @start: number of bytes to fast-forward past at the beginning of @sg_src
+ * @src_len: number of bytes to walk in @sg_src
+ */
+struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
+ unsigned int sglen,
+ struct scatterlist *sg_src,
+ unsigned int start,
+ unsigned int src_len)
+{
+ struct scatter_walk walk;
+ struct nx_sg *nx_sg = nx_dst;
+ unsigned int n, offset = 0, len = src_len;
+ char *dst;
+
+ /* we need to fast forward through @start bytes first */
+ for (;;) {
+ scatterwalk_start(&walk, sg_src);
+
+ if (start < offset + sg_src->length)
+ break;
+
+ offset += sg_src->length;
+ sg_src = scatterwalk_sg_next(sg_src);
+ }
+
+ /* start - offset is the number of bytes to advance in the scatterlist
+ * element we're currently looking at */
+ scatterwalk_advance(&walk, start - offset);
+
+ while (len && nx_sg) {
+ n = scatterwalk_clamp(&walk, len);
+ if (!n) {
+ scatterwalk_start(&walk, sg_next(walk.sg));
+ n = scatterwalk_clamp(&walk, len);
+ }
+ dst = scatterwalk_map(&walk);
+
+ nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
+ len -= n;
+
+ scatterwalk_unmap(dst);
+ scatterwalk_advance(&walk, n);
+ scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
+ }
+
+ /* return the moved destination pointer */
+ return nx_sg;
+}
+
+/**
+ * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
+ * scatterlists based on them.
+ *
+ * @nx_ctx: NX crypto context for the lists we're building
+ * @desc: the block cipher descriptor for the operation
+ * @dst: destination scatterlist
+ * @src: source scatterlist
+ * @nbytes: length of data described in the scatterlists
+ * @iv: destination for the iv data, if the algorithm requires it
+ *
+ * This is common code shared by all the AES algorithms. It uses the block
+ * cipher walk routines to traverse input and output scatterlists, building
+ * corresponding NX scatterlists
+ */
+int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes,
+ u8 *iv)
+{
+ struct nx_sg *nx_insg = nx_ctx->in_sg;
+ struct nx_sg *nx_outsg = nx_ctx->out_sg;
+ struct blkcipher_walk walk;
+ int rc;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ if (rc)
+ goto out;
+
+ if (iv)
+ memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+
+ while (walk.nbytes) {
+ nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
+ walk.nbytes, nx_ctx->ap->sglen);
+ nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
+ walk.nbytes, nx_ctx->ap->sglen);
+
+ rc = blkcipher_walk_done(desc, &walk, 0);
+ if (rc)
+ break;
+ }
+
+ if (walk.nbytes) {
+ nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
+ walk.nbytes, nx_ctx->ap->sglen);
+ nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
+ walk.nbytes, nx_ctx->ap->sglen);
+
+ rc = 0;
+ }
+
+ /* these lengths should be negative, which will indicate to phyp that
+ * the input and output parameters are scatterlists, not linear
+ * buffers */
+ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
+out:
+ return rc;
+}
+
+/**
+ * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
+ *
+ * @nx_ctx: the nx context to initialize
+ * @function: the function code for the op
+ */
+void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
+{
+ memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
+ nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
+
+ nx_ctx->op.flags = function;
+ nx_ctx->op.csbcpb = virt_to_abs(nx_ctx->csbcpb);
+ nx_ctx->op.in = virt_to_abs(nx_ctx->in_sg);
+ nx_ctx->op.out = virt_to_abs(nx_ctx->out_sg);
+
+ if (nx_ctx->csbcpb_aead) {
+ nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;
+
+ nx_ctx->op_aead.flags = function;
+ nx_ctx->op_aead.csbcpb = virt_to_abs(nx_ctx->csbcpb_aead);
+ nx_ctx->op_aead.in = virt_to_abs(nx_ctx->in_sg);
+ nx_ctx->op_aead.out = virt_to_abs(nx_ctx->out_sg);
+ }
+}
+
+static void nx_of_update_status(struct device *dev,
+ struct property *p,
+ struct nx_of *props)
+{
+ if (!strncmp(p->value, "okay", p->length)) {
+ props->status = NX_WAITING;
+ props->flags |= NX_OF_FLAG_STATUS_SET;
+ } else {
+ dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
+ (char *)p->value);
+ }
+}
+
+static void nx_of_update_sglen(struct device *dev,
+ struct property *p,
+ struct nx_of *props)
+{
+ if (p->length != sizeof(props->max_sg_len)) {
+ dev_err(dev, "%s: unexpected format for "
+ "ibm,max-sg-len property\n", __func__);
+ dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
+ "long, expected %zd bytes\n", __func__,
+ p->length, sizeof(props->max_sg_len));
+ return;
+ }
+
+ props->max_sg_len = *(u32 *)p->value;
+ props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
+}
+
+static void nx_of_update_msc(struct device *dev,
+ struct property *p,
+ struct nx_of *props)
+{
+ struct msc_triplet *trip;
+ struct max_sync_cop *msc;
+ unsigned int bytes_so_far, i, lenp;
+
+ msc = (struct max_sync_cop *)p->value;
+ lenp = p->length;
+
+ /* You can't tell if the data read in for this property is sane by its
+ * size alone. This is because there are sizes embedded in the data
+ * structure. The best we can do is check lengths as we parse and bail
+ * as soon as a length error is detected. */
+ bytes_so_far = 0;
+
+ while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
+ bytes_so_far += sizeof(struct max_sync_cop);
+
+ trip = msc->trip;
+
+ for (i = 0;
+ ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
+ i < msc->triplets;
+ i++) {
+ if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
+ dev_err(dev, "unknown function code/mode "
+ "combo: %d/%d (ignored)\n", msc->fc,
+ msc->mode);
+ goto next_loop;
+ }
+
+ switch (trip->keybitlen) {
+ case 128:
+ case 160:
+ props->ap[msc->fc][msc->mode][0].databytelen =
+ trip->databytelen;
+ props->ap[msc->fc][msc->mode][0].sglen =
+ trip->sglen;
+ break;
+ case 192:
+ props->ap[msc->fc][msc->mode][1].databytelen =
+ trip->databytelen;
+ props->ap[msc->fc][msc->mode][1].sglen =
+ trip->sglen;
+ break;
+ case 256:
+ if (msc->fc == NX_FC_AES) {
+ props->ap[msc->fc][msc->mode][2].
+ databytelen = trip->databytelen;
+ props->ap[msc->fc][msc->mode][2].sglen =
+ trip->sglen;
+ } else if (msc->fc == NX_FC_AES_HMAC ||
+ msc->fc == NX_FC_SHA) {
+ props->ap[msc->fc][msc->mode][1].
+ databytelen = trip->databytelen;
+ props->ap[msc->fc][msc->mode][1].sglen =
+ trip->sglen;
+ } else {
+ dev_warn(dev, "unknown function "
+ "code/key bit len combo"
+ ": (%u/256)\n", msc->fc);
+ }
+ break;
+ case 512:
+ props->ap[msc->fc][msc->mode][2].databytelen =
+ trip->databytelen;
+ props->ap[msc->fc][msc->mode][2].sglen =
+ trip->sglen;
+ break;
+ default:
+ dev_warn(dev, "unknown function code/key bit "
+ "len combo: (%u/%u)\n", msc->fc,
+ trip->keybitlen);
+ break;
+ }
+next_loop:
+ bytes_so_far += sizeof(struct msc_triplet);
+ trip++;
+ }
+
+ msc = (struct max_sync_cop *)trip;
+ }
+
+ props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
+}
+
+/**
+ * nx_of_init - read openFirmware values from the device tree
+ *
+ * @dev: device handle
+ * @props: pointer to struct to hold the properties values
+ *
+ * Called once at driver probe time, this function will read out the
+ * openFirmware properties we use at runtime. If all the OF properties are
+ * acceptable, when we exit this function props->flags will indicate that
+ * we're ready to register our crypto algorithms.
+ */
+static void nx_of_init(struct device *dev, struct nx_of *props)
+{
+ struct device_node *base_node = dev->of_node;
+ struct property *p;
+
+ p = of_find_property(base_node, "status", NULL);
+ if (!p)
+ dev_info(dev, "%s: property 'status' not found\n", __func__);
+ else
+ nx_of_update_status(dev, p, props);
+
+ p = of_find_property(base_node, "ibm,max-sg-len", NULL);
+ if (!p)
+ dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
+ __func__);
+ else
+ nx_of_update_sglen(dev, p, props);
+
+ p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
+ if (!p)
+ dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
+ __func__);
+ else
+ nx_of_update_msc(dev, p, props);
+}
+
+/**
+ * nx_register_algs - register algorithms with the crypto API
+ *
+ * Called from nx_probe()
+ *
+ * If all OF properties are in an acceptable state, the driver flags will
+ * indicate that we're ready and we'll create our debugfs files and register
+ * our crypto algorithms.
+ */
+static int nx_register_algs(void)
+{
+ int rc = -1;
+
+ if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
+ goto out;
+
+ memset(&nx_driver.stats, 0, sizeof(struct nx_stats));
+
+ rc = NX_DEBUGFS_INIT(&nx_driver);
+ if (rc)
+ goto out;
+
+ rc = crypto_register_alg(&nx_ecb_aes_alg);
+ if (rc)
+ goto out;
+
+ rc = crypto_register_alg(&nx_cbc_aes_alg);
+ if (rc)
+ goto out_unreg_ecb;
+
+ rc = crypto_register_alg(&nx_ctr_aes_alg);
+ if (rc)
+ goto out_unreg_cbc;
+
+ rc = crypto_register_alg(&nx_ctr3686_aes_alg);
+ if (rc)
+ goto out_unreg_ctr;
+
+ rc = crypto_register_alg(&nx_gcm_aes_alg);
+ if (rc)
+ goto out_unreg_ctr3686;
+
+ rc = crypto_register_alg(&nx_gcm4106_aes_alg);
+ if (rc)
+ goto out_unreg_gcm;
+
+ rc = crypto_register_alg(&nx_ccm_aes_alg);
+ if (rc)
+ goto out_unreg_gcm4106;
+
+ rc = crypto_register_alg(&nx_ccm4309_aes_alg);
+ if (rc)
+ goto out_unreg_ccm;
+
+ rc = crypto_register_shash(&nx_shash_sha256_alg);
+ if (rc)
+ goto out_unreg_ccm4309;
+
+ rc = crypto_register_shash(&nx_shash_sha512_alg);
+ if (rc)
+ goto out_unreg_s256;
+
+ rc = crypto_register_shash(&nx_shash_aes_xcbc_alg);
+ if (rc)
+ goto out_unreg_s512;
+
+ nx_driver.of.status = NX_OKAY;
+
+ goto out;
+
+out_unreg_s512:
+ crypto_unregister_shash(&nx_shash_sha512_alg);
+out_unreg_s256:
+ crypto_unregister_shash(&nx_shash_sha256_alg);
+out_unreg_ccm4309:
+ crypto_unregister_alg(&nx_ccm4309_aes_alg);
+out_unreg_ccm:
+ crypto_unregister_alg(&nx_ccm_aes_alg);
+out_unreg_gcm4106:
+ crypto_unregister_alg(&nx_gcm4106_aes_alg);
+out_unreg_gcm:
+ crypto_unregister_alg(&nx_gcm_aes_alg);
+out_unreg_ctr3686:
+ crypto_unregister_alg(&nx_ctr3686_aes_alg);
+out_unreg_ctr:
+ crypto_unregister_alg(&nx_ctr_aes_alg);
+out_unreg_cbc:
+ crypto_unregister_alg(&nx_cbc_aes_alg);
+out_unreg_ecb:
+ crypto_unregister_alg(&nx_ecb_aes_alg);
+out:
+ return rc;
+}
+
+/**
+ * nx_crypto_ctx_init - create and initialize a crypto api context
+ *
+ * @nx_ctx: the crypto api context
+ * @fc: function code for the context
+ * @mode: the function code specific mode for this context
+ */
+static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
+{
+ if (nx_driver.of.status != NX_OKAY) {
+ pr_err("Attempt to initialize NX crypto context while device "
+ "is not available!\n");
+ return -ENODEV;
+ }
+
+ /* we need an extra page for csbcpb_aead for these modes */
+ if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
+ nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
+ sizeof(struct nx_csbcpb);
+ else
+ nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) +
+ sizeof(struct nx_csbcpb);
+
+ nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
+ if (!nx_ctx->kmem)
+ return -ENOMEM;
+
+ /* the csbcpb and scatterlists must be 4K aligned pages */
+ nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
+ (u64)NX_PAGE_SIZE));
+ nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
+ nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);
+
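+	/* resulting layout, roughly: alignment padding, the csbcpb page, then
+	 * one page each for in_sg and out_sg; csbcpb_aead, when present, sits
+	 * directly after the out_sg page */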
+ if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
+ nx_ctx->csbcpb_aead =
+ (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
+ NX_PAGE_SIZE);
+
+ /* give each context a pointer to global stats and their OF
+ * properties */
+ nx_ctx->stats = &nx_driver.stats;
+ memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
+ sizeof(struct alg_props) * 3);
+
+ return 0;
+}
+
+/* entry points from the crypto tfm initializers */
+int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_CCM);
+}
+
+int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_GCM);
+}
+
+int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_CTR);
+}
+
+int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_CBC);
+}
+
+int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_ECB);
+}
+
+int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
+}
+
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
+{
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_XCBC_MAC);
+}
+
+/**
+ * nx_crypto_ctx_exit - destroy a crypto api context
+ *
+ * @tfm: the crypto transform pointer for the context
+ *
+ * As crypto API contexts are destroyed, this exit hook is called to free the
+ * memory associated with them.
+ */
+void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+
+ kzfree(nx_ctx->kmem);
+ nx_ctx->csbcpb = NULL;
+ nx_ctx->csbcpb_aead = NULL;
+ nx_ctx->in_sg = NULL;
+ nx_ctx->out_sg = NULL;
+}
+
+static int __devinit nx_probe(struct vio_dev *viodev,
+ const struct vio_device_id *id)
+{
+ dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
+ viodev->name, viodev->resource_id);
+
+ if (nx_driver.viodev) {
+ dev_err(&viodev->dev, "%s: Attempt to register more than one "
+ "instance of the hardware\n", __func__);
+ return -EINVAL;
+ }
+
+ nx_driver.viodev = viodev;
+
+ nx_of_init(&viodev->dev, &nx_driver.of);
+
+ return nx_register_algs();
+}
+
+static int __devexit nx_remove(struct vio_dev *viodev)
+{
+ dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
+ viodev->unit_address);
+
+ if (nx_driver.of.status == NX_OKAY) {
+ NX_DEBUGFS_FINI(&nx_driver);
+
+ crypto_unregister_alg(&nx_ccm_aes_alg);
+ crypto_unregister_alg(&nx_ccm4309_aes_alg);
+ crypto_unregister_alg(&nx_gcm_aes_alg);
+ crypto_unregister_alg(&nx_gcm4106_aes_alg);
+ crypto_unregister_alg(&nx_ctr_aes_alg);
+ crypto_unregister_alg(&nx_ctr3686_aes_alg);
+ crypto_unregister_alg(&nx_cbc_aes_alg);
+ crypto_unregister_alg(&nx_ecb_aes_alg);
+ crypto_unregister_shash(&nx_shash_sha256_alg);
+ crypto_unregister_shash(&nx_shash_sha512_alg);
+ crypto_unregister_shash(&nx_shash_aes_xcbc_alg);
+ }
+
+ return 0;
+}
+
+
+/* module wide initialization/cleanup */
+static int __init nx_init(void)
+{
+ return vio_register_driver(&nx_driver.viodriver);
+}
+
+static void __exit nx_fini(void)
+{
+ vio_unregister_driver(&nx_driver.viodriver);
+}
+
+static struct vio_device_id nx_crypto_driver_ids[] __devinitdata = {
+ { "ibm,sym-encryption-v1", "ibm,sym-encryption" },
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);
+
+/* driver state structure */
+struct nx_crypto_driver nx_driver = {
+ .viodriver = {
+ .id_table = nx_crypto_driver_ids,
+ .probe = nx_probe,
+ .remove = nx_remove,
+ .name = NX_NAME,
+ },
+};
+
+module_init(nx_init);
+module_exit(nx_fini);
+
+MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
+MODULE_DESCRIPTION(NX_STRING);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NX_VERSION);
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
new file mode 100644
index 000000000000..3232b182dd28
--- /dev/null
+++ b/drivers/crypto/nx/nx.h
@@ -0,0 +1,193 @@
+
+#ifndef __NX_H__
+#define __NX_H__
+
+#define NX_NAME "nx-crypto"
+#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
+#define NX_VERSION "1.0"
+
+static const char nx_driver_string[] = NX_STRING;
+static const char nx_driver_version[] = NX_VERSION;
+
+/* a scatterlist in the format PHYP is expecting */
+struct nx_sg {
+ u64 addr;
+ u32 rsvd;
+ u32 len;
+} __attribute((packed));
+
+#define NX_PAGE_SIZE (4096)
+#define NX_MAX_SG_ENTRIES (NX_PAGE_SIZE/(sizeof(struct nx_sg)))
+
+enum nx_status {
+ NX_DISABLED,
+ NX_WAITING,
+ NX_OKAY
+};
+
+/* msc_triplet and max_sync_cop are used only to assist in parsing the
+ * openFirmware property */
+struct msc_triplet {
+ u32 keybitlen;
+ u32 databytelen;
+ u32 sglen;
+} __packed;
+
+struct max_sync_cop {
+ u32 fc;
+ u32 mode;
+ u32 triplets;
+ struct msc_triplet trip[0];
+} __packed;
+
+struct alg_props {
+ u32 databytelen;
+ u32 sglen;
+};
+
+#define NX_OF_FLAG_MAXSGLEN_SET (1)
+#define NX_OF_FLAG_STATUS_SET (2)
+#define NX_OF_FLAG_MAXSYNCCOP_SET (4)
+#define NX_OF_FLAG_MASK_READY (NX_OF_FLAG_MAXSGLEN_SET | \
+ NX_OF_FLAG_STATUS_SET | \
+ NX_OF_FLAG_MAXSYNCCOP_SET)
+struct nx_of {
+ u32 flags;
+ u32 max_sg_len;
+ enum nx_status status;
+ struct alg_props ap[NX_MAX_FC][NX_MAX_MODE][3];
+};
+
+struct nx_stats {
+ atomic_t aes_ops;
+ atomic64_t aes_bytes;
+ atomic_t sha256_ops;
+ atomic64_t sha256_bytes;
+ atomic_t sha512_ops;
+ atomic64_t sha512_bytes;
+
+ atomic_t sync_ops;
+
+ atomic_t errors;
+ atomic_t last_error;
+ atomic_t last_error_pid;
+};
+
+struct nx_debugfs {
+ struct dentry *dfs_root;
+ struct dentry *dfs_aes_ops, *dfs_aes_bytes;
+ struct dentry *dfs_sha256_ops, *dfs_sha256_bytes;
+ struct dentry *dfs_sha512_ops, *dfs_sha512_bytes;
+ struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid;
+};
+
+struct nx_crypto_driver {
+ struct nx_stats stats;
+ struct nx_of of;
+ struct vio_dev *viodev;
+ struct vio_driver viodriver;
+ struct nx_debugfs dfs;
+};
+
+#define NX_GCM4106_NONCE_LEN (4)
+#define NX_GCM_CTR_OFFSET (12)
+struct nx_gcm_priv {
+ u8 iv[16];
+ u8 iauth_tag[16];
+ u8 nonce[NX_GCM4106_NONCE_LEN];
+};
+
+#define NX_CCM_AES_KEY_LEN (16)
+#define NX_CCM4309_AES_KEY_LEN (19)
+#define NX_CCM4309_NONCE_LEN (3)
+struct nx_ccm_priv {
+ u8 iv[16];
+ u8 b0[16];
+ u8 iauth_tag[16];
+ u8 oauth_tag[16];
+ u8 nonce[NX_CCM4309_NONCE_LEN];
+};
+
+struct nx_xcbc_priv {
+ u8 key[16];
+};
+
+struct nx_ctr_priv {
+ u8 iv[16];
+};
+
+struct nx_crypto_ctx {
+ void *kmem; /* unaligned, kmalloc'd buffer */
+ size_t kmem_len; /* length of kmem */
+ struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
+ struct vio_pfo_op op; /* operation struct with hcall parameters */
+ struct nx_csbcpb *csbcpb_aead; /* secondary csbcpb used by AEAD algs */
+ struct vio_pfo_op op_aead;/* operation struct for csbcpb_aead */
+
+ struct nx_sg *in_sg; /* aligned pointer into kmem to an sg list */
+ struct nx_sg *out_sg; /* aligned pointer into kmem to an sg list */
+
+ struct alg_props *ap; /* pointer into props based on our key size */
+ struct alg_props props[3];/* openFirmware properties for requests */
+ struct nx_stats *stats; /* pointer into an nx_crypto_driver for stats
+ reporting */
+
+ union {
+ struct nx_gcm_priv gcm;
+ struct nx_ccm_priv ccm;
+ struct nx_xcbc_priv xcbc;
+ struct nx_ctr_priv ctr;
+ } priv;
+};
+
+/* prototypes */
+int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm);
+void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
+void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
+int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
+ u32 may_sleep);
+struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
+int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
+ struct scatterlist *, struct scatterlist *, unsigned int,
+ u8 *);
+struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
+ struct scatterlist *, unsigned int,
+ unsigned int);
+
+#ifdef CONFIG_DEBUG_FS
+#define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv)
+#define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv)
+
+int nx_debugfs_init(struct nx_crypto_driver *);
+void nx_debugfs_fini(struct nx_crypto_driver *);
+#else
+#define NX_DEBUGFS_INIT(drv) (0)
+#define NX_DEBUGFS_FINI(drv) (0)
+#endif
+
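+/* mask an address down to the start of its 4K NX page */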
+#define NX_PAGE_NUM(x) ((u64)(x) & 0xfffffffffffff000ULL)
+
+extern struct crypto_alg nx_cbc_aes_alg;
+extern struct crypto_alg nx_ecb_aes_alg;
+extern struct crypto_alg nx_gcm_aes_alg;
+extern struct crypto_alg nx_gcm4106_aes_alg;
+extern struct crypto_alg nx_ctr_aes_alg;
+extern struct crypto_alg nx_ctr3686_aes_alg;
+extern struct crypto_alg nx_ccm_aes_alg;
+extern struct crypto_alg nx_ccm4309_aes_alg;
+extern struct shash_alg nx_shash_aes_xcbc_alg;
+extern struct shash_alg nx_shash_sha512_alg;
+extern struct shash_alg nx_shash_sha256_alg;
+
+extern struct nx_crypto_driver nx_driver;
+
+#define SCATTERWALK_TO_SG 1
+#define SCATTERWALK_FROM_SG 0
+
+#endif
diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h
new file mode 100644
index 000000000000..a304f956d6f8
--- /dev/null
+++ b/drivers/crypto/nx/nx_csbcpb.h
@@ -0,0 +1,205 @@
+
+#ifndef __NX_CSBCPB_H__
+#define __NX_CSBCPB_H__
+
+struct cop_symcpb_aes_ecb {
+ u8 key[32];
+ u8 __rsvd[80];
+} __packed;
+
+struct cop_symcpb_aes_cbc {
+ u8 iv[16];
+ u8 key[32];
+ u8 cv[16];
+ u32 spbc;
+ u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_gca {
+ u8 in_pat[16];
+ u8 key[32];
+ u8 out_pat[16];
+ u32 spbc;
+ u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_gcm {
+ u8 in_pat_or_aad[16];
+ u8 iv_or_cnt[16];
+ u64 bit_length_aad;
+ u64 bit_length_data;
+ u8 in_s0[16];
+ u8 key[32];
+ u8 __rsvd1[16];
+ u8 out_pat_or_mac[16];
+ u8 out_s0[16];
+ u8 out_cnt[16];
+ u32 spbc;
+ u8 __rsvd2[12];
+} __packed;
+
+struct cop_symcpb_aes_ctr {
+ u8 iv[16];
+ u8 key[32];
+ u8 cv[16];
+ u32 spbc;
+ u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_aes_cca {
+ u8 b0[16];
+ u8 b1[16];
+ u8 key[16];
+ u8 out_pat_or_b0[16];
+ u32 spbc;
+ u8 __rsvd[44];
+} __packed;
+
+struct cop_symcpb_aes_ccm {
+ u8 in_pat_or_b0[16];
+ u8 iv_or_ctr[16];
+ u8 in_s0[16];
+ u8 key[16];
+ u8 __rsvd1[48];
+ u8 out_pat_or_mac[16];
+ u8 out_s0[16];
+ u8 out_ctr[16];
+ u32 spbc;
+ u8 __rsvd2[12];
+} __packed;
+
+struct cop_symcpb_aes_xcbc {
+ u8 cv[16];
+ u8 key[16];
+ u8 __rsvd1[16];
+ u8 out_cv_mac[16];
+ u32 spbc;
+ u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_sha256 {
+ u64 message_bit_length;
+ u64 __rsvd1;
+ u8 input_partial_digest[32];
+ u8 message_digest[32];
+ u32 spbc;
+ u8 __rsvd2[44];
+} __packed;
+
+struct cop_symcpb_sha512 {
+ u64 message_bit_length_hi;
+ u64 message_bit_length_lo;
+ u8 input_partial_digest[64];
+ u8 __rsvd1[32];
+ u8 message_digest[64];
+ u32 spbc;
+ u8 __rsvd2[76];
+} __packed;
+
+#define NX_FDM_INTERMEDIATE 0x01
+#define NX_FDM_CONTINUATION 0x02
+#define NX_FDM_ENDE_ENCRYPT 0x80
+
+#define NX_CPB_FDM(c) ((c)->cpb.hdr.fdm)
+#define NX_CPB_KS_DS(c) ((c)->cpb.hdr.ks_ds)
+
+#define NX_CPB_KEY_SIZE(c) (NX_CPB_KS_DS(c) >> 4)
+#define NX_CPB_SET_KEY_SIZE(c, x) NX_CPB_KS_DS(c) |= ((x) << 4)
+#define NX_CPB_SET_DIGEST_SIZE(c, x) NX_CPB_KS_DS(c) |= (x)
+
+struct cop_symcpb_header {
+ u8 mode;
+ u8 fdm;
+ u8 ks_ds;
+ u8 pad_byte;
+ u8 __rsvd[12];
+} __packed;
+
+struct cop_parameter_block {
+ struct cop_symcpb_header hdr;
+ union {
+ struct cop_symcpb_aes_ecb aes_ecb;
+ struct cop_symcpb_aes_cbc aes_cbc;
+ struct cop_symcpb_aes_gca aes_gca;
+ struct cop_symcpb_aes_gcm aes_gcm;
+ struct cop_symcpb_aes_cca aes_cca;
+ struct cop_symcpb_aes_ccm aes_ccm;
+ struct cop_symcpb_aes_ctr aes_ctr;
+ struct cop_symcpb_aes_xcbc aes_xcbc;
+ struct cop_symcpb_sha256 sha256;
+ struct cop_symcpb_sha512 sha512;
+ };
+} __packed;
+
+#define NX_CSB_VALID_BIT 0x80
+
+/* co-processor status block */
+struct cop_status_block {
+ u8 valid;
+ u8 crb_seq_number;
+ u8 completion_code;
+ u8 completion_extension;
+ u32 processed_byte_count;
+ u64 address;
+} __packed;
+
+/* Nest accelerator workbook section 4.4 */
+struct nx_csbcpb {
+ unsigned char __rsvd[112];
+ struct cop_status_block csb;
+ struct cop_parameter_block cpb;
+} __packed;
+
+/* nx_csbcpb related definitions */
+#define NX_MODE_AES_ECB 0
+#define NX_MODE_AES_CBC 1
+#define NX_MODE_AES_GMAC 2
+#define NX_MODE_AES_GCA 3
+#define NX_MODE_AES_GCM 4
+#define NX_MODE_AES_CCA 5
+#define NX_MODE_AES_CCM 6
+#define NX_MODE_AES_CTR 7
+#define NX_MODE_AES_XCBC_MAC 20
+#define NX_MODE_SHA 0
+#define NX_MODE_SHA_HMAC 1
+#define NX_MODE_AES_CBC_HMAC_ETA 8
+#define NX_MODE_AES_CBC_HMAC_ATE 9
+#define NX_MODE_AES_CBC_HMAC_EAA 10
+#define NX_MODE_AES_CTR_HMAC_ETA 12
+#define NX_MODE_AES_CTR_HMAC_ATE 13
+#define NX_MODE_AES_CTR_HMAC_EAA 14
+
+#define NX_FDM_CI_FULL 0
+#define NX_FDM_CI_FIRST 1
+#define NX_FDM_CI_LAST 2
+#define NX_FDM_CI_MIDDLE 3
+
+#define NX_FDM_PR_NONE 0
+#define NX_FDM_PR_PAD 1
+
+#define NX_KS_AES_128 1
+#define NX_KS_AES_192 2
+#define NX_KS_AES_256 3
+
+#define NX_DS_SHA256 2
+#define NX_DS_SHA512 3
+
+#define NX_FC_AES 0
+#define NX_FC_SHA 2
+#define NX_FC_AES_HMAC 6
+
+#define NX_MAX_FC (NX_FC_AES_HMAC + 1)
+#define NX_MAX_MODE (NX_MODE_AES_XCBC_MAC + 1)
+
+#define HCOP_FC_AES NX_FC_AES
+#define HCOP_FC_SHA NX_FC_SHA
+#define HCOP_FC_AES_HMAC NX_FC_AES_HMAC
+
+/* indices into the array of algorithm properties */
+#define NX_PROPS_AES_128 0
+#define NX_PROPS_AES_192 1
+#define NX_PROPS_AES_256 2
+#define NX_PROPS_SHA256 1
+#define NX_PROPS_SHA512 2
+
+#endif
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
new file mode 100644
index 000000000000..7ab2e8dcd9b4
--- /dev/null
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -0,0 +1,103 @@
+/**
+ * debugfs routines supporting the Power 7+ Nest Accelerators driver
+ *
+ * Copyright (C) 2011-2012 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Kent Yoder <yoder1@us.ibm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <asm/vio.h>
+
+#include "nx_csbcpb.h"
+#include "nx.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * debugfs
+ *
+ * For documentation on these attributes, please see:
+ *
+ * Documentation/ABI/testing/debugfs-pfo-nx-crypto
+ */
+
+int nx_debugfs_init(struct nx_crypto_driver *drv)
+{
+ struct nx_debugfs *dfs = &drv->dfs;
+
+ dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL);
+
+ dfs->dfs_aes_ops =
+ debugfs_create_u32("aes_ops",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root, (u32 *)&drv->stats.aes_ops);
+ dfs->dfs_sha256_ops =
+ debugfs_create_u32("sha256_ops",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root,
+ (u32 *)&drv->stats.sha256_ops);
+ dfs->dfs_sha512_ops =
+ debugfs_create_u32("sha512_ops",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root,
+ (u32 *)&drv->stats.sha512_ops);
+ dfs->dfs_aes_bytes =
+ debugfs_create_u64("aes_bytes",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root,
+ (u64 *)&drv->stats.aes_bytes);
+ dfs->dfs_sha256_bytes =
+ debugfs_create_u64("sha256_bytes",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root,
+ (u64 *)&drv->stats.sha256_bytes);
+ dfs->dfs_sha512_bytes =
+ debugfs_create_u64("sha512_bytes",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root,
+ (u64 *)&drv->stats.sha512_bytes);
+ dfs->dfs_errors =
+ debugfs_create_u32("errors",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root, (u32 *)&drv->stats.errors);
+ dfs->dfs_last_error =
+ debugfs_create_u32("last_error",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root,
+ (u32 *)&drv->stats.last_error);
+ dfs->dfs_last_error_pid =
+ debugfs_create_u32("last_error_pid",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ dfs->dfs_root,
+ (u32 *)&drv->stats.last_error_pid);
+ return 0;
+}
+
+void
+nx_debugfs_fini(struct nx_crypto_driver *drv)
+{
+ debugfs_remove_recursive(drv->dfs.dfs_root);
+}
+
+#endif
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
new file mode 100644
index 000000000000..b35e5c4b025a
--- /dev/null
+++ b/drivers/crypto/ux500/Kconfig
@@ -0,0 +1,30 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+config CRYPTO_DEV_UX500_CRYP
+ tristate "UX500 crypto driver for CRYP block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_DES
+ help
+ This selects the crypto driver for the UX500_CRYP hardware. It supports
+	  AES-ECB, CBC and CTR with key sizes of 128, 192 and 256 bits.
+
+config CRYPTO_DEV_UX500_HASH
+ tristate "UX500 crypto driver for HASH block"
+ depends on CRYPTO_DEV_UX500
+ select CRYPTO_HASH
+ select CRYPTO_HMAC
+ help
+ This selects the hash driver for the UX500_HASH hardware.
+ Depends on UX500/STM DMA if running in DMA mode.
+
+config CRYPTO_DEV_UX500_DEBUG
+ bool "Activate ux500 platform debug-mode for crypto and hash block"
+ depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
+ default n
+ help
+ Say Y if you want to add debug prints to ux500_hash and
+ ux500_cryp devices.
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
new file mode 100644
index 000000000000..b9a365bade86
--- /dev/null
+++ b/drivers/crypto/ux500/Makefile
@@ -0,0 +1,8 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += cryp/
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
new file mode 100644
index 000000000000..e5d362a6f680
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -0,0 +1,13 @@
+#/*
+# * Copyright (C) ST-Ericsson SA 2010
+# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
+# * License terms: GNU General Public License (GPL) version 2 */
+
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_cryp_core.o := -DDEBUG -O0
+CFLAGS_cryp.o := -DDEBUG -O0
+CFLAGS_cryp_irq.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
+ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
new file mode 100644
index 000000000000..e208ceaf81c9
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -0,0 +1,389 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <mach/hardware.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+/**
+ * cryp_wait_until_done - wait until the device logic is not busy
+ */
+void cryp_wait_until_done(struct cryp_device_data *device_data)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+}
+
+/**
+ * cryp_check - This routine checks Peripheral and PCell Id
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_check(struct cryp_device_data *device_data)
+{
+ int peripheralid2 = 0;
+
+ if (NULL == device_data)
+ return -EINVAL;
+
+ peripheralid2 = readl_relaxed(&device_data->base->periphId2);
+
+ if (peripheralid2 != CRYP_PERIPHERAL_ID2_DB8500)
+ return -EPERM;
+
+ /* Check Peripheral and Pcell Id Register for CRYP */
+ if ((CRYP_PERIPHERAL_ID0 ==
+ readl_relaxed(&device_data->base->periphId0))
+ && (CRYP_PERIPHERAL_ID1 ==
+ readl_relaxed(&device_data->base->periphId1))
+ && (CRYP_PERIPHERAL_ID3 ==
+ readl_relaxed(&device_data->base->periphId3))
+ && (CRYP_PCELL_ID0 ==
+ readl_relaxed(&device_data->base->pcellId0))
+ && (CRYP_PCELL_ID1 ==
+ readl_relaxed(&device_data->base->pcellId1))
+ && (CRYP_PCELL_ID2 ==
+ readl_relaxed(&device_data->base->pcellId2))
+ && (CRYP_PCELL_ID3 ==
+ readl_relaxed(&device_data->base->pcellId3))) {
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+/**
+ * cryp_activity - This routine enables/disables the cryptography function.
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_crypen: Enable/Disable functionality
+ */
+void cryp_activity(struct cryp_device_data *device_data,
+ enum cryp_crypen cryp_crypen)
+{
+ CRYP_PUT_BITS(&device_data->base->cr,
+ cryp_crypen,
+ CRYP_CR_CRYPEN_POS,
+ CRYP_CR_CRYPEN_MASK);
+}
+
+/**
+ * cryp_flush_inoutfifo - Resets both the input and the output FIFOs
+ * @device_data: Pointer to the device data struct for base address.
+ */
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
+{
+ /*
+	 * We always need to disable the hardware before trying to flush the
+ * FIFO. This is something that isn't written in the design
+ * specification, but we have been informed by the hardware designers
+ * that this must be done.
+ */
+ cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+ cryp_wait_until_done(device_data);
+
+ CRYP_SET_BITS(&device_data->base->cr, CRYP_CR_FFLUSH_MASK);
+ /*
+ * CRYP_SR_INFIFO_READY_MASK is the expected value on the status
+	 * register when starting a new calculation, which means the input
+	 * FIFO is both not full and empty.
+ */
+ while (readl_relaxed(&device_data->base->sr) !=
+ CRYP_SR_INFIFO_READY_MASK)
+ cpu_relax();
+}
+
+/**
+ * cryp_set_configuration - This routine sets the control register of the CRYP IP
+ * @device_data: Pointer to the device data struct for base address.
+ * @cryp_config: Pointer to the configuration parameter
+ * @control_register: The control register to be written later on.
+ */
+int cryp_set_configuration(struct cryp_device_data *device_data,
+ struct cryp_config *cryp_config,
+ u32 *control_register)
+{
+ u32 cr_for_kse;
+
+ if (NULL == device_data || NULL == cryp_config)
+ return -EINVAL;
+
+ *control_register |= (cryp_config->keysize << CRYP_CR_KEYSIZE_POS);
+
+ /* Prepare key for decryption in AES_ECB and AES_CBC mode. */
+ if ((CRYP_ALGORITHM_DECRYPT == cryp_config->algodir) &&
+ ((CRYP_ALGO_AES_ECB == cryp_config->algomode) ||
+ (CRYP_ALGO_AES_CBC == cryp_config->algomode))) {
+ cr_for_kse = *control_register;
+ /*
+ * This seems a bit odd, but it is indeed needed to set this to
+		 * encrypt even though it is a decryption that we are doing. It
+		 * is also mentioned in the design spec that you need to do this.
+		 * After the key preparation for decrypting is done you should set
+ * algodir back to decryption, which is done outside this if
+ * statement.
+ *
+		 * According to the design specification we should set mode ECB
+		 * during key preparation even though we might be running CBC
+		 * when we enter this function.
+ *
+ * Writing to KSE_ENABLED will drop CRYPEN when key preparation
+ * is done. Therefore we need to set CRYPEN again outside this
+ * if statement when running decryption.
+ */
+ cr_for_kse |= ((CRYP_ALGORITHM_ENCRYPT << CRYP_CR_ALGODIR_POS) |
+ (CRYP_ALGO_AES_ECB << CRYP_CR_ALGOMODE_POS) |
+ (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS) |
+ (KSE_ENABLED << CRYP_CR_KSE_POS));
+
+ writel_relaxed(cr_for_kse, &device_data->base->cr);
+ cryp_wait_until_done(device_data);
+ }
+
+ *control_register |=
+ ((cryp_config->algomode << CRYP_CR_ALGOMODE_POS) |
+ (cryp_config->algodir << CRYP_CR_ALGODIR_POS));
+
+ return 0;
+}
+
+/**
+ * cryp_configure_protection - set the protection bits in the CRYP logic.
+ * @device_data: Pointer to the device data struct for base address.
+ * @p_protect_config: Pointer to the protection mode and
+ * secure mode configuration
+ */
+int cryp_configure_protection(struct cryp_device_data *device_data,
+ struct cryp_protection_config *p_protect_config)
+{
+ if (NULL == p_protect_config)
+ return -EINVAL;
+
+ CRYP_WRITE_BIT(&device_data->base->cr,
+ (u32) p_protect_config->secure_access,
+ CRYP_CR_SECURE_MASK);
+ CRYP_PUT_BITS(&device_data->base->cr,
+ p_protect_config->privilege_access,
+ CRYP_CR_PRLG_POS,
+ CRYP_CR_PRLG_MASK);
+
+ return 0;
+}
+
+/**
+ * cryp_is_logic_busy - returns the busy status of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ */
+int cryp_is_logic_busy(struct cryp_device_data *device_data)
+{
+ return CRYP_TEST_BITS(&device_data->base->sr,
+ CRYP_SR_BUSY_MASK);
+}
+
+/**
+ * cryp_configure_for_dma - configures the CRYP IP for DMA operation
+ * @device_data: Pointer to the device data struct for base address.
+ * @dma_req: Specifies the DMA request type value.
+ */
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+ enum cryp_dma_req_type dma_req)
+{
+ CRYP_SET_BITS(&device_data->base->dmacr,
+ (u32) dma_req);
+}
+
+/**
+ * cryp_configure_key_values - configures the key values for CRYP operations
+ * @device_data: Pointer to the device data struct for base address.
+ * @key_reg_index: Key value index register
+ * @key_value: The key value struct
+ */
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+ enum cryp_key_reg_index key_reg_index,
+ struct cryp_key_value key_value)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+
+ switch (key_reg_index) {
+ case CRYP_KEY_REG_1:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_1_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_1_r);
+ break;
+ case CRYP_KEY_REG_2:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_2_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_2_r);
+ break;
+ case CRYP_KEY_REG_3:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_3_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_3_r);
+ break;
+ case CRYP_KEY_REG_4:
+ writel_relaxed(key_value.key_value_left,
+ &device_data->base->key_4_l);
+ writel_relaxed(key_value.key_value_right,
+ &device_data->base->key_4_r);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cryp_configure_init_vector - configures the initialization vector register
+ * @device_data: Pointer to the device data struct for base address.
+ * @init_vector_index: Specifies the index of the init vector.
+ * @init_vector_value: Specifies the value for the init vector.
+ */
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+ enum cryp_init_vector_index
+ init_vector_index,
+ struct cryp_init_vector_value
+ init_vector_value)
+{
+ while (cryp_is_logic_busy(device_data))
+ cpu_relax();
+
+ switch (init_vector_index) {
+ case CRYP_INIT_VECTOR_INDEX_0:
+ writel_relaxed(init_vector_value.init_value_left,
+ &device_data->base->init_vect_0_l);
+ writel_relaxed(init_vector_value.init_value_right,
+ &device_data->base->init_vect_0_r);
+ break;
+ case CRYP_INIT_VECTOR_INDEX_1:
+ writel_relaxed(init_vector_value.init_value_left,
+ &device_data->base->init_vect_1_l);
+ writel_relaxed(init_vector_value.init_value_right,
+ &device_data->base->init_vect_1_r);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cryp_save_device_context - Store hardware registers and
+ * other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ * @cryp_mode: Hardware access method (polling, interrupt or DMA)
+ */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx,
+ int cryp_mode)
+{
+ enum cryp_algo_mode algomode;
+ struct cryp_register *src_reg = device_data->base;
+ struct cryp_config *config =
+ (struct cryp_config *)device_data->current_ctx;
+
+ /*
+	 * Always start by disabling the hardware and wait for it to finish the
+ * ongoing calculations before trying to reprogram it.
+ */
+ cryp_activity(device_data, CRYP_CRYPEN_DISABLE);
+ cryp_wait_until_done(device_data);
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ cryp_configure_for_dma(device_data, CRYP_DMA_DISABLE_BOTH);
+
+ if (CRYP_TEST_BITS(&src_reg->sr, CRYP_SR_IFEM_MASK) == 0)
+ ctx->din = readl_relaxed(&src_reg->din);
+
+ ctx->cr = readl_relaxed(&src_reg->cr) & CRYP_CR_CONTEXT_SAVE_MASK;
+
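+	/* Fall through for all items in the switch statement: larger key
+	 * sizes also save the smaller key registers. DES is captured in the
+	 * default. */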
+ switch (config->keysize) {
+ case CRYP_KEY_SIZE_256:
+ ctx->key_4_l = readl_relaxed(&src_reg->key_4_l);
+ ctx->key_4_r = readl_relaxed(&src_reg->key_4_r);
+
+ case CRYP_KEY_SIZE_192:
+ ctx->key_3_l = readl_relaxed(&src_reg->key_3_l);
+ ctx->key_3_r = readl_relaxed(&src_reg->key_3_r);
+
+ case CRYP_KEY_SIZE_128:
+ ctx->key_2_l = readl_relaxed(&src_reg->key_2_l);
+ ctx->key_2_r = readl_relaxed(&src_reg->key_2_r);
+
+ default:
+ ctx->key_1_l = readl_relaxed(&src_reg->key_1_l);
+ ctx->key_1_r = readl_relaxed(&src_reg->key_1_r);
+ }
+
+ /* Save IV for CBC mode for both AES and DES. */
+ algomode = ((ctx->cr & CRYP_CR_ALGOMODE_MASK) >> CRYP_CR_ALGOMODE_POS);
+ if (algomode == CRYP_ALGO_TDES_CBC ||
+ algomode == CRYP_ALGO_DES_CBC ||
+ algomode == CRYP_ALGO_AES_CBC) {
+ ctx->init_vect_0_l = readl_relaxed(&src_reg->init_vect_0_l);
+ ctx->init_vect_0_r = readl_relaxed(&src_reg->init_vect_0_r);
+ ctx->init_vect_1_l = readl_relaxed(&src_reg->init_vect_1_l);
+ ctx->init_vect_1_r = readl_relaxed(&src_reg->init_vect_1_r);
+ }
+}
+
+/**
+ * cryp_restore_device_context - Restore hardware registers and
+ * other device context parameters
+ * @device_data: Pointer to the device data struct for base address.
+ * @ctx: Crypto device context
+ */
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx)
+{
+ struct cryp_register *reg = device_data->base;
+ struct cryp_config *config =
+ (struct cryp_config *)device_data->current_ctx;
+
+ /*
+ * Fall through for all items in switch statement. DES is captured in
+ * the default.
+ */
+ switch (config->keysize) {
+ case CRYP_KEY_SIZE_256:
+ writel_relaxed(ctx->key_4_l, &reg->key_4_l);
+ writel_relaxed(ctx->key_4_r, &reg->key_4_r);
+
+ case CRYP_KEY_SIZE_192:
+ writel_relaxed(ctx->key_3_l, &reg->key_3_l);
+ writel_relaxed(ctx->key_3_r, &reg->key_3_r);
+
+ case CRYP_KEY_SIZE_128:
+ writel_relaxed(ctx->key_2_l, &reg->key_2_l);
+ writel_relaxed(ctx->key_2_r, &reg->key_2_r);
+
+ default:
+ writel_relaxed(ctx->key_1_l, &reg->key_1_l);
+ writel_relaxed(ctx->key_1_r, &reg->key_1_r);
+ }
+
+ /* Restore IV for CBC mode for AES and DES. */
+ if (config->algomode == CRYP_ALGO_TDES_CBC ||
+ config->algomode == CRYP_ALGO_DES_CBC ||
+ config->algomode == CRYP_ALGO_AES_CBC) {
+ writel_relaxed(ctx->init_vect_0_l, &reg->init_vect_0_l);
+ writel_relaxed(ctx->init_vect_0_r, &reg->init_vect_0_r);
+ writel_relaxed(ctx->init_vect_1_l, &reg->init_vect_1_l);
+ writel_relaxed(ctx->init_vect_1_r, &reg->init_vect_1_r);
+ }
+}
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
new file mode 100644
index 000000000000..14cfd05b777a
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -0,0 +1,308 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_H_
+#define _CRYP_H_
+
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/klist.h>
+#include <linux/mutex.h>
+
+#define DEV_DBG_NAME "crypX crypX:"
+
+/* CRYP enable/disable */
+enum cryp_crypen {
+ CRYP_CRYPEN_DISABLE = 0,
+ CRYP_CRYPEN_ENABLE = 1
+};
+
+/* CRYP Start Computation enable/disable */
+enum cryp_start {
+ CRYP_START_DISABLE = 0,
+ CRYP_START_ENABLE = 1
+};
+
+/* CRYP Init Signal enable/disable */
+enum cryp_init {
+ CRYP_INIT_DISABLE = 0,
+ CRYP_INIT_ENABLE = 1
+};
+
+/* Cryp State enable/disable */
+enum cryp_state {
+ CRYP_STATE_DISABLE = 0,
+ CRYP_STATE_ENABLE = 1
+};
+
+/* Key preparation bit enable */
+enum cryp_key_prep {
+ KSE_DISABLED = 0,
+ KSE_ENABLED = 1
+};
+
+/* Key size for AES */
+#define CRYP_KEY_SIZE_128 (0)
+#define CRYP_KEY_SIZE_192 (1)
+#define CRYP_KEY_SIZE_256 (2)
+
+/* AES modes */
+enum cryp_algo_mode {
+ CRYP_ALGO_TDES_ECB,
+ CRYP_ALGO_TDES_CBC,
+ CRYP_ALGO_DES_ECB,
+ CRYP_ALGO_DES_CBC,
+ CRYP_ALGO_AES_ECB,
+ CRYP_ALGO_AES_CBC,
+ CRYP_ALGO_AES_CTR,
+ CRYP_ALGO_AES_XTS
+};
+
+/* Cryp Encryption or Decryption */
+enum cryp_algorithm_dir {
+ CRYP_ALGORITHM_ENCRYPT,
+ CRYP_ALGORITHM_DECRYPT
+};
+
+/* Hardware access method */
+enum cryp_mode {
+ CRYP_MODE_POLLING,
+ CRYP_MODE_INTERRUPT,
+ CRYP_MODE_DMA
+};
+
+/**
+ * struct cryp_config -
+ * @keysize: Key size for AES
+ * @algomode: AES modes
+ * @algodir: Cryp Encryption or Decryption
+ *
+ * CRYP configuration structure to be passed to set configuration
+ */
+struct cryp_config {
+ int keysize;
+ enum cryp_algo_mode algomode;
+ enum cryp_algorithm_dir algodir;
+};
+
+/**
+ * struct cryp_protection_config -
+ * @privilege_access: Privileged cryp state enable/disable
+ * @secure_access: Secure cryp state enable/disable
+ *
+ * Protection configuration structure for setting privilege access
+ */
+struct cryp_protection_config {
+ enum cryp_state privilege_access;
+ enum cryp_state secure_access;
+};
+
+/* Cryp status */
+enum cryp_status_id {
+ CRYP_STATUS_BUSY = 0x10,
+ CRYP_STATUS_OUTPUT_FIFO_FULL = 0x08,
+ CRYP_STATUS_OUTPUT_FIFO_NOT_EMPTY = 0x04,
+ CRYP_STATUS_INPUT_FIFO_NOT_FULL = 0x02,
+ CRYP_STATUS_INPUT_FIFO_EMPTY = 0x01
+};
+
+/* Cryp DMA interface */
+enum cryp_dma_req_type {
+ CRYP_DMA_DISABLE_BOTH,
+ CRYP_DMA_ENABLE_IN_DATA,
+ CRYP_DMA_ENABLE_OUT_DATA,
+ CRYP_DMA_ENABLE_BOTH_DIRECTIONS
+};
+
+enum cryp_dma_channel {
+ CRYP_DMA_RX = 0,
+ CRYP_DMA_TX
+};
+
+/* Key registers */
+enum cryp_key_reg_index {
+ CRYP_KEY_REG_1,
+ CRYP_KEY_REG_2,
+ CRYP_KEY_REG_3,
+ CRYP_KEY_REG_4
+};
+
+/* Key register left and right */
+struct cryp_key_value {
+ u32 key_value_left;
+ u32 key_value_right;
+};
+
+/* Cryp Initialization structure */
+enum cryp_init_vector_index {
+ CRYP_INIT_VECTOR_INDEX_0,
+ CRYP_INIT_VECTOR_INDEX_1
+};
+
+/**
+ * struct cryp_init_vector_value - init vector left and right halves
+ * @init_value_left: Left 32 bits of the init vector
+ * @init_value_right: Right 32 bits of the init vector
+ */
+struct cryp_init_vector_value {
+ u32 init_value_left;
+ u32 init_value_right;
+};
+
+/**
+ * struct cryp_device_context - structure for a cryp context.
+ * @cr: control register
+ * @dmacr: DMA control register
+ * @imsc: Interrupt mask set/clear register
+ * @key_1_l: Key 1l register
+ * @key_1_r: Key 1r register
+ * @key_2_l: Key 2l register
+ * @key_2_r: Key 2r register
+ * @key_3_l: Key 3l register
+ * @key_3_r: Key 3r register
+ * @key_4_l: Key 4l register
+ * @key_4_r: Key 4r register
+ * @init_vect_0_l: Initialization vector 0l register
+ * @init_vect_0_r: Initialization vector 0r register
+ * @init_vect_1_l: Initialization vector 1l register
+ * @init_vect_1_r: Initialization vector 1r register
+ * @din: Data in register
+ * @dout: Data out register
+ *
+ * CRYP power management specific structure.
+ */
+struct cryp_device_context {
+ u32 cr;
+ u32 dmacr;
+ u32 imsc;
+
+ u32 key_1_l;
+ u32 key_1_r;
+ u32 key_2_l;
+ u32 key_2_r;
+ u32 key_3_l;
+ u32 key_3_r;
+ u32 key_4_l;
+ u32 key_4_r;
+
+ u32 init_vect_0_l;
+ u32 init_vect_0_r;
+ u32 init_vect_1_l;
+ u32 init_vect_1_r;
+
+ u32 din;
+ u32 dout;
+};
+
+struct cryp_dma {
+ dma_cap_mask_t mask;
+ struct completion cryp_dma_complete;
+ struct dma_chan *chan_cryp2mem;
+ struct dma_chan *chan_mem2cryp;
+ struct stedma40_chan_cfg *cfg_cryp2mem;
+ struct stedma40_chan_cfg *cfg_mem2cryp;
+ int sg_src_len;
+ int sg_dst_len;
+ struct scatterlist *sg_src;
+ struct scatterlist *sg_dst;
+ int nents_src;
+ int nents_dst;
+};
+
+/**
+ * struct cryp_device_data - structure for a cryp device.
+ * @base: Pointer to the hardware base address.
+ * @dev: Pointer to the devices dev structure.
+ * @clk: Pointer to the device's clock control.
+ * @pwr_regulator: Pointer to the device's power control.
+ * @power_status: Current status of the power.
+ * @ctx_lock: Lock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @list_node: For inclusion into a klist.
+ * @dma: The dma structure holding channel configuration.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_spinlock: Spinlock for power_state.
+ * @restore_dev_ctx: TRUE = saved ctx, FALSE = no saved ctx.
+ */
+struct cryp_device_data {
+ struct cryp_register __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ struct regulator *pwr_regulator;
+ int power_status;
+ struct spinlock ctx_lock;
+ struct cryp_ctx *current_ctx;
+ struct klist_node list_node;
+ struct cryp_dma dma;
+ bool power_state;
+ struct spinlock power_state_spinlock;
+ bool restore_dev_ctx;
+};
+
+void cryp_wait_until_done(struct cryp_device_data *device_data);
+
+/* Initialization functions */
+
+int cryp_check(struct cryp_device_data *device_data);
+
+void cryp_activity(struct cryp_device_data *device_data,
+ enum cryp_crypen cryp_crypen);
+
+void cryp_flush_inoutfifo(struct cryp_device_data *device_data);
+
+int cryp_set_configuration(struct cryp_device_data *device_data,
+ struct cryp_config *cryp_config,
+ u32 *control_register);
+
+void cryp_configure_for_dma(struct cryp_device_data *device_data,
+ enum cryp_dma_req_type dma_req);
+
+int cryp_configure_key_values(struct cryp_device_data *device_data,
+ enum cryp_key_reg_index key_reg_index,
+ struct cryp_key_value key_value);
+
+int cryp_configure_init_vector(struct cryp_device_data *device_data,
+ enum cryp_init_vector_index
+ init_vector_index,
+ struct cryp_init_vector_value
+ init_vector_value);
+
+int cryp_configure_protection(struct cryp_device_data *device_data,
+ struct cryp_protection_config *p_protect_config);
+
+/* Power management functions */
+void cryp_save_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx,
+ int cryp_mode);
+
+void cryp_restore_device_context(struct cryp_device_data *device_data,
+ struct cryp_device_context *ctx);
+
+/* Data transfer and status bits. */
+int cryp_is_logic_busy(struct cryp_device_data *device_data);
+
+int cryp_get_status(struct cryp_device_data *device_data);
+
+/**
+ * cryp_write_indata - This routine writes 32 bit data into the data input
+ * register of the cryptography IP.
+ * @device_data: Pointer to the device data struct for base address.
+ * @write_data: Data to write.
+ */
+int cryp_write_indata(struct cryp_device_data *device_data, u32 write_data);
+
+/**
+ * cryp_read_outdata - This routine reads the data from the data output
+ * register of the CRYP logic
+ * @device_data: Pointer to the device data struct for base address.
+ * @read_data: Data read from the output FIFO.
+ */
+int cryp_read_outdata(struct cryp_device_data *device_data, u32 *read_data);
+
+#endif /* _CRYP_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
new file mode 100644
index 000000000000..7cac12793a4b
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -0,0 +1,1784 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/klist.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/semaphore.h>
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+
+#include <plat/ste_dma40.h>
+
+#include <mach/crypto-ux500.h>
+#include <mach/hardware.h>
+
+#include "cryp_p.h"
+#include "cryp.h"
+
+#define CRYP_MAX_KEY_SIZE 32
+#define BYTES_PER_WORD 4
+
+static int cryp_mode;
+static atomic_t session_id;
+
+static struct stedma40_chan_cfg *mem_to_engine;
+static struct stedma40_chan_cfg *engine_to_mem;
+
+/**
+ * struct cryp_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with number of devices.
+ */
+struct cryp_driver_data {
+ struct klist device_list;
+ struct semaphore device_allocation;
+};
+
+/**
+ * struct cryp_ctx - Crypto context
+ * @config: Crypto mode.
+ * @key[CRYP_MAX_KEY_SIZE]: Key.
+ * @keylen: Length of key.
+ * @iv: Pointer to initialization vector.
+ * @indata: Pointer to indata.
+ * @outdata: Pointer to outdata.
+ * @datalen: Length of indata.
+ * @outlen: Length of outdata.
+ * @blocksize: Size of blocks.
+ * @updated: Updated flag.
+ * @dev_ctx: Device dependent context.
+ * @device: Pointer to the device.
+ */
+struct cryp_ctx {
+ struct cryp_config config;
+ u8 key[CRYP_MAX_KEY_SIZE];
+ u32 keylen;
+ u8 *iv;
+ const u8 *indata;
+ u8 *outdata;
+ u32 datalen;
+ u32 outlen;
+ u32 blocksize;
+ u8 updated;
+ struct cryp_device_context dev_ctx;
+ struct cryp_device_data *device;
+ u32 session_id;
+};
+
+static struct cryp_driver_data driver_data;
+
+/**
+ * uint8p_to_uint32_be - 4*uint8 to uint32 big endian
+ * @in: Data to convert.
+ */
+static inline u32 uint8p_to_uint32_be(u8 *in)
+{
+ u32 *data = (u32 *)in;
+
+ return cpu_to_be32p(data);
+}
+
+/**
+ * swap_bits_in_byte - mirror the bits in a byte
+ * @b: the byte to be mirrored
+ *
+ * The bits are swapped in the following way:
+ * Byte b includes bits 0-7, nibble 1 (n1) includes bits 0-3 and
+ * nibble 2 (n2) bits 4-7.
+ *
+ * Nibble 1 (n1):
+ * (The "old" (moved) bit is replaced with a zero)
+ * 1. Move bits 6 and 7, 4 positions to the right.
+ * 2. Move bits 3 and 5, 2 positions to the right.
+ * 3. Move bits 1-4, 1 position to the right.
+ *
+ * Nibble 2 (n2):
+ * 1. Move bits 0 and 1, 4 positions to the left.
+ * 2. Move bits 2 and 4, 2 positions to the left.
+ * 3. Move bits 3-6, 1 position to the left.
+ *
+ * Combine the two nibbles to a complete and swapped byte.
+ */
+
+static inline u8 swap_bits_in_byte(u8 b)
+{
+#define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */
+#define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5,
+ right shift 2 */
+#define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4,
+ right shift 1 */
+#define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */
+#define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4,
+ left shift 2 */
+#define L_SHIFT_1_MASK 0x78 /* (After left shift 2) Bits 3-6,
+ left shift 1 */
+
+ u8 n1;
+ u8 n2;
+
+ /* Swap most significant nibble */
+ /* Right shift 4, bits 6 and 7 */
+ n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4));
+ /* Right shift 2, bits 3 and 5 */
+ n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2));
+ /* Right shift 1, bits 1-4 */
+ n1 = (n1 & R_SHIFT_1_MASK) >> 1;
+
+ /* Swap least significant nibble */
+ /* Left shift 4, bits 0 and 1 */
+ n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4));
+ /* Left shift 2, bits 2 and 4 */
+ n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2));
+ /* Left shift 1, bits 3-6 */
+ n2 = (n2 & L_SHIFT_1_MASK) << 1;
+
+ return n1 | n2;
+}
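For reference while reviewing swap_bits_in_byte() above: the mask-and-shift sequence mirrors the byte bit for bit, i.e. bit i of the input ends up at bit (7 - i) of the output. The following is a minimal userspace sketch (standard C, illustrative only, not part of this patch) of the naive reference, handy for cross-checking the mask-based version over all 256 inputs:

/* Userspace sketch: naive bit-mirroring reference for swap_bits_in_byte(). */
#include <stdint.h>
#include <stdio.h>

/* Bit i of the input ends up at bit (7 - i) of the output. */
static uint8_t mirror_byte_ref(uint8_t b)
{
	uint8_t out = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (b & (1u << i))
			out |= (uint8_t)(1u << (7 - i));
	return out;
}

int main(void)
{
	unsigned int b;

	for (b = 0; b <= 0xff; b++)
		printf("%02x -> %02x\n", b,
		       (unsigned int)mirror_byte_ref((uint8_t)b));
	return 0;
}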
+
+static inline void swap_words_in_key_and_bits_in_byte(const u8 *in,
+ u8 *out, u32 len)
+{
+ unsigned int i = 0;
+ int j;
+ int index = 0;
+
+ j = len - BYTES_PER_WORD;
+ while (j >= 0) {
+ for (i = 0; i < BYTES_PER_WORD; i++) {
+ index = len - j - BYTES_PER_WORD + i;
+ out[j + i] =
+ swap_bits_in_byte(in[index]);
+ }
+ j -= BYTES_PER_WORD;
+ }
+}
+
+static void add_session_id(struct cryp_ctx *ctx)
+{
+ /*
+ * We never want 0 to be a valid value, since this is the default value
+ * for the software context.
+ */
+ if (unlikely(atomic_inc_and_test(&session_id)))
+ atomic_inc(&session_id);
+
+ ctx->session_id = atomic_read(&session_id);
+}
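add_session_id() relies on the counter never handing out 0, since 0 is the default session_id of a freshly initialised context; atomic_inc_and_test() returns true exactly when the increment wrapped to zero, in which case the counter is bumped once more. A hedged userspace sketch of the same skip-zero rule with a plain (non-atomic) counter, not part of the patch:

/* Userspace sketch: a monotonically increasing session id that is never 0. */
#include <stdint.h>
#include <stdio.h>

static uint32_t session_counter;

static uint32_t next_session_id(void)
{
	if (++session_counter == 0)	/* wrapped around: skip 0 */
		++session_counter;
	return session_counter;
}

int main(void)
{
	session_counter = 0xfffffffeU;
	printf("%u\n", next_session_id());	/* 4294967295 */
	printf("%u\n", next_session_id());	/* 1, zero is skipped */
	return 0;
}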
+
+static irqreturn_t cryp_interrupt_handler(int irq, void *param)
+{
+ struct cryp_ctx *ctx;
+ int i;
+ struct cryp_device_data *device_data;
+
+ if (param == NULL) {
+ BUG_ON(!param);
+ return IRQ_HANDLED;
+ }
+
+ /* The device is coming from the one found in hw_crypt_noxts. */
+ device_data = (struct cryp_device_data *)param;
+
+ ctx = device_data->current_ctx;
+
+ if (ctx == NULL) {
+ BUG_ON(!ctx);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen,
+ cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO) ?
+ "out" : "in");
+
+ if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO)) {
+ if (ctx->outlen / ctx->blocksize > 0) {
+ for (i = 0; i < ctx->blocksize / 4; i++) {
+ *(ctx->outdata) = readl_relaxed(
+ &device_data->base->dout);
+ ctx->outdata += 4;
+ ctx->outlen -= 4;
+ }
+
+ if (ctx->outlen == 0) {
+ cryp_disable_irq_src(device_data,
+ CRYP_IRQ_SRC_OUTPUT_FIFO);
+ }
+ }
+ } else if (cryp_pending_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO)) {
+ if (ctx->datalen / ctx->blocksize > 0) {
+ for (i = 0 ; i < ctx->blocksize / 4; i++) {
+ writel_relaxed(ctx->indata,
+ &device_data->base->din);
+ ctx->indata += 4;
+ ctx->datalen -= 4;
+ }
+
+ if (ctx->datalen == 0)
+ cryp_disable_irq_src(device_data,
+ CRYP_IRQ_SRC_INPUT_FIFO);
+
+ if (ctx->config.algomode == CRYP_ALGO_AES_XTS) {
+ CRYP_PUT_BITS(&device_data->base->cr,
+ CRYP_START_ENABLE,
+ CRYP_CR_START_POS,
+ CRYP_CR_START_MASK);
+
+ cryp_wait_until_done(device_data);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mode_is_aes(enum cryp_algo_mode mode)
+{
+ return CRYP_ALGO_AES_ECB == mode ||
+ CRYP_ALGO_AES_CBC == mode ||
+ CRYP_ALGO_AES_CTR == mode ||
+ CRYP_ALGO_AES_XTS == mode;
+}
+
+static int cfg_iv(struct cryp_device_data *device_data, u32 left, u32 right,
+ enum cryp_init_vector_index index)
+{
+ struct cryp_init_vector_value vector_value;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ vector_value.init_value_left = left;
+ vector_value.init_value_right = right;
+
+ return cryp_configure_init_vector(device_data,
+ index,
+ vector_value);
+}
+
+static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx)
+{
+ int i;
+ int status = 0;
+ int num_of_regs = ctx->blocksize / 8;
+ u32 iv[AES_BLOCK_SIZE / 4];
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ /*
+ * Since we loop over num_of_regs we need a check in case someone
+ * provides an incorrect blocksize, which would force calling cfg_iv
+ * with an out-of-range index, which is an error.
+ */
+ if (num_of_regs > 2) {
+ dev_err(device_data->dev, "[%s] Incorrect blocksize %d",
+ __func__, ctx->blocksize);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ctx->blocksize / 4; i++)
+ iv[i] = uint8p_to_uint32_be(ctx->iv + i*4);
+
+ for (i = 0; i < num_of_regs; i++) {
+ status = cfg_iv(device_data, iv[i*2], iv[i*2+1],
+ (enum cryp_init_vector_index) i);
+ if (status != 0)
+ return status;
+ }
+ return status;
+}
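cfg_ivs() converts the byte-wise IV into big-endian 32-bit words and programs them as (left, right) register pairs: one pair for the 8-byte DES block, two pairs for the 16-byte AES block. Below is a small userspace sketch of that packing, assuming a 16-byte AES IV (standard C, illustrative only, not part of the patch):

/* Userspace sketch: pack a 16-byte IV into two (left, right) word pairs. */
#include <stdint.h>
#include <stdio.h>

static uint32_t be32_from_bytes(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
	const uint8_t iv[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
	};
	uint32_t words[4];
	int i;

	for (i = 0; i < 4; i++)
		words[i] = be32_from_bytes(iv + i * 4);

	/* Register pair 0 takes words 0/1, register pair 1 takes words 2/3. */
	for (i = 0; i < 2; i++)
		printf("IV%d: left=%08x right=%08x\n",
		       i, words[i * 2], words[i * 2 + 1]);
	return 0;
}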
+
+static int set_key(struct cryp_device_data *device_data,
+ u32 left_key,
+ u32 right_key,
+ enum cryp_key_reg_index index)
+{
+ struct cryp_key_value key_value;
+ int cryp_error;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ key_value.key_value_left = left_key;
+ key_value.key_value_right = right_key;
+
+ cryp_error = cryp_configure_key_values(device_data,
+ index,
+ key_value);
+ if (cryp_error != 0)
+ dev_err(device_data->dev, "[%s]: "
+ "cryp_configure_key_values() failed!", __func__);
+
+ return cryp_error;
+}
+
+static int cfg_keys(struct cryp_ctx *ctx)
+{
+ int i;
+ int num_of_regs = ctx->keylen / 8;
+ u32 swapped_key[CRYP_MAX_KEY_SIZE / 4];
+ int cryp_error = 0;
+
+ dev_dbg(ctx->device->dev, "[%s]", __func__);
+
+ if (mode_is_aes(ctx->config.algomode)) {
+ swap_words_in_key_and_bits_in_byte((u8 *)ctx->key,
+ (u8 *)swapped_key,
+ ctx->keylen);
+ } else {
+ for (i = 0; i < ctx->keylen / 4; i++)
+ swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4);
+ }
+
+ for (i = 0; i < num_of_regs; i++) {
+ cryp_error = set_key(ctx->device,
+ *(((u32 *)swapped_key)+i*2),
+ *(((u32 *)swapped_key)+i*2+1),
+ (enum cryp_key_reg_index) i);
+
+ if (cryp_error != 0) {
+ dev_err(ctx->device->dev, "[%s]: set_key() failed!",
+ __func__);
+ return cryp_error;
+ }
+ }
+ return cryp_error;
+}
+
+static int cryp_setup_context(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ u32 control_register = CRYP_CR_DEFAULT;
+
+ switch (cryp_mode) {
+ case CRYP_MODE_INTERRUPT:
+ writel_relaxed(CRYP_IMSC_DEFAULT, &device_data->base->imsc);
+ break;
+
+ case CRYP_MODE_DMA:
+ writel_relaxed(CRYP_DMACR_DEFAULT, &device_data->base->dmacr);
+ break;
+
+ default:
+ break;
+ }
+
+ if (ctx->updated == 0) {
+ cryp_flush_inoutfifo(device_data);
+ if (cfg_keys(ctx) != 0) {
+ dev_err(ctx->device->dev, "[%s]: cfg_keys failed!",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (ctx->iv &&
+ CRYP_ALGO_AES_ECB != ctx->config.algomode &&
+ CRYP_ALGO_DES_ECB != ctx->config.algomode &&
+ CRYP_ALGO_TDES_ECB != ctx->config.algomode) {
+ if (cfg_ivs(device_data, ctx) != 0)
+ return -EPERM;
+ }
+
+ cryp_set_configuration(device_data, &ctx->config,
+ &control_register);
+ add_session_id(ctx);
+ } else if (ctx->updated == 1 &&
+ ctx->session_id != atomic_read(&session_id)) {
+ cryp_flush_inoutfifo(device_data);
+ cryp_restore_device_context(device_data, &ctx->dev_ctx);
+
+ add_session_id(ctx);
+ control_register = ctx->dev_ctx.cr;
+ } else
+ control_register = ctx->dev_ctx.cr;
+
+ writel(control_register |
+ (CRYP_CRYPEN_ENABLE << CRYP_CR_CRYPEN_POS),
+ &device_data->base->cr);
+
+ return 0;
+}
+
+static int cryp_get_device_data(struct cryp_ctx *ctx,
+ struct cryp_device_data **device_data)
+{
+ int ret;
+ struct klist_iter device_iterator;
+ struct klist_node *device_node;
+ struct cryp_device_data *local_device_data = NULL;
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ /* Wait until a device is available */
+ ret = down_interruptible(&driver_data.device_allocation);
+ if (ret)
+ return ret; /* Interrupted */
+
+ /* Select a device */
+ klist_iter_init(&driver_data.device_list, &device_iterator);
+
+ device_node = klist_next(&device_iterator);
+ while (device_node) {
+ local_device_data = container_of(device_node,
+ struct cryp_device_data, list_node);
+ spin_lock(&local_device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (local_device_data->current_ctx) {
+ device_node = klist_next(&device_iterator);
+ } else {
+ local_device_data->current_ctx = ctx;
+ ctx->device = local_device_data;
+ spin_unlock(&local_device_data->ctx_lock);
+ break;
+ }
+ spin_unlock(&local_device_data->ctx_lock);
+ }
+ klist_iter_exit(&device_iterator);
+
+ if (!device_node) {
+ /*
+ * No free device found.
+ * Since we allocated a device with down_interruptible, this
+ * should not be able to happen.
+ * The number of available devices, which is tracked by
+ * device_allocation, is therefore decremented by not doing
+ * an up(device_allocation).
+ */
+ return -EBUSY;
+ }
+
+ *device_data = local_device_data;
+
+ return 0;
+}
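cryp_get_device_data() combines a counting semaphore (one count per probed device, released by up() in probe) with a per-device current_ctx pointer: down_interruptible() reserves "some" device, and the klist walk then claims a concrete free one. The following is a condensed userspace sketch of the same reserve-then-pick pattern using POSIX semaphores; the per-device spinlock is omitted and the semaphore is initialised directly to the device count, so this is an illustration of the shape only, not part of the patch (claim_device/release_device are made-up names):

/* Userspace sketch: reserve a slot, then pick a concrete free device. */
#include <semaphore.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_DEVICES 2

struct fake_device {
	void *current_ctx;		/* NULL = unallocated */
};

static struct fake_device devices[NUM_DEVICES];
static sem_t device_allocation;

static struct fake_device *claim_device(void *ctx)
{
	int i;

	sem_wait(&device_allocation);		/* reserve "some" device */
	for (i = 0; i < NUM_DEVICES; i++) {
		if (!devices[i].current_ctx) {	/* pick a concrete free one */
			devices[i].current_ctx = ctx;
			return &devices[i];
		}
	}
	return NULL;	/* cannot happen if the count matches the array */
}

static void release_device(struct fake_device *dev)
{
	dev->current_ctx = NULL;
	sem_post(&device_allocation);		/* give the slot back */
}

int main(void)
{
	int dummy_ctx;
	struct fake_device *dev;

	sem_init(&device_allocation, 0, NUM_DEVICES);
	dev = claim_device(&dummy_ctx);
	if (!dev)
		return 1;
	printf("claimed device %d\n", (int)(dev - devices));
	release_device(dev);
	return 0;
}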
+
+static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
+ struct device *dev)
+{
+ dma_cap_zero(device_data->dma.mask);
+ dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+ device_data->dma.cfg_mem2cryp = mem_to_engine;
+ device_data->dma.chan_mem2cryp =
+ dma_request_channel(device_data->dma.mask,
+ stedma40_filter,
+ device_data->dma.cfg_mem2cryp);
+
+ device_data->dma.cfg_cryp2mem = engine_to_mem;
+ device_data->dma.chan_cryp2mem =
+ dma_request_channel(device_data->dma.mask,
+ stedma40_filter,
+ device_data->dma.cfg_cryp2mem);
+
+ init_completion(&device_data->dma.cryp_dma_complete);
+}
+
+static void cryp_dma_out_callback(void *data)
+{
+ struct cryp_ctx *ctx = (struct cryp_ctx *) data;
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ complete(&ctx->device->dma.cryp_dma_complete);
+}
+
+static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
+ struct scatterlist *sg,
+ int len,
+ enum dma_data_direction direction)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *channel = NULL;
+ dma_cookie_t cookie;
+
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ if (unlikely(!IS_ALIGNED((u32)sg, 4))) {
+ dev_err(ctx->device->dev, "[%s]: Data in sg list isn't "
+ "aligned! Addr: 0x%08x", __func__, (u32)sg);
+ return -EFAULT;
+ }
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ channel = ctx->device->dma.chan_mem2cryp;
+ ctx->device->dma.sg_src = sg;
+ ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.nents_src,
+ direction);
+
+ if (!ctx->device->dma.sg_src_len) {
+ dev_dbg(ctx->device->dev,
+ "[%s]: Could not map the sg list (TO_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(TO_DEVICE)", __func__);
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len,
+ direction, DMA_CTRL_ACK, NULL);
+ break;
+
+ case DMA_FROM_DEVICE:
+ channel = ctx->device->dma.chan_cryp2mem;
+ ctx->device->dma.sg_dst = sg;
+ ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.nents_dst,
+ direction);
+
+ if (!ctx->device->dma.sg_dst_len) {
+ dev_dbg(ctx->device->dev,
+ "[%s]: Could not map the sg list (FROM_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(FROM_DEVICE)", __func__);
+
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len,
+ direction,
+ DMA_CTRL_ACK |
+ DMA_PREP_INTERRUPT, NULL);
+
+ desc->callback = cryp_dma_out_callback;
+ desc->callback_param = ctx;
+ break;
+
+ default:
+ dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction",
+ __func__);
+ return -EFAULT;
+ }
+
+ cookie = desc->tx_submit(desc);
+ dma_async_issue_pending(channel);
+
+ return 0;
+}
+
+static void cryp_dma_done(struct cryp_ctx *ctx)
+{
+ struct dma_chan *chan;
+
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ chan = ctx->device->dma.chan_mem2cryp;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+
+ chan = ctx->device->dma.chan_cryp2mem;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+}
+
+static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
+ int len)
+{
+ int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+ dev_dbg(ctx->device->dev, "[%s]: ", __func__);
+
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len)
+{
+ int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE);
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+static void cryp_polling_mode(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ int len = ctx->blocksize / BYTES_PER_WORD;
+ int remaining_length = ctx->datalen;
+ u32 *indata = (u32 *)ctx->indata;
+ u32 *outdata = (u32 *)ctx->outdata;
+
+ while (remaining_length > 0) {
+ writesl(&device_data->base->din, indata, len);
+ indata += len;
+ remaining_length -= (len * BYTES_PER_WORD);
+ cryp_wait_until_done(device_data);
+
+ readsl(&device_data->base->dout, outdata, len);
+ outdata += len;
+ cryp_wait_until_done(device_data);
+ }
+}
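cryp_polling_mode() processes the request one block at a time: it pushes blocksize / BYTES_PER_WORD words into DIN, waits for the engine, then pulls the same number of words from DOUT until datalen is consumed. A stripped-down userspace sketch of that loop shape follows, with a plain buffer copy standing in for the hardware FIFOs (fake_engine_process is a made-up stand-in; this is illustrative only and not part of the patch):

/* Userspace sketch: process data one block of (blocksize / 4) words at a time. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BYTES_PER_WORD	4
#define BLOCKSIZE	16	/* AES block size, for illustration */

/* Stand-in for the CRYP engine: here it just copies input to output. */
static void fake_engine_process(const uint32_t *in, uint32_t *out, int words)
{
	memcpy(out, in, (size_t)words * BYTES_PER_WORD);
}

int main(void)
{
	uint32_t indata[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };	/* 32 bytes */
	uint32_t outdata[8];
	const uint32_t *in = indata;
	uint32_t *out = outdata;
	int words_per_block = BLOCKSIZE / BYTES_PER_WORD;
	int remaining = (int)sizeof(indata);

	while (remaining > 0) {
		fake_engine_process(in, out, words_per_block);	/* one block */
		in += words_per_block;
		out += words_per_block;
		remaining -= words_per_block * BYTES_PER_WORD;
	}

	printf("processed %d bytes, first word out = %u\n",
	       (int)sizeof(indata), outdata[0]);
	return 0;
}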
+
+static int cryp_disable_power(struct device *dev,
+ struct cryp_device_data *device_data,
+ bool save_device_context)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_spinlock);
+ if (!device_data->power_state)
+ goto out;
+
+ spin_lock(&device_data->ctx_lock);
+ if (save_device_context && device_data->current_ctx) {
+ cryp_save_device_context(device_data,
+ &device_data->current_ctx->dev_ctx,
+ cryp_mode);
+ device_data->restore_dev_ctx = true;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ clk_disable(device_data->clk);
+ ret = regulator_disable(device_data->pwr_regulator);
+ if (ret)
+ dev_err(dev, "[%s]: "
+ "regulator_disable() failed!",
+ __func__);
+
+ device_data->power_state = false;
+
+out:
+ spin_unlock(&device_data->power_state_spinlock);
+
+ return ret;
+}
+
+static int cryp_enable_power(
+ struct device *dev,
+ struct cryp_device_data *device_data,
+ bool restore_device_context)
+{
+ int ret = 0;
+
+ dev_dbg(dev, "[%s]", __func__);
+
+ spin_lock(&device_data->power_state_spinlock);
+ if (!device_data->power_state) {
+ ret = regulator_enable(device_data->pwr_regulator);
+ if (ret) {
+ dev_err(dev, "[%s]: regulator_enable() failed!",
+ __func__);
+ goto out;
+ }
+
+ ret = clk_enable(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_enable() failed!",
+ __func__);
+ regulator_disable(device_data->pwr_regulator);
+ goto out;
+ }
+ device_data->power_state = true;
+ }
+
+ if (device_data->restore_dev_ctx) {
+ spin_lock(&device_data->ctx_lock);
+ if (restore_device_context && device_data->current_ctx) {
+ device_data->restore_dev_ctx = false;
+ cryp_restore_device_context(device_data,
+ &device_data->current_ctx->dev_ctx);
+ }
+ spin_unlock(&device_data->ctx_lock);
+ }
+out:
+ spin_unlock(&device_data->power_state_spinlock);
+
+ return ret;
+}
+
+static int hw_crypt_noxts(struct cryp_ctx *ctx,
+ struct cryp_device_data *device_data)
+{
+ int ret = 0;
+
+ const u8 *indata = ctx->indata;
+ u8 *outdata = ctx->outdata;
+ u32 datalen = ctx->datalen;
+ u32 outlen = datalen;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->outlen = ctx->datalen;
+
+ if (unlikely(!IS_ALIGNED((u32)indata, 4))) {
+ pr_debug(DEV_DBG_NAME " [%s]: Data isn't aligned! Addr: "
+ "0x%08x", __func__, (u32)indata);
+ return -EINVAL;
+ }
+
+ ret = cryp_setup_context(ctx, device_data);
+
+ if (ret)
+ goto out;
+
+ if (cryp_mode == CRYP_MODE_INTERRUPT) {
+ cryp_enable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO |
+ CRYP_IRQ_SRC_OUTPUT_FIFO);
+
+ /*
+ * ctx->outlen is decremented in the cryp_interrupt_handler
+ * function. We had to add cpu_relax() (barrier) to make sure
+ * that gcc didn't optimize away this variable.
+ */
+ while (ctx->outlen > 0)
+ cpu_relax();
+ } else if (cryp_mode == CRYP_MODE_POLLING ||
+ cryp_mode == CRYP_MODE_DMA) {
+ /*
+ * The reason for handling DMA in this if-case is that when running
+ * cryp_mode = CRYP_MODE_DMA we use separate DMA routines to handle
+ * cipher/plaintext larger than the blocksize, but for the normal
+ * CRYPTO_ALG_TYPE_CIPHER path we still use polling mode, since the
+ * overhead of setting up DMA eats up the benefit of using it.
+ */
+ cryp_polling_mode(ctx, device_data);
+ } else {
+ dev_err(ctx->device->dev, "[%s]: Invalid operation mode!",
+ __func__);
+ ret = -EPERM;
+ goto out;
+ }
+
+ cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+ ctx->updated = 1;
+
+out:
+ ctx->indata = indata;
+ ctx->outdata = outdata;
+ ctx->datalen = datalen;
+ ctx->outlen = outlen;
+
+ return ret;
+}
+
+static int get_nents(struct scatterlist *sg, int nbytes)
+{
+ int nents = 0;
+
+ while (nbytes > 0) {
+ nbytes -= sg->length;
+ sg = scatterwalk_sg_next(sg);
+ nents++;
+ }
+
+ return nents;
+}
+
+static int ablk_dma_crypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_device_data *device_data;
+
+ int bytes_written = 0;
+ int bytes_read = 0;
+ int ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->datalen = areq->nbytes;
+ ctx->outlen = areq->nbytes;
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ ret = cryp_setup_context(ctx, device_data);
+ if (ret)
+ goto out;
+
+ /* We have the device now, so store the nents in the dma struct. */
+ ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen);
+ ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen);
+
+ /* Enable DMA in- and output. */
+ cryp_configure_for_dma(device_data, CRYP_DMA_ENABLE_BOTH_DIRECTIONS);
+
+ bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen);
+ bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written);
+
+ wait_for_completion(&ctx->device->dma.cryp_dma_complete);
+ cryp_dma_done(ctx);
+
+ cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode);
+ ctx->updated = 1;
+
+out:
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ if (unlikely(bytes_written != bytes_read))
+ return -EPERM;
+
+ return 0;
+}
+
+static int ablk_crypt(struct ablkcipher_request *areq)
+{
+ struct ablkcipher_walk walk;
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct cryp_device_data *device_data;
+ unsigned long src_paddr;
+ unsigned long dst_paddr;
+ int ret;
+ int nbytes;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ret = cryp_get_device_data(ctx, &device_data);
+ if (ret)
+ goto out;
+
+ ablkcipher_walk_init(&walk, areq->dst, areq->src, areq->nbytes);
+ ret = ablkcipher_walk_phys(areq, &walk);
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME "[%s]: ablkcipher_walk_phys() failed!",
+ __func__);
+ goto out;
+ }
+
+ while ((nbytes = walk.nbytes) > 0) {
+ ctx->iv = walk.iv;
+ src_paddr = (page_to_phys(walk.src.page) + walk.src.offset);
+ ctx->indata = phys_to_virt(src_paddr);
+
+ dst_paddr = (page_to_phys(walk.dst.page) + walk.dst.offset);
+ ctx->outdata = phys_to_virt(dst_paddr);
+
+ ctx->datalen = nbytes - (nbytes % ctx->blocksize);
+
+ ret = hw_crypt_noxts(ctx, device_data);
+ if (ret)
+ goto out;
+
+ nbytes -= ctx->datalen;
+ ret = ablkcipher_walk_done(areq, &walk, nbytes);
+ if (ret)
+ goto out;
+ }
+ ablkcipher_walk_complete(&walk);
+
+out:
+ /* Release the device */
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx = NULL;
+ ctx->device = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * cryp_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+
+ return ret;
+}
+
+static int aes_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->config.keysize = CRYP_KEY_SIZE_128;
+ break;
+
+ case AES_KEYSIZE_192:
+ ctx->config.keysize = CRYP_KEY_SIZE_192;
+ break;
+
+ case AES_KEYSIZE_256:
+ ctx->config.keysize = CRYP_KEY_SIZE_256;
+ break;
+
+ default:
+ pr_err(DEV_DBG_NAME "[%s]: Unknown keylen!", __func__);
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+
+ return 0;
+}
+
+static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+ u32 tmp[DES_EXPKEY_WORDS];
+ int ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+ if (keylen != DES_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ ret = des_ekey(tmp, key);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 *flags = &cipher->base.crt_flags;
+ const u32 *K = (const u32 *)key;
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ int i, ret;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Checking key interdependency for weak key detection. */
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY",
+ __func__);
+ return -EINVAL;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
+ if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ pr_debug(DEV_DBG_NAME " [%s]: "
+ "CRYPTO_TFM_REQ_WEAK_KEY", __func__);
+ return -EINVAL;
+ }
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->updated = 0;
+ return 0;
+}
+
+static int cryp_blk_encrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT;
+
+ /* DMA does not work for DES due to a hw bug */
+ if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+static int cryp_blk_decrypt(struct ablkcipher_request *areq)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ ctx->config.algodir = CRYP_ALGORITHM_DECRYPT;
+
+ /* DMA does not work for DES due to a hw bug */
+ if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode))
+ return ablk_dma_crypt(areq);
+
+ /* For everything except DMA, we run the non DMA version. */
+ return ablk_crypt(areq);
+}
+
+struct cryp_algo_template {
+ enum cryp_algo_mode algomode;
+ struct crypto_alg crypto;
+};
+
+static int cryp_cra_init(struct crypto_tfm *tfm)
+{
+ struct cryp_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct cryp_algo_template *cryp_alg = container_of(alg,
+ struct cryp_algo_template,
+ crypto);
+
+ ctx->config.algomode = cryp_alg->algomode;
+ ctx->blocksize = crypto_tfm_alg_blocksize(tfm);
+
+ return 0;
+}
+
+static struct cryp_algo_template cryp_algs[] = {
+ {
+ .algomode = CRYP_ALGO_AES_ECB,
+ .crypto = {
+ .cra_name = "aes",
+ .cra_driver_name = "aes-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt
+ }
+ }
+ }
+ },
+ {
+ .algomode = CRYP_ALGO_AES_ECB,
+ .crypto = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ }
+ }
+ }
+ },
+ {
+ .algomode = CRYP_ALGO_AES_CBC,
+ .crypto = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ }
+ }
+ },
+ {
+ .algomode = CRYP_ALGO_AES_CTR,
+ .crypto = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ }
+ }
+ }
+ },
+ {
+ .algomode = CRYP_ALGO_DES_ECB,
+ .crypto = {
+ .cra_name = "des",
+ .cra_driver_name = "des-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt
+ }
+ }
+ }
+
+ },
+ {
+ .algomode = CRYP_ALGO_TDES_ECB,
+ .crypto = {
+ .cra_name = "des3_ede",
+ .cra_driver_name = "des3_ede-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt
+ }
+ }
+ }
+ },
+ {
+ .algomode = CRYP_ALGO_DES_ECB,
+ .crypto = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ }
+ }
+ }
+ },
+ {
+ .algomode = CRYP_ALGO_TDES_ECB,
+ .crypto = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3_ede-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ }
+ }
+ }
+ },
+ {
+ .algomode = CRYP_ALGO_DES_CBC,
+ .crypto = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ }
+ }
+ }
+ },
+ {
+ .algomode = CRYP_ALGO_TDES_CBC,
+ .crypto = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3_ede-ux500",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct cryp_ctx),
+ .cra_alignmask = 3,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = cryp_cra_init,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ablkcipher_setkey,
+ .encrypt = cryp_blk_encrypt,
+ .decrypt = cryp_blk_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ }
+ }
+ }
+ }
+};
+
+/**
+ * cryp_algs_register_all - register all supported algorithm variants
+ */
+static int cryp_algs_register_all(void)
+{
+ int ret;
+ int i;
+ int count;
+
+ pr_debug("[%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(cryp_algs); i++) {
+ ret = crypto_register_alg(&cryp_algs[i].crypto);
+ if (ret) {
+ count = i;
+ pr_err("[%s] alg registration failed",
+ cryp_algs[i].crypto.cra_driver_name);
+ goto unreg;
+ }
+ }
+ return 0;
+unreg:
+ for (i = 0; i < count; i++)
+ crypto_unregister_alg(&cryp_algs[i].crypto);
+ return ret;
+}
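cryp_algs_register_all() unwinds a partial failure by unregistering only the entries registered so far. A tiny userspace sketch of the same register-all/unwind-on-failure pattern, using stand-in register/unregister functions (fake_register and fake_unregister are hypothetical; illustrative only, not part of the patch):

/* Userspace sketch: register everything, unwind what succeeded on failure. */
#include <stdio.h>

#define N_ITEMS 4

/* Stand-in register function: pretend item 2 fails. */
static int fake_register(int i)
{
	return (i == 2) ? -1 : 0;
}

static void fake_unregister(int i)
{
	printf("unregister %d\n", i);
}

static int register_all(void)
{
	int i, ret;

	for (i = 0; i < N_ITEMS; i++) {
		ret = fake_register(i);
		if (ret) {
			/* Undo only what succeeded: items 0 .. i-1. */
			while (--i >= 0)
				fake_unregister(i);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	printf("register_all: %d\n", register_all());
	return 0;
}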
+
+/**
+ * cryp_algs_unregister_all - unregister all supported algorithm variants
+ */
+static void cryp_algs_unregister_all(void)
+{
+ int i;
+
+ pr_debug(DEV_DBG_NAME " [%s]", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(cryp_algs); i++)
+ crypto_unregister_alg(&cryp_algs[i].crypto);
+}
+
+static int ux500_cryp_probe(struct platform_device *pdev)
+{
+ int ret;
+ int cryp_error = 0;
+ struct resource *res = NULL;
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+ struct cryp_protection_config prot = {
+ .privilege_access = CRYP_STATE_ENABLE
+ };
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "[%s]", __func__);
+ device_data = kzalloc(sizeof(struct cryp_device_data), GFP_ATOMIC);
+ if (!device_data) {
+ dev_err(dev, "[%s]: kzalloc() failed!", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ device_data->dev = dev;
+ device_data->current_ctx = NULL;
+
+ /* Grab the DMA configuration from platform data. */
+ mem_to_engine = &((struct cryp_platform_data *)
+ dev->platform_data)->mem_to_engine;
+ engine_to_mem = &((struct cryp_platform_data *)
+ dev->platform_data)->engine_to_mem;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "[%s]: platform_get_resource() failed",
+ __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_err(dev, "[%s]: request_mem_region() failed",
+ __func__);
+ ret = -EBUSY;
+ goto out_kfree;
+ }
+
+ device_data->base = ioremap(res->start, resource_size(res));
+ if (!device_data->base) {
+ dev_err(dev, "[%s]: ioremap failed!", __func__);
+ ret = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ spin_lock_init(&device_data->ctx_lock);
+ spin_lock_init(&device_data->power_state_spinlock);
+
+ /* Enable power for CRYP hardware block */
+ device_data->pwr_regulator = regulator_get(&pdev->dev, "v-ape");
+ if (IS_ERR(device_data->pwr_regulator)) {
+ dev_err(dev, "[%s]: could not get cryp regulator", __func__);
+ ret = PTR_ERR(device_data->pwr_regulator);
+ device_data->pwr_regulator = NULL;
+ goto out_unmap;
+ }
+
+ /* Enable the clk for CRYP hardware block */
+ device_data->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(device_data->clk)) {
+ dev_err(dev, "[%s]: clk_get() failed!", __func__);
+ ret = PTR_ERR(device_data->clk);
+ goto out_regulator;
+ }
+
+ /* Enable device power (and clock) */
+ ret = cryp_enable_power(device_data->dev, device_data, false);
+ if (ret) {
+ dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
+ goto out_clk;
+ }
+
+ cryp_error = cryp_check(device_data);
+ if (cryp_error != 0) {
+ dev_err(dev, "[%s]: cryp_check() failed!", __func__);
+ ret = -EINVAL;
+ goto out_power;
+ }
+
+ cryp_error = cryp_configure_protection(device_data, &prot);
+ if (cryp_error != 0) {
+ dev_err(dev, "[%s]: cryp_configure_protection() failed!",
+ __func__);
+ ret = -EINVAL;
+ goto out_power;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq) {
+ dev_err(dev, "[%s]: IORESOURCE_IRQ unavailable",
+ __func__);
+ ret = -ENODEV;
+ goto out_power;
+ }
+
+ ret = request_irq(res_irq->start,
+ cryp_interrupt_handler,
+ 0,
+ "cryp1",
+ device_data);
+ if (ret) {
+ dev_err(dev, "[%s]: Unable to request IRQ", __func__);
+ goto out_power;
+ }
+
+ if (cryp_mode == CRYP_MODE_DMA)
+ cryp_dma_setup_channel(device_data, dev);
+
+ platform_set_drvdata(pdev, device_data);
+
+ /* Put the new device into the device list... */
+ klist_add_tail(&device_data->list_node, &driver_data.device_list);
+
+ /* ... and signal that a new device is available. */
+ up(&driver_data.device_allocation);
+
+ atomic_set(&session_id, 1);
+
+ ret = cryp_algs_register_all();
+ if (ret) {
+ dev_err(dev, "[%s]: cryp_algs_register_all() failed!",
+ __func__);
+ goto out_power;
+ }
+
+ return 0;
+
+out_power:
+ cryp_disable_power(device_data->dev, device_data, false);
+
+out_clk:
+ clk_put(device_data->clk);
+
+out_regulator:
+ regulator_put(device_data->pwr_regulator);
+
+out_unmap:
+ iounmap(device_data->base);
+
+out_free_mem:
+ release_mem_region(res->start, resource_size(res));
+
+out_kfree:
+ kfree(device_data);
+out:
+ return ret;
+}
+
+static int ux500_cryp_remove(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Try to decrease the number of available devices. */
+ if (down_trylock(&driver_data.device_allocation))
+ return -EBUSY;
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (device_data->current_ctx) {
+ /* The device is busy */
+ spin_unlock(&device_data->ctx_lock);
+ /* Return the device to the pool. */
+ up(&driver_data.device_allocation);
+ return -EBUSY;
+ }
+
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ cryp_algs_unregister_all();
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else {
+ disable_irq(res_irq->start);
+ free_irq(res_irq->start, device_data);
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+ __func__);
+
+ clk_put(device_data->clk);
+ regulator_put(device_data->pwr_regulator);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, res->end - res->start + 1);
+
+ kfree(device_data);
+
+ return 0;
+}
+
+static void ux500_cryp_shutdown(struct platform_device *pdev)
+{
+ struct resource *res_irq = NULL;
+ struct cryp_device_data *device_data;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return;
+ }
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (!device_data->current_ctx) {
+ if (down_trylock(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: Cryp still in use! "
+ "Shutting down anyway...", __func__);
+ /*
+ * (Allocate the device)
+ * Need to set this to a non-NULL (dummy) value
+ * to avoid it being used during context switching.
+ */
+ device_data->current_ctx++;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ cryp_algs_unregister_all();
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else {
+ disable_irq(res_irq->start);
+ free_irq(res_irq->start, device_data);
+ }
+
+ if (cryp_disable_power(&pdev->dev, device_data, false))
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
+ __func__);
+
+}
+
+static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct cryp_device_data *device_data;
+ struct resource *res_irq;
+ struct cryp_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ /* Handle state? */
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
+ __func__);
+ else
+ disable_irq(res_irq->start);
+
+ spin_lock(&device_data->ctx_lock);
+ if (!device_data->current_ctx)
+ device_data->current_ctx++;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (device_data->current_ctx == ++temp_ctx) {
+ if (down_interruptible(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
+ "failed", __func__);
+ ret = cryp_disable_power(&pdev->dev, device_data, false);
+
+ } else
+ ret = cryp_disable_power(&pdev->dev, device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__);
+
+ return ret;
+}
+
+static int ux500_cryp_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct cryp_device_data *device_data;
+ struct resource *res_irq;
+ struct cryp_ctx *temp_ctx = NULL;
+
+ dev_dbg(&pdev->dev, "[%s]", __func__);
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (device_data->current_ctx == ++temp_ctx)
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+
+ if (!device_data->current_ctx)
+ up(&driver_data.device_allocation);
+ else
+ ret = cryp_enable_power(&pdev->dev, device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!",
+ __func__);
+ else {
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res_irq)
+ enable_irq(res_irq->start);
+ }
+
+ return ret;
+}
+
+static struct platform_driver cryp_driver = {
+ .probe = ux500_cryp_probe,
+ .remove = ux500_cryp_remove,
+ .shutdown = ux500_cryp_shutdown,
+ .suspend = ux500_cryp_suspend,
+ .resume = ux500_cryp_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cryp1"
+ }
+};
+
+static int __init ux500_cryp_mod_init(void)
+{
+ pr_debug("[%s] is called!", __func__);
+ klist_init(&driver_data.device_list, NULL, NULL);
+ /* Initialize the semaphore to 0 devices (locked state) */
+ sema_init(&driver_data.device_allocation, 0);
+ return platform_driver_register(&cryp_driver);
+}
+
+static void __exit ux500_cryp_mod_fini(void)
+{
+ pr_debug("[%s] is called!", __func__);
+ platform_driver_unregister(&cryp_driver);
+ return;
+}
+
+module_init(ux500_cryp_mod_init);
+module_exit(ux500_cryp_mod_fini);
+
+module_param(cryp_mode, int, 0);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
+MODULE_ALIAS("aes-all");
+MODULE_ALIAS("des-all");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
new file mode 100644
index 000000000000..08d291cdbe6d
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+
+#include "cryp.h"
+#include "cryp_p.h"
+#include "cryp_irq.h"
+#include "cryp_irqp.h"
+
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ u32 i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ i = readl_relaxed(&device_data->base->imsc);
+ i = i | irq_src;
+ writel_relaxed(i, &device_data->base->imsc);
+}
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ u32 i;
+
+ dev_dbg(device_data->dev, "[%s]", __func__);
+
+ i = readl_relaxed(&device_data->base->imsc);
+ i = i & ~irq_src;
+ writel_relaxed(i, &device_data->base->imsc);
+}
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src)
+{
+ return (readl_relaxed(&device_data->base->mis) & irq_src) > 0;
+}
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
new file mode 100644
index 000000000000..5a7837f1b8f9
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_IRQ_H_
+#define _CRYP_IRQ_H_
+
+#include "cryp.h"
+
+enum cryp_irq_src_id {
+ CRYP_IRQ_SRC_INPUT_FIFO = 0x1,
+ CRYP_IRQ_SRC_OUTPUT_FIFO = 0x2,
+ CRYP_IRQ_SRC_ALL = 0x3
+};
+
+/**
+ * M0 Functions
+ */
+void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src);
+
+#endif /* _CRYP_IRQ_H_ */
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
new file mode 100644
index 000000000000..8b339cc34bf8
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __CRYP_IRQP_H_
+#define __CRYP_IRQP_H_
+
+#include "cryp_irq.h"
+
+/**
+ *
+ * CRYP Registers - Offset mapping
+ * +-----------------+
+ * 00h | CRYP_CR | Configuration register
+ * +-----------------+
+ * 04h | CRYP_SR | Status register
+ * +-----------------+
+ * 08h | CRYP_DIN | Data In register
+ * +-----------------+
+ * 0ch | CRYP_DOUT | Data out register
+ * +-----------------+
+ * 10h | CRYP_DMACR | DMA control register
+ * +-----------------+
+ * 14h | CRYP_IMSC | IMSC
+ * +-----------------+
+ * 18h | CRYP_RIS | Raw interrupt status
+ * +-----------------+
+ * 1ch | CRYP_MIS | Masked interrupt status.
+ * +-----------------+
+ * Key registers
+ * IVR registers
+ * Peripheral
+ * Cell IDs
+ *
+ * Refer to the data structure for the rest of the register map
+ */
+
+/**
+ * struct cryp_register
+ * @cr - Configuration register
+ * @status - Status register
+ * @din - Data input register
+ * @din_size - Data input size register
+ * @dout - Data output register
+ * @dout_size - Data output size register
+ * @dmacr - Dma control register
+ * @imsc - Interrupt mask set/clear register
+ * @ris - Raw interrupt status
+ * @mis - Masked interrupt status register
+ * @key_1_l - Key register 1 L
+ * @key_1_r - Key register 1 R
+ * @key_2_l - Key register 2 L
+ * @key_2_r - Key register 2 R
+ * @key_3_l - Key register 3 L
+ * @key_3_r - Key register 3 R
+ * @key_4_l - Key register 4 L
+ * @key_4_r - Key register 4 R
+ * @init_vect_0_l - init vector 0 L
+ * @init_vect_0_r - init vector 0 R
+ * @init_vect_1_l - init vector 1 L
+ * @init_vect_1_r - init vector 1 R
+ * @cryp_unused1 - unused registers
+ * @itcr - Integration test control register
+ * @itip - Integration test input register
+ * @itop - Integration test output register
+ * @cryp_unused2 - unused registers
+ * @periphId0 - FE0 CRYP Peripheral Identification Register
+ * @periphId1 - FE4
+ * @periphId2 - FE8
+ * @periphId3 - FEC
+ * @pcellId0 - FF0 CRYP PCell Identification Register
+ * @pcellId1 - FF4
+ * @pcellId2 - FF8
+ * @pcellId3 - FFC
+ */
+struct cryp_register {
+ u32 cr; /* Configuration register */
+ u32 sr; /* Status register */
+ u32 din; /* Data input register */
+ u32 din_size; /* Data input size register */
+ u32 dout; /* Data output register */
+ u32 dout_size; /* Data output size register */
+ u32 dmacr; /* Dma control register */
+ u32 imsc; /* Interrupt mask set/clear register */
+ u32 ris; /* Raw interrupt status */
+ u32 mis; /* Masked interrupt status register */
+
+ u32 key_1_l; /*Key register 1 L */
+ u32 key_1_r; /*Key register 1 R */
+ u32 key_2_l; /*Key register 2 L */
+ u32 key_2_r; /*Key register 2 R */
+ u32 key_3_l; /*Key register 3 L */
+ u32 key_3_r; /*Key register 3 R */
+ u32 key_4_l; /*Key register 4 L */
+ u32 key_4_r; /*Key register 4 R */
+
+ u32 init_vect_0_l; /*init vector 0 L */
+ u32 init_vect_0_r; /*init vector 0 R */
+ u32 init_vect_1_l; /*init vector 1 L */
+ u32 init_vect_1_r; /*init vector 1 R */
+
+ u32 cryp_unused1[(0x80 - 0x58) / sizeof(u32)]; /* unused registers */
+ u32 itcr; /*Integration test control register */
+ u32 itip; /*Integration test input register */
+ u32 itop; /*Integration test output register */
+ u32 cryp_unused2[(0xFE0 - 0x8C) / sizeof(u32)]; /* unused registers */
+
+ u32 periphId0; /* FE0 CRYP Peripheral Identification Register */
+ u32 periphId1; /* FE4 */
+ u32 periphId2; /* FE8 */
+ u32 periphId3; /* FEC */
+
+ u32 pcellId0; /* FF0 CRYP PCell Identification Register */
+ u32 pcellId1; /* FF4 */
+ u32 pcellId2; /* FF8 */
+ u32 pcellId3; /* FFC */
+};
+
+#endif
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
new file mode 100644
index 000000000000..6dcffe15c2bc
--- /dev/null
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -0,0 +1,123 @@
+/**
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
+ * Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _CRYP_P_H_
+#define _CRYP_P_H_
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include "cryp.h"
+#include "cryp_irqp.h"
+
+/**
+ * Generic Macros
+ */
+#define CRYP_SET_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define CRYP_WRITE_BIT(reg_name, val, mask) \
+ writel_relaxed(((readl_relaxed(reg_name) & ~(mask)) |\
+ ((val) & (mask))), reg_name)
+
+#define CRYP_TEST_BITS(reg_name, val) \
+ (readl_relaxed(reg_name) & (val))
+
+#define CRYP_PUT_BITS(reg, val, shift, mask) \
+ writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
+ (((u32)val << shift) & (mask))), reg)
+
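+/*
+ * Illustrative example (not part of the driver): CRYP_PUT_BITS does a
+ * read-modify-write so that only the selected field of a register changes.
+ * Writing a value into the DATATYPE field of the configuration register,
+ * assuming a valid device_data pointer and a placeholder value datatype,
+ * would look like:
+ *
+ *	CRYP_PUT_BITS(&device_data->base->cr, datatype,
+ *		      CRYP_CR_DATATYPE_POS, CRYP_CR_DATATYPE_MASK);
+ */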
+/**
+ * CRYP specific Macros
+ */
+#define CRYP_PERIPHERAL_ID0 0xE3
+#define CRYP_PERIPHERAL_ID1 0x05
+
+#define CRYP_PERIPHERAL_ID2_DB8500 0x28
+#define CRYP_PERIPHERAL_ID3 0x00
+
+#define CRYP_PCELL_ID0 0x0D
+#define CRYP_PCELL_ID1 0xF0
+#define CRYP_PCELL_ID2 0x05
+#define CRYP_PCELL_ID3 0xB1
+
+/**
+ * CRYP register default values
+ */
+#define MAX_DEVICE_SUPPORT 2
+
+/* Priv set, keyrden set and datatype 8bits swapped set as default. */
+#define CRYP_CR_DEFAULT 0x0482
+#define CRYP_DMACR_DEFAULT 0x0
+#define CRYP_IMSC_DEFAULT 0x0
+#define CRYP_DIN_DEFAULT 0x0
+#define CRYP_DOUT_DEFAULT 0x0
+#define CRYP_KEY_DEFAULT 0x0
+#define CRYP_INIT_VECT_DEFAULT 0x0
+
+/**
+ * CRYP Control register specific mask
+ */
+#define CRYP_CR_SECURE_MASK BIT(0)
+#define CRYP_CR_PRLG_MASK BIT(1)
+#define CRYP_CR_ALGODIR_MASK BIT(2)
+#define CRYP_CR_ALGOMODE_MASK (BIT(5) | BIT(4) | BIT(3))
+#define CRYP_CR_DATATYPE_MASK (BIT(7) | BIT(6))
+#define CRYP_CR_KEYSIZE_MASK (BIT(9) | BIT(8))
+#define CRYP_CR_KEYRDEN_MASK BIT(10)
+#define CRYP_CR_KSE_MASK BIT(11)
+#define CRYP_CR_START_MASK BIT(12)
+#define CRYP_CR_INIT_MASK BIT(13)
+#define CRYP_CR_FFLUSH_MASK BIT(14)
+#define CRYP_CR_CRYPEN_MASK BIT(15)
+#define CRYP_CR_CONTEXT_SAVE_MASK (CRYP_CR_SECURE_MASK |\
+ CRYP_CR_PRLG_MASK |\
+ CRYP_CR_ALGODIR_MASK |\
+ CRYP_CR_ALGOMODE_MASK |\
+ CRYP_CR_DATATYPE_MASK |\
+ CRYP_CR_KEYSIZE_MASK |\
+ CRYP_CR_KEYRDEN_MASK |\
+ CRYP_CR_DATATYPE_MASK)
+
+
+#define CRYP_SR_INFIFO_READY_MASK (BIT(0) | BIT(1))
+#define CRYP_SR_IFEM_MASK BIT(0)
+#define CRYP_SR_BUSY_MASK BIT(4)
+
+/**
+ * Bit position used while setting bits in register
+ */
+#define CRYP_CR_PRLG_POS 1
+#define CRYP_CR_ALGODIR_POS 2
+#define CRYP_CR_ALGOMODE_POS 3
+#define CRYP_CR_DATATYPE_POS 6
+#define CRYP_CR_KEYSIZE_POS 8
+#define CRYP_CR_KEYRDEN_POS 10
+#define CRYP_CR_KSE_POS 11
+#define CRYP_CR_START_POS 12
+#define CRYP_CR_INIT_POS 13
+#define CRYP_CR_CRYPEN_POS 15
+
+#define CRYP_SR_BUSY_POS 4
+
+/**
+ * CRYP PCRs------PC_NAND control register
+ * BIT_MASK
+ */
+#define CRYP_DMA_REQ_MASK (BIT(1) | BIT(0))
+#define CRYP_DMA_REQ_MASK_POS 0
+
+
+struct cryp_system_context {
+ /* CRYP Register structure */
+ struct cryp_register *p_cryp_reg[MAX_DEVICE_SUPPORT];
+};
+
+#endif
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
new file mode 100644
index 000000000000..b2f90d9bac72
--- /dev/null
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -0,0 +1,11 @@
+#
+# Copyright (C) ST-Ericsson SA 2010
+# Author: Shujuan Chen (shujuan.chen@stericsson.com)
+# License terms: GNU General Public License (GPL) version 2
+#
+ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
+CFLAGS_hash_core.o := -DDEBUG -O0
+endif
+
+obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
+ux500_hash-objs := hash_core.o
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
new file mode 100644
index 000000000000..cd9351cb24df
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen (shujuan.chen@stericsson.com)
+ * Author: Joakim Bech (joakim.xx.bech@stericsson.com)
+ * Author: Berne Hebark (berne.hebark@stericsson.com))
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _HASH_ALG_H
+#define _HASH_ALG_H
+
+#include <linux/bitops.h>
+
+#define HASH_BLOCK_SIZE 64
+#define HASH_DMA_ALIGN_SIZE 4
+#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024
+#define HASH_BYTES_PER_WORD 4
+
+/* Maximum value of the length's high word */
+#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL
+
+/* Power on Reset values HASH registers */
+#define HASH_RESET_CR_VALUE 0x0
+#define HASH_RESET_STR_VALUE 0x0
+
+/* Number of context swap registers */
+#define HASH_CSR_COUNT 52
+
+#define HASH_RESET_CSRX_REG_VALUE 0x0
+#define HASH_RESET_CSFULL_REG_VALUE 0x0
+#define HASH_RESET_CSDATAIN_REG_VALUE 0x0
+
+#define HASH_RESET_INDEX_VAL 0x0
+#define HASH_RESET_BIT_INDEX_VAL 0x0
+#define HASH_RESET_BUFFER_VAL 0x0
+#define HASH_RESET_LEN_HIGH_VAL 0x0
+#define HASH_RESET_LEN_LOW_VAL 0x0
+
+/* Control register bitfields */
+#define HASH_CR_RESUME_MASK 0x11FCF
+
+#define HASH_CR_SWITCHON_POS 31
+#define HASH_CR_SWITCHON_MASK BIT(31)
+
+#define HASH_CR_EMPTYMSG_POS 20
+#define HASH_CR_EMPTYMSG_MASK BIT(20)
+
+#define HASH_CR_DINF_POS 12
+#define HASH_CR_DINF_MASK BIT(12)
+
+#define HASH_CR_NBW_POS 8
+#define HASH_CR_NBW_MASK 0x00000F00UL
+
+#define HASH_CR_LKEY_POS 16
+#define HASH_CR_LKEY_MASK BIT(16)
+
+#define HASH_CR_ALGO_POS 7
+#define HASH_CR_ALGO_MASK BIT(7)
+
+#define HASH_CR_MODE_POS 6
+#define HASH_CR_MODE_MASK BIT(6)
+
+#define HASH_CR_DATAFORM_POS 4
+#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5))
+
+#define HASH_CR_DMAE_POS 3
+#define HASH_CR_DMAE_MASK BIT(3)
+
+#define HASH_CR_INIT_POS 2
+#define HASH_CR_INIT_MASK BIT(2)
+
+#define HASH_CR_PRIVN_POS 1
+#define HASH_CR_PRIVN_MASK BIT(1)
+
+#define HASH_CR_SECN_POS 0
+#define HASH_CR_SECN_MASK BIT(0)
+
+/* Start register bitfields */
+#define HASH_STR_DCAL_POS 8
+#define HASH_STR_DCAL_MASK BIT(8)
+#define HASH_STR_DEFAULT 0x0
+
+#define HASH_STR_NBLW_POS 0
+#define HASH_STR_NBLW_MASK 0x0000001FUL
+
+#define HASH_NBLW_MAX_VAL 0x1F
+
+/* PrimeCell IDs */
+#define HASH_P_ID0 0xE0
+#define HASH_P_ID1 0x05
+#define HASH_P_ID2 0x38
+#define HASH_P_ID3 0x00
+#define HASH_CELL_ID0 0x0D
+#define HASH_CELL_ID1 0xF0
+#define HASH_CELL_ID2 0x05
+#define HASH_CELL_ID3 0xB1
+
+#define HASH_SET_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
+
+#define HASH_CLEAR_BITS(reg_name, mask) \
+ writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name)
+
+#define HASH_PUT_BITS(reg, val, shift, mask) \
+ writel_relaxed(((readl(reg) & ~(mask)) | \
+ (((u32)val << shift) & (mask))), reg)
+
+#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len))
+
+#define HASH_INITIALIZE \
+ HASH_PUT_BITS( \
+ &device_data->base->cr, \
+ 0x01, HASH_CR_INIT_POS, \
+ HASH_CR_INIT_MASK)
+
+#define HASH_SET_DATA_FORMAT(data_format) \
+ HASH_PUT_BITS( \
+ &device_data->base->cr, \
+ (u32) (data_format), HASH_CR_DATAFORM_POS, \
+ HASH_CR_DATAFORM_MASK)
+#define HASH_SET_NBLW(val) \
+ HASH_PUT_BITS( \
+ &device_data->base->str, \
+ (u32) (val), HASH_STR_NBLW_POS, \
+ HASH_STR_NBLW_MASK)
+#define HASH_SET_DCAL \
+ HASH_PUT_BITS( \
+ &device_data->base->str, \
+ 0x01, HASH_STR_DCAL_POS, \
+ HASH_STR_DCAL_MASK)
+
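+/*
+ * Simplified usage sketch (illustrative only, see hash_messagepad() in
+ * hash_core.c for the exact sequence): the macros above are combined when
+ * feeding the last, partial word of a message to the hardware. last_word and
+ * index_bytes are placeholder names:
+ *
+ *	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+ *	HASH_SET_DIN(&last_word, 1);
+ *	HASH_SET_NBLW(index_bytes * 8);
+ *	HASH_SET_DCAL;
+ *
+ * followed by polling HASH_STR_DCAL_MASK in the STR register until the
+ * digest calculation has completed.
+ */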
+/* Hardware access method */
+enum hash_mode {
+ HASH_MODE_CPU,
+ HASH_MODE_DMA
+};
+
+/**
+ * struct uint64 - Structure to handle 64 bits integers.
+ * @high_word: Most significant bits.
+ * @low_word: Least significant bits.
+ *
+ * Used to handle 64 bits integers.
+ */
+struct uint64 {
+ u32 high_word;
+ u32 low_word;
+};
+
+/**
+ * struct hash_register - Contains all registers in ux500 hash hardware.
+ * @cr: HASH control register (0x000).
+ * @din: HASH data input register (0x004).
+ * @str: HASH start register (0x008).
+ * @hx: HASH digest register 0..7 (0x00c-0x01C).
+ * @padding0: Reserved (0x02C).
+ * @itcr: Integration test control register (0x080).
+ * @itip: Integration test input register (0x084).
+ * @itop: Integration test output register (0x088).
+ * @padding1: Reserved (0x08C).
+ * @csfull: HASH context full register (0x0F8).
+ * @csdatain: HASH context swap data input register (0x0FC).
+ * @csrx: HASH context swap register 0..51 (0x100-0x1CC).
+ * @padding2: Reserved (0x1D0).
+ * @periphid0: HASH peripheral identification register 0 (0xFE0).
+ * @periphid1: HASH peripheral identification register 1 (0xFE4).
+ * @periphid2: HASH peripheral identification register 2 (0xFE8).
+ * @periphid3: HASH peripheral identification register 3 (0xFEC).
+ * @cellid0: HASH PCell identification register 0 (0xFF0).
+ * @cellid1: HASH PCell identification register 1 (0xFF4).
+ * @cellid2: HASH PCell identification register 2 (0xFF8).
+ * @cellid3: HASH PCell identification register 3 (0xFFC).
+ *
+ * The device communicates to the HASH via 32-bit-wide control registers
+ * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure
+ * with the registers used.
+ */
+struct hash_register {
+ u32 cr;
+ u32 din;
+ u32 str;
+ u32 hx[8];
+
+ u32 padding0[(0x080 - 0x02C) / sizeof(u32)];
+
+ u32 itcr;
+ u32 itip;
+ u32 itop;
+
+ u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)];
+
+ u32 csfull;
+ u32 csdatain;
+ u32 csrx[HASH_CSR_COUNT];
+
+ u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)];
+
+ u32 periphid0;
+ u32 periphid1;
+ u32 periphid2;
+ u32 periphid3;
+
+ u32 cellid0;
+ u32 cellid1;
+ u32 cellid2;
+ u32 cellid3;
+};
+
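+/*
+ * Note (illustrative): the padding arrays above are sized so that the
+ * structure mirrors the full 4 KiB register window, from @cr at offset 0x000
+ * up to @cellid3 at 0xFFC. A compile-time check such as
+ * BUILD_BUG_ON(sizeof(struct hash_register) != 0x1000), placed inside a
+ * function, could be used to verify the offset arithmetic; it is not part of
+ * this driver.
+ */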
+/**
+ * struct hash_state - Hash context state.
+ * @temp_cr: Temporary HASH Control Register.
+ * @str_reg: HASH Start Register.
+ * @din_reg: HASH Data Input Register.
+ * @csr[52]: HASH Context Swap Registers 0-51.
+ * @csfull: HASH Context Swap full register, i.e. status flags.
+ * @csdatain: HASH Context Swap data input register.
+ * @buffer: Working buffer for messages going to the hardware.
+ * @length: Length of the part of message hashed so far (floor(N/64) * 64).
+ * @index: Valid number of bytes in buffer (N % 64).
+ * @bit_index: Valid number of bits in buffer (N % 8).
+ *
+ * This structure is used between context switches, i.e. when ongoing jobs are
+ * interrupted with new jobs. When this happens we need to store intermediate
+ * results in software.
+ *
+ * WARNING: "index" is placed where it is in the structure to make sure that
+ * "buffer" is aligned on a 4-byte boundary. This is highly implementation
+ * dependent and MUST be checked whenever this code is ported to new
+ * platforms.
+ */
+struct hash_state {
+ u32 temp_cr;
+ u32 str_reg;
+ u32 din_reg;
+ u32 csr[52];
+ u32 csfull;
+ u32 csdatain;
+ u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)];
+ struct uint64 length;
+ u8 index;
+ u8 bit_index;
+};
+
+/**
+ * enum hash_device_id - HASH device ID.
+ * @HASH_DEVICE_ID_0: Hash hardware with ID 0
+ * @HASH_DEVICE_ID_1: Hash hardware with ID 1
+ */
+enum hash_device_id {
+ HASH_DEVICE_ID_0 = 0,
+ HASH_DEVICE_ID_1 = 1
+};
+
+/**
+ * enum hash_data_format - HASH data format.
+ * @HASH_DATA_32_BITS: 32-bit data format.
+ * @HASH_DATA_16_BITS: 16-bit data format.
+ * @HASH_DATA_8_BITS: 8-bit data format.
+ * @HASH_DATA_1_BIT: 1-bit data format.
+ */
+enum hash_data_format {
+ HASH_DATA_32_BITS = 0x0,
+ HASH_DATA_16_BITS = 0x1,
+ HASH_DATA_8_BITS = 0x2,
+ HASH_DATA_1_BIT = 0x3
+};
+
+/**
+ * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm.
+ * @HASH_ALGO_SHA1: Indicates that SHA1 is used.
+ * @HASH_ALGO_SHA256: Indicates that SHA2 (SHA256) is used.
+ */
+enum hash_algo {
+ HASH_ALGO_SHA1 = 0x0,
+ HASH_ALGO_SHA256 = 0x1
+};
+
+/**
+ * enum hash_op - Enumeration for selecting between HASH or HMAC mode.
+ * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode.
+ * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC.
+ */
+enum hash_op {
+ HASH_OPER_MODE_HASH = 0x0,
+ HASH_OPER_MODE_HMAC = 0x1
+};
+
+/**
+ * struct hash_config - Configuration data for the hardware.
+ * @data_format: Format of data entered into the hash data in register.
+ * @algorithm: Algorithm selection bit.
+ * @oper_mode: Operating mode selection bit.
+ */
+struct hash_config {
+ int data_format;
+ int algorithm;
+ int oper_mode;
+};
+
+/**
+ * struct hash_dma - Structure used for dma.
+ * @mask: DMA capabilities bitmap mask.
+ * @complete: Used to maintain state for a "completion".
+ * @chan_mem2hash: DMA channel.
+ * @cfg_mem2hash: DMA channel configuration.
+ * @sg_len: Scatterlist length.
+ * @sg: Scatterlist.
+ * @nents: Number of sg entries.
+ */
+struct hash_dma {
+ dma_cap_mask_t mask;
+ struct completion complete;
+ struct dma_chan *chan_mem2hash;
+ void *cfg_mem2hash;
+ int sg_len;
+ struct scatterlist *sg;
+ int nents;
+};
+
+/**
+ * struct hash_ctx - The context used for hash calculations.
+ * @key: The key used in the operation.
+ * @keylen: The length of the key.
+ * @config: The current configuration.
+ * @digestsize: The size of current digest.
+ * @device: Pointer to the device structure.
+ */
+struct hash_ctx {
+ u8 *key;
+ u32 keylen;
+ struct hash_config config;
+ int digestsize;
+ struct hash_device_data *device;
+};
+
+/**
+ * struct hash_req_ctx - The request context used for hash calculations.
+ * @state: The state of the current calculations.
+ * @dma_mode: Used in special cases (workaround), e.g. when we need to fall
+ * back to cpu mode because dma mode is not supported or not working.
+ * @updated: Indicates if hardware is initialized for new operations.
+ */
+struct hash_req_ctx {
+ struct hash_state state;
+ bool dma_mode;
+ u8 updated;
+};
+
+/**
+ * struct hash_device_data - structure for a hash device.
+ * @base: Pointer to the hardware base address.
+ * @list_node: For inclusion in klist.
+ * @dev: Pointer to the device dev structure.
+ * @ctx_lock: Spinlock for current_ctx.
+ * @current_ctx: Pointer to the currently allocated context.
+ * @power_state: TRUE = power state on, FALSE = power state off.
+ * @power_state_lock: Spinlock for power_state.
+ * @regulator: Pointer to the device's power control.
+ * @clk: Pointer to the device's clock control.
+ * @restore_dev_state: TRUE = saved state, FALSE = no saved state.
+ * @state: Saved hardware state, used when saving and resuming the hw state.
+ * @dma: Structure used for dma.
+ */
+struct hash_device_data {
+ struct hash_register __iomem *base;
+ struct klist_node list_node;
+ struct device *dev;
+ struct spinlock ctx_lock;
+ struct hash_ctx *current_ctx;
+ bool power_state;
+ struct spinlock power_state_lock;
+ struct regulator *regulator;
+ struct clk *clk;
+ bool restore_dev_state;
+ struct hash_state state; /* Used for saving and resuming state */
+ struct hash_dma dma;
+};
+
+int hash_check_hw(struct hash_device_data *device_data);
+
+int hash_setconfiguration(struct hash_device_data *device_data,
+ struct hash_config *config);
+
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx);
+
+void hash_get_digest(struct hash_device_data *device_data,
+ u8 *digest, int algorithm);
+
+int hash_hw_update(struct ahash_request *req);
+
+int hash_save_state(struct hash_device_data *device_data,
+ struct hash_state *state);
+
+int hash_resume_state(struct hash_device_data *device_data,
+ const struct hash_state *state);
+
+#endif
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
new file mode 100644
index 000000000000..6dbb9ec709a3
--- /dev/null
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -0,0 +1,2009 @@
+/*
+ * Cryptographic API.
+ * Support for Nomadik hardware crypto engine.
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
+ * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
+ * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/klist.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/bitops.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+
+#include <mach/crypto-ux500.h>
+#include <mach/hardware.h>
+
+#include "hash_alg.h"
+
+#define DEV_DBG_NAME "hashX hashX:"
+
+static int hash_mode;
+module_param(hash_mode, int, 0);
+MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
+
+/**
+ * Pre-calculated empty message digests.
+ */
+static u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+
+static u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+
+/* HMAC-SHA1, no key */
+static u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
+ 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
+ 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
+ 0x70, 0x69, 0x0e, 0x1d
+};
+
+/* HMAC-SHA256, no key */
+static u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
+ 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
+ 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
+ 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
+ 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
+};
+
+/**
+ * struct hash_driver_data - data specific to the driver.
+ *
+ * @device_list: A list of registered devices to choose from.
+ * @device_allocation: A semaphore initialized with number of devices.
+ */
+struct hash_driver_data {
+ struct klist device_list;
+ struct semaphore device_allocation;
+};
+
+static struct hash_driver_data driver_data;
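+/*
+ * Device allocation sketch (illustrative, not part of the driver): each
+ * probed device is put on @device_list and @device_allocation is a counting
+ * semaphore initialized to the number of devices. A consumer follows the
+ * pattern implemented by hash_get_device_data() and release_hash_device()
+ * below:
+ *
+ *	struct hash_device_data *device_data;
+ *
+ *	ret = hash_get_device_data(ctx, &device_data);
+ *	if (ret)
+ *		return ret;
+ *	... use device_data ...
+ *	release_hash_device(device_data);
+ *
+ * hash_get_device_data() takes the semaphore with down_interruptible() and
+ * release_hash_device() gives it back with up().
+ */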
+
+/* Declaration of functions */
+/**
+ * hash_messagepad - Pads a message and writes the nblw bits.
+ * @device_data: Structure for the hash device.
+ * @message: Last word of a message
+ * @index_bytes: The number of bytes in the last message
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
+ *
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+ const u32 *message, u8 index_bytes);
+
+/**
+ * release_hash_device - Releases a previously allocated hash device.
+ * @device_data: Structure for the hash device.
+ *
+ */
+static void release_hash_device(struct hash_device_data *device_data)
+{
+ spin_lock(&device_data->ctx_lock);
+ device_data->current_ctx->device = NULL;
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ /*
+ * The down_interruptible part for this semaphore is called in
+ * hash_get_device_data.
+ */
+ up(&driver_data.device_allocation);
+}
+
+static void hash_dma_setup_channel(struct hash_device_data *device_data,
+ struct device *dev)
+{
+ struct hash_platform_data *platform_data = dev->platform_data;
+ dma_cap_zero(device_data->dma.mask);
+ dma_cap_set(DMA_SLAVE, device_data->dma.mask);
+
+ device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
+ device_data->dma.chan_mem2hash =
+ dma_request_channel(device_data->dma.mask,
+ platform_data->dma_filter,
+ device_data->dma.cfg_mem2hash);
+
+ init_completion(&device_data->dma.complete);
+}
+
+static void hash_dma_callback(void *data)
+{
+ struct hash_ctx *ctx = (struct hash_ctx *) data;
+
+ complete(&ctx->device->dma.complete);
+}
+
+static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
+ int len, enum dma_data_direction direction)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *channel = NULL;
+ dma_cookie_t cookie;
+
+ if (direction != DMA_TO_DEVICE) {
+ dev_err(ctx->device->dev, "[%s] Invalid DMA direction",
+ __func__);
+ return -EFAULT;
+ }
+
+ sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
+
+ channel = ctx->device->dma.chan_mem2hash;
+ ctx->device->dma.sg = sg;
+ ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
+ ctx->device->dma.sg, ctx->device->dma.nents,
+ direction);
+
+ if (!ctx->device->dma.sg_len) {
+ dev_err(ctx->device->dev,
+ "[%s]: Could not map the sg list (TO_DEVICE)",
+ __func__);
+ return -EFAULT;
+ }
+
+ dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
+ "(TO_DEVICE)", __func__);
+ desc = channel->device->device_prep_slave_sg(channel,
+ ctx->device->dma.sg, ctx->device->dma.sg_len,
+ direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL);
+ if (!desc) {
+ dev_err(ctx->device->dev,
+ "[%s]: device_prep_slave_sg() failed!", __func__);
+ return -EFAULT;
+ }
+
+ desc->callback = hash_dma_callback;
+ desc->callback_param = ctx;
+
+ cookie = desc->tx_submit(desc);
+ dma_async_issue_pending(channel);
+
+ return 0;
+}
+
+static void hash_dma_done(struct hash_ctx *ctx)
+{
+ struct dma_chan *chan;
+
+ chan = ctx->device->dma.chan_mem2hash;
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
+ ctx->device->dma.sg_len, DMA_TO_DEVICE);
+
+}
+
+static int hash_dma_write(struct hash_ctx *ctx,
+ struct scatterlist *sg, int len)
+{
+ int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
+ if (error) {
+ dev_dbg(ctx->device->dev, "[%s]: hash_set_dma_transfer() "
+ "failed", __func__);
+ return error;
+ }
+
+ return len;
+}
+
+/**
+ * get_empty_message_digest - Returns a pre-calculated digest for
+ * the empty message.
+ * @device_data: Structure for the hash device.
+ * @zero_hash: Buffer to return the empty message digest.
+ * @zero_hash_size: Hash size of the empty message digest.
+ * @zero_digest: Set to true if a pre-calculated digest is returned.
+ */
+static int get_empty_message_digest(
+ struct hash_device_data *device_data,
+ u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
+{
+ int ret = 0;
+ struct hash_ctx *ctx = device_data->current_ctx;
+ *zero_digest = false;
+
+ /**
+ * Caller responsible for ctx != NULL.
+ */
+
+ if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
+ if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hash_sha1[0],
+ SHA1_DIGEST_SIZE);
+ *zero_hash_size = SHA1_DIGEST_SIZE;
+ *zero_digest = true;
+ } else if (HASH_ALGO_SHA256 ==
+ ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hash_sha256[0],
+ SHA256_DIGEST_SIZE);
+ *zero_hash_size = SHA256_DIGEST_SIZE;
+ *zero_digest = true;
+ } else {
+ dev_err(device_data->dev, "[%s] "
+ "Incorrect algorithm!"
+ , __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
+ if (!ctx->keylen) {
+ if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hmac_sha1[0],
+ SHA1_DIGEST_SIZE);
+ *zero_hash_size = SHA1_DIGEST_SIZE;
+ *zero_digest = true;
+ } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
+ memcpy(zero_hash, &zero_message_hmac_sha256[0],
+ SHA256_DIGEST_SIZE);
+ *zero_hash_size = SHA256_DIGEST_SIZE;
+ *zero_digest = true;
+ } else {
+ dev_err(device_data->dev, "[%s] "
+ "Incorrect algorithm!"
+ , __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ dev_dbg(device_data->dev, "[%s] Continue hash "
+ "calculation, since hmac key avalable",
+ __func__);
+ }
+ }
+out:
+
+ return ret;
+}
+
+/**
+ * hash_disable_power - Request to disable power and clock.
+ * @device_data: Structure for the hash device.
+ * @save_device_state: If true, saves the current hw state.
+ *
+ * This function requests disabling of power (regulator) and clock,
+ * and can also save the current hw state.
+ */
+static int hash_disable_power(
+ struct hash_device_data *device_data,
+ bool save_device_state)
+{
+ int ret = 0;
+ struct device *dev = device_data->dev;
+
+ spin_lock(&device_data->power_state_lock);
+ if (!device_data->power_state)
+ goto out;
+
+ if (save_device_state) {
+ hash_save_state(device_data,
+ &device_data->state);
+ device_data->restore_dev_state = true;
+ }
+
+ clk_disable(device_data->clk);
+ ret = regulator_disable(device_data->regulator);
+ if (ret)
+ dev_err(dev, "[%s] regulator_disable() failed!", __func__);
+
+ device_data->power_state = false;
+
+out:
+ spin_unlock(&device_data->power_state_lock);
+
+ return ret;
+}
+
+/**
+ * hash_enable_power - Request to enable power and clock.
+ * @device_data: Structure for the hash device.
+ * @restore_device_state: If true, restores a previous saved hw state.
+ *
+ * This function requests enabling of power (regulator) and clock,
+ * and can also restore a previously saved hw state.
+ */
+static int hash_enable_power(
+ struct hash_device_data *device_data,
+ bool restore_device_state)
+{
+ int ret = 0;
+ struct device *dev = device_data->dev;
+
+ spin_lock(&device_data->power_state_lock);
+ if (!device_data->power_state) {
+ ret = regulator_enable(device_data->regulator);
+ if (ret) {
+ dev_err(dev, "[%s]: regulator_enable() failed!",
+ __func__);
+ goto out;
+ }
+ ret = clk_enable(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_enable() failed!",
+ __func__);
+ ret = regulator_disable(
+ device_data->regulator);
+ goto out;
+ }
+ device_data->power_state = true;
+ }
+
+ if (device_data->restore_dev_state) {
+ if (restore_device_state) {
+ device_data->restore_dev_state = false;
+ hash_resume_state(device_data,
+ &device_data->state);
+ }
+ }
+out:
+ spin_unlock(&device_data->power_state_lock);
+
+ return ret;
+}
+
+/**
+ * hash_get_device_data - Checks for an available hash device and returns it.
+ * @ctx: Structure for the hash context.
+ * @device_data: Structure for the hash device.
+ *
+ * This function checks for an available hash device and returns it to
+ * the caller.
+ * Note! The caller needs to release the device by calling up().
+ */
+static int hash_get_device_data(struct hash_ctx *ctx,
+ struct hash_device_data **device_data)
+{
+ int ret;
+ struct klist_iter device_iterator;
+ struct klist_node *device_node;
+ struct hash_device_data *local_device_data = NULL;
+
+ /* Wait until a device is available */
+ ret = down_interruptible(&driver_data.device_allocation);
+ if (ret)
+ return ret; /* Interrupted */
+
+ /* Select a device */
+ klist_iter_init(&driver_data.device_list, &device_iterator);
+ device_node = klist_next(&device_iterator);
+ while (device_node) {
+ local_device_data = container_of(device_node,
+ struct hash_device_data, list_node);
+ spin_lock(&local_device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (local_device_data->current_ctx) {
+ device_node = klist_next(&device_iterator);
+ } else {
+ local_device_data->current_ctx = ctx;
+ ctx->device = local_device_data;
+ spin_unlock(&local_device_data->ctx_lock);
+ break;
+ }
+ spin_unlock(&local_device_data->ctx_lock);
+ }
+ klist_iter_exit(&device_iterator);
+
+ if (!device_node) {
+ /**
+ * No free device found.
+ * Since we allocated a device with down_interruptible, this
+ * should not be able to happen.
+ * The number of available devices, which is tracked by
+ * device_allocation, therefore stays decremented since we do
+ * not do an up(device_allocation).
+ */
+ return -EBUSY;
+ }
+
+ *device_data = local_device_data;
+
+ return 0;
+}
+
+/**
+ * hash_hw_write_key - Writes the key to the hardware registers.
+ *
+ * @device_data: Structure for the hash device.
+ * @key: Key to be written.
+ * @keylen: The length of the key.
+ *
+ * Note! This function DOES NOT write to the NBLW register, even though
+ * specified in the hw design spec. Either due to incorrect info in the
+ * spec or due to a bug in the hw.
+ */
+static void hash_hw_write_key(struct hash_device_data *device_data,
+ const u8 *key, unsigned int keylen)
+{
+ u32 word = 0;
+ int nwords = 1;
+
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ while (keylen >= 4) {
+ u32 *key_word = (u32 *)key;
+
+ HASH_SET_DIN(key_word, nwords);
+ keylen -= 4;
+ key += 4;
+ }
+
+ /* Take care of the remaining bytes in the last word */
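+ /*
+ * The trailing 1-3 bytes are packed into a single little-endian word,
+ * e.g. for keylen == 3: word = key[0] | key[1] << 8 | key[2] << 16.
+ */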
+ if (keylen) {
+ word = 0;
+ while (keylen) {
+ word |= (key[keylen - 1] << (8 * (keylen - 1)));
+ keylen--;
+ }
+
+ HASH_SET_DIN(&word, nwords);
+ }
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ HASH_SET_DCAL;
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+}
+
+/**
+ * init_hash_hw - Initialise the hash hardware for a new calculation.
+ * @device_data: Structure for the hash device.
+ * @ctx: The hash context.
+ *
+ * This function will enable the bits needed to clear and start a new
+ * calculation.
+ */
+static int init_hash_hw(struct hash_device_data *device_data,
+ struct hash_ctx *ctx)
+{
+ int ret = 0;
+
+ ret = hash_setconfiguration(device_data, &ctx->config);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_setconfiguration() "
+ "failed!", __func__);
+ return ret;
+ }
+
+ hash_begin(device_data, ctx);
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+ return ret;
+}
+
+/**
+ * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
+ *
+ * @sg: Scatterlist.
+ * @size: Size in bytes.
+ * @aligned: True if sg data aligned to work in DMA mode.
+ *
+ */
+static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
+{
+ int nents = 0;
+ bool aligned_data = true;
+
+ while (size > 0 && sg) {
+ nents++;
+ size -= sg->length;
+
+ /* hash_set_dma_transfer will align last nent */
+ if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE))
+ || (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) &&
+ size > 0))
+ aligned_data = false;
+
+ sg = sg_next(sg);
+ }
+
+ if (aligned)
+ *aligned = aligned_data;
+
+ if (size != 0)
+ return -EFAULT;
+
+ return nents;
+}
+
+/**
+ * hash_dma_valid_data - checks for dma valid sg data.
+ * @sg: Scatterlist.
+ * @datasize: Datasize in bytes.
+ *
+ * NOTE! This function checks for dma valid sg data, since dma
+ * only accepts data sizes that are a multiple of the word size.
+ */
+static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
+{
+ bool aligned;
+
+ /* Need to include at least one nent, else error */
+ if (hash_get_nents(sg, datasize, &aligned) < 1)
+ return false;
+
+ return aligned;
+}
+
+/**
+ * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ *
+ * Initialize structures.
+ */
+static int hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+ if (!ctx->key)
+ ctx->keylen = 0;
+
+ memset(&req_ctx->state, 0, sizeof(struct hash_state));
+ req_ctx->updated = 0;
+ if (hash_mode == HASH_MODE_DMA) {
+ if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
+ req_ctx->dma_mode = false; /* Don't use DMA */
+
+ pr_debug(DEV_DBG_NAME " [%s] DMA mode, but direct "
+ "to CPU mode for data size < %d",
+ __func__, HASH_DMA_ALIGN_SIZE);
+ } else {
+ if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
+ hash_dma_valid_data(req->src,
+ req->nbytes)) {
+ req_ctx->dma_mode = true;
+ } else {
+ req_ctx->dma_mode = false;
+ pr_debug(DEV_DBG_NAME " [%s] DMA mode, but use"
+ " CPU mode for datalength < %d"
+ " or non-aligned data, except "
+ "in last nent", __func__,
+ HASH_DMA_PERFORMANCE_MIN_SIZE);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * hash_processblock - This function processes a single block of 512 bits (64
+ * bytes), word aligned, starting at message.
+ * @device_data: Structure for the hash device.
+ * @message: Block (512 bits) of message to be written to
+ * the HASH hardware.
+ * @length: The length of the block in bytes.
+ *
+ */
+static void hash_processblock(
+ struct hash_device_data *device_data,
+ const u32 *message, int length)
+{
+ int len = length / HASH_BYTES_PER_WORD;
+ /*
+ * NBLW bits. Reset the number of bits in last word (NBLW).
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ /*
+ * Write message data to the HASH_DIN register.
+ */
+ HASH_SET_DIN(message, len);
+}
+
+/**
+ * hash_messagepad - Pads a message and writes the nblw bits.
+ * @device_data: Structure for the hash device.
+ * @message: Last word of a message.
+ * @index_bytes: The number of bytes in the last message.
+ *
+ * This function manages the final part of the digest calculation, when less
+ * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
+ *
+ */
+static void hash_messagepad(struct hash_device_data *device_data,
+ const u32 *message, u8 index_bytes)
+{
+ int nwords = 1;
+
+ /*
+ * Clear hash str register, only clear NBLW
+ * since DCAL will be reset by hardware.
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+
+ /* Main loop */
+ while (index_bytes >= 4) {
+ HASH_SET_DIN(message, nwords);
+ index_bytes -= 4;
+ message++;
+ }
+
+ if (index_bytes)
+ HASH_SET_DIN(message, nwords);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
+ HASH_SET_NBLW(index_bytes * 8);
+ dev_dbg(device_data->dev, "[%s] DIN=0x%08x NBLW=%d", __func__,
+ readl_relaxed(&device_data->base->din),
+ (int)(readl_relaxed(&device_data->base->str) &
+ HASH_STR_NBLW_MASK));
+ HASH_SET_DCAL;
+ dev_dbg(device_data->dev, "[%s] after dcal -> DIN=0x%08x NBLW=%d",
+ __func__, readl_relaxed(&device_data->base->din),
+ (int)(readl_relaxed(&device_data->base->str) &
+ HASH_STR_NBLW_MASK));
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+}
+
+/**
+ * hash_incrementlength - Increments the length of the current message.
+ * @ctx: Hash context
+ * @incr: Length of the message part that was just processed
+ *
+ * Overflow cannot occur, because conditions for overflow are checked in
+ * hash_hw_update.
+ */
+static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
+{
+ ctx->state.length.low_word += incr;
+
+ /* Check for wrap-around */
+ if (ctx->state.length.low_word < incr)
+ ctx->state.length.high_word++;
+}
+
+/**
+ * hash_setconfiguration - Sets the required configuration for the hash
+ * hardware.
+ * @device_data: Structure for the hash device.
+ * @config: Pointer to a configuration structure.
+ */
+int hash_setconfiguration(struct hash_device_data *device_data,
+ struct hash_config *config)
+{
+ int ret = 0;
+
+ if (config->algorithm != HASH_ALGO_SHA1 &&
+ config->algorithm != HASH_ALGO_SHA256)
+ return -EPERM;
+
+ /*
+ * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
+ * to be written to HASH_DIN is considered as 32 bits.
+ */
+ HASH_SET_DATA_FORMAT(config->data_format);
+
+ /*
+ * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
+ */
+ switch (config->algorithm) {
+ case HASH_ALGO_SHA1:
+ HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+ break;
+
+ case HASH_ALGO_SHA256:
+ HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
+ break;
+
+ default:
+ dev_err(device_data->dev, "[%s] Incorrect algorithm.",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * MODE bit. This bit selects between HASH or HMAC mode for the
+ * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
+ */
+ if (HASH_OPER_MODE_HASH == config->oper_mode)
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_MODE_MASK);
+ else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_MODE_MASK);
+ if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
+ /* Truncate key to blocksize */
+ dev_dbg(device_data->dev, "[%s] LKEY set", __func__);
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_LKEY_MASK);
+ } else {
+ dev_dbg(device_data->dev, "[%s] LKEY cleared",
+ __func__);
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_LKEY_MASK);
+ }
+ } else { /* Wrong hash mode */
+ ret = -EPERM;
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ }
+ return ret;
+}
+
+/**
+ * hash_begin - This routine resets some globals and initializes the hash
+ * hardware.
+ * @device_data: Structure for the hash device.
+ * @ctx: Hash context.
+ */
+void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
+{
+ /* HW and SW initializations */
+ /* Note: there is no need to initialize buffer and digest members */
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ /*
+ * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+ * prepare the HASH accelerator to compute the message
+ * digest of a new message.
+ */
+ HASH_INITIALIZE;
+
+ /*
+ * NBLW bits. Reset the number of bits in last word (NBLW).
+ */
+ HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
+}
+
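+/**
+ * hash_process_data - Processes a chunk of message data towards the hw.
+ * @device_data: Structure for the hash device.
+ * @ctx: The hash context.
+ * @req_ctx: The request context, holding the intermediate state.
+ * @msg_length: Length of the message data chunk.
+ * @data_buffer: Pointer to the message data chunk.
+ * @buffer: Block buffer kept in the request context state.
+ * @index: Pointer to the number of valid bytes in @buffer, updated on return.
+ *
+ * Data is collected in @buffer until a full 512-bit (64 byte) block is
+ * available. Full blocks are written to the hardware, with the hardware
+ * state resumed before and saved after each block, so that the hardware can
+ * be shared between requests.
+ */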
+int hash_process_data(
+ struct hash_device_data *device_data,
+ struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
+ int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
+{
+ int ret = 0;
+ u32 count;
+
+ do {
+ if ((*index + msg_length) < HASH_BLOCK_SIZE) {
+ for (count = 0; count < msg_length; count++) {
+ buffer[*index + count] =
+ *(data_buffer + count);
+ }
+ *index += msg_length;
+ msg_length = 0;
+ } else {
+ if (req_ctx->updated) {
+
+ ret = hash_resume_state(device_data,
+ &device_data->state);
+ memmove(req_ctx->state.buffer,
+ device_data->state.buffer,
+ HASH_BLOCK_SIZE / sizeof(u32));
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_resume_state()"
+ " failed!", __func__);
+ goto out;
+ }
+ } else {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "init_hash_hw()"
+ " failed!", __func__);
+ goto out;
+ }
+ req_ctx->updated = 1;
+ }
+ /*
+ * If 'data_buffer' is four byte aligned and
+ * local buffer does not have any data, we can
+ * write data directly from 'data_buffer' to
+ * HW peripheral, otherwise we first copy data
+ * to a local buffer
+ */
+ if ((0 == (((u32)data_buffer) % 4))
+ && (0 == *index))
+ hash_processblock(device_data,
+ (const u32 *)
+ data_buffer, HASH_BLOCK_SIZE);
+ else {
+ for (count = 0; count <
+ (u32)(HASH_BLOCK_SIZE -
+ *index);
+ count++) {
+ buffer[*index + count] =
+ *(data_buffer + count);
+ }
+ hash_processblock(device_data,
+ (const u32 *)buffer,
+ HASH_BLOCK_SIZE);
+ }
+ hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
+ data_buffer += (HASH_BLOCK_SIZE - *index);
+
+ msg_length -= (HASH_BLOCK_SIZE - *index);
+ *index = 0;
+
+ ret = hash_save_state(device_data,
+ &device_data->state);
+
+ memmove(device_data->state.buffer,
+ req_ctx->state.buffer,
+ HASH_BLOCK_SIZE / sizeof(u32));
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_save_state()"
+ " failed!", __func__);
+ goto out;
+ }
+ }
+ } while (msg_length != 0);
+out:
+
+ return ret;
+}
+
+/**
+ * hash_dma_final - The hash dma final function for SHA1/SHA256.
+ * @req: The hash request for the job.
+ */
+static int hash_dma_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+ int bytes_written = 0;
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ if (req_ctx->updated) {
+ ret = hash_resume_state(device_data, &device_data->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out;
+ }
+
+ }
+
+ if (!req_ctx->updated) {
+ ret = hash_setconfiguration(device_data, &ctx->config);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_setconfiguration() failed!",
+ __func__);
+ goto out;
+ }
+
+ /* Enable DMA input */
+ if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
+ HASH_CLEAR_BITS(&device_data->base->cr,
+ HASH_CR_DMAE_MASK);
+ } else {
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_DMAE_MASK);
+ HASH_SET_BITS(&device_data->base->cr,
+ HASH_CR_PRIVN_MASK);
+ }
+
+ HASH_INITIALIZE;
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
+ hash_hw_write_key(device_data, ctx->key, ctx->keylen);
+
+ /* Number of bits in last word = (nbytes * 8) % 32 */
+ HASH_SET_NBLW((req->nbytes * 8) % 32);
+ req_ctx->updated = 1;
+ }
+
+ /* Store the nents in the dma struct. */
+ ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
+ if (ctx->device->dma.nents < 1) {
+ dev_err(device_data->dev, "[%s] "
+ "ctx->device->dma.nents = 0", __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
+ if (bytes_written != req->nbytes) {
+ dev_err(device_data->dev, "[%s] "
+ "hash_dma_write() failed!", __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ wait_for_completion(&ctx->device->dma.complete);
+ hash_dma_done(ctx);
+
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out:
+ release_hash_device(device_data);
+
+ /**
+ * Allocated in setkey, and only used in HMAC.
+ */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+/**
+ * hash_hw_final - The final hash calculation function
+ * @req: The hash request for the job.
+ */
+int hash_hw_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct hash_device_data *device_data;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ dev_dbg(device_data->dev, "[%s] (ctx=0x%x)!", __func__, (u32) ctx);
+
+ if (req_ctx->updated) {
+ ret = hash_resume_state(device_data, &device_data->state);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_resume_state() "
+ "failed!", __func__);
+ goto out;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen == 0) {
+ u8 zero_hash[SHA256_DIGEST_SIZE];
+ u32 zero_hash_size = 0;
+ bool zero_digest = false;
+ /**
+ * Use a pre-calculated empty message digest
+ * (workaround since hw returns zeroes, hw bug!?)
+ */
+ ret = get_empty_message_digest(device_data, &zero_hash[0],
+ &zero_hash_size, &zero_digest);
+ if (!ret && likely(zero_hash_size == ctx->digestsize) &&
+ zero_digest) {
+ memcpy(req->result, &zero_hash[0], ctx->digestsize);
+ goto out;
+ } else if (!ret && !zero_digest) {
+ dev_dbg(device_data->dev, "[%s] HMAC zero msg with "
+ "key, continue...", __func__);
+ } else {
+ dev_err(device_data->dev, "[%s] ret=%d, or wrong "
+ "digest size? %s", __func__, ret,
+ (zero_hash_size == ctx->digestsize) ?
+ "true" : "false");
+ /* Return error */
+ goto out;
+ }
+ } else if (req->nbytes == 0 && ctx->keylen > 0) {
+ dev_err(device_data->dev, "[%s] Empty message with "
+ "keylength > 0, NOT supported.", __func__);
+ goto out;
+ }
+
+ if (!req_ctx->updated) {
+ ret = init_hash_hw(device_data, ctx);
+ if (ret) {
+ dev_err(device_data->dev, "[%s] init_hash_hw() "
+ "failed!", __func__);
+ goto out;
+ }
+ }
+
+ if (req_ctx->state.index) {
+ hash_messagepad(device_data, req_ctx->state.buffer,
+ req_ctx->state.index);
+ } else {
+ HASH_SET_DCAL;
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+ }
+
+ if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
+ unsigned int keylen = ctx->keylen;
+ u8 *key = ctx->key;
+
+ dev_dbg(device_data->dev, "[%s] keylen: %d", __func__,
+ ctx->keylen);
+ hash_hw_write_key(device_data, key, keylen);
+ }
+
+ hash_get_digest(device_data, digest, ctx->config.algorithm);
+ memcpy(req->result, digest, ctx->digestsize);
+
+out:
+ release_hash_device(device_data);
+
+ /**
+ * Allocated in setkey, and only used in HMAC.
+ */
+ kfree(ctx->key);
+
+ return ret;
+}
+
+/**
+ * hash_hw_update - Updates current HASH computation hashing another part of
+ * the message.
+ * @req: The hash request containing the message to be hashed (caller
+ * allocated).
+ */
+int hash_hw_update(struct ahash_request *req)
+{
+ int ret = 0;
+ u8 index = 0;
+ u8 *buffer;
+ struct hash_device_data *device_data;
+ u8 *data_buffer;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_hash_walk walk;
+ int msg_length = crypto_hash_walk_first(req, &walk);
+
+ /* Empty message ("") is correct indata */
+ if (msg_length == 0)
+ return ret;
+
+ index = req_ctx->state.index;
+ buffer = (u8 *)req_ctx->state.buffer;
+
+ /* Check if ctx->state.length + msg_length overflows */
+ if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
+ HASH_HIGH_WORD_MAX_VAL ==
+ req_ctx->state.length.high_word) {
+ pr_err(DEV_DBG_NAME " [%s] HASH_MSG_LENGTH_OVERFLOW!",
+ __func__);
+ return -EPERM;
+ }
+
+ ret = hash_get_device_data(ctx, &device_data);
+ if (ret)
+ return ret;
+
+ /* Main loop */
+ while (0 != msg_length) {
+ data_buffer = walk.data;
+ ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
+ data_buffer, buffer, &index);
+
+ if (ret) {
+ dev_err(device_data->dev, "[%s] hash_internal_hw_"
+ "update() failed!", __func__);
+ goto out;
+ }
+
+ msg_length = crypto_hash_walk_done(&walk, 0);
+ }
+
+ req_ctx->state.index = index;
+ dev_dbg(device_data->dev, "[%s] indata length=%d, bin=%d))",
+ __func__, req_ctx->state.index,
+ req_ctx->state.bit_index);
+
+out:
+ release_hash_device(device_data);
+
+ return ret;
+}
+
+/**
+ * hash_resume_state - Function that resumes the state of a calculation.
+ * @device_data: Pointer to the device structure.
+ * @device_state: The state to be restored in the hash hardware
+ */
+int hash_resume_state(struct hash_device_data *device_data,
+ const struct hash_state *device_state)
+{
+ u32 temp_cr;
+ s32 count;
+ int hash_mode = HASH_OPER_MODE_HASH;
+
+ if (NULL == device_state) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /* Check correctness of index and length members */
+ if (device_state->index > HASH_BLOCK_SIZE
+ || (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -EPERM;
+ }
+
+ /*
+ * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
+ * prepare the HASH accelerator to compute the message
+ * digest of a new message.
+ */
+ HASH_INITIALIZE;
+
+ temp_cr = device_state->temp_cr;
+ writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
+
+ if (device_data->base->cr & HASH_CR_MODE_MASK)
+ hash_mode = HASH_OPER_MODE_HMAC;
+ else
+ hash_mode = HASH_OPER_MODE_HASH;
+
+ for (count = 0; count < HASH_CSR_COUNT; count++) {
+ if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+ break;
+
+ writel_relaxed(device_state->csr[count],
+ &device_data->base->csrx[count]);
+ }
+
+ writel_relaxed(device_state->csfull, &device_data->base->csfull);
+ writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
+
+ writel_relaxed(device_state->str_reg, &device_data->base->str);
+ writel_relaxed(temp_cr, &device_data->base->cr);
+
+ return 0;
+}
+
+/**
+ * hash_save_state - Function that saves the state of hardware.
+ * @device_data: Pointer to the device structure.
+ * @device_state: The structure where the hardware state should be saved.
+ */
+int hash_save_state(struct hash_device_data *device_data,
+ struct hash_state *device_state)
+{
+ u32 temp_cr;
+ u32 count;
+ int hash_mode = HASH_OPER_MODE_HASH;
+
+ if (NULL == device_state) {
+ dev_err(device_data->dev, "[%s] HASH_INVALID_PARAMETER!",
+ __func__);
+ return -ENOTSUPP;
+ }
+
+ /* Write dummy value to force digest intermediate calculation. This
+ * actually makes sure that there isn't any ongoing calculation in the
+ * hardware.
+ */
+ while (device_data->base->str & HASH_STR_DCAL_MASK)
+ cpu_relax();
+
+ temp_cr = readl_relaxed(&device_data->base->cr);
+
+ device_state->str_reg = readl_relaxed(&device_data->base->str);
+
+ device_state->din_reg = readl_relaxed(&device_data->base->din);
+
+ if (device_data->base->cr & HASH_CR_MODE_MASK)
+ hash_mode = HASH_OPER_MODE_HMAC;
+ else
+ hash_mode = HASH_OPER_MODE_HASH;
+
+ for (count = 0; count < HASH_CSR_COUNT; count++) {
+ if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
+ break;
+
+ device_state->csr[count] =
+ readl_relaxed(&device_data->base->csrx[count]);
+ }
+
+ device_state->csfull = readl_relaxed(&device_data->base->csfull);
+ device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
+
+ device_state->temp_cr = temp_cr;
+
+ return 0;
+}
+
+/**
+ * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
+ * @device_data: Structure for the hash device.
+ *
+ */
+int hash_check_hw(struct hash_device_data *device_data)
+{
+ /* Checking Peripheral Ids */
+ if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0)
+ && HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1)
+ && HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2)
+ && HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3)
+ && HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0)
+ && HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1)
+ && HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2)
+ && HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)
+ ) {
+ return 0;
+ }
+
+ dev_err(device_data->dev, "[%s] HASH_UNSUPPORTED_HW!",
+ __func__);
+ return -ENOTSUPP;
+}
+
+/**
+ * hash_get_digest - Gets the digest.
+ * @device_data: Pointer to the device structure.
+ * @digest: User allocated byte array for the calculated digest.
+ * @algorithm: The algorithm in use.
+ */
+void hash_get_digest(struct hash_device_data *device_data,
+ u8 *digest, int algorithm)
+{
+ u32 temp_hx_val, count;
+ int loop_ctr;
+
+ if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
+ dev_err(device_data->dev, "[%s] Incorrect algorithm %d",
+ __func__, algorithm);
+ return;
+ }
+
+ if (algorithm == HASH_ALGO_SHA1)
+ loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
+ else
+ loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
+
+ dev_dbg(device_data->dev, "[%s] digest array:(0x%x)",
+ __func__, (u32) digest);
+
+ /* Copy result into digest array */
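+ /*
+ * Each 32-bit HX word is unpacked most significant byte first into
+ * the digest byte array.
+ */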
+ for (count = 0; count < loop_ctr; count++) {
+ temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
+ digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
+ digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
+ digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
+ digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
+ }
+}
+
+/**
+ * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ */
+static int ahash_update(struct ahash_request *req)
+{
+ int ret = 0;
+ struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+ if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
+ ret = hash_hw_update(req);
+ /* Skip update for DMA, all data will be passed to DMA in final */
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME " [%s] hash_hw_update() failed!",
+ __func__);
+ }
+
+ return ret;
+}
+
+/**
+ * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
+ * @req: The hash request for the job.
+ */
+static int ahash_final(struct ahash_request *req)
+{
+ int ret = 0;
+ struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+ pr_debug(DEV_DBG_NAME " [%s] data size: %d", __func__, req->nbytes);
+
+ if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
+ ret = hash_dma_final(req);
+ else
+ ret = hash_hw_final(req);
+
+ if (ret) {
+ pr_err(DEV_DBG_NAME " [%s] hash_hw/dma_final() failed",
+ __func__);
+ }
+
+ return ret;
+}
+
+static int hash_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen, int alg)
+{
+ int ret = 0;
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /**
+ * Freed in final.
+ */
+ ctx->key = kmalloc(keylen, GFP_KERNEL);
+ if (!ctx->key) {
+ pr_err(DEV_DBG_NAME " [%s] Failed to allocate ctx->key "
+ "for %d\n", __func__, alg);
+ return -ENOMEM;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return ret;
+}
+
+static int ahash_sha1_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA1;
+ ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+ ctx->digestsize = SHA1_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int ahash_sha256_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA256;
+ ctx->config.oper_mode = HASH_OPER_MODE_HASH;
+ ctx->digestsize = SHA256_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int ahash_sha1_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = ahash_sha1_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int ahash_sha256_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = ahash_sha256_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA1;
+ ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
+ ctx->digestsize = SHA1_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int hmac_sha256_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = HASH_ALGO_SHA256;
+ ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
+ ctx->digestsize = SHA256_DIGEST_SIZE;
+
+ return hash_init(req);
+}
+
+static int hmac_sha1_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = hmac_sha1_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha256_digest(struct ahash_request *req)
+{
+ int ret2, ret1;
+
+ ret1 = hmac_sha256_init(req);
+ if (ret1)
+ goto out;
+
+ ret1 = ahash_update(req);
+ ret2 = ahash_final(req);
+
+out:
+ return ret1 ? ret1 : ret2;
+}
+
+static int hmac_sha1_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
+}
+
+static int hmac_sha256_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
+}
+
+struct hash_algo_template {
+ struct hash_config conf;
+ struct ahash_alg hash;
+};
+
+static int hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct hash_algo_template *hash_alg;
+
+ hash_alg = container_of(__crypto_ahash_alg(alg),
+ struct hash_algo_template,
+ hash);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct hash_req_ctx));
+
+ ctx->config.data_format = HASH_DATA_8_BITS;
+ ctx->config.algorithm = hash_alg->conf.algorithm;
+ ctx->config.oper_mode = hash_alg->conf.oper_mode;
+
+ ctx->digestsize = hash_alg->hash.halg.digestsize;
+
+ return 0;
+}
+
+static struct hash_algo_template hash_algs[] = {
+ {
+ .conf.algorithm = HASH_ALGO_SHA1,
+ .conf.oper_mode = HASH_OPER_MODE_HASH,
+ .hash = {
+ .init = hash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = ahash_sha1_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_init = hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .conf.algorithm = HASH_ALGO_SHA256,
+ .conf.oper_mode = HASH_OPER_MODE_HASH,
+ .hash = {
+ .init = hash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = ahash_sha256_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_init = hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+
+ },
+ {
+ .conf.algorithm = HASH_ALGO_SHA1,
+ .conf.oper_mode = HASH_OPER_MODE_HMAC,
+ .hash = {
+ .init = hash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = hmac_sha1_digest,
+ .setkey = hmac_sha1_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_init = hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .conf.algorithm = HASH_ALGO_SHA256,
+ .conf.oper_mode = HASH_OPER_MODE_HMAC,
+ .hash = {
+ .init = hash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .digest = hmac_sha256_digest,
+ .setkey = hmac_sha256_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct hash_ctx),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-ux500",
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct hash_ctx),
+ .cra_type = &crypto_ahash_type,
+ .cra_init = hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+/**
+ * ahash_algs_register_all - Registers all supported hash algorithms.
+ * @device_data: Pointer to the device structure.
+ */
+static int ahash_algs_register_all(struct hash_device_data *device_data)
+{
+ int ret;
+ int i;
+ int count;
+
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
+ ret = crypto_register_ahash(&hash_algs[i].hash);
+ if (ret) {
+ count = i;
+ dev_err(device_data->dev, "[%s] alg registration failed",
+ hash_algs[i].hash.halg.base.cra_driver_name);
+ goto unreg;
+ }
+ }
+ return 0;
+unreg:
+ for (i = 0; i < count; i++)
+ crypto_unregister_ahash(&hash_algs[i].hash);
+ return ret;
+}
+
+/**
+ * ahash_algs_unregister_all - Unregisters all previously registered hash algorithms.
+ * @device_data: Pointer to the device structure.
+ */
+static void ahash_algs_unregister_all(struct hash_device_data *device_data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
+ crypto_unregister_ahash(&hash_algs[i].hash);
+}
+
+/**
+ * ux500_hash_probe - Function that probes the hash hardware.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res = NULL;
+ struct hash_device_data *device_data;
+ struct device *dev = &pdev->dev;
+
+ device_data = kzalloc(sizeof(struct hash_device_data), GFP_ATOMIC);
+ if (!device_data) {
+ dev_dbg(dev, "[%s] kzalloc() failed!", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ device_data->dev = dev;
+ device_data->current_ctx = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "[%s] platform_get_resource() failed!", __func__);
+ ret = -ENODEV;
+ goto out_kfree;
+ }
+
+ res = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (res == NULL) {
+ dev_dbg(dev, "[%s] request_mem_region() failed!", __func__);
+ ret = -EBUSY;
+ goto out_kfree;
+ }
+
+ device_data->base = ioremap(res->start, resource_size(res));
+ if (!device_data->base) {
+ dev_err(dev, "[%s] ioremap() failed!",
+ __func__);
+ ret = -ENOMEM;
+ goto out_free_mem;
+ }
+ spin_lock_init(&device_data->ctx_lock);
+ spin_lock_init(&device_data->power_state_lock);
+
+ /* Enable power for HASH1 hardware block */
+ device_data->regulator = regulator_get(dev, "v-ape");
+ if (IS_ERR(device_data->regulator)) {
+ dev_err(dev, "[%s] regulator_get() failed!", __func__);
+ ret = PTR_ERR(device_data->regulator);
+ device_data->regulator = NULL;
+ goto out_unmap;
+ }
+
+ /* Enable the clock for HASH1 hardware block */
+ device_data->clk = clk_get(dev, NULL);
+ if (IS_ERR(device_data->clk)) {
+ dev_err(dev, "[%s] clk_get() failed!", __func__);
+ ret = PTR_ERR(device_data->clk);
+ goto out_regulator;
+ }
+
+ /* Enable device power (and clock) */
+ ret = hash_enable_power(device_data, false);
+ if (ret) {
+ dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
+ goto out_clk;
+ }
+
+ ret = hash_check_hw(device_data);
+ if (ret) {
+ dev_err(dev, "[%s] hash_check_hw() failed!", __func__);
+ goto out_power;
+ }
+
+ if (hash_mode == HASH_MODE_DMA)
+ hash_dma_setup_channel(device_data, dev);
+
+ platform_set_drvdata(pdev, device_data);
+
+ /* Put the new device into the device list... */
+ klist_add_tail(&device_data->list_node, &driver_data.device_list);
+ /* ... and signal that a new device is available. */
+ up(&driver_data.device_allocation);
+
+ ret = ahash_algs_register_all(device_data);
+ if (ret) {
+ dev_err(dev, "[%s] ahash_algs_register_all() "
+ "failed!", __func__);
+ goto out_power;
+ }
+
+ dev_info(dev, "[%s] successfully probed\n", __func__);
+ return 0;
+
+out_power:
+ hash_disable_power(device_data, false);
+
+out_clk:
+ clk_put(device_data->clk);
+
+out_regulator:
+ regulator_put(device_data->regulator);
+
+out_unmap:
+ iounmap(device_data->base);
+
+out_free_mem:
+ release_mem_region(res->start, resource_size(res));
+
+out_kfree:
+ kfree(device_data);
+out:
+ return ret;
+}
+
+/**
+ * ux500_hash_remove - Function that removes the hash device from the platform.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct hash_device_data *device_data;
+ struct device *dev = &pdev->dev;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(dev, "[%s]: platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Try to decrease the number of available devices. */
+ if (down_trylock(&driver_data.device_allocation))
+ return -EBUSY;
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (device_data->current_ctx) {
+ /* The device is busy */
+ spin_unlock(&device_data->ctx_lock);
+ /* Return the device to the pool. */
+ up(&driver_data.device_allocation);
+ return -EBUSY;
+ }
+
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ ahash_algs_unregister_all(device_data);
+
+ if (hash_disable_power(device_data, false))
+ dev_err(dev, "[%s]: hash_disable_power() failed",
+ __func__);
+
+ clk_put(device_data->clk);
+ regulator_put(device_data->regulator);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(device_data);
+
+ return 0;
+}
+
+/**
+ * ux500_hash_shutdown - Function that shuts down the hash device.
+ * @pdev: The platform device.
+ */
+static void ux500_hash_shutdown(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ struct hash_device_data *device_data;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return;
+ }
+
+ /* Check that the device is free */
+ spin_lock(&device_data->ctx_lock);
+ /* current_ctx allocates a device, NULL = unallocated */
+ if (!device_data->current_ctx) {
+ if (down_trylock(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: Cryp still in use!"
+ "Shutting down anyway...", __func__);
+ /*
+ * Allocate the device: set current_ctx to a dummy (non-NULL)
+ * value so that no new context can claim the device while it
+ * is being shut down.
+ */
+ device_data->current_ctx++;
+ }
+ spin_unlock(&device_data->ctx_lock);
+
+ /* Remove the device from the list */
+ if (klist_node_attached(&device_data->list_node))
+ klist_remove(&device_data->list_node);
+
+ /* If this was the last device, remove the services */
+ if (list_empty(&driver_data.device_list.k_list))
+ ahash_algs_unregister_all(device_data);
+
+ iounmap(device_data->base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ if (hash_disable_power(device_data, false))
+ dev_err(&pdev->dev, "[%s] hash_disable_power() failed",
+ __func__);
+}
+
+/**
+ * ux500_hash_suspend - Function that suspends the hash device.
+ * @pdev: The platform device.
+ * @state: The power state transition (unused).
+ */
+static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int ret;
+ struct hash_device_data *device_data;
+ struct hash_ctx *temp_ctx = NULL;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
+ if (!device_data->current_ctx)
+ device_data->current_ctx++;
+ spin_unlock(&device_data->ctx_lock);
+
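+ /*
+ * temp_ctx starts at NULL, so ++temp_ctx yields the same dummy value
+ * that current_ctx was bumped to above when the device was idle; a
+ * match means the device is unused and can be taken off the pool
+ * before being powered down completely.
+ */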
+ if (device_data->current_ctx == ++temp_ctx) {
+ if (down_interruptible(&driver_data.device_allocation))
+ dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
+ "failed", __func__);
+ ret = hash_disable_power(device_data, false);
+
+ } else
+ ret = hash_disable_power(device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__);
+
+ return ret;
+}
+
+/**
+ * ux500_hash_resume - Function that resumes the hash device.
+ * @pdev: The platform device.
+ */
+static int ux500_hash_resume(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct hash_device_data *device_data;
+ struct hash_ctx *temp_ctx = NULL;
+
+ device_data = platform_get_drvdata(pdev);
+ if (!device_data) {
+ dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
+ __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock(&device_data->ctx_lock);
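+ /*
+ * ++temp_ctx reproduces the dummy value set during suspend; a match
+ * means the device was idle, so clear the marker and hand it back to
+ * the allocation pool instead of re-enabling power here.
+ */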
+ if (device_data->current_ctx == ++temp_ctx)
+ device_data->current_ctx = NULL;
+ spin_unlock(&device_data->ctx_lock);
+
+ if (!device_data->current_ctx)
+ up(&driver_data.device_allocation);
+ else
+ ret = hash_enable_power(device_data, true);
+
+ if (ret)
+ dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
+ __func__);
+
+ return ret;
+}
+
+static struct platform_driver hash_driver = {
+ .probe = ux500_hash_probe,
+ .remove = ux500_hash_remove,
+ .shutdown = ux500_hash_shutdown,
+ .suspend = ux500_hash_suspend,
+ .resume = ux500_hash_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "hash1",
+ }
+};
+
+/**
+ * ux500_hash_mod_init - The kernel module init function.
+ */
+static int __init ux500_hash_mod_init(void)
+{
+ klist_init(&driver_data.device_list, NULL, NULL);
+ /* Initialize the semaphore to 0 devices (locked state) */
+ sema_init(&driver_data.device_allocation, 0);
+
+ return platform_driver_register(&hash_driver);
+}
+
+/**
+ * ux500_hash_mod_fini - The kernel module exit function.
+ */
+static void __exit ux500_hash_mod_fini(void)
+{
+ platform_driver_unregister(&hash_driver);
+}
+
+module_init(ux500_hash_mod_init);
+module_exit(ux500_hash_mod_fini);
+
+MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
+MODULE_ALIAS("hmac-sha1-all");
+MODULE_ALIAS("hmac-sha256-all");
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 464fa2147dfb..f6b0a6e2ea50 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -16,7 +16,7 @@ menuconfig PM_DEVFREQ
is attached to a single device and returns a "representative"
clock frequency of the device, which is also attached
to a device by 1-to-1. The device registering devfreq takes the
- responsiblity to "interpret" the representative frequency and
+ responsibility to "interpret" the representative frequency and
to set its every clock accordingly with the "target" callback
given to devfreq.
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index 574a06b1b1de..af75ddd4f158 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -10,6 +10,7 @@
*/
#include <linux/devfreq.h>
+#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
unsigned long *freq)
@@ -25,8 +26,14 @@ static int devfreq_performance_func(struct devfreq *df,
return 0;
}
+static int performance_init(struct devfreq *devfreq)
+{
+ return update_devfreq(devfreq);
+}
+
const struct devfreq_governor devfreq_performance = {
.name = "performance",
+ .init = performance_init,
.get_target_freq = devfreq_performance_func,
.no_central_polling = true,
};
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index d742d4a82d6a..fec0cdbd2477 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -10,6 +10,7 @@
*/
#include <linux/devfreq.h>
+#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
unsigned long *freq)
@@ -22,8 +23,14 @@ static int devfreq_powersave_func(struct devfreq *df,
return 0;
}
+static int powersave_init(struct devfreq *devfreq)
+{
+ return update_devfreq(devfreq);
+}
+
const struct devfreq_governor devfreq_powersave = {
.name = "powersave",
+ .init = powersave_init,
.get_target_freq = devfreq_powersave_func,
.no_central_polling = true,
};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef378b5b17e4..aadeb5be9dba 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -238,6 +238,7 @@ config IMX_DMA
config MXS_DMA
bool "MXS DMA support"
depends on SOC_IMX23 || SOC_IMX28
+ select STMP_DEVICE
select DMA_ENGINE
help
Support the MXS DMA engine. This engine including APBH-DMA
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 3d704abd7912..49ecbbb8932d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -95,10 +95,14 @@ static struct amba_driver pl08x_amba_driver;
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant
* @dualmaster: whether this version supports dual AHB masters or not.
+ * @nomadik: whether the channels have Nomadik security extension bits
+ * that need to be checked for permission before use and some registers are
+ * missing
*/
struct vendor_data {
u8 channels;
bool dualmaster;
+ bool nomadik;
};
/*
@@ -385,7 +389,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
spin_lock_irqsave(&ch->lock, flags);
- if (!ch->serving) {
+ if (!ch->locked && !ch->serving) {
ch->serving = virt_chan;
ch->signal = -1;
spin_unlock_irqrestore(&ch->lock, flags);
@@ -1324,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
int ret, tmp;
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sgl->length, plchan->name);
+ __func__, sg_dma_len(sgl), plchan->name);
txd = pl08x_get_txd(plchan, flags);
if (!txd) {
@@ -1378,11 +1382,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
dsg->len = sg_dma_len(sg);
if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_phys(sg);
+ dsg->src_addr = sg_dma_address(sg);
dsg->dst_addr = slave_addr;
} else {
dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_phys(sg);
+ dsg->dst_addr = sg_dma_address(sg);
}
}
@@ -1484,6 +1488,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
*/
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
+ /* The Nomadik variant does not have the config register */
+ if (pl08x->vd->nomadik)
+ return;
writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
@@ -1616,7 +1623,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
__func__, err);
writel(err, pl08x->base + PL080_ERR_CLEAR);
}
- tc = readl(pl08x->base + PL080_INT_STATUS);
+ tc = readl(pl08x->base + PL080_TC_STATUS);
if (tc)
writel(tc, pl08x->base + PL080_TC_CLEAR);
@@ -1773,8 +1780,10 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
spin_lock_irqsave(&ch->lock, flags);
virt_chan = ch->serving;
- seq_printf(s, "%d\t\t%s\n",
- ch->id, virt_chan ? virt_chan->name : "(none)");
+ seq_printf(s, "%d\t\t%s%s\n",
+ ch->id,
+ virt_chan ? virt_chan->name : "(none)",
+ ch->locked ? " LOCKED" : "");
spin_unlock_irqrestore(&ch->lock, flags);
}
@@ -1918,7 +1927,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
/* Initialize physical channels */
- pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
+ pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
GFP_KERNEL);
if (!pl08x->phy_chans) {
dev_err(&adev->dev, "%s failed to allocate "
@@ -1933,8 +1942,23 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
spin_lock_init(&ch->lock);
- ch->serving = NULL;
ch->signal = -1;
+
+ /*
+ * Nomadik variants can have channels that are locked
+ * down for the secure world only. Lock up these channels
+ * by perpetually serving a dummy virtual channel.
+ */
+ if (vd->nomadik) {
+ u32 val;
+
+ val = readl(ch->base + PL080_CH_CONFIG);
+ if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
+ dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
+ ch->locked = true;
+ }
+ }
+
dev_dbg(&adev->dev, "physical channel %d is %s\n",
i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
}
@@ -2017,6 +2041,12 @@ static struct vendor_data vendor_pl080 = {
.dualmaster = true,
};
+static struct vendor_data vendor_nomadik = {
+ .channels = 8,
+ .dualmaster = true,
+ .nomadik = true,
+};
+
static struct vendor_data vendor_pl081 = {
.channels = 2,
.dualmaster = false,
@@ -2037,9 +2067,9 @@ static struct amba_id pl08x_ids[] = {
},
/* Nomadik 8815 PL080 variant */
{
- .id = 0x00280880,
+ .id = 0x00280080,
.mask = 0x00ffffff,
- .data = &vendor_pl080,
+ .data = &vendor_nomadik,
},
{ 0, 0 },
};
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index bf0d7e4e345b..7292aa87b2dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -39,7 +39,6 @@
*/
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
-#define ATC_DEFAULT_CTRLA (0)
#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
|ATC_DIF(AT_DMA_MEM_IF))
@@ -574,7 +573,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- ctrla = ATC_DEFAULT_CTRLA;
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
| ATC_SRC_ADDR_MODE_INCR
| ATC_DST_ADDR_MODE_INCR
@@ -585,13 +583,13 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* of the most common optimization.
*/
if (!((src | dest | len) & 3)) {
- ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
+ ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
src_width = dst_width = 2;
} else if (!((src | dest | len) & 1)) {
- ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
+ ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
src_width = dst_width = 1;
} else {
- ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
+ ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
src_width = dst_width = 0;
}
@@ -668,7 +666,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
- ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+ ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+ | ATC_DCSIZE(sconfig->dst_maxburst);
ctrlb = ATC_IEN;
switch (direction) {
@@ -796,12 +795,12 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
enum dma_transfer_direction direction)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_dma_slave *atslave = chan->private;
struct dma_slave_config *sconfig = &atchan->dma_sconfig;
u32 ctrla;
/* prepare common CRTLA value */
- ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+ ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+ | ATC_DCSIZE(sconfig->dst_maxburst)
| ATC_DST_WIDTH(reg_width)
| ATC_SRC_WIDTH(reg_width)
| period_len >> reg_width;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 897a8bcaec90..8a6c8e8b2940 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -87,7 +87,26 @@
/* Bitfields in CTRLA */
#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */
#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
-/* Chunck Tranfer size definitions are in at_hdmac.h */
+#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
+#define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16))
+#define ATC_SCSIZE_1 (0x0 << 16)
+#define ATC_SCSIZE_4 (0x1 << 16)
+#define ATC_SCSIZE_8 (0x2 << 16)
+#define ATC_SCSIZE_16 (0x3 << 16)
+#define ATC_SCSIZE_32 (0x4 << 16)
+#define ATC_SCSIZE_64 (0x5 << 16)
+#define ATC_SCSIZE_128 (0x6 << 16)
+#define ATC_SCSIZE_256 (0x7 << 16)
+#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
+#define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20))
+#define ATC_DCSIZE_1 (0x0 << 20)
+#define ATC_DCSIZE_4 (0x1 << 20)
+#define ATC_DCSIZE_8 (0x2 << 20)
+#define ATC_DCSIZE_16 (0x3 << 20)
+#define ATC_DCSIZE_32 (0x4 << 20)
+#define ATC_DCSIZE_64 (0x5 << 20)
+#define ATC_DCSIZE_128 (0x6 << 20)
+#define ATC_DCSIZE_256 (0x7 << 20)
#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */
#define ATC_SRC_WIDTH(x) ((x) << 24)
#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 750925f9638b..e67b4e06a918 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1033,7 +1033,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!sgl)
goto out;
- if (sgl->length == 0)
+ if (sg_dma_len(sgl) == 0)
goto out;
spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 6c0e2d4c6682..780e0429b38c 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -270,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
if (dir == DMA_MEM_TO_DEV)
/* increment source address */
- src = sg_phys(sg);
+ src = sg_dma_address(sg);
else
/* increment destination address */
- dst = sg_phys(sg);
+ dst = sg_dma_address(sg);
bytes_to_transfer = sg_dma_len(sg);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 7439079f5eed..721296157577 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -742,7 +743,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dw_desc *desc;
u32 len, dlen, mem;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
if (!((mem | len) & 7))
@@ -809,7 +810,7 @@ slave_sg_todev_fill_desc:
struct dw_desc *desc;
u32 len, dlen, mem;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
if (!((mem | len) & 7))
@@ -1429,7 +1430,7 @@ static int __init dw_probe(struct platform_device *pdev)
err = PTR_ERR(dw->clk);
goto err_clk;
}
- clk_enable(dw->clk);
+ clk_prepare_enable(dw->clk);
/* force dma off, just in case */
dw_dma_off(dw);
@@ -1510,7 +1511,7 @@ static int __init dw_probe(struct platform_device *pdev)
return 0;
err_irq:
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
clk_put(dw->clk);
err_clk:
iounmap(dw->regs);
@@ -1540,7 +1541,7 @@ static int __exit dw_remove(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
}
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
clk_put(dw->clk);
iounmap(dw->regs);
@@ -1559,7 +1560,7 @@ static void dw_shutdown(struct platform_device *pdev)
struct dw_dma *dw = platform_get_drvdata(pdev);
dw_dma_off(platform_get_drvdata(pdev));
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
@@ -1568,7 +1569,7 @@ static int dw_suspend_noirq(struct device *dev)
struct dw_dma *dw = platform_get_drvdata(pdev);
dw_dma_off(platform_get_drvdata(pdev));
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
return 0;
}
@@ -1578,7 +1579,7 @@ static int dw_resume_noirq(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma *dw = platform_get_drvdata(pdev);
- clk_enable(dw->clk);
+ clk_prepare_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
return 0;
}
@@ -1592,12 +1593,21 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
.poweroff_noirq = dw_suspend_noirq,
};
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_id_table[] = {
+ { .compatible = "snps,dma-spear1340" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+#endif
+
static struct platform_driver dw_driver = {
.remove = __exit_p(dw_remove),
.shutdown = dw_shutdown,
.driver = {
.name = "dw_dmac",
.pm = &dw_dev_pm_ops,
+ .of_match_table = of_match_ptr(dw_dma_id_table),
},
};
@@ -1616,4 +1626,4 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f6e9b572b998..c64917ec313d 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -71,6 +71,7 @@
#define M2M_CONTROL_TM_SHIFT 13
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_NFBINT BIT(21)
#define M2M_CONTROL_RSS_SHIFT 22
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
@@ -79,7 +80,22 @@
#define M2M_CONTROL_PWSC_SHIFT 25
#define M2M_INTERRUPT 0x0004
-#define M2M_INTERRUPT_DONEINT BIT(1)
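+/* Mask of the DONE and NFB interrupt bits handled by the driver */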
+#define M2M_INTERRUPT_MASK 6
+
+#define M2M_STATUS 0x000c
+#define M2M_STATUS_CTL_SHIFT 1
+#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_BUF_SHIFT 4
+#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_DONE BIT(6)
#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
@@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
/*
* M2M DMA implementation
- *
- * For the M2M transfers we don't use NFB at all. This is because it simply
- * doesn't work well with memcpy transfers. When you submit both buffers it is
- * extremely unlikely that you get an NFB interrupt, but it instead reports
- * DONE interrupt and both buffers are already transferred which means that we
- * weren't able to update the next buffer.
- *
- * So for now we "simulate" NFB by just submitting buffer after buffer
- * without double buffering.
*/
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
@@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
m2m_fill_desc(edmac);
control |= M2M_CONTROL_DONEINT;
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2m_fill_desc(edmac);
+ control |= M2M_CONTROL_NFBINT;
+ }
+
/*
* Now we can finally enable the channel. For M2M channel this must be
* done _after_ the BCRx registers are programmed.
@@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
}
}
+/*
+ * According to EP93xx User's Guide, we should receive DONE interrupt when all
+ * M2M DMA controller transactions complete normally. This is not always the
+ * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
+ * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
+ * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
+ * In effect, disabling the channel when only DONE bit is set could stop
+ * currently running DMA transfer. To avoid this, we use Buffer FSM and
+ * Control FSM to check current state of DMA channel.
+ */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
+ u32 status = readl(edmac->regs + M2M_STATUS);
+ u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
+ u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
+ bool done = status & M2M_STATUS_DONE;
+ bool last_done;
u32 control;
+ struct ep93xx_dma_desc *desc;
- if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+ /* Accept only DONE and NFB interrupts */
+ if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
return INTERRUPT_UNKNOWN;
- /* Clear the DONE bit */
- writel(0, edmac->regs + M2M_INTERRUPT);
+ if (done) {
+ /* Clear the DONE bit */
+ writel(0, edmac->regs + M2M_INTERRUPT);
+ }
- /* Disable interrupts and the channel */
- control = readl(edmac->regs + M2M_CONTROL);
- control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
- writel(control, edmac->regs + M2M_CONTROL);
+ /*
+ * Check whether we are done with descriptors or not. This, together
+ * with DMA channel state, determines action to take in interrupt.
+ */
+ desc = ep93xx_dma_get_active(edmac);
+ last_done = !desc || desc->txd.cookie;
/*
- * Since we only get DONE interrupt we have to find out ourselves
- * whether there still is something to process. So we try to advance
- * the chain an see whether it succeeds.
+ * Use M2M DMA Buffer FSM and Control FSM to check current state of
+ * DMA channel. Using DONE and NFB bits from channel status register
+ * or bits from channel interrupt register is not reliable.
*/
- if (ep93xx_dma_advance_active(edmac)) {
- edmac->edma->hw_submit(edmac);
- return INTERRUPT_NEXT_BUFFER;
+ if (!last_done &&
+ (buf_fsm == M2M_STATUS_BUF_NO ||
+ buf_fsm == M2M_STATUS_BUF_ON)) {
+ /*
+ * Two buffers are ready for update when Buffer FSM is in
+ * DMA_NO_BUF state. Only one buffer can be prepared without
+ * disabling the channel or polling the DONE bit.
+ * To simplify things, always prepare only one buffer.
+ */
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2m_fill_desc(edmac);
+ if (done && !edmac->chan.private) {
+ /* Software trigger for memcpy channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control |= M2M_CONTROL_START;
+ writel(control, edmac->regs + M2M_CONTROL);
+ }
+ return INTERRUPT_NEXT_BUFFER;
+ } else {
+ last_done = true;
+ }
+ }
+
+ /*
+ * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
+ * and Control FSM is in DMA_STALL state.
+ */
+ if (last_done &&
+ buf_fsm == M2M_STATUS_BUF_NO &&
+ ctl_fsm == M2M_STATUS_CTL_STALL) {
+ /* Disable interrupts and the channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
+ | M2M_CONTROL_ENABLE);
+ writel(control, edmac->regs + M2M_CONTROL);
+ return INTERRUPT_DONE;
}
- return INTERRUPT_DONE;
+ /*
+ * Nothing to do this time.
+ */
+ return INTERRUPT_NEXT_BUFFER;
}
/*
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8e1529..fcfeb3cd8d31 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
struct scatterlist *sg = d->sg;
unsigned long now;
- now = min(d->len, sg->length);
+ now = min(d->len, sg_dma_len(sg));
if (d->len != IMX_DMA_LENGTH_LOOP)
d->len -= now;
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
for_each_sg(sgl, sg, sg_len, i) {
- dma_length += sg->length;
+ dma_length += sg_dma_len(sg);
}
switch (imxdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_4_BYTES:
- if (sgl->length & 3 || sgl->dma_address & 3)
+ if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
return NULL;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- if (sgl->length & 1 || sgl->dma_address & 1)
+ if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
return NULL;
break;
case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[i].page_link = 0;
imxdmac->sg_list[i].offset = 0;
imxdmac->sg_list[i].dma_address = dma_addr;
- imxdmac->sg_list[i].length = period_len;
+ sg_dma_len(&imxdmac->sg_list[i]) = period_len;
dma_addr += period_len;
}
/* close the loop */
imxdmac->sg_list[periods].offset = 0;
- imxdmac->sg_list[periods].length = 0;
+ sg_dma_len(&imxdmac->sg_list[periods]) = 0;
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d3e38e28bb6b..1dc2a4ad0026 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
@@ -271,6 +271,7 @@ struct sdma_channel {
enum dma_status status;
unsigned int chn_count;
unsigned int chn_real_count;
+ struct tasklet_struct tasklet;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -322,8 +323,9 @@ struct sdma_engine {
struct sdma_context_data *context;
dma_addr_t context_phys;
struct dma_device dma_device;
- struct clk *clk;
- struct mutex channel_0_lock;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ spinlock_t channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
};
@@ -401,19 +403,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
}
/*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
*/
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
{
- struct sdma_engine *sdma = sdmac->sdma;
- int channel = sdmac->channel;
int ret;
+ unsigned long timeout = 500;
- init_completion(&sdmac->done);
+ sdma_enable_channel(sdma, 0);
- sdma_enable_channel(sdma, channel);
+ while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+ if (timeout-- <= 0)
+ break;
+ udelay(1);
+ }
- ret = wait_for_completion_timeout(&sdmac->done, HZ);
+ if (ret) {
+ /* Clear the interrupt status */
+ writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+ } else {
+ dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+ }
return ret ? 0 : -ETIMEDOUT;
}
@@ -425,17 +435,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
void *buf_virt;
dma_addr_t buf_phys;
int ret;
-
- mutex_lock(&sdma->channel_0_lock);
+ unsigned long flags;
buf_virt = dma_alloc_coherent(NULL,
size,
&buf_phys, GFP_KERNEL);
if (!buf_virt) {
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
bd0->mode.command = C0_SETPM;
bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
bd0->mode.count = size / 2;
@@ -444,12 +454,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
memcpy(buf_virt, buf, size);
- ret = sdma_run_channel(&sdma->channel[0]);
+ ret = sdma_run_channel0(sdma);
- dma_free_coherent(NULL, size, buf_virt, buf_phys);
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
-err_out:
- mutex_unlock(&sdma->channel_0_lock);
+ dma_free_coherent(NULL, size, buf_virt, buf_phys);
return ret;
}
@@ -534,13 +543,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
sdmac->desc.callback(sdmac->desc.callback_param);
}
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
{
- complete(&sdmac->done);
+ struct sdma_channel *sdmac = (struct sdma_channel *) data;
- /* not interested in channel 0 interrupts */
- if (sdmac->channel == 0)
- return;
+ complete(&sdmac->done);
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_handle_channel_loop(sdmac);
@@ -554,13 +561,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
unsigned long stat;
stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+ /* not interested in channel 0 interrupts */
+ stat &= ~1;
writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
while (stat) {
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
- mxc_sdma_handle_channel(sdmac);
+ tasklet_schedule(&sdmac->tasklet);
__clear_bit(channel, &stat);
}
@@ -659,6 +668,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
struct sdma_context_data *context = sdma->context;
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
+ unsigned long flags;
if (sdmac->direction == DMA_DEV_TO_MEM) {
load_address = sdmac->pc_from_device;
@@ -676,7 +686,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
- mutex_lock(&sdma->channel_0_lock);
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
memset(context, 0, sizeof(*context));
context->channel_state.pc = load_address;
@@ -695,10 +705,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
bd0->mode.count = sizeof(*context) / 4;
bd0->buffer_addr = sdma->context_phys;
bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+ ret = sdma_run_channel0(sdma);
- ret = sdma_run_channel(&sdma->channel[0]);
-
- mutex_unlock(&sdma->channel_0_lock);
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
return ret;
}
@@ -806,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
init_completion(&sdmac->done);
- sdmac->buf_tail = 0;
-
return 0;
out:
@@ -859,7 +866,8 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
sdmac->peripheral_type = data->peripheral_type;
sdmac->event_id0 = data->dma_request;
- clk_enable(sdmac->sdma->clk);
+ clk_enable(sdmac->sdma->clk_ipg);
+ clk_enable(sdmac->sdma->clk_ahb);
ret = sdma_request_channel(sdmac);
if (ret)
@@ -896,7 +904,8 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
}
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
@@ -916,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
sdmac->flags = 0;
+ sdmac->buf_tail = 0;
+
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
@@ -938,7 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
bd->buffer_addr = sg->dma_address;
- count = sg->length;
+ count = sg_dma_len(sg);
if (count > 0xffff) {
dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1016,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
+ sdmac->buf_tail = 0;
+
sdmac->flags |= IMX_DMA_SG_LOOP;
sdmac->direction = direction;
ret = sdma_load_context(sdmac);
@@ -1169,12 +1182,14 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
addr = (void *)header + header->script_addrs_start;
ram_code = (void *)header + header->ram_code_start;
- clk_enable(sdma->clk);
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
/* download the RAM image for SDMA */
sdma_load_script(sdma, ram_code,
header->ram_code_size,
addr->ram_code_start_addr);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
sdma_add_scripts(sdma, addr);
@@ -1216,7 +1231,8 @@ static int __init sdma_init(struct sdma_engine *sdma)
return -ENODEV;
}
- clk_enable(sdma->clk);
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1269,12 +1285,14 @@ static int __init sdma_init(struct sdma_engine *sdma)
/* Initializes channel's priorities */
sdma_set_channel_priority(&sdma->channel[0], 7);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
return 0;
err_dma_alloc:
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
dev_err(sdma->dev, "initialisation failed with %d\n", ret);
return ret;
}
@@ -1297,7 +1315,7 @@ static int __init sdma_probe(struct platform_device *pdev)
if (!sdma)
return -ENOMEM;
- mutex_init(&sdma->channel_0_lock);
+ spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
@@ -1313,12 +1331,21 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_request_region;
}
- sdma->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(sdma->clk)) {
- ret = PTR_ERR(sdma->clk);
+ sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(sdma->clk_ipg)) {
+ ret = PTR_ERR(sdma->clk_ipg);
goto err_clk;
}
+ sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sdma->clk_ahb)) {
+ ret = PTR_ERR(sdma->clk_ahb);
+ goto err_clk;
+ }
+
+ clk_prepare(sdma->clk_ipg);
+ clk_prepare(sdma->clk_ahb);
+
sdma->regs = ioremap(iores->start, resource_size(iores));
if (!sdma->regs) {
ret = -ENOMEM;
@@ -1359,6 +1386,8 @@ static int __init sdma_probe(struct platform_device *pdev)
dma_cookie_init(&sdmac->chan);
sdmac->channel = i;
+ tasklet_init(&sdmac->tasklet, sdma_tasklet,
+ (unsigned long) sdmac);
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
@@ -1426,7 +1455,6 @@ err_alloc:
err_request_irq:
iounmap(sdma->regs);
err_ioremap:
- clk_put(sdma->clk);
err_clk:
release_mem_region(iores->start, resource_size(iores));
err_request_region:
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c900ca7aaec4..222e907bfaaa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
}
}
/*Populate CTL_HI values*/
- ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+ ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
desc->width,
midc->dma->block_size);
/*Populate SAR and DAR values*/
- sg_phy_addr = sg_phys(sg);
+ sg_phy_addr = sg_dma_address(sg);
if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
txd = intel_mid_dma_prep_memcpy(chan,
mids->dma_slave.dst_addr,
mids->dma_slave.src_addr,
- sgl->length,
+ sg_dma_len(sgl),
flags);
return txd;
} else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
sg_len, direction, flags);
- txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+ txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
if (NULL == txd) {
pr_err("MDMA: Prep memcpy failed\n");
return NULL;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 62e3f8ec2461..5ec72044ea4c 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1715,7 +1715,7 @@ static int __init ipu_probe(struct platform_device *pdev)
}
/* Make sure IPU HSP clock is running */
- clk_enable(ipu_data.ipu_clk);
+ clk_prepare_enable(ipu_data.ipu_clk);
/* Disable all interrupts */
idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
@@ -1747,7 +1747,7 @@ static int __init ipu_probe(struct platform_device *pdev)
err_idmac_init:
err_attach_irq:
ipu_irq_detach_irq(&ipu_data, pdev);
- clk_disable(ipu_data.ipu_clk);
+ clk_disable_unprepare(ipu_data.ipu_clk);
clk_put(ipu_data.ipu_clk);
err_clk_get:
iounmap(ipu_data.reg_ic);
@@ -1765,7 +1765,7 @@ static int __exit ipu_remove(struct platform_device *pdev)
ipu_idmac_exit(ipu);
ipu_irq_detach_irq(ipu, pdev);
- clk_disable(ipu->ipu_clk);
+ clk_disable_unprepare(ipu->ipu_clk);
clk_put(ipu->ipu_clk);
iounmap(ipu->reg_ic);
iounmap(ipu->reg_ipu);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa5d55fea46c..0b12e68bf79c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
+#include <linux/clk.h>
#include <plat/mv_xor.h>
#include "dmaengine.h"
@@ -1307,11 +1308,25 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
if (dram)
mv_xor_conf_mbus_windows(msp, dram);
+ /* Not all platforms can gate the clock, so it is not
+ * an error if the clock does not exist.
+ */
+ msp->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(msp->clk))
+ clk_prepare_enable(msp->clk);
+
return 0;
}
static int mv_xor_shared_remove(struct platform_device *pdev)
{
+ struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+
+ if (!IS_ERR(msp->clk)) {
+ clk_disable_unprepare(msp->clk);
+ clk_put(msp->clk);
+ }
+
return 0;
}
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 654876b7ba1d..a5b422f5a8ab 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -55,6 +55,7 @@
struct mv_xor_shared_private {
void __iomem *xor_base;
void __iomem *xor_high_base;
+ struct clk *clk;
};
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6ed0d..c96ab15319f2 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,11 +22,14 @@
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/fsl/mxs-dma.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/irq.h>
#include <mach/mxs.h>
-#include <mach/common.h>
#include "dmaengine.h"
@@ -36,12 +39,8 @@
* dma can program the controller registers of peripheral devices.
*/
-#define MXS_DMA_APBH 0
-#define MXS_DMA_APBX 1
-#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH)
-
-#define APBH_VERSION_LATEST 3
-#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST)
+#define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH)
+#define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA)
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
@@ -51,13 +50,14 @@
#define HW_APBHX_CTRL2 0x020
#define HW_APBHX_CHANNEL_CTRL 0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
-#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
-#define HW_APBX_VERSION 0x800
-#define BP_APBHX_VERSION_MAJOR 24
-#define HW_APBHX_CHn_NXTCMDAR(n) \
- (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
-#define HW_APBHX_CHn_SEMA(n) \
- (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
+/*
+ * The offset of the NXTCMDAR register differs with both DMA type and version,
+ * while the per-channel stride is always 0x70.
+ */
+#define HW_APBHX_CHn_NXTCMDAR(d, n) \
+ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA(d, n) \
+ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
/*
* ccw bits definitions
@@ -121,9 +121,19 @@ struct mxs_dma_chan {
#define MXS_DMA_CHANNELS 16
#define MXS_DMA_CHANNELS_MASK 0xffff
+enum mxs_dma_devtype {
+ MXS_DMA_APBH,
+ MXS_DMA_APBX,
+};
+
+enum mxs_dma_id {
+ IMX23_DMA,
+ IMX28_DMA,
+};
+
struct mxs_dma_engine {
- int dev_id;
- unsigned int version;
+ enum mxs_dma_id dev_id;
+ enum mxs_dma_devtype type;
void __iomem *base;
struct clk *clk;
struct dma_device dma_device;
@@ -131,17 +141,86 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
+struct mxs_dma_type {
+ enum mxs_dma_id id;
+ enum mxs_dma_devtype type;
+};
+
+static struct mxs_dma_type mxs_dma_types[] = {
+ {
+ .id = IMX23_DMA,
+ .type = MXS_DMA_APBH,
+ }, {
+ .id = IMX23_DMA,
+ .type = MXS_DMA_APBX,
+ }, {
+ .id = IMX28_DMA,
+ .type = MXS_DMA_APBH,
+ }, {
+ .id = IMX28_DMA,
+ .type = MXS_DMA_APBX,
+ }
+};
+
+static struct platform_device_id mxs_dma_ids[] = {
+ {
+ .name = "imx23-dma-apbh",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
+ }, {
+ .name = "imx23-dma-apbx",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
+ }, {
+ .name = "imx28-dma-apbh",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
+ }, {
+ .name = "imx28-dma-apbx",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
+ }, {
+ /* end of list */
+ }
+};
+
+static const struct of_device_id mxs_dma_dt_ids[] = {
+ { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
+ { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
+ { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
+ { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
+
+static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct mxs_dma_chan, chan);
+}
+
+int mxs_dma_is_apbh(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+ return dma_is_apbh(mxs_dma);
+}
+
+int mxs_dma_is_apbx(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+ return !dma_is_apbh(mxs_dma);
+}
+
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}
static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -151,10 +230,10 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
- mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
+ mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
/* write 1 to SEMA to kick off the channel */
- writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
+ writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
@@ -168,12 +247,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
int chan_id = mxs_chan->chan.chan_id;
/* freeze the channel */
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
mxs_chan->status = DMA_PAUSED;
}
@@ -184,21 +263,16 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
int chan_id = mxs_chan->chan.chan_id;
/* unfreeze the channel */
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
else
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
mxs_chan->status = DMA_IN_PROGRESS;
}
-static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct mxs_dma_chan, chan);
-}
-
static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
return dma_cookie_assign(tx);
@@ -220,11 +294,11 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
/* completion status */
stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
stat1 &= MXS_DMA_CHANNELS_MASK;
- writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
+ writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
/* error status */
stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
- writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
+ writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
/*
* When both completion and error of termination bits set at the
@@ -415,9 +489,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
} else {
for_each_sg(sgl, sg, sg_len, i) {
- if (sg->length > MAX_XFER_BYTES) {
+ if (sg_dma_len(sg) > MAX_XFER_BYTES) {
dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
- sg->length, MAX_XFER_BYTES);
+ sg_dma_len(sg), MAX_XFER_BYTES);
goto err_out;
}
@@ -425,7 +499,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
ccw->bufaddr = sg->dma_address;
- ccw->xfer_bytes = sg->length;
+ ccw->xfer_bytes = sg_dma_len(sg);
ccw->bits = 0;
ccw->bits |= CCW_CHAIN;
@@ -567,27 +641,21 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
if (ret)
return ret;
- ret = mxs_reset_block(mxs_dma->base);
+ ret = stmp_reset_block(mxs_dma->base);
if (ret)
goto err_out;
- /* only major version matters */
- mxs_dma->version = readl(mxs_dma->base +
- ((mxs_dma->dev_id == MXS_DMA_APBX) ?
- HW_APBX_VERSION : HW_APBH_VERSION)) >>
- BP_APBHX_VERSION_MAJOR;
-
/* enable apbh burst */
- if (dma_is_apbh()) {
+ if (dma_is_apbh(mxs_dma)) {
writel(BM_APBH_CTRL0_APB_BURST_EN,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_APBH_CTRL0_APB_BURST8_EN,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
}
/* enable irq for all the channels */
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
- mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
err_out:
clk_disable_unprepare(mxs_dma->clk);
@@ -596,8 +664,9 @@ err_out:
static int __init mxs_dma_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id_entry =
- platform_get_device_id(pdev);
+ const struct platform_device_id *id_entry;
+ const struct of_device_id *of_id;
+ const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
struct resource *iores;
int ret, i;
@@ -606,7 +675,15 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
if (!mxs_dma)
return -ENOMEM;
- mxs_dma->dev_id = id_entry->driver_data;
+ of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
+ if (of_id)
+ id_entry = of_id->data;
+ else
+ id_entry = platform_get_device_id(pdev);
+
+ dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+ mxs_dma->type = dma_type->type;
+ mxs_dma->dev_id = dma_type->id;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -689,23 +766,12 @@ err_request_region:
return ret;
}
-static struct platform_device_id mxs_dma_type[] = {
- {
- .name = "mxs-dma-apbh",
- .driver_data = MXS_DMA_APBH,
- }, {
- .name = "mxs-dma-apbx",
- .driver_data = MXS_DMA_APBX,
- }, {
- /* end of list */
- }
-};
-
static struct platform_driver mxs_dma_driver = {
.driver = {
.name = "mxs-dma",
+ .of_match_table = mxs_dma_dt_ids,
},
- .id_table = mxs_dma_type,
+ .id_table = mxs_dma_ids,
};
static int __init mxs_dma_module_init(void)
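A minimal sketch of the set/clear register idiom the mxs-dma conversion above relies on, assuming the usual STMP register layout (SET alias at +0x4, CLR alias at +0x8, as provided by include/linux/stmp_device.h); stmp_set_bits()/stmp_clear_bits() are illustrative helpers, not part of this patch:

	#include <linux/io.h>
	#include <linux/stmp_device.h>

	/* Set bits without a read-modify-write: write them to the SET alias. */
	static inline void stmp_set_bits(void __iomem *reg, u32 bits)
	{
		writel(bits, reg + STMP_OFFSET_REG_SET);
	}

	/* Clear bits the same way via the CLR alias. */
	static inline void stmp_clear_bits(void __iomem *reg, u32 bits)
	{
		writel(bits, reg + STMP_OFFSET_REG_CLR);
	}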
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 65c0495a6d40..987ab5cd2617 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -621,7 +621,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
goto err_desc_get;
desc->regs.dev_addr = reg;
- desc->regs.mem_addr = sg_phys(sg);
+ desc->regs.mem_addr = sg_dma_address(sg);
desc->regs.size = sg_dma_len(sg);
desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa3fb21e60be..e4feba6b03c0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -21,7 +21,6 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
@@ -393,6 +392,8 @@ struct pl330_req {
struct pl330_reqcfg *cfg;
/* Pointer to first xfer in the request. */
struct pl330_xfer *x;
+ /* Hook to attach to DMAC's list of reqs with due callback */
+ struct list_head rqd;
};
/*
@@ -462,8 +463,6 @@ struct _pl330_req {
/* Number of bytes taken to setup MC for the req */
u32 mc_len;
struct pl330_req *r;
- /* Hook to attach to DMAC's list of reqs with due callback */
- struct list_head rqd;
};
/* ToBeDone for tasklet */
@@ -1684,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
- struct _pl330_req *rqdone;
+ struct pl330_req *rqdone, *tmp;
struct pl330_dmac *pl330;
unsigned long flags;
void __iomem *regs;
@@ -1751,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
if (active == -1) /* Aborted */
continue;
- rqdone = &thrd->req[active];
+ /* Detach the req */
+ rqdone = thrd->req[active].r;
+ thrd->req[active].r = NULL;
+
mark_free(thrd, active);
/* Get going again ASAP */
@@ -1763,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
}
/* Now that we are in no hurry, do the callbacks */
- while (!list_empty(&pl330->req_done)) {
- struct pl330_req *r;
-
- rqdone = container_of(pl330->req_done.next,
- struct _pl330_req, rqd);
-
- list_del_init(&rqdone->rqd);
-
- /* Detach the req */
- r = rqdone->r;
- rqdone->r = NULL;
+ list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
+ list_del(&rqdone->rqd);
spin_unlock_irqrestore(&pl330->lock, flags);
- _callback(r, PL330_ERR_NONE);
+ _callback(rqdone, PL330_ERR_NONE);
spin_lock_irqsave(&pl330->lock, flags);
}
@@ -2322,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
/* Pick up ripe tomatoes */
list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
if (desc->status == DONE) {
- if (pch->cyclic)
+ if (!pch->cyclic)
dma_cookie_complete(&desc->txd);
list_move_tail(&desc->node, &list);
}
@@ -2540,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
}
/* Returns the number of descriptors added to the DMAC pool */
-int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
struct dma_pl330_desc *desc;
unsigned long flags;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f3..000d309602b2 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
}
sg[periods].offset = 0;
- sg[periods].length = 0;
+ sg_dma_len(&sg[periods]) = 0;
sg[periods].page_link =
((unsigned long)sg | 0x01) & ~0x02;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7ef73c919c5d..7be9b7288e90 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -715,25 +715,6 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
input_addr_to_dram_addr(mci, input_addr));
}
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
- u64 *input_addr_min, u64 *input_addr_max)
-{
- struct amd64_pvt *pvt;
- u64 base, mask;
-
- pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
- get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
- *input_addr_min = base & ~mask;
- *input_addr_max = base | mask;
-}
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
u32 *page, u32 *offset)
@@ -1058,6 +1039,37 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
int channel, csrow;
u32 page, offset;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ src_mci = find_mc_by_sys_addr(mci, sys_addr);
+ if (!src_mci) {
+ amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+ (unsigned long)sys_addr);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a node",
+ NULL);
+ return;
+ }
+
+ /* Now map the sys_addr to a CSROW */
+ csrow = sys_addr_to_csrow(src_mci, sys_addr);
+ if (csrow < 0) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a csrow",
+ NULL);
+ return;
+ }
+
/* CHIPKILL enabled */
if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
@@ -1067,9 +1079,15 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
* 2 DIMMs is in error. So we need to ID 'both' of them
* as suspect.
*/
- amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
- "error reporting race\n", syndrome);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
+ "possible error reporting race\n",
+ syndrome);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ csrow, -1, -1,
+ EDAC_MOD_STR,
+ "unknown syndrome - possible error reporting race",
+ NULL);
return;
}
} else {
@@ -1084,28 +1102,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
channel = ((sys_addr & BIT(3)) != 0);
}
- /*
- * Find out which node the error address belongs to. This may be
- * different from the node that detected the error.
- */
- src_mci = find_mc_by_sys_addr(mci, sys_addr);
- if (!src_mci) {
- amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- /* Now map the sys_addr to a CSROW */
- csrow = sys_addr_to_csrow(src_mci, sys_addr);
- if (csrow < 0) {
- edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
- } else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
- edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
- channel, EDAC_MOD_STR);
- }
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
+ page, offset, syndrome,
+ csrow, channel, -1,
+ EDAC_MOD_STR, "", NULL);
}
static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1611,15 +1611,20 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
u32 page, offset;
int nid, csrow, chan = 0;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a csrow",
+ NULL);
return;
}
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
@@ -1628,16 +1633,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
- if (chan >= 0)
- edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
- EDAC_MOD_STR);
- else
- /*
- * Channel unknown, report all channels on this CSROW as failed.
- */
- for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
- edac_mc_handle_ce(mci, page, offset, syndrome,
- csrow, chan, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ csrow, chan, -1,
+ EDAC_MOD_STR, "", NULL);
}
/*
@@ -1918,7 +1917,12 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
/* Ensure that the Error Address is VALID */
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "HW has no ERROR_ADDRESS available",
+ NULL);
return;
}
@@ -1942,11 +1946,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "HW has no ERROR_ADDRESS available",
+ NULL);
return;
}
sys_addr = get_error_address(m);
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
/*
* Find out which node the error address belongs to. This may be
@@ -1956,7 +1966,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!src_mci) {
amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
(unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "ERROR ADDRESS NOT mapped to a MC", NULL);
return;
}
@@ -1966,10 +1980,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (csrow < 0) {
amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
(unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "ERROR ADDRESS NOT mapped to CS",
+ NULL);
} else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
- edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ csrow, -1, -1,
+ EDAC_MOD_STR, "", NULL);
}
}
@@ -2171,7 +2192,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
- debugf0(" nr_pages= %u channel-count = %d\n",
+ debugf0(" nr_pages/channel= %u channel-count = %d\n",
nr_pages, pvt->channel_count);
return nr_pages;
@@ -2185,9 +2206,12 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr, base, mask;
+ u64 base, mask;
u32 val;
- int i, empty = 1;
+ int i, j, empty = 1;
+ enum mem_type mtype;
+ enum edac_type edac_mode;
+ int nr_pages = 0;
amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
@@ -2211,41 +2235,32 @@ static int init_csrows(struct mem_ctl_info *mci)
empty = 0;
if (csrow_enabled(i, 0, pvt))
- csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+ nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
if (csrow_enabled(i, 1, pvt))
- csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
- find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
- csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
- csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+ nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
get_cs_base_and_mask(pvt, i, 0, &base, &mask);
- csrow->page_mask = ~mask;
/* 8 bytes of resolution */
- csrow->mtype = amd64_determine_memory_type(pvt, i);
+ mtype = amd64_determine_memory_type(pvt, i);
debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
- debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
- (unsigned long)input_addr_min,
- (unsigned long)input_addr_max);
- debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
- (unsigned long)sys_addr, csrow->page_mask);
- debugf1(" nr_pages: %u first_page: 0x%lx "
- "last_page: 0x%lx\n",
- (unsigned)csrow->nr_pages,
- csrow->first_page, csrow->last_page);
+ debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
if (pvt->nbcfg & NBCFG_ECC_ENABLE)
- csrow->edac_mode =
- (pvt->nbcfg & NBCFG_CHIPKILL) ?
- EDAC_S4ECD4ED : EDAC_SECDED;
+ edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
+ EDAC_S4ECD4ED : EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ edac_mode = EDAC_NONE;
+
+ for (j = 0; j < pvt->channel_count; j++) {
+ csrow->channels[j].dimm->mtype = mtype;
+ csrow->channels[j].dimm->edac_mode = edac_mode;
+ csrow->channels[j].dimm->nr_pages = nr_pages;
+ }
}
return empty;
@@ -2540,6 +2555,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
struct amd64_pvt *pvt = NULL;
struct amd64_family_type *fam_type = NULL;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
int err = 0, ret;
u8 nid = get_node_id(F2);
@@ -2574,7 +2590,13 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = pvt->csels[0].b_cnt;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = pvt->channel_count;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
if (!mci)
goto err_siblings;
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f8fd3c807bde..9774d443fa57 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -29,7 +29,6 @@
edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
#define AMD76X_NR_CSROWS 8
-#define AMD76X_NR_CHANS 1
#define AMD76X_NR_DIMMS 4
/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
@@ -146,8 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
- edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
- row, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ mci->csrows[row].first_page, 0, 0,
+ row, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -159,8 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
- edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
- 0, row, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ mci->csrows[row].first_page, 0, 0,
+ row, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -186,11 +189,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 mba, mba_base, mba_mask, dms;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
@@ -203,13 +208,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
csrow->first_page = mba_base >> PAGE_SHIFT;
- csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
csrow->page_mask = mba_mask >> PAGE_SHIFT;
- csrow->grain = csrow->nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_RDDR;
- csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
+ dimm->grain = dimm->nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_RDDR;
+ dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+ dimm->edac_mode = edac_mode;
}
}
@@ -230,7 +235,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
EDAC_SECDED,
EDAC_SECDED
};
- struct mem_ctl_info *mci = NULL;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u32 ems;
u32 ems_mode;
struct amd76x_error_info discard;
@@ -238,11 +244,17 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
debugf0("%s()\n", __func__);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
- mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
- if (mci == NULL) {
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = AMD76X_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
+
+ if (mci == NULL)
return -ENOMEM;
- }
debugf0("%s(): mci = %p\n", __func__, mci);
mci->dev = &pdev->dev;
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 9a6a274e6925..69ee6aab5c71 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -48,8 +48,9 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
syndrome = (ar & 0x000000001fe00000ul) >> 21;
/* TODO: Decoding of the error address */
- edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
- syndrome, 0, chan, "");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ csrow->first_page + pfn, offset, syndrome,
+ 0, chan, -1, "", "", NULL);
}
static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
@@ -69,7 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
offset = address & ~PAGE_MASK;
/* TODO: Decoding of the error address */
- edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ csrow->first_page + pfn, offset, 0,
+ 0, chan, -1, "", "", NULL);
}
static void cell_edac_check(struct mem_ctl_info *mci)
@@ -124,8 +127,11 @@ static void cell_edac_check(struct mem_ctl_info *mci)
static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = &mci->csrows[0];
+ struct dimm_info *dimm;
struct cell_edac_priv *priv = mci->pvt_info;
struct device_node *np;
+ int j;
+ u32 nr_pages;
for (np = NULL;
(np = of_find_node_by_name(np, "memory")) != NULL;) {
@@ -140,15 +146,20 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
if (of_node_to_nid(np) != priv->node)
continue;
csrow->first_page = r.start >> PAGE_SHIFT;
- csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->mtype = MEM_XDR;
- csrow->edac_mode = EDAC_SECDED;
+ nr_pages = resource_size(&r) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + nr_pages - 1;
+
+ for (j = 0; j < csrow->nr_channels; j++) {
+ dimm = csrow->channels[j].dimm;
+ dimm->mtype = MEM_XDR;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ }
dev_dbg(mci->dev,
"Initialized on node %d, chanmask=0x%x,"
" first_page=0x%lx, nr_pages=0x%x\n",
priv->node, priv->chanmask,
- csrow->first_page, csrow->nr_pages);
+ csrow->first_page, nr_pages);
break;
}
}
@@ -157,9 +168,10 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
{
struct cbe_mic_tm_regs __iomem *regs;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct cell_edac_priv *priv;
u64 reg;
- int rc, chanmask;
+ int rc, chanmask, num_chans;
regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
if (regs == NULL)
@@ -184,8 +196,16 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
in_be64(&regs->mic_fir));
/* Allocate & init EDAC MC data structure */
- mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
- chanmask == 3 ? 2 : 1, pdev->id);
+ num_chans = chanmask == 3 ? 2 : 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+ sizeof(struct cell_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index a774c0ddaf5b..e22030a9de66 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -329,9 +329,10 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
- int index;
+ struct dimm_info *dimm;
+ int index, j;
u32 mbmr, mbbar, bba;
- unsigned long row_size, last_nr_pages = 0;
+ unsigned long row_size, nr_pages, last_nr_pages = 0;
get_total_mem(pdata);
@@ -350,36 +351,41 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
row_size = bba * (1UL << 28); /* 256M */
csrow->first_page = last_nr_pages;
- csrow->nr_pages = row_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ nr_pages = row_size >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + nr_pages - 1;
last_nr_pages = csrow->last_page + 1;
- csrow->mtype = MEM_RDDR;
- csrow->edac_mode = EDAC_SECDED;
-
- switch (csrow->nr_channels) {
- case 1: /* Single channel */
- csrow->grain = 32; /* four-beat burst of 32 bytes */
- break;
- case 2: /* Dual channel */
- default:
- csrow->grain = 64; /* four-beat burst of 64 bytes */
- break;
- }
-
- switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
- case 6: /* 0110, no way to differentiate X8 VS X16 */
- case 5: /* 0101 */
- case 8: /* 1000 */
- csrow->dtype = DEV_X16;
- break;
- case 7: /* 0111 */
- case 9: /* 1001 */
- csrow->dtype = DEV_X8;
- break;
- default:
- csrow->dtype = DEV_UNKNOWN;
- break;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ dimm->mtype = MEM_RDDR;
+ dimm->edac_mode = EDAC_SECDED;
+
+ switch (csrow->nr_channels) {
+ case 1: /* Single channel */
+ dimm->grain = 32; /* four-beat burst of 32 bytes */
+ break;
+ case 2: /* Dual channel */
+ default:
+ dimm->grain = 64; /* four-beat burst of 64 bytes */
+ break;
+ }
+
+ switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+ case 6: /* 0110, no way to differentiate X8 VS X16 */
+ case 5: /* 0101 */
+ case 8: /* 1000 */
+ dimm->dtype = DEV_X16;
+ break;
+ case 7: /* 0111 */
+ case 9: /* 1001 */
+ dimm->dtype = DEV_X8;
+ break;
+ default:
+ dimm->dtype = DEV_UNKNOWN;
+ break;
+ }
}
}
}
@@ -549,13 +555,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
if (apiexcp & CECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
channel = cpc925_mc_find_channel(mci, syndrome);
- edac_mc_handle_ce(mci, pfn, offset, syndrome,
- csrow, channel, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, syndrome,
+ csrow, channel, -1,
+ mci->ctl_name, "", NULL);
}
if (apiexcp & UECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
- edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, 0,
+ csrow, -1, -1,
+ mci->ctl_name, "", NULL);
}
cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -927,6 +938,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
{
static int edac_mc_idx;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
void __iomem *vbase;
struct cpc925_mc_pdata *pdata;
struct resource *r;
@@ -962,9 +974,16 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
goto err2;
}
- nr_channels = cpc925_mc_get_channels(vbase);
- mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
- CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+ nr_channels = cpc925_mc_get_channels(vbase) + 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = CPC925_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(struct cpc925_mc_pdata));
if (!mci) {
cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
res = -ENOMEM;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 41223261ede9..3186512c9739 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -4,7 +4,11 @@
* This file may be distributed under the terms of the
* GNU General Public License.
*
- * See "enum e752x_chips" below for supported chipsets
+ * Implement support for the E7520, E7525, E7320 and i3100 memory controllers.
+ *
+ * Datasheets:
+ * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
+ * ftp://download.intel.com/design/intarch/datashts/31345803.pdf
*
* Written by Tom Zimmerman
*
@@ -13,8 +17,6 @@
* Wang Zhenyu at intel.com
* Dave Jiang at mvista.com
*
- * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
*/
#include <linux/module.h>
@@ -187,6 +189,25 @@ enum e752x_chips {
I3100 = 3
};
+/*
+ * These chips support single-rank and dual-rank memories only.
+ *
+ * On e752x chips, the odd rows are present only on dual-rank memories.
+ * Dividing the rank by two will provide the dimm#
+ *
+ * i3100 MC has a different mapping: it supports only 4 ranks.
+ *
+ * The mapping is (from 1 to n):
+ * slot single-ranked double-ranked
+ * dimm #1 -> rank #4 NA
+ * dimm #2 -> rank #3 NA
+ * dimm #3 -> rank #2 Ranks 2 and 3
+ * dimm #4 -> rank #1		Ranks 1 and 4
+ *
+ * FIXME: The current mapping for i3100 considers that it supports up to 8
+ * ranks/channel, but the datasheet says that the MC supports only 4 ranks.
+ */
+
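A hedged illustration of the mapping described above (the helper name is hypothetical, not part of the patch): on e752x parts each slot carries up to two ranks, so the DIMM slot follows from halving the chip-select row, while the 1-based i3100 table above runs the other way (dimm = 5 - rank).

	/* Sketch only: derive the DIMM slot from a chip-select row on e752x. */
	static inline int e752x_csrow_to_dimm(int csrow)
	{
		return csrow >> 1;	/* rank / 2 == dimm# */
	}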
struct e752x_pvt {
struct pci_dev *bridge_ck;
struct pci_dev *dev_d0f0;
@@ -350,8 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
channel = !(error_one & 1);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
- sec1_syndrome, row, channel, "e752x CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset_in_page(sec1_add << 4), sec1_syndrome,
+ row, channel, -1,
+ "e752x CE", "", NULL);
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -385,9 +408,12 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Read");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ block_page,
+ offset_in_page(error_2b << 4), 0,
+ row, -1, -1,
+ "e752x UE from Read", "", NULL);
+
}
if (error_one & 0x0404) {
error_2b = scrb_add;
@@ -401,9 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Scruber");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ block_page,
+ offset_in_page(error_2b << 4), 0,
+ row, -1, -1,
+ "e752x UE from Scruber", "", NULL);
}
}
@@ -426,7 +454,9 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
return;
debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "e752x UE log memory write", "", NULL);
}
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -1044,7 +1074,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u8 value;
- u32 dra, drc, cumul_size;
+ u32 dra, drc, cumul_size, i, nr_pages;
dra = 0;
for (index = 0; index < 4; index++) {
@@ -1053,7 +1083,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
dra |= dra_reg << (index * 8);
}
pci_read_config_dword(pdev, E752X_DRC, &drc);
- drc_chan = dual_channel_active(ddrcsr);
+ drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
drc_ddim = (drc >> 20) & 0x3;
@@ -1078,26 +1108,33 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
+
+ for (i = 0; i < csrow->nr_channels; i++) {
+ struct dimm_info *dimm = csrow->channels[i].dimm;
+
+ debugf3("Initializing rank at (%i,%i)\n", index, i);
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ dimm->mtype = MEM_RDDR; /* only one type supported */
+ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ dimm->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ dimm->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ dimm->edac_mode = EDAC_NONE;
+ }
}
}
@@ -1226,6 +1263,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
u16 pci_data;
u8 stat8;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct e752x_pvt *pvt;
u16 ddrcsr;
int drc_chan; /* Number of channels 0=1chan,1=2chan */
@@ -1252,11 +1290,15 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
/* Dual channel = 1, Single channel = 0 */
drc_chan = dual_channel_active(ddrcsr);
- mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
-
- if (mci == NULL) {
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = E752X_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = drc_chan + 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+ if (mci == NULL)
return -ENOMEM;
- }
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 68dea87b72e6..9a9c1a546797 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -10,6 +10,9 @@
* Based on work by Dan Hollis <goemon at anime dot net> and others.
* http://www.anime.net/~goemon/linux-ecc/
*
+ * Datasheet:
+ * http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
+ *
* Contributors:
* Eric Biederman (Linux Networx)
* Tom Zimmerman (Linux Networx)
@@ -71,7 +74,7 @@
#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
#define E7XXX_NR_CSROWS 8 /* number of csrows */
-#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
+#define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */
/* E7XXX register addresses - device 0 function 0 */
#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
@@ -216,13 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
row = edac_mc_find_csrow_by_page(mci, page);
/* convert syndrome to channel */
channel = e7xxx_find_channel(syndrome);
- edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
+ row, channel, -1, "e7xxx CE", "", NULL);
}
static void process_ce_no_info(struct mem_ctl_info *mci)
{
debugf3("%s()\n", __func__);
- edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+ "e7xxx CE log register overflow", "", NULL);
}
static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -236,13 +241,17 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
/* FIXME - should use PAGE_SHIFT */
block_page = error_2b >> 6; /* convert to 4k address */
row = edac_mc_find_csrow_by_page(mci, block_page);
- edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
+ row, -1, -1, "e7xxx UE", "", NULL);
}
static void process_ue_no_info(struct mem_ctl_info *mci)
{
debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+ "e7xxx UE log register overflow", "", NULL);
}
static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -347,11 +356,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int dev_idx, u32 drc)
{
unsigned long last_cumul_size;
- int index;
+ int index, j;
u8 value;
- u32 dra, cumul_size;
+ u32 dra, cumul_size, nr_pages;
int drc_chan, drc_drbg, drc_ddim, mem_dev;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
pci_read_config_dword(pdev, E7XXX_DRA, &dra);
drc_chan = dual_channel_active(drc, dev_idx);
@@ -379,26 +389,32 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
+
+ for (j = 0; j < drc_chan + 1; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / (drc_chan + 1);
+ dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ dimm->mtype = MEM_RDDR; /* only one type supported */
+ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ dimm->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ dimm->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ dimm->edac_mode = EDAC_NONE;
+ }
}
}
@@ -406,6 +422,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
u16 pci_data;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
struct e7xxx_pvt *pvt = NULL;
u32 drc;
int drc_chan;
@@ -416,8 +433,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
drc_chan = dual_channel_active(drc, dev_idx);
- mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
-
+ /*
+	 * According to the datasheet, this device has a maximum of
+	 * 4 DIMMs per channel, either single-rank or dual-rank, so the
+	 * total number of DIMMs is 8 (E7XXX_NR_DIMMS).
+	 * That means that DIMMs are mapped as CSROWs, and the channel
+	 * maps the rank. So, an error on either channel should be
+	 * attributed to the same DIMM.
+ */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = E7XXX_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = drc_chan + 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index e48ab3108ad8..117490d4f835 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -107,13 +107,13 @@ extern int edac_debug_level;
*
* CPU caches (L1 and L2)
* DMA engines
- * Core CPU swithces
+ * Core CPU switches
* Fabric switch units
* PCIe interface controllers
* other EDAC/ECC type devices that can be monitored for
* errors, etc.
*
- * It allows for a 2 level set of hiearchry. For example:
+ * It allows for a 2 level set of hierarchy. For example:
*
* cache could be composed of L1, L2 and L3 levels of cache.
* Each CPU core would have its own L1 cache, while sharing
@@ -447,8 +447,10 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
#endif /* CONFIG_PCI */
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index);
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt);
extern int edac_mc_add_mc(struct mem_ctl_info *mci);
extern void edac_mc_free(struct mem_ctl_info *mci);
extern struct mem_ctl_info *edac_mc_find(int idx);
@@ -456,35 +458,17 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exausted. When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occurred and the type of error, but doesn't have any
- * further information. The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page,
- unsigned long syndrome, int row, int channel,
- const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row,
- const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel0, unsigned int channel1,
- char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel, char *msg);
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int layer0,
+ const int layer1,
+ const int layer2,
+ const char *msg,
+ const char *other_detail,
+ const void *mcelog);
/*
* edac_device APIs
@@ -496,6 +480,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
/*
* edac_pci APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 4b154593343a..ee3f1f810c1e 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -56,7 +56,7 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
*
* The control structure is allocated in complete chunk
* from the OS. It is in turn sub allocated to the
- * various objects that compose the struture
+ * various objects that compose the structure
*
* The structure has a 'nr_instance' array within itself.
* Each instance represents a major component
@@ -79,7 +79,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
unsigned total_size;
unsigned count;
unsigned instance, block, attr;
- void *pvt;
+ void *pvt, *p;
int err;
debugf4("%s() instances=%d blocks=%d\n",
@@ -92,35 +92,30 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
* to be at least as stringent as what the compiler would
* provide if we could simply hardcode everything into a single struct.
*/
- dev_ctl = (struct edac_device_ctl_info *)NULL;
+ p = NULL;
+ dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
/* Calc the 'end' offset past end of ONE ctl_info structure
* which will become the start of the 'instance' array
*/
- dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+ dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
/* Calc the 'end' offset past the instance array within the ctl_info
* which will become the start of the block array
*/
- dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+ count = nr_instances * nr_blocks;
+ dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
/* Calc the 'end' offset past the dev_blk array
* which will become the start of the attrib array, if any.
*/
- count = nr_instances * nr_blocks;
- dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
-
- /* Check for case of when an attribute array is specified */
- if (nr_attrib > 0) {
- /* calc how many nr_attrib we need */
+ /* calc how many nr_attrib we need */
+ if (nr_attrib > 0)
count *= nr_attrib;
+ dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
- /* Calc the 'end' offset past the attributes array */
- pvt = edac_align_ptr(&dev_attrib[count], sz_private);
- } else {
- /* no attribute array specificed */
- pvt = edac_align_ptr(dev_attrib, sz_private);
- }
+ /* Calc the 'end' offset past the attributes array */
+ pvt = edac_align_ptr(&p, sz_private, 1);
/* 'pvt' now points to where the private data area is.
* At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
@@ -394,7 +389,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
/* Reschedule the workq for the next time period to start again
* if the number of msec is for 1 sec, then adjust to the next
- * whole one second to save timers fireing all over the period
+ * whole one second to save timers firing all over the period
* between integral seconds
*/
if (edac_dev->poll_msec == 1000)
@@ -563,7 +558,7 @@ EXPORT_SYMBOL_GPL(edac_device_add_device);
* Remove sysfs entries for specified edac_device structure and
* then remove edac_device structure from global list
*
- * @pdev:
+ * @dev:
* Pointer to 'struct device' representing edac_device
* structure to remove.
*
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index feef7733fae7..de5ba86e8b89 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -43,9 +43,26 @@ static void edac_mc_dump_channel(struct rank_info *chan)
{
debugf4("\tchannel = %p\n", chan);
debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
- debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
- debugf4("\tchannel->label = '%s'\n", chan->label);
debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+ debugf4("\tchannel->dimm = %p\n", chan->dimm);
+}
+
+static void edac_mc_dump_dimm(struct dimm_info *dimm)
+{
+ int i;
+
+ debugf4("\tdimm = %p\n", dimm);
+ debugf4("\tdimm->label = '%s'\n", dimm->label);
+ debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+ debugf4("\tdimm location ");
+ for (i = 0; i < dimm->mci->n_layers; i++) {
+ printk(KERN_CONT "%d", dimm->location[i]);
+ if (i < dimm->mci->n_layers - 1)
+ printk(KERN_CONT ".");
+ }
+ printk(KERN_CONT "\n");
+ debugf4("\tdimm->grain = %d\n", dimm->grain);
+ debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
@@ -55,7 +72,6 @@ static void edac_mc_dump_csrow(struct csrow_info *csrow)
debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
- debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
debugf4("\tcsrow->channels = %p\n", csrow->channels);
debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
@@ -70,6 +86,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
debugf4("\tmci->edac_check = %p\n", mci->edac_check);
debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
mci->nr_csrows, mci->csrows);
+ debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
+ mci->tot_dimms, mci->dimms);
debugf3("\tdev = %p\n", mci->dev);
debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -101,18 +119,37 @@ const char *edac_mem_types[] = {
};
EXPORT_SYMBOL_GPL(edac_mem_types);
-/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
- * Adjust 'ptr' so that its alignment is at least as stringent as what the
- * compiler would provide for X and return the aligned result.
+/**
+ * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
+ * @p: pointer to a pointer with the memory offset to be used. At
+ * return, this will be incremented to point to the next offset
+ * @size: Size of the data structure to be reserved
+ * @n_elems: Number of elements that should be reserved
*
* If 'size' is a constant, the compiler will optimize this whole function
- * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ * down to either a no-op or the addition of a constant to the value of '*p'.
+ *
+ * The 'p' pointer is needed to keep advancing to the proper offsets in
+ * memory when allocating the struct along with its embedded structs, as
+ * edac_device_alloc_ctl_info() above does, for example.
+ *
+ * On return, the pointer 'p' will have been incremented, ready for the
+ * next call to this function.
*/
-void *edac_align_ptr(void *ptr, unsigned size)
+void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
unsigned align, r;
+ void *ptr = *p;
+
+ *p += size * n_elems;
- /* Here we assume that the alignment of a "long long" is the most
+ /*
+ * 'p' can possibly be an unaligned item X such that sizeof(X) is
+ * 'size'. Adjust 'p' so that its alignment is at least as
+ * stringent as what the compiler would provide for X and return
+ * the aligned result.
+ * Here we assume that the alignment of a "long long" is the most
* stringent alignment that the compiler will ever provide by default.
* As far as I know, this is a reasonable assumption.
*/
@@ -127,19 +164,23 @@ void *edac_align_ptr(void *ptr, unsigned size)
else
return (char *)ptr;
- r = size % align;
+ r = (unsigned long)p % align;
if (r == 0)
return (char *)ptr;
+ *p += align - r;
+
return (void *)(((unsigned long)ptr) + align - r);
}
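A minimal, self-contained sketch of the single-shot allocation pattern the helper above enables (struct foo, struct bar and foo_alloc() are hypothetical; only the edac_align_ptr() semantics documented above are assumed): pass one walks a NULL base to compute aligned offsets and the total size, kzalloc() grabs one chunk, and pass two rebases each offset into that chunk.

	#include <linux/slab.h>

	struct bar { int x; };

	struct foo {
		struct bar *bars;
		u32 *counters;
	};

	static struct foo *foo_alloc(unsigned int nr_bars, unsigned int nr_counters)
	{
		void *ptr = NULL;
		struct foo *foo;
		struct bar *bars;
		u32 *counters;
		size_t size;

		/* Pass 1: offsets relative to a NULL base; ptr advances as we go. */
		foo      = edac_align_ptr(&ptr, sizeof(*foo), 1);	/* offset 0 */
		bars     = edac_align_ptr(&ptr, sizeof(*bars), nr_bars);
		counters = edac_align_ptr(&ptr, sizeof(*counters), nr_counters);
		size     = (unsigned long)ptr;		/* total bytes needed */

		foo = kzalloc(size, GFP_KERNEL);
		if (!foo)
			return NULL;

		/* Pass 2: turn the offsets into real pointers inside the chunk. */
		foo->bars     = (struct bar *)((char *)foo + (unsigned long)bars);
		foo->counters = (u32 *)((char *)foo + (unsigned long)counters);

		return foo;
	}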
/**
- * edac_mc_alloc: Allocate a struct mem_ctl_info structure
- * @size_pvt: size of private storage needed
- * @nr_csrows: Number of CWROWS needed for this MC
- * @nr_chans: Number of channels for the MC
+ * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
+ * @mc_num: Memory controller number
+ * @n_layers: Number of MC hierarchy layers
+ * @layers: Describes each layer as seen by the Memory Controller
+ * @sz_pvt: Size of private storage needed
+ *
*
* Everything is kmalloc'ed as one big chunk - more efficient.
* Only can be used if all structures have the same lifetime - otherwise
@@ -147,32 +188,77 @@ void *edac_align_ptr(void *ptr, unsigned size)
*
* Use edac_mc_free() to free mc structures allocated by this function.
*
+ * NOTE: drivers handle multi-rank memories in different ways: in some
+ * drivers, one multi-rank memory stick is mapped as one entry, while, in
+ * others, a single multi-rank memory stick would be mapped into several
+ * entries. Currently, this function will allocate multiple struct dimm_info
+ * in such scenarios, as grouping the multiple ranks would require driver changes.
+ *
* Returns:
- * NULL allocation failed
- * struct mem_ctl_info pointer
+ * On failure: NULL
+ * On success: struct mem_ctl_info pointer
*/
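A usage sketch that simply mirrors the call sites converted elsewhere in this patch (e752x_probe1() and friends); nr_csrows, nr_channels and pvt stand in for driver-specific values:

	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = nr_csrows;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;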
-struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index)
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt)
{
struct mem_ctl_info *mci;
- struct csrow_info *csi, *csrow;
+ struct edac_mc_layer *layer;
+ struct csrow_info *csi, *csr;
struct rank_info *chi, *chp, *chan;
- void *pvt;
- unsigned size;
- int row, chn;
- int err;
+ struct dimm_info *dimm;
+ u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+ unsigned pos[EDAC_MAX_LAYERS];
+ unsigned size, tot_dimms = 1, count = 1;
+ unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+ void *pvt, *p, *ptr = NULL;
+ int i, j, err, row, chn, n, len;
+ bool per_rank = false;
+
+ BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
+ /*
+ * Calculate the total amount of dimms and csrows/cschannels while
+ * in the old API emulation mode
+ */
+ for (i = 0; i < n_layers; i++) {
+ tot_dimms *= layers[i].size;
+ if (layers[i].is_virt_csrow)
+ tot_csrows *= layers[i].size;
+ else
+ tot_channels *= layers[i].size;
+
+ if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
+ per_rank = true;
+ }
/* Figure out the offsets of the various items from the start of an mc
* structure. We want the alignment of each item to be at least as
* stringent as what the compiler would provide if we could simply
* hardcode everything into a single struct.
*/
- mci = (struct mem_ctl_info *)0;
- csi = edac_align_ptr(&mci[1], sizeof(*csi));
- chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
- pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+ mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
+ layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
+ csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
+ chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
+ dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
+ for (i = 0; i < n_layers; i++) {
+ count *= layers[i].size;
+ debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
+ ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+ ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+ tot_errcount += 2 * count;
+ }
+
+ debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
+ pvt = edac_align_ptr(&ptr, sz_pvt, 1);
size = ((unsigned long)pvt) + sz_pvt;
+ debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+ __func__, size,
+ tot_dimms,
+ per_rank ? "ranks" : "dimms",
+ tot_csrows * tot_channels);
mci = kzalloc(size, GFP_KERNEL);
if (mci == NULL)
return NULL;
@@ -180,28 +266,103 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
/* Adjust pointers so they point within the memory we just allocated
* rather than an imaginary chunk of memory located at address 0.
*/
+ layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
+ dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
+ for (i = 0; i < n_layers; i++) {
+ mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
+ mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
+ }
pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
/* setup index and various internal pointers */
- mci->mc_idx = edac_index;
+ mci->mc_idx = mc_num;
mci->csrows = csi;
+ mci->dimms = dimm;
+ mci->tot_dimms = tot_dimms;
mci->pvt_info = pvt;
- mci->nr_csrows = nr_csrows;
-
- for (row = 0; row < nr_csrows; row++) {
- csrow = &csi[row];
- csrow->csrow_idx = row;
- csrow->mci = mci;
- csrow->nr_channels = nr_chans;
- chp = &chi[row * nr_chans];
- csrow->channels = chp;
+ mci->n_layers = n_layers;
+ mci->layers = layer;
+ memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
+ mci->nr_csrows = tot_csrows;
+ mci->num_cschannel = tot_channels;
+ mci->mem_is_per_rank = per_rank;
- for (chn = 0; chn < nr_chans; chn++) {
+ /*
+ * Fill the csrow struct
+ */
+ for (row = 0; row < tot_csrows; row++) {
+ csr = &csi[row];
+ csr->csrow_idx = row;
+ csr->mci = mci;
+ csr->nr_channels = tot_channels;
+ chp = &chi[row * tot_channels];
+ csr->channels = chp;
+
+ for (chn = 0; chn < tot_channels; chn++) {
chan = &chp[chn];
chan->chan_idx = chn;
- chan->csrow = csrow;
+ chan->csrow = csr;
+ }
+ }
+
+ /*
+ * Fill the dimm struct
+ */
+ memset(&pos, 0, sizeof(pos));
+ row = 0;
+ chn = 0;
+ debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
+ per_rank ? "ranks" : "dimms");
+ for (i = 0; i < tot_dimms; i++) {
+ chan = &csi[row].channels[chn];
+ dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
+ pos[0], pos[1], pos[2]);
+ dimm->mci = mci;
+
+ debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
+ i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
+ pos[0], pos[1], pos[2], row, chn);
+
+ /*
+ * Copy DIMM location and initialize it.
+ */
+ len = sizeof(dimm->label);
+ p = dimm->label;
+ n = snprintf(p, len, "mc#%u", mc_num);
+ p += n;
+ len -= n;
+ for (j = 0; j < n_layers; j++) {
+ n = snprintf(p, len, "%s#%u",
+ edac_layer_name[layers[j].type],
+ pos[j]);
+ p += n;
+ len -= n;
+ dimm->location[j] = pos[j];
+
+ if (len <= 0)
+ break;
+ }
+
+ /* Link it to the csrows old API data */
+ chan->dimm = dimm;
+ dimm->csrow = row;
+ dimm->cschannel = chn;
+
+ /* Increment csrow location */
+ row++;
+ if (row == tot_csrows) {
+ row = 0;
+ chn++;
+ }
+
+ /* Increment dimm location */
+ for (j = n_layers - 1; j >= 0; j--) {
+ pos[j]++;
+ if (pos[j] < layers[j].size)
+ break;
+ pos[j] = 0;
}
}
@@ -490,7 +651,6 @@ EXPORT_SYMBOL(edac_mc_find);
* edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
* create sysfs entries associated with mci structure
* @mci: pointer to the mci structure to be added to the list
- * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
*
* Return:
* 0 Success
@@ -517,6 +677,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
edac_mc_dump_channel(&mci->csrows[i].
channels[j]);
}
+ for (i = 0; i < mci->tot_dimms; i++)
+ edac_mc_dump_dimm(&mci->dimms[i]);
}
#endif
mutex_lock(&mem_ctls_mutex);
@@ -636,15 +798,19 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
struct csrow_info *csrows = mci->csrows;
- int row, i;
+ int row, i, j, n;
debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
row = -1;
for (i = 0; i < mci->nr_csrows; i++) {
struct csrow_info *csrow = &csrows[i];
-
- if (csrow->nr_pages == 0)
+ n = 0;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
+ n += dimm->nr_pages;
+ }
+ if (n == 0)
continue;
debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
@@ -670,249 +836,307 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
-/* FIXME - setable log (warning/emerg) levels */
-/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
-void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, unsigned long syndrome,
- int row, int channel, const char *msg)
-{
- unsigned long remapped_page;
+const char *edac_layer_name[] = {
+ [EDAC_MC_LAYER_BRANCH] = "branch",
+ [EDAC_MC_LAYER_CHANNEL] = "channel",
+ [EDAC_MC_LAYER_SLOT] = "slot",
+ [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+};
+EXPORT_SYMBOL_GPL(edac_layer_name);
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+static void edac_inc_ce_error(struct mem_ctl_info *mci,
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS])
+{
+ int i, index = 0;
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ mci->ce_mc++;
- if (channel >= mci->csrows[row].nr_channels || channel < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range "
- "(%d >= %d)\n", channel,
- mci->csrows[row].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ if (!enable_per_layer_report) {
+ mci->ce_noinfo_count++;
return;
}
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
- "0x%lx, row %d, channel %d, label \"%s\": %s\n",
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, syndrome, row, channel,
- mci->csrows[row].channels[channel].label, msg);
-
- mci->ce_count++;
- mci->csrows[row].ce_count++;
- mci->csrows[row].channels[channel].ce_count++;
-
- if (mci->scrub_mode & SCRUB_SW_SRC) {
- /*
- * Some MC's can remap memory so that it is still available
- * at a different address when PCI devices map into memory.
- * MC's that can't do this lose the memory where PCI devices
- * are mapped. This mapping is MC dependent and so we call
- * back into the MC driver for it to map the MC page to
- * a physical (CPU) page which can then be mapped to a virtual
- * page - which can then be scrubbed.
- */
- remapped_page = mci->ctl_page_to_phys ?
- mci->ctl_page_to_phys(mci, page_frame_number) :
- page_frame_number;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ break;
+ index += pos[i];
+ mci->ce_per_layer[i][index]++;
- edac_mc_scrub_block(remapped_page, offset_in_page,
- mci->csrows[row].grain);
+ if (i < mci->n_layers - 1)
+ index *= mci->layers[i + 1].size;
}
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
-void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_inc_ue_error(struct mem_ctl_info *mci,
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS])
{
- if (edac_mc_get_log_ce())
- edac_mc_printk(mci, KERN_WARNING,
- "CE - no information available: %s\n", msg);
+ int i, index = 0;
- mci->ce_noinfo_count++;
- mci->ce_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
+ mci->ue_mc++;
-void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row, const char *msg)
-{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chan;
- int chars;
-
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
-
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ if (!enable_per_layer_report) {
+		mci->ue_noinfo_count++;
return;
}
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[row].channels[0].label);
- len -= chars;
- pos += chars;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ break;
+ index += pos[i];
+ mci->ue_per_layer[i][index]++;
- for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
- chan++) {
- chars = snprintf(pos, len + 1, ":%s",
- mci->csrows[row].channels[chan].label);
- len -= chars;
- pos += chars;
+ if (i < mci->n_layers - 1)
+ index *= mci->layers[i + 1].size;
}
+}
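Both increment helpers above index ce_per_layer[i]/ue_per_layer[i] with the flattened position of layers 0..i, so each layer keeps one counter per coordinate prefix. A minimal sketch of that indexing, with illustrative (not driver-derived) layer sizes:

#include <stdio.h>

/* Stand-in sizes for a branch/channel/slot hierarchy. */
static const int layer_size[] = { 2, 2, 4 };

int main(void)
{
	int pos[] = { 1, 0, 3 };	/* branch 1, channel 0, slot 3 */
	int i, index = 0;

	for (i = 0; i < 3; i++) {
		if (pos[i] < 0)
			break;		/* location unknown below this layer */
		index += pos[i];
		printf("layer %d counter index: %d\n", i, index);
		if (i < 2)
			index *= layer_size[i + 1];
	}
	return 0;
}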
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
- "labels \"%s\": %s\n", page_frame_number,
- offset_in_page, mci->csrows[row].grain, row,
- labels, msg);
+static void edac_ce_error(struct mem_ctl_info *mci,
+ const int pos[EDAC_MAX_LAYERS],
+ const char *msg,
+ const char *location,
+ const char *label,
+ const char *detail,
+ const char *other_detail,
+ const bool enable_per_layer_report,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ u32 grain)
+{
+ unsigned long remapped_page;
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
- "row %d, labels \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, row, labels, msg);
+ if (edac_mc_get_log_ce()) {
+ if (other_detail && *other_detail)
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE %s on %s (%s%s - %s)\n",
+ msg, label, location,
+ detail, other_detail);
+ else
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE %s on %s (%s%s)\n",
+ msg, label, location,
+ detail);
+ }
+ edac_inc_ce_error(mci, enable_per_layer_report, pos);
- mci->ue_count++;
- mci->csrows[row].ue_count++;
+ if (mci->scrub_mode & SCRUB_SW_SRC) {
+ /*
+ * Some memory controllers (called MCs below) can remap
+ * memory so that it is still available at a different
+ * address when PCI devices map into memory.
+		 * MCs that can't do this lose the memory where PCI
+ * devices are mapped. This mapping is MC-dependent
+ * and so we call back into the MC driver for it to
+ * map the MC page to a physical (CPU) page which can
+ * then be mapped to a virtual page - which can then
+ * be scrubbed.
+ */
+ remapped_page = mci->ctl_page_to_phys ?
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
+
+ edac_mc_scrub_block(remapped_page,
+ offset_in_page, grain);
+ }
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
-void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_ue_error(struct mem_ctl_info *mci,
+ const int pos[EDAC_MAX_LAYERS],
+ const char *msg,
+ const char *location,
+ const char *label,
+ const char *detail,
+ const char *other_detail,
+ const bool enable_per_layer_report)
{
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+ if (edac_mc_get_log_ue()) {
+ if (other_detail && *other_detail)
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE %s on %s (%s%s - %s)\n",
+ msg, label, location, detail,
+ other_detail);
+ else
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE %s on %s (%s%s)\n",
+ msg, label, location, detail);
+ }
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_WARNING,
- "UE - no information available: %s\n", msg);
- mci->ue_noinfo_count++;
- mci->ue_count++;
+ if (edac_mc_get_panic_on_ue()) {
+ if (other_detail && *other_detail)
+ panic("UE %s on %s (%s%s - %s)\n",
+ msg, label, location, detail, other_detail);
+ else
+ panic("UE %s on %s (%s%s)\n",
+ msg, label, location, detail);
+ }
+
+ edac_inc_ue_error(mci, enable_per_layer_report, pos);
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process UE events
- */
-void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channela,
- unsigned int channelb, char *msg)
+#define OTHER_LABEL " or "
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int layer0,
+ const int layer1,
+ const int layer2,
+ const char *msg,
+ const char *other_detail,
+ const void *mcelog)
{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chars;
+	/* FIXME: too much for stack: move it to some pre-allocated area */
+ char detail[80], location[80];
+ char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
+ char *p;
+ int row = -1, chan = -1;
+ int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
+ int i;
+ u32 grain;
+ bool enable_per_layer_report = false;
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
- if (channela >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-a out of range "
- "(%d >= %d)\n",
- channela, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
+ /*
+ * Check if the event report is consistent and if the memory
+ * location is known. If it is known, enable_per_layer_report will be
+ * true, the DIMM(s) label info will be filled and the per-layer
+ * error counters will be incremented.
+ */
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] >= (int)mci->layers[i].size) {
+ if (type == HW_EVENT_ERR_CORRECTED)
+ p = "CE";
+ else
+ p = "UE";
+
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
+ edac_layer_name[mci->layers[i].type],
+ pos[i], mci->layers[i].size);
+ /*
+ * Instead of just returning it, let's use what's
+ * known about the error. The increment routines and
+ * the DIMM filter logic will do the right thing by
+			 * pointing to the likely damaged DIMMs.
+ */
+ pos[i] = -1;
+ }
+ if (pos[i] >= 0)
+ enable_per_layer_report = true;
}
- if (channelb >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-b out of range "
- "(%d >= %d)\n",
- channelb, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ /*
+ * Get the dimm label/grain that applies to the match criteria.
+ * As the error algorithm may not be able to point to just one memory
+ * stick, the logic here will get all possible labels that could
+	 * potentially be affected by the error.
+	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
+	 * to know only the branch (also called the "MC dimm"), but not the
+	 * channel, as the memory is arranged in pairs,
+ * where each memory belongs to a separate channel within the same
+ * branch.
+ */
+ grain = 0;
+ p = label;
+ *p = '\0';
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm = &mci->dimms[i];
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
+ if (layer0 >= 0 && layer0 != dimm->location[0])
+ continue;
+ if (layer1 >= 0 && layer1 != dimm->location[1])
+ continue;
+ if (layer2 >= 0 && layer2 != dimm->location[2])
+ continue;
- /* Generate the DIMM labels from the specified channels */
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[csrow].channels[channela].label);
- len -= chars;
- pos += chars;
- chars = snprintf(pos, len + 1, "-%s",
- mci->csrows[csrow].channels[channelb].label);
+ /* get the max grain, over the error match range */
+ if (dimm->grain > grain)
+ grain = dimm->grain;
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela, channelb,
- labels, msg);
+ /*
+ * If the error is memory-controller wide, there's no need to
+ * seek for the affected DIMMs because the whole
+ * channel/memory controller/... may be affected.
+ * Also, don't show errors for empty DIMM slots.
+ */
+ if (enable_per_layer_report && dimm->nr_pages) {
+ if (p != label) {
+ strcpy(p, OTHER_LABEL);
+ p += strlen(OTHER_LABEL);
+ }
+ strcpy(p, dimm->label);
+ p += strlen(p);
+ *p = '\0';
+
+ /*
+ * get csrow/channel of the DIMM, in order to allow
+ * incrementing the compat API counters
+ */
+ debugf4("%s: %s csrows map: (%d,%d)\n",
+ __func__,
+ mci->mem_is_per_rank ? "rank" : "dimm",
+ dimm->csrow, dimm->cschannel);
+
+ if (row == -1)
+ row = dimm->csrow;
+ else if (row >= 0 && row != dimm->csrow)
+ row = -2;
+
+ if (chan == -1)
+ chan = dimm->cschannel;
+ else if (chan >= 0 && chan != dimm->cschannel)
+ chan = -2;
+ }
+ }
- if (edac_mc_get_panic_on_ue())
- panic("UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela,
- channelb, labels, msg);
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
+ if (!enable_per_layer_report) {
+ strcpy(label, "any memory");
+ } else {
+ debugf4("%s: csrow/channel to increment: (%d,%d)\n",
+ __func__, row, chan);
+ if (p == label)
+ strcpy(label, "unknown memory");
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ if (row >= 0) {
+ mci->csrows[row].ce_count++;
+ if (chan >= 0)
+ mci->csrows[row].channels[chan].ce_count++;
+ }
+ } else
+ if (row >= 0)
+ mci->csrows[row].ue_count++;
+ }
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process CE events
- */
-void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
- unsigned int csrow, unsigned int channel, char *msg)
-{
+ /* Fill the RAM location data */
+ p = location;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ continue;
- /* Ensure boundary values */
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
- if (channel >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range (%d >= %d)\n",
- channel, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
+ p += sprintf(p, "%s:%d ",
+ edac_layer_name[mci->layers[i].type],
+ pos[i]);
}
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE row %d, channel %d, label \"%s\": %s\n",
- csrow, channel,
- mci->csrows[csrow].channels[channel].label, msg);
+ /* Memory type dependent details about the error */
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
+ page_frame_number, offset_in_page,
+ grain, syndrome);
+ edac_ce_error(mci, pos, msg, location, label, detail,
+ other_detail, enable_per_layer_report,
+ page_frame_number, offset_in_page, grain);
+ } else {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%d",
+ page_frame_number, offset_in_page, grain);
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[channel].ce_count++;
+ edac_ue_error(mci, pos, msg, location, label, detail,
+ other_detail, enable_per_layer_report);
+ }
}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
+EXPORT_SYMBOL_GPL(edac_mc_handle_error);
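Seen from a driver, the old edac_mc_handle_ce()/edac_mc_handle_ue() pairs collapse into this one call, whose three layer arguments follow the order given to edac_mc_alloc(), with -1 marking an unknown coordinate. A hypothetical driver-side fragment (kernel context assumed, names illustrative, not compilable on its own):

/* Report one corrected error on a branch/channel layout; the slot is
 * unknown, so the third layer coordinate is -1. */
static void report_ce(struct mem_ctl_info *mci, unsigned long pfn,
		      unsigned long offset, unsigned long syndrome,
		      int branch, int channel)
{
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     pfn, offset, syndrome,
			     branch, channel, -1,	/* slot unknown */
			     "read error", "", NULL);
}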
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e9a28f576d14..f6a29b0eedc8 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -144,25 +144,31 @@ static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+ int i;
+ u32 nr_pages = 0;
+
+ for (i = 0; i < csrow->nr_channels; i++)
+ nr_pages += csrow->channels[i].dimm->nr_pages;
+
+ return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}
static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+ return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
}
static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+ return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
}
static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+ return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
@@ -170,11 +176,11 @@ static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
char *data, int channel)
{
/* if field has not been initialized, there is nothing to send */
- if (!csrow->channels[channel].label[0])
+ if (!csrow->channels[channel].dimm->label[0])
return 0;
return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
- csrow->channels[channel].label);
+ csrow->channels[channel].dimm->label);
}
static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
@@ -184,8 +190,8 @@ static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
ssize_t max_size = 0;
max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
- strncpy(csrow->channels[channel].label, data, max_size);
- csrow->channels[channel].label[max_size] = '\0';
+ strncpy(csrow->channels[channel].dimm->label, data, max_size);
+ csrow->channels[channel].dimm->label[max_size] = '\0';
return max_size;
}
@@ -419,8 +425,8 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
mci->ue_noinfo_count = 0;
mci->ce_noinfo_count = 0;
- mci->ue_count = 0;
- mci->ce_count = 0;
+ mci->ue_mc = 0;
+ mci->ce_mc = 0;
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *ri = &mci->csrows[row];
@@ -489,12 +495,12 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
{
- return sprintf(data, "%d\n", mci->ue_count);
+ return sprintf(data, "%d\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
{
- return sprintf(data, "%d\n", mci->ce_count);
+ return sprintf(data, "%d\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
@@ -519,16 +525,16 @@ static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
{
- int total_pages, csrow_idx;
+ int total_pages = 0, csrow_idx, j;
- for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
- csrow_idx++) {
+ for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
struct csrow_info *csrow = &mci->csrows[csrow_idx];
- if (!csrow->nr_pages)
- continue;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- total_pages += csrow->nr_pages;
+ total_pages += dimm->nr_pages;
+ }
}
return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
@@ -900,7 +906,7 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
*/
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ int i, j;
int err;
struct csrow_info *csrow;
struct kobject *kobj_mci = &mci->edac_mci_kobj;
@@ -934,10 +940,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
/* Make directories for each CSROW object under the mc<id> kobject
*/
for (i = 0; i < mci->nr_csrows; i++) {
+ int nr_pages = 0;
+
csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
- /* Only expose populated CSROWs */
- if (csrow->nr_pages > 0) {
+ if (nr_pages > 0) {
err = edac_create_csrow_object(mci, csrow, i);
if (err) {
debugf1("%s() failure: create csrow %d obj\n",
@@ -949,12 +958,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
return 0;
- /* CSROW error: backout what has already been registered, */
fail1:
for (i--; i >= 0; i--) {
- if (csrow->nr_pages > 0) {
+ int nr_pages = 0;
+
+ csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
+ if (nr_pages > 0)
kobject_put(&mci->csrows[i].kobj);
- }
}
/* remove the mci instance's attributes, if any */
@@ -973,14 +985,20 @@ fail0:
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ struct csrow_info *csrow;
+ int i, j;
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
- if (mci->csrows[i].nr_pages > 0) {
+ int nr_pages = 0;
+
+ csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
+ if (nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
kobject_put(&mci->csrows[i].kobj);
}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 00f81b47a51f..0ea7d14cb930 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -50,7 +50,7 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
extern void edac_mc_reset_delay_period(int value);
-extern void *edac_align_ptr(void *ptr, unsigned size);
+extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
/*
* EDAC PCI functions
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 63af1c5673d1..f1ac86649886 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -42,13 +42,13 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
const char *edac_pci_name)
{
struct edac_pci_ctl_info *pci;
- void *pvt;
+ void *p = NULL, *pvt;
unsigned int size;
debugf1("%s()\n", __func__);
- pci = (struct edac_pci_ctl_info *)0;
- pvt = edac_align_ptr(&pci[1], sz_pvt);
+ pci = edac_align_ptr(&p, sizeof(*pci), 1);
+ pvt = edac_align_ptr(&p, 1, sz_pvt);
size = ((unsigned long)pvt) + sz_pvt;
/* Alloc the needed control struct memory */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 277689a68841..8ad1744faacd 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -245,7 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -256,10 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, pfn);
if (info->errsts & I3000_ERRSTS_UE)
- edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, offset, 0,
+ row, -1, -1,
+ "i3000 UE", "", NULL);
else
- edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
- multi_chan ? channel : 0, "i3000 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, info->derrsyn,
+ row, multi_chan ? channel : 0, -1,
+ "i3000 CE", "", NULL);
return 1;
}
@@ -304,9 +311,10 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_cumul_size;
+ struct edac_mc_layer layers[2];
+ unsigned long last_cumul_size, nr_pages;
int interleaved, nr_channels;
unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
@@ -347,7 +355,14 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
*/
interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
nr_channels = interleaved ? 2 : 1;
- mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I3000_RANKS / nr_channels;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
@@ -386,19 +401,23 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
cumul_size <<= 1;
debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
__func__, i, cumul_size);
- if (cumul_size == last_cumul_size) {
- csrow->mtype = MEM_EMPTY;
+ if (cumul_size == last_cumul_size)
continue;
- }
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = I3000_DEAP_GRAIN;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / nr_channels;
+ dimm->grain = I3000_DEAP_GRAIN;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
/*
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 046808c6357d..bbe43ef71823 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -23,6 +23,7 @@
#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
+#define I3200_DIMMS 4
#define I3200_RANKS 8
#define I3200_RANKS_PER_CHANNEL 4
#define I3200_CHANNELS 2
@@ -217,21 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
if (log & I3200_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log),
- "i3200 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ eccerrlog_row(channel, log),
+ -1, -1,
+						     "i3200 UE", "", NULL);
} else if (log & I3200_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0,
- "i3200 CE");
+			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+					     0, 0, eccerrlog_syndrome(log),
+					     eccerrlog_row(channel, log),
+					     -1, -1,
+					     "i3200 CE", "", NULL);
}
}
}
@@ -319,9 +324,9 @@ static unsigned long drb_to_nr_pages(
static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
+ struct edac_mc_layer layers[2];
u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
@@ -336,8 +341,14 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
i3200_get_drbs(window, drbs);
nr_channels = how_many_channels(pdev);
- mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
- nr_channels, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I3200_DIMMS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct i3200_priv));
if (!mci)
return -ENOMEM;
@@ -366,7 +377,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
@@ -375,20 +385,18 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
i / I3200_RANKS_PER_CHANNEL,
i % I3200_RANKS_PER_CHANNEL);
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
+ if (nr_pages == 0)
continue;
- }
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+ dimm->nr_pages = nr_pages / nr_channels;
+ dimm->grain = nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
i3200_clear_error_info(mci);
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a2680d8e744b..11ea835f155a 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -270,7 +270,8 @@
#define MTR3 0x8C
#define NUM_MTRS 4
-#define CHANNELS_PER_BRANCH (2)
+#define CHANNELS_PER_BRANCH 2
+#define MAX_BRANCHES 2
 /* Defines to extract the various fields from the
* MTRx - Memory Technology Registers
@@ -473,7 +474,6 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
char msg[EDAC_MC_LABEL_LEN + 1 + 160];
char *specific = NULL;
u32 allErrors;
- int branch;
int channel;
int bank;
int rank;
@@ -485,8 +485,7 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
if (!allErrors)
return; /* if no error, return now */
- branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
- channel = branch;
+ channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
/* Use the NON-Recoverable macros to extract data */
bank = NREC_BANK(info->nrecmema);
@@ -495,9 +494,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
+ debugf0("\t\tCSROW= %d Channel= %d "
+ "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, bank,
rdwr ? "Write" : "Read", ras, cas);
/* Only 1 bit will be on */
@@ -533,13 +532,14 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
- "FATAL Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- allErrors, specific);
+ "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
+ bank, ras, cas, allErrors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ channel >> 1, channel & 1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/*
@@ -633,13 +633,14 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
- "CAS=%d, UE Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- ue_errors, specific);
+ "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
+ rank, bank, ras, cas, ue_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ channel >> 1, -1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/* Check correctable errors */
@@ -685,13 +686,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+		 "Branch=%d Bank=%d RDWR=%s RAS=%d "
"CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas, ce_errors,
specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ channel >> 1, channel % 2, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
if (!misc_messages)
@@ -731,11 +735,12 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d Err=%#x (%s))", branch >> 1,
- misc_errors, specific);
+ "Err=%#x (%s)", misc_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, 0, 0, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ branch >> 1, -1, -1,
+ "Misc error", msg, NULL);
}
}
@@ -956,14 +961,14 @@ static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
*
* return the proper MTR register as determine by the csrow and channel desired
*/
-static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
{
int mtr;
if (channel < CHANNELS_PER_BRANCH)
- mtr = pvt->b0_mtr[csrow >> 1];
+ mtr = pvt->b0_mtr[slot];
else
- mtr = pvt->b1_mtr[csrow >> 1];
+ mtr = pvt->b1_mtr[slot];
return mtr;
}
@@ -988,37 +993,34 @@ static void decode_mtr(int slot_row, u16 mtr)
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
-static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
struct i5000_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
- mtr = determine_mtr(pvt, csrow, channel);
+ mtr = determine_mtr(pvt, slot, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
- /* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << (csrow >> 1))) {
+ /* Determine if there is a DIMM present in this DIMM slot */
+ if (amb_present_reg) {
dinfo->dual_rank = MTR_DIMM_RANK(mtr);
- if (!((dinfo->dual_rank == 0) &&
- ((csrow & 0x1) == 0x1))) {
- /* Start with the number of bits for a Bank
- * on the DRAM */
- addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
- /* Add thenumber of ROW bits */
- addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
- /* add the number of COLUMN bits */
- addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
-
- addrBits += 6; /* add 64 bits per DIMM */
- addrBits -= 20; /* divide by 2^^20 */
- addrBits -= 3; /* 8 bits per bytes */
-
- dinfo->megabytes = 1 << addrBits;
- }
+ /* Start with the number of bits for a Bank
+ * on the DRAM */
+ addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+ /* Add the number of ROW bits */
+ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+ /* add the number of COLUMN bits */
+ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+ addrBits += 6; /* add 64 bits per DIMM */
+			addrBits -= 20;	/* divide by 2^20 */
+			addrBits -= 3;	/* 8 bits per byte */
+
+ dinfo->megabytes = 1 << addrBits;
}
}
}
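The addrBits arithmetic above is just bank + row + column address bits, scaled to megabytes for a 64-bit-wide rank. A worked example with assumed (not register-derived) bit counts:

#include <stdio.h>

int main(void)
{
	int bank_bits = 2, row_bits = 14, col_bits = 10;	/* 4 banks, 16K rows, 1K columns */
	int addr_bits = bank_bits + row_bits + col_bits;

	addr_bits += 6;		/* 64 data bits per access */
	addr_bits -= 20;	/* bytes -> megabytes */
	addr_bits -= 3;		/* bits -> bytes */

	printf("%d MB\n", 1 << addr_bits);	/* 2+14+10+6-20-3 = 9 -> 512 MB */
	return 0;
}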
@@ -1032,10 +1034,9 @@ static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
static void calculate_dimm_size(struct i5000_pvt *pvt)
{
struct i5000_dimm_info *dinfo;
- int csrow, max_csrows;
+ int slot, channel, branch;
char *p, *mem_buffer;
int space, n;
- int channel;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
@@ -1046,22 +1047,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
return;
}
- n = snprintf(p, space, "\n");
- p += n;
- space -= n;
-
- /* Scan all the actual CSROWS (which is # of DIMMS * 2)
+ /* Scan all the actual slots
* and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
+ * Start with the highest slot first, to display it first
+ * and work toward the 0th slot
*/
- max_csrows = pvt->maxdimmperch * 2;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+ for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
- /* on an odd csrow, first output a 'boundary' marker,
+ /* on an odd slot, first output a 'boundary' marker,
* then reset the message buffer */
- if (csrow & 0x1) {
- n = snprintf(p, space, "---------------------------"
+ if (slot & 0x1) {
+ n = snprintf(p, space, "--------------------------"
"--------------------------------");
p += n;
space -= n;
@@ -1069,30 +1065,39 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p = mem_buffer;
space = PAGE_SIZE;
}
- n = snprintf(p, space, "csrow %2d ", csrow);
+ n = snprintf(p, space, "slot %2d ", slot);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
- n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
+ dinfo = &pvt->dimm_info[slot][channel];
+ handle_channel(pvt, slot, channel, dinfo);
+ if (dinfo->megabytes)
+ n = snprintf(p, space, "%4d MB %dR| ",
+ dinfo->megabytes, dinfo->dual_rank + 1);
+ else
+ n = snprintf(p, space, "%4d MB | ", 0);
p += n;
space -= n;
}
- n = snprintf(p, space, "\n");
p += n;
space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
}
/* Output the last bottom 'boundary' marker */
- n = snprintf(p, space, "---------------------------"
- "--------------------------------\n");
+ n = snprintf(p, space, "--------------------------"
+ "--------------------------------");
p += n;
space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
/* now output the 'channel' labels */
- n = snprintf(p, space, " ");
+ n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1100,9 +1105,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p += n;
space -= n;
}
- n = snprintf(p, space, "\n");
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+
+ n = snprintf(p, space, " ");
p += n;
- space -= n;
+ for (branch = 0; branch < MAX_BRANCHES; branch++) {
+ n = snprintf(p, space, " branch %d | ", branch);
+ p += n;
+ space -= n;
+ }
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
@@ -1235,13 +1248,13 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
static int i5000_init_csrows(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
- struct csrow_info *p_csrow;
+ struct dimm_info *dimm;
int empty, channel_count;
int max_csrows;
- int mtr, mtr1;
+ int mtr;
int csrow_megs;
int channel;
- int csrow;
+ int slot;
pvt = mci->pvt_info;
@@ -1250,43 +1263,40 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
empty = 1; /* Assume NO memory */
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = pvt->b0_mtr[csrow >> 1];
- mtr1 = pvt->b1_mtr[csrow >> 1];
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
- continue;
+ /*
+ * FIXME: The memory layout used to map slot/channel into the
+ * real memory architecture is weird: branch+slot are "csrows"
+ * and channel is channel. That required an extra array (dimm_info)
+ * to map the dimms. A good cleanup would be to remove this array,
+ * and do a loop here with branch, channel, slot
+ */
+ for (slot = 0; slot < max_csrows; slot++) {
+ for (channel = 0; channel < pvt->maxch; channel++) {
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
+ mtr = determine_mtr(pvt, slot, channel);
- p_csrow->grain = 8;
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++) {
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
- }
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ channel / MAX_BRANCHES,
+ channel % MAX_BRANCHES, slot);
- p_csrow->nr_pages = csrow_megs << 8;
+ csrow_megs = pvt->dimm_info[slot][channel].megabytes;
+ dimm->grain = 8;
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
+ /* Assume DDR2 for now */
+ dimm->mtype = MEM_FB_DDR2;
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
+ /* ask what device type on this row */
+ if (MTR_DRAM_WIDTH(mtr))
+ dimm->dtype = DEV_X8;
+ else
+ dimm->dtype = DEV_X4;
- p_csrow->edac_mode = EDAC_S8ECD8ED;
+ dimm->edac_mode = EDAC_S8ECD8ED;
+ dimm->nr_pages = csrow_megs << 8;
+ }
empty = 0;
}
@@ -1317,7 +1327,7 @@ static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
}
/*
- * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
+ * i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, &num_channels)
*
* ask the device how many channels are present and how many CSROWS
* as well
@@ -1332,7 +1342,7 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
* supported on this memory controller
*/
pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
- *num_dimms_per_channel = (int)value *2;
+ *num_dimms_per_channel = (int)value;
pci_read_config_byte(pdev, MAXCH, &value);
*num_channels = (int)value;
@@ -1348,10 +1358,10 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[3];
struct i5000_pvt *pvt;
int num_channels;
int num_dimms_per_channel;
- int num_csrows;
debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
__FILE__, __func__,
@@ -1377,14 +1387,22 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
*/
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
&num_channels);
- num_csrows = num_dimms_per_channel * 2;
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
+ debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n",
+ __func__, num_channels, num_dimms_per_channel);
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_channels / MAX_BRANCHES;
+ layers[1].is_virt_csrow = false;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = num_dimms_per_channel;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d500749464ea..e9e7c2a29dc3 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -14,6 +14,11 @@
* rows for each respective channel are laid out one after another,
* the first half belonging to channel 0, the second half belonging
* to channel 1.
+ *
+ * This driver is for DDR2 DIMMs, and it uses chip select to select among the
+ * several ranks. However, instead of showing memories as ranks, it outputs
+ * them as DIMMs. An internal table creates the association between ranks
+ * and DIMMs.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -410,14 +415,6 @@ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
return csrow / priv->ranksperchan;
}
-static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int chan, int rank)
-{
- const struct i5100_priv *priv = mci->pvt_info;
-
- return chan * priv->ranksperchan + rank;
-}
-
static void i5100_handle_ce(struct mem_ctl_info *mci,
int chan,
unsigned bank,
@@ -427,17 +424,17 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ char detail[80];
- printk(KERN_ERR
- "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
+ /* Form out message */
+ snprintf(detail, sizeof(detail),
+ "bank %u, cas %u, ras %u\n",
+ bank, cas, ras);
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[0].ce_count++;
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, syndrome,
+ chan, rank, -1,
+ msg, detail, NULL);
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -449,16 +446,17 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ char detail[80];
- printk(KERN_ERR
- "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
+ /* Form out message */
+ snprintf(detail, sizeof(detail),
+ "bank %u, cas %u, ras %u\n",
+ bank, cas, ras);
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, syndrome,
+ chan, rank, -1,
+ msg, detail, NULL);
}
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -835,10 +833,10 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
{
int i;
- unsigned long total_pages = 0UL;
struct i5100_priv *priv = mci->pvt_info;
- for (i = 0; i < mci->nr_csrows; i++) {
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm;
const unsigned long npages = i5100_npages(mci, i);
const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
@@ -846,33 +844,23 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
if (!npages)
continue;
- /*
- * FIXME: these two are totally bogus -- I don't see how to
- * map them correctly to this structure...
- */
- mci->csrows[i].first_page = total_pages;
- mci->csrows[i].last_page = total_pages + npages - 1;
- mci->csrows[i].page_mask = 0UL;
-
- mci->csrows[i].nr_pages = npages;
- mci->csrows[i].grain = 32;
- mci->csrows[i].csrow_idx = i;
- mci->csrows[i].dtype =
- (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
- mci->csrows[i].ue_count = 0;
- mci->csrows[i].ce_count = 0;
- mci->csrows[i].mtype = MEM_RDDR2;
- mci->csrows[i].edac_mode = EDAC_SECDED;
- mci->csrows[i].mci = mci;
- mci->csrows[i].nr_channels = 1;
- mci->csrows[i].channels[0].chan_idx = 0;
- mci->csrows[i].channels[0].ce_count = 0;
- mci->csrows[i].channels[0].csrow = mci->csrows + i;
- snprintf(mci->csrows[i].channels[0].label,
- sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
-
- total_pages += npages;
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ chan, rank, 0);
+
+ dimm->nr_pages = npages;
+ if (npages) {
+ dimm->grain = 32;
+ dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
+ DEV_X4 : DEV_X8;
+ dimm->mtype = MEM_RDDR2;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "DIMM%u",
+ i5100_rank_to_slot(mci, chan, rank));
+ }
+
+ debugf2("dimm channel %d, rank %d, size %ld\n",
+ chan, rank, (long)PAGES_TO_MiB(npages));
}
}
@@ -881,6 +869,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
{
int rc;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i5100_priv *priv;
struct pci_dev *ch0mm, *ch1mm;
int ret = 0;
@@ -941,7 +930,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
goto bail_ch1;
}
- mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 2;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = ranksperch;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(*priv));
if (!mci) {
ret = -ENOMEM;
goto bail_disable_ch1;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 1869a1018fb5..6640c29e1885 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -18,6 +18,10 @@
* Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
* http://developer.intel.com/design/chipsets/datashts/313070.htm
*
+ * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
+ * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
+ * 4 DIMMs, each with up to 8 GB.
+ *
*/
#include <linux/module.h>
@@ -44,12 +48,10 @@
edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
/* Limits for i5400 */
-#define NUM_MTRS_PER_BRANCH 4
+#define MAX_BRANCHES 2
#define CHANNELS_PER_BRANCH 2
-#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH
-#define MAX_CHANNELS 4
-/* max possible csrows per channel */
-#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
+#define DIMMS_PER_CHANNEL 4
+#define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH)
/* Device 16,
* Function 0: System Address
@@ -347,16 +349,16 @@ struct i5400_pvt {
u16 mir0, mir1;
- u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */
+	u16 b0_mtr[DIMMS_PER_CHANNEL];	/* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
 	u16 b0_ambpresent1;	/* Branch 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */
+	u16 b1_mtr[DIMMS_PER_CHANNEL];	/* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
/* DIMM information matrix, allocating architecture maximums */
- struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+ struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
/* Actual values for this controller */
int maxch; /* Max channels */
@@ -532,13 +534,15 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
int ras, cas;
int errnum;
char *type = NULL;
+ enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
if (!allErrors)
return; /* if no error, return now */
- if (allErrors & ERROR_FAT_MASK)
+ if (allErrors & ERROR_FAT_MASK) {
type = "FATAL";
- else if (allErrors & FERR_NF_UNCORRECTABLE)
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else if (allErrors & FERR_NF_UNCORRECTABLE)
type = "NON-FATAL uncorrected";
else
type = "NON-FATAL recoverable";
@@ -556,7 +560,7 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
ras = nrec_ras(info);
cas = nrec_cas(info);
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
"DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, channel + 1, branch >> 1, bank,
buf_id, rdwr_str(rdwr), ras, cas);
@@ -566,13 +570,13 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
- "RAS=%d CAS=%d %s Err=0x%lx (%s))",
- type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
- type, allErrors, error_name[errnum]);
+ "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
+ bank, buf_id, ras, cas, allErrors, error_name[errnum]);
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ branch >> 1, -1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/*
@@ -630,7 +634,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
- debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
+ debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, branch >> 1, bank,
rdwr_str(rdwr), ras, cas);
@@ -642,8 +646,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
branch >> 1, bank, rdwr_str(rdwr), ras, cas,
allErrors, error_name[errnum]);
- /* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ branch >> 1, channel % 2, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
return;
}
@@ -831,8 +837,8 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
/*
* determine_amb_present
*
- * the information is contained in NUM_MTRS_PER_BRANCH different
- * registers determining which of the NUM_MTRS_PER_BRANCH requires
+ * the information is contained in DIMMS_PER_CHANNEL different
+ * registers determining which of the DIMMS_PER_CHANNEL requires
* knowing which channel is in question
*
* 2 branches, each with 2 channels
@@ -861,11 +867,11 @@ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
}
/*
- * determine_mtr(pvt, csrow, channel)
+ * determine_mtr(pvt, dimm, channel)
*
- * return the proper MTR register as determine by the csrow and desired channel
+ * return the proper MTR register as determine by the dimm and desired channel
*/
-static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
{
int mtr;
int n;
@@ -873,11 +879,11 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
/* There is one MTR for each slot pair of FB-DIMMs,
Each slot pair may be at branch 0 or branch 1.
*/
- n = csrow;
+ n = dimm;
- if (n >= NUM_MTRS_PER_BRANCH) {
- debugf0("ERROR: trying to access an invalid csrow: %d\n",
- csrow);
+ if (n >= DIMMS_PER_CHANNEL) {
+ debugf0("ERROR: trying to access an invalid dimm: %d\n",
+ dimm);
return 0;
}
@@ -913,19 +919,19 @@ static void decode_mtr(int slot_row, u16 mtr)
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
-static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
struct i5400_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
- mtr = determine_mtr(pvt, csrow, channel);
+ mtr = determine_mtr(pvt, dimm, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
/* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << csrow)) {
+ if (amb_present_reg & (1 << dimm)) {
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
@@ -954,10 +960,10 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
static void calculate_dimm_size(struct i5400_pvt *pvt)
{
struct i5400_dimm_info *dinfo;
- int csrow, max_csrows;
+ int dimm, max_dimms;
char *p, *mem_buffer;
int space, n;
- int channel;
+ int channel, branch;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
@@ -968,32 +974,32 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
return;
}
- /* Scan all the actual CSROWS
+ /* Scan all the actual DIMMS
* and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
+ * Start with the highest dimm first, to display it first
+ * and work toward the 0th dimm
*/
- max_csrows = pvt->maxdimmperch;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+ max_dimms = pvt->maxdimmperch;
+ for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
- /* on an odd csrow, first output a 'boundary' marker,
+ /* on an odd dimm, first output a 'boundary' marker,
* then reset the message buffer */
- if (csrow & 0x1) {
+ if (dimm & 0x1) {
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "-------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
- n = snprintf(p, space, "csrow %2d ", csrow);
+ n = snprintf(p, space, "dimm %2d ", dimm);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
+ dinfo = &pvt->dimm_info[dimm][channel];
+ handle_channel(pvt, dimm, channel, dinfo);
n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
p += n;
space -= n;
@@ -1005,7 +1011,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
/* Output the last bottom 'boundary' marker */
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "-------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
@@ -1013,7 +1019,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
space = PAGE_SIZE;
/* now output the 'channel' labels */
- n = snprintf(p, space, " ");
+ n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1022,6 +1028,19 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
space -= n;
}
+ space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+
+ n = snprintf(p, space, " ");
+ p += n;
+ for (branch = 0; branch < MAX_BRANCHES; branch++) {
+ n = snprintf(p, space, " branch %d | ", branch);
+ p += n;
+ space -= n;
+ }
+
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
kfree(mem_buffer);
@@ -1080,7 +1099,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
/* Get the set of MTR[0-3] regs by each branch */
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
int where = MTR0 + (slot_row * sizeof(u16));
/* Branch 0 set of MTR registers */
@@ -1105,7 +1124,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
/* Read and dump branch 0's MTRs */
debugf2("\nMemory Technology Registers:\n");
debugf2(" Branch 0:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
@@ -1122,7 +1141,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
} else {
/* Read and dump branch 1's MTRs */
debugf2(" Branch 1:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
@@ -1141,7 +1160,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
}
/*
- * i5400_init_csrows Initialize the 'csrows' table within
+ * i5400_init_dimms Initialize the 'dimms' table within
* the mci control structure with the
* addressing of memory.
*
@@ -1149,64 +1168,68 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
* 0 success
* 1 no actual memory found on this MC
*/
-static int i5400_init_csrows(struct mem_ctl_info *mci)
+static int i5400_init_dimms(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
- struct csrow_info *p_csrow;
- int empty, channel_count;
- int max_csrows;
+ struct dimm_info *dimm;
+ int ndimms, channel_count;
+ int max_dimms;
int mtr;
- int csrow_megs;
- int channel;
- int csrow;
+ int size_mb;
+ int channel, slot;
pvt = mci->pvt_info;
channel_count = pvt->maxch;
- max_csrows = pvt->maxdimmperch;
+ max_dimms = pvt->maxdimmperch;
- empty = 1; /* Assume NO memory */
+ ndimms = 0;
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = determine_mtr(pvt, csrow, 0);
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr))
- continue;
-
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
-
- p_csrow->grain = 8;
-
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++)
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
-
- p_csrow->nr_pages = csrow_megs << 8;
-
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
-
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
-
- p_csrow->edac_mode = EDAC_S8ECD8ED;
-
- empty = 0;
+ /*
+ * FIXME: remove pvt->dimm_info[slot][channel] and use the 3
+ * layers here.
+ */
+ for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
+ channel++) {
+ for (slot = 0; slot < mci->layers[2].size; slot++) {
+ mtr = determine_mtr(pvt, slot, channel);
+
+ /* if no DIMMS on this slot, continue */
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ channel / 2, channel % 2, slot);
+
+ size_mb = pvt->dimm_info[slot][channel].megabytes;
+
+ debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
+ __func__, dimm - mci->dimms,
+ channel / 2, channel % 2, slot,
+ size_mb / 1000, size_mb % 1000);
+
+ dimm->nr_pages = size_mb << 8;
+ dimm->grain = 8;
+ dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+ dimm->mtype = MEM_FB_DDR2;
+ /*
+ * The ECC mechanism is SDDC (aka SECC), which
+ * is similar to Chipkill.
+ */
+ dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+ EDAC_S8ECD8ED : EDAC_S4ECD4ED;
+ ndimms++;
+ }
}
- return empty;
+ /*
+ * When just one DIMM is present, it should be at location (0,0,0).
+ * In such single-DIMM mode, the SDDC algorithm degrades to SECDED+.
+ */
+ if (ndimms == 1)
+ mci->dimms[0].edac_mode = EDAC_SECDED;
+
+ return (ndimms == 0);
}
/*
@@ -1242,9 +1265,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct i5400_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
+ struct edac_mc_layer layers[3];
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
@@ -1258,23 +1279,21 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* As we don't have a motherboard identification routine to determine
- * actual number of slots/dimms per channel, we thus utilize the
- * resource as specified by the chipset. Thus, we might have
- * have more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
+ /*
+ * allocate a new MC control structure
+ *
+ * This driver uses the DIMM slot as "csrow" and the rest as "channel".
*/
- num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
- num_channels = MAX_CHANNELS;
- num_csrows = num_dimms_per_channel;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = CHANNELS_PER_BRANCH;
+ layers[1].is_virt_csrow = false;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = DIMMS_PER_CHANNEL;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
@@ -1284,8 +1303,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
- pvt->maxch = num_channels;
- pvt->maxdimmperch = num_dimms_per_channel;
+ pvt->maxch = MAX_CHANNELS;
+ pvt->maxdimmperch = DIMMS_PER_CHANNEL;
/* 'get' the pci devices we want to reserve for our use */
if (i5400_get_devices(mci, dev_idx))
@@ -1307,13 +1326,13 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* Set the function pointer to an actual operation function */
mci->edac_check = i5400_check_error;
- /* initialize the MC control structure 'csrows' table
+ /* initialize the MC control structure 'dimms' table
* with the mapping and control information */
- if (i5400_init_csrows(mci)) {
+ if (i5400_init_dimms(mci)) {
debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5400_init_csrows() returned nonzero "
+ " because i5400_init_dimms() returned nonzero "
"value\n");
- mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
+ mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
} else {
debugf1("MC: Enable error reporting now\n");
i5400_enable_error_reporting(mci);
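
The i5400 hunks above reduce to one pattern: describe the controller as branch/channel/slot layers, let edac_mc_alloc() size the DIMM array from those layers, and fill each struct dimm_info through EDAC_DIMM_PTR(). The sketch below is illustrative only; the MY_* constants and my_alloc_layered_mc() are hypothetical, while the structures and helpers are the ones used by the patch itself.

#include <linux/edac.h>
#include "edac_core.h"	/* edac_mc_alloc(), EDAC_DIMM_PTR() */

#define MY_BRANCHES		2	/* hypothetical geometry */
#define MY_CHANS_PER_BRANCH	2
#define MY_DIMMS_PER_CHANNEL	4

/* Hedged sketch of the branch/channel/slot layering used by the hunks above. */
static struct mem_ctl_info *my_alloc_layered_mc(void)
{
	struct edac_mc_layer layers[3];
	struct mem_ctl_info *mci;
	struct dimm_info *dimm;
	int branch, chan, slot;

	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = MY_BRANCHES;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = MY_CHANS_PER_BRANCH;
	layers[1].is_virt_csrow = false;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = MY_DIMMS_PER_CHANNEL;
	layers[2].is_virt_csrow = true;	/* the DIMM slot plays the csrow role */

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return NULL;

	/* Per-DIMM attributes replace the old per-csrow ones. */
	for (branch = 0; branch < MY_BRANCHES; branch++) {
		for (chan = 0; chan < MY_CHANS_PER_BRANCH; chan++) {
			for (slot = 0; slot < MY_DIMMS_PER_CHANNEL; slot++) {
				dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
						     mci->n_layers,
						     branch, chan, slot);
				dimm->nr_pages = 0;	/* from MTR decode */
				dimm->grain = 8;
				dimm->mtype = MEM_FB_DDR2;
				dimm->dtype = DEV_X4;
				dimm->edac_mode = EDAC_S4ECD4ED;
			}
		}
	}
	return mci;
}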
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3bca148..97c22fd650ee 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -464,17 +464,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
FERR_FAT_FBD, error_reg);
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "FATAL (Branch=%d DRAM-Bank=%d %s "
- "RAS=%d CAS=%d Err=0x%lx (%s))",
- branch, bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, branch << 1,
- (branch << 1) + 1,
- pvt->tmp_prt_buffer);
+ "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
+ bank, ras, cas, errors, specific);
+
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ branch, -1, rank,
+ is_wr ? "Write error" : "Read error",
+ pvt->tmp_prt_buffer, NULL);
+
}
/* read in the 1st NON-FATAL error register */
@@ -513,23 +510,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
/* Form out message */
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "Corrected error (Branch=%d, Channel %d), "
- " DRAM-Bank=%d %s "
- "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))",
- branch, channel,
- bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, syndrome, specific);
-
- /*
- * Call the helper to output message
- * NOTE: Errors are reported per-branch, and not per-channel
- * Currently, we don't know how to identify the right
- * channel.
- */
- edac_mc_handle_fbd_ce(mci, rank, channel,
- pvt->tmp_prt_buffer);
+ "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
+ bank, ras, cas, errors, specific);
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
+ syndrome,
+ branch >> 1, channel % 2, rank,
+ is_wr ? "Write error" : "Read error",
+ pvt->tmp_prt_buffer, NULL);
}
return;
}
@@ -617,8 +605,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
static int decode_mtr(struct i7300_pvt *pvt,
int slot, int ch, int branch,
struct i7300_dimm_info *dinfo,
- struct csrow_info *p_csrow,
- u32 *nr_pages)
+ struct dimm_info *dimm)
{
int mtr, ans, addrBits, channel;
@@ -650,7 +637,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
addrBits -= 3; /* 8 bits per bytes */
dinfo->megabytes = 1 << addrBits;
- *nr_pages = dinfo->megabytes << 8;
debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
@@ -663,11 +649,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
- p_csrow->grain = 8;
- p_csrow->mtype = MEM_FB_DDR2;
- p_csrow->csrow_idx = slot;
- p_csrow->page_mask = 0;
-
/*
* The type of error detection actually depends of the
* mode of operation. When it is just one single memory chip, at
@@ -677,15 +658,18 @@ static int decode_mtr(struct i7300_pvt *pvt,
* See datasheet Sections 7.3.6 to 7.3.8
*/
+ dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
+ dimm->grain = 8;
+ dimm->mtype = MEM_FB_DDR2;
if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
- p_csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
} else {
debugf2("\t\tECC code is on Lockstep mode\n");
if (MTR_DRAM_WIDTH(mtr) == 8)
- p_csrow->edac_mode = EDAC_S8ECD8ED;
+ dimm->edac_mode = EDAC_S8ECD8ED;
else
- p_csrow->edac_mode = EDAC_S4ECD4ED;
+ dimm->edac_mode = EDAC_S4ECD4ED;
}
/* ask what device type on this row */
@@ -694,9 +678,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
"enhanced" : "normal");
- p_csrow->dtype = DEV_X8;
+ dimm->dtype = DEV_X8;
} else
- p_csrow->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
return mtr;
}
@@ -774,11 +758,10 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
struct i7300_dimm_info *dinfo;
- struct csrow_info *p_csrow;
int rc = -ENODEV;
int mtr;
int ch, branch, slot, channel;
- u32 last_page = 0, nr_pages;
+ struct dimm_info *dimm;
pvt = mci->pvt_info;
@@ -809,25 +792,23 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
where,
&pvt->mtr[slot][branch]);
- for (ch = 0; ch < MAX_BRANCHES; ch++) {
+ for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
int channel = to_channel(ch, branch);
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, branch, ch, slot);
+
dinfo = &pvt->dimm_info[slot][channel];
- p_csrow = &mci->csrows[slot];
mtr = decode_mtr(pvt, slot, ch, branch,
- dinfo, p_csrow, &nr_pages);
+ dinfo, dimm);
+
/* if no DIMMS on this row, continue */
if (!MTR_DIMMS_PRESENT(mtr))
continue;
- /* Update per_csrow memory count */
- p_csrow->nr_pages += nr_pages;
- p_csrow->first_page = last_page;
- last_page += nr_pages;
- p_csrow->last_page = last_page;
-
rc = 0;
+
}
}
}
@@ -1042,10 +1023,8 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[3];
struct i7300_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
int rc;
/* wake up device */
@@ -1062,23 +1041,17 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* As we don't have a motherboard identification routine to determine
- * actual number of slots/dimms per channel, we thus utilize the
- * resource as specified by the chipset. Thus, we might have
- * have more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
- */
- num_dimms_per_channel = MAX_SLOTS;
- num_channels = MAX_CHANNELS;
- num_csrows = MAX_SLOTS * MAX_CHANNELS;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = MAX_CH_PER_BRANCH;
+ layers[1].is_virt_csrow = true;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = MAX_SLOTS;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 85226ccf5290..a499c7ed820a 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -90,7 +90,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
#define MC_MAX_DOD 0x64
/*
- * OFFSETS for Device 3 Function 4, as inicated on Xeon 5500 datasheet:
+ * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
* http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
*/
@@ -101,7 +101,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
#define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
#define DIMM0_COR_ERR(r) ((r) & 0x7fff)
-/* OFFSETS for Device 3 Function 2, as inicated on Xeon 5500 datasheet */
+/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
#define MC_SSRCONTROL 0x48
#define SSR_MODE_DISABLE 0x00
#define SSR_MODE_ENABLE 0x01
@@ -221,7 +221,9 @@ struct i7core_inject {
};
struct i7core_channel {
- u32 ranks;
+ bool is_3dimms_present;
+ bool is_single_4rank;
+ bool has_4rank;
u32 dimms;
};
@@ -257,7 +259,6 @@ struct i7core_pvt {
struct i7core_channel channel[NUM_CHANS];
int ce_count_available;
- int csrow_map[NUM_CHANS][MAX_DIMMS];
/* ECC corrected errors counts per udimm */
unsigned long udimm_ce_count[MAX_DIMMS];
@@ -398,7 +399,7 @@ static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
};
/****************************************************************************
- Anciliary status routines
+ Ancillary status routines
****************************************************************************/
/* MC_CONTROL bits */
@@ -492,116 +493,15 @@ static void free_i7core_dev(struct i7core_dev *i7core_dev)
/****************************************************************************
Memory check routines
****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
- unsigned func)
-{
- struct i7core_dev *i7core_dev = get_i7core_dev(socket);
- int i;
-
- if (!i7core_dev)
- return NULL;
-
- for (i = 0; i < i7core_dev->n_devs; i++) {
- if (!i7core_dev->pdev[i])
- continue;
-
- if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
- PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
- return i7core_dev->pdev[i];
- }
- }
-
- return NULL;
-}
-
-/**
- * i7core_get_active_channels() - gets the number of channels and csrows
- * @socket: Quick Path Interconnect socket
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket.
- * this is used in order to properly allocate the size of mci components.
- *
- * It should be noticed that none of the current available datasheets explain
- * or even mention how csrows are seen by the memory controller. So, we need
- * to add a fake description for csrows.
- * So, this driver is attributing one DIMM memory for one csrow.
- */
-static int i7core_get_active_channels(const u8 socket, unsigned *channels,
- unsigned *csrows)
-{
- struct pci_dev *pdev = NULL;
- int i, j;
- u32 status, control;
-
- *channels = 0;
- *csrows = 0;
-
- pdev = get_pdev_slot_func(socket, 3, 0);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
- socket);
- return -ENODEV;
- }
-
- /* Device 3 function 0 reads */
- pci_read_config_dword(pdev, MC_STATUS, &status);
- pci_read_config_dword(pdev, MC_CONTROL, &control);
-
- for (i = 0; i < NUM_CHANS; i++) {
- u32 dimm_dod[3];
- /* Check if the channel is active */
- if (!(control & (1 << (8 + i))))
- continue;
-
- /* Check if the channel is disabled */
- if (status & (1 << i))
- continue;
-
- pdev = get_pdev_slot_func(socket, i + 4, 1);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d "
- "fn %d.%d!!!\n",
- socket, i + 4, 1);
- return -ENODEV;
- }
- /* Devices 4-6 function 1 */
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM0, &dimm_dod[0]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM1, &dimm_dod[1]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM2, &dimm_dod[2]);
-
- (*channels)++;
-
- for (j = 0; j < 3; j++) {
- if (!DIMM_PRESENT(dimm_dod[j]))
- continue;
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels on socket %d: %d\n",
- socket, *channels);
- return 0;
-}
-
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
struct pci_dev *pdev;
int i, j;
- int csrow = 0;
- unsigned long last_page = 0;
enum edac_type mode;
enum mem_type mtype;
+ struct dimm_info *dimm;
/* Get data from the MC register, function 0 */
pdev = pvt->pci_mcr[0];
@@ -657,21 +557,20 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_ch[i][0],
MC_CHANNEL_DIMM_INIT_PARAMS, &data);
- pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
- 4 : 2;
+
+ if (data & THREE_DIMMS_PRESENT)
+ pvt->channel[i].is_3dimms_present = true;
+
+ if (data & SINGLE_QUAD_RANK_PRESENT)
+ pvt->channel[i].is_single_4rank = true;
+
+ if (data & QUAD_RANK_PRESENT)
+ pvt->channel[i].has_4rank = true;
if (data & REGISTERED_DIMM)
mtype = MEM_RDDR3;
else
mtype = MEM_DDR3;
-#if 0
- if (data & THREE_DIMMS_PRESENT)
- pvt->channel[i].dimms = 3;
- else if (data & SINGLE_QUAD_RANK_PRESENT)
- pvt->channel[i].dimms = 1;
- else
- pvt->channel[i].dimms = 2;
-#endif
/* Devices 4-6 function 1 */
pci_read_config_dword(pvt->pci_ch[i][1],
@@ -682,11 +581,13 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
MC_DOD_CH_DIMM2, &dimm_dod[2]);
debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
- "%d ranks, %cDIMMs\n",
+ "%s%s%s%cDIMMs\n",
i,
RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
data,
- pvt->channel[i].ranks,
+ pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
+ pvt->channel[i].is_single_4rank ? "SINGLE_4R " : "",
+ pvt->channel[i].has_4rank ? "HAS_4R " : "",
(data & REGISTERED_DIMM) ? 'R' : 'U');
for (j = 0; j < 3; j++) {
@@ -696,6 +597,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
if (!DIMM_PRESENT(dimm_dod[j]))
continue;
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ i, j, 0);
banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
@@ -704,8 +607,6 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- pvt->channel[i].dimms++;
-
debugf0("\tdimm %d %d Mb offset: %x, "
"bank: %d, rank: %d, row: %#x, col: %#x\n",
j, size,
@@ -714,44 +615,28 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
npages = MiB_TO_PAGES(size);
- csr = &mci->csrows[csrow];
- csr->first_page = last_page + 1;
- last_page += npages;
- csr->last_page = last_page;
- csr->nr_pages = npages;
-
- csr->page_mask = 0;
- csr->grain = 8;
- csr->csrow_idx = csrow;
- csr->nr_channels = 1;
-
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
-
- pvt->csrow_map[i][j] = csrow;
+ dimm->nr_pages = npages;
switch (banks) {
case 4:
- csr->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
break;
case 8:
- csr->dtype = DEV_X8;
+ dimm->dtype = DEV_X8;
break;
case 16:
- csr->dtype = DEV_X16;
+ dimm->dtype = DEV_X16;
break;
default:
- csr->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
}
- csr->edac_mode = mode;
- csr->mtype = mtype;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
- "CPU#%uChannel#%u_DIMM#%u",
- pvt->i7core_dev->socket, i, j);
-
- csrow++;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "CPU#%uChannel#%u_DIMM#%u",
+ pvt->i7core_dev->socket, i, j);
+ dimm->grain = 8;
+ dimm->edac_mode = mode;
+ dimm->mtype = mtype;
}
pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
@@ -1361,7 +1246,7 @@ static int i7core_get_onedevice(struct pci_dev **prev,
dev_descr->dev_id, *prev);
/*
- * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
+ * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs
* is at addr 8086:2c40, instead of 8086:2c41. So, we need
* to probe for the alternate address in case of failure
*/
@@ -1567,22 +1452,16 @@ error:
/****************************************************************************
Error check routines
****************************************************************************/
-static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
const int chan,
const int dimm,
const int add)
{
- char *msg;
- struct i7core_pvt *pvt = mci->pvt_info;
- int row = pvt->csrow_map[chan][dimm], i;
+ int i;
for (i = 0; i < add; i++) {
- msg = kasprintf(GFP_KERNEL, "Corrected error "
- "(Socket=%d channel=%d dimm=%d)",
- pvt->i7core_dev->socket, chan, dimm);
-
- edac_mc_handle_fbd_ce(mci, row, 0, msg);
- kfree (msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ chan, dimm, -1, "error", "", NULL);
}
}
@@ -1623,11 +1502,11 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
/*updated the edac core */
if (add0 != 0)
- i7core_rdimm_update_csrow(mci, chan, 0, add0);
+ i7core_rdimm_update_errcount(mci, chan, 0, add0);
if (add1 != 0)
- i7core_rdimm_update_csrow(mci, chan, 1, add1);
+ i7core_rdimm_update_errcount(mci, chan, 1, add1);
if (add2 != 0)
- i7core_rdimm_update_csrow(mci, chan, 2, add2);
+ i7core_rdimm_update_errcount(mci, chan, 2, add2);
}
@@ -1747,20 +1626,30 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- char *type, *optype, *err, *msg;
+ char *type, *optype, *err, msg[80];
+ enum hw_event_mc_err_type tp_event;
unsigned long error = m->status & 0x1ff0000l;
+ bool uncorrected_error = m->mcgstatus & 1ll << 61;
+ bool ripv = m->mcgstatus & 1;
u32 optypenum = (m->status >> 4) & 0x07;
u32 core_err_cnt = (m->status >> 38) & 0x7fff;
u32 dimm = (m->misc >> 16) & 0x3;
u32 channel = (m->misc >> 18) & 0x3;
u32 syndrome = m->misc >> 32;
u32 errnum = find_first_bit(&error, 32);
- int csrow;
- if (m->mcgstatus & 1)
- type = "FATAL";
- else
- type = "NON_FATAL";
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
switch (optypenum) {
case 0:
@@ -1815,27 +1704,20 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
err = "unknown";
}
- /* FIXME: should convert addr into bank and rank information */
- msg = kasprintf(GFP_ATOMIC,
- "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
- "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
- type, (long long) m->addr, m->cpu, dimm, channel,
- syndrome, core_err_cnt, (long long)m->status,
- (long long)m->misc, optype, err);
-
- debugf0("%s", msg);
+ snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
- csrow = pvt->csrow_map[channel][dimm];
-
- /* Call the helper to output message */
- if (m->mcgstatus & 1)
- edac_mc_handle_fbd_ue(mci, csrow, 0,
- 0 /* FIXME: should be channel here */, msg);
- else if (!pvt->is_registered)
- edac_mc_handle_fbd_ce(mci, csrow,
- 0 /* FIXME: should be channel here */, msg);
-
- kfree(msg);
+ /*
+ * Call the helper to output message
+ * FIXME: what to do if core_err_cnt > 1? Currently, it generates
+ * only one event
+ */
+ if (uncorrected_error || !pvt->is_registered)
+ edac_mc_handle_error(tp_event, mci,
+ m->addr >> PAGE_SHIFT,
+ m->addr & ~PAGE_MASK,
+ syndrome,
+ channel, dimm, -1,
+ err, msg, m);
}
/*
@@ -1932,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->bank != 8)
return NOTIFY_DONE;
-#ifdef CONFIG_SMP
- /* Only handle if it is the right mc controller */
- if (mce->socketid != pvt->i7core_dev->socket)
- return NOTIFY_DONE;
-#endif
-
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
smp_wmb();
@@ -2132,7 +2008,7 @@ static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
/*
* get_sdram_scrub_rate This routine convert current scrub rate value
- * into byte/sec bandwidth accourding to
+ * into byte/sec bandwidth according to
* SCRUBINTERVAL formula found in datasheet.
*/
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
@@ -2234,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
if (pvt->enable_scrub)
disable_sdram_scrub_setting(mci);
- mce_unregister_decode_chain(&i7_mce_dec);
-
/* Disable EDAC polling */
i7core_pci_ctl_release(pvt);
@@ -2252,15 +2126,19 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
struct mem_ctl_info *mci;
struct i7core_pvt *pvt;
- int rc, channels, csrows;
-
- /* Check the number of active and not disabled channels */
- rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
- if (unlikely(rc < 0))
- return rc;
+ int rc;
+ struct edac_mc_layer layers[2];
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = MAX_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
if (unlikely(!mci))
return -ENOMEM;
@@ -2336,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* DCLK for scrub rate setting */
pvt->dclk_freq = get_dclk_freq();
- mce_register_decode_chain(&i7_mce_dec);
-
return 0;
fail0:
@@ -2481,8 +2357,10 @@ static int __init i7core_init(void)
pci_rc = pci_register_driver(&i7core_driver);
- if (pci_rc >= 0)
+ if (pci_rc >= 0) {
+ mce_register_decode_chain(&i7_mce_dec);
return 0;
+ }
i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
pci_rc);
@@ -2498,6 +2376,7 @@ static void __exit i7core_exit(void)
{
debugf2("MC: " __FILE__ ": %s()\n", __func__);
pci_unregister_driver(&i7core_driver);
+ mce_unregister_decode_chain(&i7_mce_dec);
}
module_init(i7core_init);
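
The i7300 and i7core hunks above also fold the per-technology reporting helpers (edac_mc_handle_ce/ue and edac_mc_handle_fbd_ce/ue) into a single edac_mc_handle_error() call. Below is a minimal, hedged sketch of a corrected-error report in that style, with the argument order read off the call sites above; report_ce() and its parameters are hypothetical.

#include <linux/edac.h>
#include "edac_core.h"	/* edac_mc_handle_error() lives in the EDAC core */

/*
 * Hedged sketch: the argument order mirrors the call sites above:
 * (type, mci, error page, offset in page, syndrome,
 *  top/mid/low layer positions (-1 when unknown or unused),
 *  short message, extra detail, arch-specific log or NULL).
 */
static void report_ce(struct mem_ctl_info *mci, unsigned long addr,
		      u32 syndrome, int chan, int slot)
{
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     addr >> PAGE_SHIFT,	/* error page frame */
			     addr & ~PAGE_MASK,		/* offset within page */
			     syndrome,
			     chan, slot, -1,		/* position in each layer */
			     "single-bit error", "", NULL);
}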
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 3bf2b2f490e7..52072c28a8a6 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -12,7 +12,7 @@
* 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
*
* Written with reference to 82443BX Host Bridge Datasheet:
- * http://download.intel.com/design/chipsets/datashts/29063301.pdf
+ * http://download.intel.com/design/chipsets/datashts/29063301.pdf
* references to this document given in [].
*
* This module doesn't support the 440LX, but it may be possible to
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, pageoffset,
- /* 440BX/GX don't make syndrome information
- * available */
- 0, edac_mc_find_csrow_by_page(mci, page), 0,
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, pageoffset, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1, mci->ctl_name, "", NULL);
}
if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_ue(mci, page, pageoffset,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, pageoffset, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1, mci->ctl_name, "", NULL);
}
return error_found;
@@ -189,6 +189,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
enum mem_type mtype)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
u8 drbar, dramc;
u32 row_base, row_high_limit, row_high_limit_last;
@@ -197,6 +198,8 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
mci->mc_idx, __FILE__, __func__, index, drbar);
@@ -217,14 +220,14 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_base = row_high_limit_last;
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+ dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* EAP reports in 4kilobyte granularity [61] */
- csrow->grain = 1 << 12;
- csrow->mtype = mtype;
+ dimm->grain = 1 << 12;
+ dimm->mtype = mtype;
/* I don't think 440BX can tell you device type? FIXME? */
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
/* Mode is global to all rows on 440BX */
- csrow->edac_mode = edac_mode;
+ dimm->edac_mode = edac_mode;
row_high_limit_last = row_high_limit;
}
}
@@ -232,6 +235,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u8 dramc;
u32 nbxcfg, ecc_mode;
enum mem_type mtype;
@@ -245,8 +249,13 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
return -EIO;
- mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82443BXGX_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = I82443BXGX_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
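
The simpler chipset drivers in this part of the patch (i82443bxgx above, i82860 and i82875p below) keep the legacy csrow/channel view, but still describe it through the two-layer form and move size and type information onto struct dimm_info. A hedged sketch of that shape follows; alloc_simple_mc() and its parameters are illustrative, assuming a chipset with nr_csrows chip-selects and nr_chans channels.

#include <linux/edac.h>
#include "edac_core.h"

/* Hedged sketch of the two-layer "virtual csrow" description used above. */
static struct mem_ctl_info *alloc_simple_mc(unsigned int nr_csrows,
					    unsigned int nr_chans)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct dimm_info *dimm;
	int row;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = nr_csrows;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_chans;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return NULL;

	for (row = 0; row < mci->nr_csrows; row++) {
		/* Address geometry (first_page/last_page) stays on the csrow; */
		/* size and memory type now live on the per-channel DIMM.      */
		dimm = mci->csrows[row].channels[0].dimm;
		dimm->nr_pages = 0;		/* filled from chipset registers */
		dimm->grain = 1 << 12;		/* e.g. 4 KiB error resolution */
		dimm->mtype = MEM_DDR;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}
	return mci;
}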
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index c779092d18d1..08045059d10b 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -99,6 +99,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
struct i82860_error_info *info,
int handle_errors)
{
+ struct dimm_info *dimm;
int row;
if (!(info->errsts2 & 0x0003))
@@ -108,18 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
info->eap >>= PAGE_SHIFT;
row = edac_mc_find_csrow_by_page(mci, info->eap);
+ dimm = mci->csrows[row].channels[0].dimm;
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ info->eap, 0, 0,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 UE", "", NULL);
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
- "i82860 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ info->eap, 0, info->derrsyn,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 CE", "", NULL);
return 1;
}
@@ -140,6 +148,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
u16 value;
u32 cumul_size;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
@@ -153,6 +162,8 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
*/
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
@@ -164,30 +175,38 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ dimm->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
- csrow->mtype = MEM_RMBS;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+ dimm->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
+ dimm->mtype = MEM_RMBS;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
}
}
static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82860_error_info discard;
- /* RDRAM has channels but these don't map onto the abstractions that
- edac uses.
- The device groups from the GRA registers seem to map reasonably
- well onto the notion of a chip select row.
- There are 16 GRA registers and since the name is associated with
- the channel and the GRA registers map to physical devices so we are
- going to make 1 channel for group.
+ /*
+ * RDRAM has channels but these don't map onto the csrow abstraction.
+ * According to the datasheet, there are 2 Rambus channels, supporting
+ * up to 16 direct RDRAM devices.
+ * The device groups from the GRA registers seem to map reasonably
+ * well onto the notion of a chip select row.
+ * There are 16 GRA registers and, since the name is associated with
+ * the channel and the GRA registers map to physical devices, we are
+ * going to make one channel per group.
*/
- mci = edac_mc_alloc(0, 16, 1, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 2;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 8;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 10f15d85fb5e..b613e31c16e5 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -38,7 +38,8 @@
#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
/* four csrows in dual channel, eight in single channel */
-#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82875P_NR_DIMMS 8
+#define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans))
/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
@@ -235,7 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0081) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -243,11 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, info->eap);
if (info->errsts & 0x0080)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ info->eap, 0, 0,
+ row, -1, -1,
+ "i82875p UE", "", NULL);
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
- multi_chan ? (info->des & 0x1) : 0,
- "i82875p CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ info->eap, 0, info->derrsyn,
+ row, multi_chan ? (info->des & 0x1) : 0,
+ -1, "i82875p CE", "", NULL);
return 1;
}
@@ -342,11 +349,13 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
void __iomem * ovrfl_window, u32 drc)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
+ unsigned nr_chans = dual_channel_active(drc) + 1;
unsigned long last_cumul_size;
u8 value;
u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
- u32 cumul_size;
- int index;
+ u32 cumul_size, nr_pages;
+ int index, j;
drc_ddim = (drc >> 18) & 0x1;
last_cumul_size = 0;
@@ -369,12 +378,18 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+
+ for (j = 0; j < nr_chans; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / nr_chans;
+ dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
+ dimm->mtype = MEM_DDR;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
}
}
@@ -382,6 +397,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82875p_pvt *pvt;
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window;
@@ -397,9 +413,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
return -ENODEV;
drc = readl(ovrfl_window + I82875P_DRC);
nr_chans = dual_channel_active(drc) + 1;
- mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
- nr_chans, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82875P_NR_CSROWS(nr_chans);
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0cd8368f88f8..433332c7cdba 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -29,7 +29,8 @@
#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
-#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82975X_NR_DIMMS 8
+#define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans))
/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
@@ -287,7 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -309,13 +311,18 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
offst = info->eap
& ((1 << PAGE_SHIFT) -
- (1 << mci->csrows[row].grain));
+ (1 << mci->csrows[row].channels[chan].dimm->grain));
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offst, 0,
+ row, -1, -1,
+ "i82975x UE", "", NULL);
else
- edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
- chan, "i82975x CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offst, info->derrsyn,
+ row, chan ? chan : 0, -1,
+ "i82975x CE", "", NULL);
return 1;
}
@@ -370,8 +377,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
- u32 cumul_size;
+ u32 cumul_size, nr_pages;
int index, chan;
+ struct dimm_info *dimm;
+ enum dev_type dtype;
last_cumul_size = 0;
@@ -400,28 +409,33 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
+ nr_pages = cumul_size - last_cumul_size;
+ if (!nr_pages)
+ continue;
+
/*
* Initialise dram labels
* index values:
* [0-7] for single-channel; i.e. csrow->nr_channels = 1
* [0-3] for dual-channel; i.e. csrow->nr_channels = 2
*/
- for (chan = 0; chan < csrow->nr_channels; chan++)
- strncpy(csrow->channels[chan].label,
+ dtype = i82975x_dram_type(mch_window, index);
+ for (chan = 0; chan < csrow->nr_channels; chan++) {
+ dimm = mci->csrows[index].channels[chan].dimm;
+
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ strncpy(csrow->channels[chan].dimm->label,
labels[(index >> 1) + (chan * 2)],
EDAC_MC_LABEL_LEN);
-
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
+ dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
+ dimm->dtype = dtype;
+ dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
+ dimm->edac_mode = EDAC_SECDED; /* only supported */
+ }
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
- csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
- csrow->dtype = i82975x_dram_type(mch_window, index);
- csrow->edac_mode = EDAC_SECDED; /* only supported */
}
}
@@ -463,6 +477,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82975x_pvt *pvt;
void __iomem *mch_window;
u32 mchbar;
@@ -531,8 +546,13 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
chans = dual_channel_active(mch_window) + 1;
/* assuming only one controller, index thus is 0 */
- mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
- chans, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82975X_NR_DIMMS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = I82975X_NR_CSROWS(chans);
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail1;
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index c6074c5cd1ef..8c87a5e87057 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -5,8 +5,6 @@
#include <asm/mce.h>
-#define BIT_64(n) (U64_C(1) << (n))
-
#define EC(x) ((x) & 0xffff)
#define XEC(x, mask) (((x) >> 16) & mask)
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 73464a62adf7..0e374625f6f8 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,12 +854,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
- syndrome, row_index, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "", NULL);
if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
- row_index, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "", NULL);
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}
@@ -883,6 +887,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 sdram_ctl;
u32 sdtype;
enum mem_type mtype;
@@ -929,6 +934,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
u32 end;
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
(index * MPC85XX_MC_CS_BNDS_OFS));
@@ -944,19 +951,21 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
csrow->first_page = start;
csrow->last_page = end;
- csrow->nr_pages = end + 1 - start;
- csrow->grain = 8;
- csrow->mtype = mtype;
- csrow->dtype = DEV_UNKNOWN;
+
+ dimm->nr_pages = end + 1 - start;
+ dimm->grain = 8;
+ dimm->mtype = mtype;
+ dimm->dtype = DEV_UNKNOWN;
if (sdram_ctl & DSC_X32_EN)
- csrow->dtype = DEV_X32;
- csrow->edac_mode = EDAC_SECDED;
+ dimm->dtype = DEV_X32;
+ dimm->edac_mode = EDAC_SECDED;
}
}
static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct mpc85xx_mc_pdata *pdata;
struct resource r;
u32 sdram_ctl;
@@ -965,7 +974,14 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
- mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 4;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(*pdata));
if (!mci) {
devres_release_group(&op->dev, mpc85xx_mc_err_probe);
return -ENOMEM;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 7e5ff367705c..b0bb5a3d2527 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -611,12 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
if (!(reg & 0x1))
- edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, syndrome, 0, 0,
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, syndrome,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
else /* 2 bit error, UE */
- edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, 0,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
/* clear the error */
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -656,6 +661,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
struct mv64x60_mc_pdata *pdata)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
+
u32 devtype;
u32 ctl;
@@ -664,35 +671,36 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
csrow = &mci->csrows[0];
- csrow->first_page = 0;
- csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = 8;
+ dimm = csrow->channels[0].dimm;
+
+ dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+ dimm->grain = 8;
- csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+ dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
devtype = (ctl >> 20) & 0x3;
switch (devtype) {
case 0x0:
- csrow->dtype = DEV_X32;
+ dimm->dtype = DEV_X32;
break;
case 0x2: /* could be X8 too, but no way to tell */
- csrow->dtype = DEV_X16;
+ dimm->dtype = DEV_X16;
break;
case 0x3:
- csrow->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
break;
default:
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
break;
}
- csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
}
static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct mv64x60_mc_pdata *pdata;
struct resource *r;
u32 ctl;
@@ -701,7 +709,14 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
- mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(struct mv64x60_mc_pdata));
if (!mci) {
printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 7f71ee436744..b095a906a994 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -110,15 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
/* uncorrectable/multi-bit errors */
if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
MCDEBUG_ERRSTA_RFL_STATUS)) {
- edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
- cs, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ mci->csrows[cs].first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "", NULL);
}
/* correctable/single-bit errors */
- if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
- edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
- 0, cs, 0, mci->ctl_name);
- }
+ if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ mci->csrows[cs].first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "", NULL);
}
static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -135,11 +136,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 rankcfg;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
pci_read_config_dword(pdev,
MCDRAM_RANKCFG + (index * 12),
@@ -151,20 +154,20 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
MCDRAM_RANKCFG_TYPE_SIZE_S) {
case 0:
- csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
break;
case 1:
- csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
break;
case 2:
case 3:
- csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
break;
case 4:
- csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
break;
case 5:
- csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
break;
default:
edac_mc_printk(mci, KERN_ERR,
@@ -174,13 +177,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
}
csrow->first_page = last_page_in_mmc;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- last_page_in_mmc += csrow->nr_pages;
+ csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
+ last_page_in_mmc += dimm->nr_pages;
csrow->page_mask = 0;
- csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
+ dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
+ dimm->mtype = MEM_DDR;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = edac_mode;
}
return 0;
}
@@ -189,6 +192,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
u32 errctl1, errcor, scrub, mcen;
pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
@@ -205,9 +209,14 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
MCDEBUG_ERRCTL1_RFL_LOG_EN;
pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
- mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
- system_mmc_id++);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = PASEMI_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = PASEMI_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
+ 0);
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index d427c69bb8b1..f3f9fed06ad7 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,7 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ce_no_info(mci, message);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ row, 0, -1,
+ message, "", NULL);
}
/**
@@ -755,7 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ue(mci, page, offset, row, message);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ row, 0, -1,
+ message, "", NULL);
}
/**
@@ -895,9 +901,8 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
enum mem_type mtype;
enum dev_type dtype;
enum edac_type edac_mode;
- int row;
- u32 mbxcf, size;
- static u32 ppc4xx_last_page;
+ int row, j;
+ u32 mbxcf, size, nr_pages;
/* Establish the memory type and width */
@@ -948,7 +953,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
case SDRAM_MBCF_SZ_2GB:
case SDRAM_MBCF_SZ_4GB:
case SDRAM_MBCF_SZ_8GB:
- csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
+ nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
break;
default:
ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -959,10 +964,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
goto done;
}
- csi->first_page = ppc4xx_last_page;
- csi->last_page = csi->first_page + csi->nr_pages - 1;
- csi->page_mask = 0;
-
/*
* It's unclear exactly what grain should be set to
* here. The SDRAM_ECCES register allows resolution of
@@ -975,15 +976,17 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
* possible values would be the PLB width (16), the
* page size (PAGE_SIZE) or the memory width (2 or 4).
*/
+ for (j = 0; j < csi->nr_channels; j++) {
+ struct dimm_info *dimm = csi->channels[j].dimm;
- csi->grain = 1;
-
- csi->mtype = mtype;
- csi->dtype = dtype;
+ dimm->nr_pages = nr_pages / csi->nr_channels;
+ dimm->grain = 1;
- csi->edac_mode = edac_mode;
+ dimm->mtype = mtype;
+ dimm->dtype = dtype;
- ppc4xx_last_page += csi->nr_pages;
+ dimm->edac_mode = edac_mode;
+ }
}
done:
@@ -1236,6 +1239,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
dcr_host_t dcr_host;
const struct device_node *np = op->dev.of_node;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
static int ppc4xx_edac_instance;
/*
@@ -1281,12 +1285,14 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
* controller instance and perform the appropriate
* initialization.
*/
-
- mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
- ppc4xx_edac_nr_csrows,
- ppc4xx_edac_nr_chans,
- ppc4xx_edac_instance);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = ppc4xx_edac_nr_csrows;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = ppc4xx_edac_nr_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
+ sizeof(struct ppc4xx_edac_pdata));
if (mci == NULL) {
ppc4xx_edac_printk(KERN_ERR, "%s: "
"Failed to allocate EDAC MC instance!\n",
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 6d908ad72d64..e1cacd164f31 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -179,10 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, 0, /* not avail */
- syndrome,
- edac_mc_find_csrow_by_page(mci, page),
- 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, 0, syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1,
+ mci->ctl_name, "", NULL);
}
if (info->eapr & BIT(1)) { /* UE? */
@@ -190,9 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
if (handle_errors)
/* 82600 doesn't give enough info */
- edac_mc_handle_ue(mci, page, 0,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, 0, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1,
+ mci->ctl_name, "", NULL);
}
return error_found;
@@ -216,6 +219,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u8 dramcr)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
u8 drbar; /* SDRAM Row Boundary Address Register */
u32 row_high_limit, row_high_limit_last;
@@ -227,6 +231,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
@@ -247,16 +252,17 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+
+ dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* Error address is top 19 bits - so granularity is *
* 14 bits */
- csrow->grain = 1 << 14;
- csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+ dimm->grain = 1 << 14;
+ dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
/* FIXME - check that this is unknowable with this chipset */
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
/* Mode is global on 82600 */
- csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+ dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
row_high_limit_last = row_high_limit;
}
}
@@ -264,6 +270,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u8 dramcr;
u32 eapr;
u32 scrub_disabled;
@@ -278,8 +285,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
sdram_refresh_rate);
debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
- mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = R82600_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = R82600_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
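
All of the converted error paths funnel into the unified edac_mc_handle_error() helper; the three values after the syndrome are positions within the layers declared at allocation time, with -1 meaning "not known at this layer". A hedged sketch of a corrected-error report for a csrow/channel layout like the r82600 driver above (the message string is a placeholder):

static void my_report_ce(struct mem_ctl_info *mci, unsigned long page,
			 unsigned long offset, unsigned long syndrome,
			 int csrow, int channel)
{
	/* Layer positions: top = csrow, mid = channel, low = unused (-1). */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     page, offset, syndrome,
			     csrow, channel, -1,
			     "single-bit read error", "", NULL);
}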
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index a203536d90dd..36ad17e79d61 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -58,7 +58,7 @@ static int probed;
/*
* FIXME: For now, let's order by device function, as it makes
- * easier for driver's development proccess. This table should be
+ * easier for driver's development process. This table should be
* moved to pci_id.h when submitted upstream
*/
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
@@ -314,8 +314,6 @@ struct sbridge_pvt {
struct sbridge_info info;
struct sbridge_channel channel[NUM_CHANNELS];
- int csrow_map[NUM_CHANNELS][MAX_DIMMS];
-
/* Memory type detection */
bool is_mirrored, is_lockstep, is_close_pg;
@@ -375,7 +373,7 @@ static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = {
/****************************************************************************
- Anciliary status routines
+ Ancillary status routines
****************************************************************************/
static inline int numrank(u32 mtr)
@@ -487,29 +485,14 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
}
/**
- * sbridge_get_active_channels() - gets the number of channels and csrows
+ * check_if_ecc_is_active() - Checks if ECC is active
* bus: Device bus
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket, identified
- * by the associated PCI bus.
- * this is used in order to properly allocate the size of mci components.
- * Note: one csrow is one dimm.
*/
-static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
- unsigned *csrows)
+static int check_if_ecc_is_active(const u8 bus)
{
struct pci_dev *pdev = NULL;
- int i, j;
u32 mcmtr;
- *channels = 0;
- *csrows = 0;
-
pdev = get_pdev_slot_func(bus, 15, 0);
if (!pdev) {
sbridge_printk(KERN_ERR, "Couldn't find PCI device "
@@ -523,41 +506,14 @@ static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
return -ENODEV;
}
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- u32 mtr;
-
- /* Device 15 functions 2 - 5 */
- pdev = get_pdev_slot_func(bus, 15, 2 + i);
- if (!pdev) {
- sbridge_printk(KERN_ERR, "Couldn't find PCI device "
- "%2x.%02d.%d!!!\n",
- bus, 15, 2 + i);
- return -ENODEV;
- }
- (*channels)++;
-
- for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
- pci_read_config_dword(pdev, mtr_regs[j], &mtr);
- debugf1("Bus#%02x channel #%d MTR%d = %x\n", bus, i, j, mtr);
- if (IS_DIMM_PRESENT(mtr))
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels: %d, number of active dimms: %d\n",
- *channels, *csrows);
-
return 0;
}
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
+ struct dimm_info *dimm;
int i, j, banks, ranks, rows, cols, size, npages;
- int csrow = 0;
- unsigned long last_page = 0;
u32 reg;
enum edac_type mode;
enum mem_type mtype;
@@ -599,7 +555,7 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pvt->is_close_pg = false;
}
- pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
+ pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
if (IS_RDIMM_ENABLED(reg)) {
/* FIXME: Can also be LRDIMM */
debugf0("Memory is registered\n");
@@ -616,6 +572,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
u32 mtr;
for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ i, j, 0);
pci_read_config_dword(pvt->pci_tad[i],
mtr_regs[j], &mtr);
debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
@@ -634,29 +592,15 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pvt->sbridge_dev->mc, i, j,
size, npages,
banks, ranks, rows, cols);
- csr = &mci->csrows[csrow];
-
- csr->first_page = last_page;
- csr->last_page = last_page + npages - 1;
- csr->page_mask = 0UL; /* Unused */
- csr->nr_pages = npages;
- csr->grain = 32;
- csr->csrow_idx = csrow;
- csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
- csr->ce_count = 0;
- csr->ue_count = 0;
- csr->mtype = mtype;
- csr->edac_mode = mode;
- csr->nr_channels = 1;
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
- pvt->csrow_map[i][j] = csrow;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
+
+ dimm->nr_pages = npages;
+ dimm->grain = 32;
+ dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
+ dimm->mtype = mtype;
+ dimm->edac_mode = mode;
+ snprintf(dimm->label, sizeof(dimm->label),
"CPU_SrcID#%u_Channel#%u_DIMM#%u",
pvt->sbridge_dev->source_id, i, j);
- last_page += npages;
- csrow++;
}
}
}
@@ -844,11 +788,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u8 *socket,
long *channel_mask,
u8 *rank,
- char *area_type)
+ char **area_type, char *msg)
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
- char msg[256];
int n_rir, n_sads, n_tads, sad_way, sck_xch;
int sad_interl, idx, base_ch;
int interleave_mode;
@@ -870,12 +813,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
*/
if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr >= (u64)pvt->tohm) {
sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
@@ -892,7 +833,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = SAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr <= limit)
@@ -901,10 +841,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
if (n_sads == MAX_SAD) {
sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
- area_type = get_dram_attr(reg);
+ *area_type = get_dram_attr(reg);
interleave_mode = INTERLEAVE_MODE(reg);
pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -942,7 +881,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Can't discover socket interleave");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
*socket = sad_interleave[idx];
@@ -957,7 +895,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (!new_mci) {
sprintf(msg, "Struct for socket #%u wasn't initialized",
*socket);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
mci = new_mci;
@@ -973,7 +910,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = TAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory channel");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr <= limit)
@@ -1013,7 +949,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Can't discover the TAD target");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
*channel_mask = 1 << base_ch;
@@ -1027,7 +962,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
} else
@@ -1055,7 +989,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (offset > addr) {
sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
offset, addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
addr -= offset;
@@ -1095,7 +1028,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (n_rir == MAX_RIR_RANGES) {
sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
ch_addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
rir_way = RIR_WAY(reg);
@@ -1409,7 +1341,8 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
- char *type, *optype, *msg, *recoverable_msg;
+ enum hw_event_mc_err_type tp_event;
+ char *type, *optype, msg[256];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1421,16 +1354,24 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
long channel_mask, first_channel;
u8 rank, socket;
- int csrow, rc, dimm;
- char *area_type = "Unknown";
-
- if (ripv)
- type = "NON_FATAL";
- else
- type = "FATAL";
+ int rc, dimm;
+ char *area_type = NULL;
+
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
/*
- * According with Table 15-9 of the Intel Archictecture spec vol 3A,
+ * According to Table 15-9 of the Intel Architecture spec vol 3A,
* memory errors should fit in this mask:
* 000f 0000 1mmm cccc (binary)
* where:
@@ -1445,19 +1386,19 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
} else {
switch (optypenum) {
case 0:
- optype = "generic undef request";
+ optype = "generic undef request error";
break;
case 1:
- optype = "memory read";
+ optype = "memory read error";
break;
case 2:
- optype = "memory write";
+ optype = "memory write error";
break;
case 3:
- optype = "addr/cmd";
+ optype = "addr/cmd error";
break;
case 4:
- optype = "memory scrubbing";
+ optype = "memory scrubbing error";
break;
default:
optype = "reserved";
@@ -1466,13 +1407,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
}
rc = get_memory_error_data(mci, m->addr, &socket,
- &channel_mask, &rank, area_type);
+ &channel_mask, &rank, &area_type, msg);
if (rc < 0)
- return;
+ goto err_parsing;
new_mci = get_mci_for_node_id(socket);
if (!new_mci) {
- edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!");
- return;
+ strcpy(msg, "Error: socket got corrupted!");
+ goto err_parsing;
}
mci = new_mci;
pvt = mci->pvt_info;
@@ -1486,45 +1427,39 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
else
dimm = 2;
- csrow = pvt->csrow_map[first_channel][dimm];
-
- if (uncorrected_error && recoverable)
- recoverable_msg = " recoverable";
- else
- recoverable_msg = "";
/*
- * FIXME: What should we do with "channel" information on mcelog?
- * Probably, we can just discard it, as the channel information
- * comes from the get_memory_error_data() address decoding
+ * FIXME: On some memory configurations (mirror, lockstep), the
+ * Memory Controller can't point the error to a single DIMM. The
+ * EDAC core should be handling the channel mask, in order to point
+ * to the group of DIMMs where the error may be happening.
*/
- msg = kasprintf(GFP_ATOMIC,
- "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), "
- "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n",
- core_err_cnt,
- area_type,
- optype,
- type,
- recoverable_msg,
- overflow ? "OVERFLOW" : "",
- m->cpu,
- mscod, errcode,
- channel, /* 1111b means not specified */
- (long long) m->addr,
- socket,
- first_channel, /* This is the real channel on SB */
- channel_mask,
- rank);
+ snprintf(msg, sizeof(msg),
+ "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
+ core_err_cnt,
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ area_type,
+ mscod, errcode,
+ socket,
+ channel_mask,
+ rank);
debugf0("%s", msg);
+ /* FIXME: need support for channel mask */
+
/* Call the helper to output message */
- if (uncorrected_error)
- edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg);
- else
- edac_mc_handle_fbd_ce(mci, csrow, 0, msg);
+ edac_mc_handle_error(tp_event, mci,
+ m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+ channel, dimm, -1,
+ optype, msg, m);
+ return;
+err_parsing:
+ edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ -1, -1, -1,
+ msg, "", m);
- kfree(msg);
}
/*
@@ -1669,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
__func__, mci, &sbridge_dev->pdev[0]->dev);
- mce_unregister_decode_chain(&sbridge_mce_dec);
-
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->dev);
@@ -1683,16 +1616,25 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct sbridge_pvt *pvt;
- int rc, channels, csrows;
+ int rc;
/* Check the number of active and not disabled channels */
- rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows);
+ rc = check_if_ecc_is_active(sbridge_dev->bus);
if (unlikely(rc < 0))
return rc;
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc);
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANNELS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = MAX_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
+
if (unlikely(!mci))
return -ENOMEM;
@@ -1738,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
goto fail0;
}
- mce_register_decode_chain(&sbridge_mce_dec);
return 0;
fail0:
@@ -1867,8 +1808,10 @@ static int __init sbridge_init(void)
pci_rc = pci_register_driver(&sbridge_driver);
- if (pci_rc >= 0)
+ if (pci_rc >= 0) {
+ mce_register_decode_chain(&sbridge_mce_dec);
return 0;
+ }
sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
pci_rc);
@@ -1884,6 +1827,7 @@ static void __exit sbridge_exit(void)
{
debugf2("MC: " __FILE__ ": %s()\n", __func__);
pci_unregister_driver(&sbridge_driver);
+ mce_unregister_decode_chain(&sbridge_mce_dec);
}
module_init(sbridge_init);
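
sb_edac describes its hierarchy as channel x slot rather than csrow x channel, so per-DIMM attributes are reached through the EDAC_DIMM_PTR() coordinate lookup instead of csrow->channels[]. A sketch of that access pattern, reusing the driver's NUM_CHANNELS/MAX_DIMMS bounds (the attribute values below are placeholders, not what the driver programs):

static void my_fill_dimms(struct mem_ctl_info *mci)
{
	int chan, slot;

	for (chan = 0; chan < NUM_CHANNELS; chan++) {		/* layer 0 */
		for (slot = 0; slot < MAX_DIMMS; slot++) {	/* layer 1 */
			struct dimm_info *dimm;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
					     mci->n_layers, chan, slot, 0);
			dimm->nr_pages = 0;	/* would come from MTR decoding */
			dimm->grain = 32;
			dimm->mtype = MEM_DDR3;
			snprintf(dimm->label, sizeof(dimm->label),
				 "Channel#%d_DIMM#%d", chan, slot);
		}
	}
}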
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index e99d00976189..7bb4614730db 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -71,7 +71,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
if (mem_error.sbe_count != priv->ce_count) {
dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
priv->ce_count = mem_error.sbe_count;
- edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -84,6 +87,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
struct csrow_info *csrow = &mci->csrows[0];
struct tile_edac_priv *priv = mci->pvt_info;
struct mshim_mem_info mem_info;
+ struct dimm_info *dimm = csrow->channels[0].dimm;
if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -93,27 +97,25 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
}
if (mem_info.mem_ecc)
- csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ dimm->edac_mode = EDAC_NONE;
switch (mem_info.mem_type) {
case DDR2:
- csrow->mtype = MEM_DDR2;
+ dimm->mtype = MEM_DDR2;
break;
case DDR3:
- csrow->mtype = MEM_DDR3;
+ dimm->mtype = MEM_DDR3;
break;
default:
return -1;
}
- csrow->first_page = 0;
- csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = TILE_EDAC_ERROR_GRAIN;
- csrow->dtype = DEV_UNKNOWN;
+ dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
+ dimm->grain = TILE_EDAC_ERROR_GRAIN;
+ dimm->dtype = DEV_UNKNOWN;
return 0;
}
@@ -123,6 +125,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
char hv_file[32];
int hv_devhdl;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct tile_edac_priv *priv;
int rc;
@@ -132,8 +135,14 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
return -EINVAL;
/* A TILE MC has a single channel and one chip-select row. */
- mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
- TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = TILE_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = TILE_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+ sizeof(struct tile_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index a438297389e5..1ac7962d63ea 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -215,19 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
for (channel = 0; channel < x38_channel_num; channel++) {
log = info->eccerrlog[channel];
if (log & X38_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log), "x38 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "x38 UE", "", NULL);
} else if (log & X38_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0, "x38 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "x38 CE", "", NULL);
}
}
}
@@ -317,9 +324,9 @@ static unsigned long drb_to_nr_pages(
static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
+ struct edac_mc_layer layers[2];
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
@@ -335,7 +342,13 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
how_many_channel(pdev);
/* FIXME: unconventional pvt_info usage */
- mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = X38_RANKS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = x38_channel_num;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
@@ -363,7 +376,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
@@ -372,20 +384,18 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
i / X38_RANKS_PER_CHANNEL,
i % X38_RANKS_PER_CHANNEL);
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
+ if (nr_pages == 0)
continue;
- }
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
+ for (j = 0; j < x38_channel_num; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+ dimm->nr_pages = nr_pages / x38_channel_num;
+ dimm->grain = nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
x38_clear_error_info(mci);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 23416e443765..a4ed30bd9a41 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = {
[5] = "Charge-downstream",
[6] = "MHL",
[7] = "Dock-desk",
- [7] = "Dock-card",
- [8] = "JIG",
+ [8] = "Dock-card",
+ [9] = "JIG",
NULL,
};
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
extcon_dev_unregister(info->edev);
+ kfree(info->edev);
kfree(info);
return 0;
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon_class.c
index f598a700ec15..159aeb07b3ba 100644
--- a/drivers/extcon/extcon_class.c
+++ b/drivers/extcon/extcon_class.c
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
#if defined(CONFIG_ANDROID)
if (switch_class)
ret = class_compat_create_link(switch_class, edev->dev,
- dev);
+ NULL);
#endif /* CONFIG_ANDROID */
spin_lock_init(&edev->lock);
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe7a07b47336..8a0dcc11c7c7 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
if (ret < 0)
goto err_request_irq;
+ platform_set_drvdata(pdev, extcon_data);
/* Perform initial detection */
gpio_extcon_work(&extcon_data->work.work);
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&extcon_data->work);
+ free_irq(extcon_data->irq, extcon_data);
gpio_free(extcon_data->gpio);
extcon_dev_unregister(&extcon_data->edev);
devm_kfree(&pdev->dev, extcon_data);
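
The extcon-gpio fixes above are about keeping probe() and remove() symmetric: drvdata has to be stored before remove() can retrieve it, and a requested IRQ has to be released before the GPIO behind it is freed. A sketch of the balanced shape (all names are illustrative, not the extcon driver's own):

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_data {
	int irq;
	unsigned gpio;
};

static int __devinit my_probe(struct platform_device *pdev)
{
	struct my_data *data;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* ... gpio_request_one() and request_irq() would go here ... */

	platform_set_drvdata(pdev, data);	/* remove() depends on this */
	return 0;
}

static int __devexit my_remove(struct platform_device *pdev)
{
	struct my_data *data = platform_get_drvdata(pdev);

	free_irq(data->irq, data);	/* drop the IRQ before freeing its GPIO */
	gpio_free(data->gpio);
	return 0;
}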
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index cc595eba7ba9..57ea7f464178 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -421,8 +421,8 @@ static void bm_work(struct work_struct *work)
* root, and thus, IRM.
*/
new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "BM lock failed", new_root_id);
+ fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
+ fw_rcode_string(rcode), new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
@@ -676,6 +676,7 @@ void fw_card_release(struct kref *kref)
complete(&card->done);
}
+EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 2e6b24547e2a..2783f69dada6 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,6 +22,7 @@
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
@@ -70,6 +71,7 @@ struct client {
u64 iso_closure;
struct fw_iso_buffer buffer;
unsigned long vm_start;
+ bool buffer_is_mapped;
struct list_head phy_receiver_link;
u64 phy_receiver_closure;
@@ -959,11 +961,20 @@ static void iso_mc_callback(struct fw_iso_context *context,
sizeof(e->interrupt), NULL, 0);
}
+static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
+{
+ if (context->type == FW_ISO_CONTEXT_TRANSMIT)
+ return DMA_TO_DEVICE;
+ else
+ return DMA_FROM_DEVICE;
+}
+
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
fw_iso_callback_t cb;
+ int ret;
BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
@@ -1004,8 +1015,21 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
if (client->iso_context != NULL) {
spin_unlock_irq(&client->lock);
fw_iso_context_destroy(context);
+
return -EBUSY;
}
+ if (!client->buffer_is_mapped) {
+ ret = fw_iso_buffer_map_dma(&client->buffer,
+ client->device->card,
+ iso_dma_direction(context));
+ if (ret < 0) {
+ spin_unlock_irq(&client->lock);
+ fw_iso_context_destroy(context);
+
+ return ret;
+ }
+ client->buffer_is_mapped = true;
+ }
client->iso_closure = a->closure;
client->iso_context = context;
spin_unlock_irq(&client->lock);
@@ -1651,7 +1675,6 @@ static long fw_device_op_compat_ioctl(struct file *file,
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
struct client *client = file->private_data;
- enum dma_data_direction direction;
unsigned long size;
int page_count, ret;
@@ -1674,20 +1697,28 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
if (size & ~PAGE_MASK)
return -EINVAL;
- if (vma->vm_flags & VM_WRITE)
- direction = DMA_TO_DEVICE;
- else
- direction = DMA_FROM_DEVICE;
-
- ret = fw_iso_buffer_init(&client->buffer, client->device->card,
- page_count, direction);
+ ret = fw_iso_buffer_alloc(&client->buffer, page_count);
if (ret < 0)
return ret;
- ret = fw_iso_buffer_map(&client->buffer, vma);
+ spin_lock_irq(&client->lock);
+ if (client->iso_context) {
+ ret = fw_iso_buffer_map_dma(&client->buffer,
+ client->device->card,
+ iso_dma_direction(client->iso_context));
+ client->buffer_is_mapped = (ret == 0);
+ }
+ spin_unlock_irq(&client->lock);
if (ret < 0)
- fw_iso_buffer_destroy(&client->buffer, client->device->card);
+ goto fail;
+ ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+ fail:
+ fw_iso_buffer_destroy(&client->buffer, client->device->card);
return ret;
}
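
With page allocation and DMA mapping now separated, the cdev layer can allocate the buffer at mmap() time and postpone the mapping until an isochronous context exists and the transfer direction is known. Both the mmap path and the create-context ioctl reduce to logic along these lines (a condensed sketch of the code above, called with client->lock held):

static int my_map_buffer_for_context(struct client *client,
				     struct fw_iso_context *ctx)
{
	int ret;

	if (client->buffer_is_mapped)
		return 0;		/* already done by the other path */

	ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
				    iso_dma_direction(ctx));
	if (ret < 0)
		return ret;

	client->buffer_is_mapped = true;
	return 0;
}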
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 68109e9bb04e..4d460ef87161 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -481,6 +481,7 @@ static int read_rom(struct fw_device *device,
* generation changes under us, read_config_rom will fail and get retried.
* It's better to start all over in this case because the node from which we
* are reading the ROM may have changed the ROM during the reset.
+ * Returns either a result code or a negative error code.
*/
static int read_config_rom(struct fw_device *device, int generation)
{
@@ -488,7 +489,7 @@ static int read_config_rom(struct fw_device *device, int generation)
const u32 *old_rom, *new_rom;
u32 *rom, *stack;
u32 sp, key;
- int i, end, length, ret = -1;
+ int i, end, length, ret;
rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
@@ -502,18 +503,21 @@ static int read_config_rom(struct fw_device *device, int generation)
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
- if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+ ret = read_rom(device, generation, i, &rom[i]);
+ if (ret != RCODE_COMPLETE)
goto out;
/*
- * As per IEEE1212 7.2, during power-up, devices can
+ * As per IEEE1212 7.2, during initialization, devices can
* reply with a 0 for the first quadlet of the config
* rom to indicate that they are booting (for example,
* if the firmware is on the disk of a external
* harddisk). In that case we just fail, and the
* retry mechanism will try again later.
*/
- if (i == 0 && rom[i] == 0)
+ if (i == 0 && rom[i] == 0) {
+ ret = RCODE_BUSY;
goto out;
+ }
}
device->max_speed = device->node->max_speed;
@@ -563,11 +567,14 @@ static int read_config_rom(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
- if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
+ if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) {
+ ret = -ENXIO;
goto out;
+ }
/* Read header quadlet for the block to get the length. */
- if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
+ ret = read_rom(device, generation, i, &rom[i]);
+ if (ret != RCODE_COMPLETE)
goto out;
end = i + (rom[i] >> 16) + 1;
if (end > MAX_CONFIG_ROM_SIZE) {
@@ -590,8 +597,8 @@ static int read_config_rom(struct fw_device *device, int generation)
* it references another block, and push it in that case.
*/
for (; i < end; i++) {
- if (read_rom(device, generation, i, &rom[i]) !=
- RCODE_COMPLETE)
+ ret = read_rom(device, generation, i, &rom[i]);
+ if (ret != RCODE_COMPLETE)
goto out;
if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
@@ -619,8 +626,10 @@ static int read_config_rom(struct fw_device *device, int generation)
old_rom = device->config_rom;
new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
- if (new_rom == NULL)
+ if (new_rom == NULL) {
+ ret = -ENOMEM;
goto out;
+ }
down_write(&fw_device_rwsem);
device->config_rom = new_rom;
@@ -628,7 +637,7 @@ static int read_config_rom(struct fw_device *device, int generation)
up_write(&fw_device_rwsem);
kfree(old_rom);
- ret = 0;
+ ret = RCODE_COMPLETE;
device->max_rec = rom[2] >> 12 & 0xf;
device->cmc = rom[2] >> 30 & 1;
device->irmc = rom[2] >> 31 & 1;
@@ -967,15 +976,17 @@ static void fw_device_init(struct work_struct *work)
* device.
*/
- if (read_config_rom(device, device->generation) < 0) {
+ ret = read_config_rom(device, device->generation);
+ if (ret != RCODE_COMPLETE) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
fw_schedule_device_work(device, RETRY_DELAY);
} else {
if (device->node->link_on)
- fw_notice(card, "giving up on Config ROM for node id %x\n",
- device->node_id);
+ fw_notice(card, "giving up on node %x: reading config rom failed: %s\n",
+ device->node_id,
+ fw_rcode_string(ret));
if (device->node == card->root_node)
fw_schedule_bm_work(card, 0);
fw_device_release(&device->device);
@@ -1069,31 +1080,30 @@ static void fw_device_init(struct work_struct *work)
put_device(&device->device); /* our reference */
}
-enum {
- REREAD_BIB_ERROR,
- REREAD_BIB_GONE,
- REREAD_BIB_UNCHANGED,
- REREAD_BIB_CHANGED,
-};
-
/* Reread and compare bus info block and header of root directory */
-static int reread_config_rom(struct fw_device *device, int generation)
+static int reread_config_rom(struct fw_device *device, int generation,
+ bool *changed)
{
u32 q;
- int i;
+ int i, rcode;
for (i = 0; i < 6; i++) {
- if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
- return REREAD_BIB_ERROR;
+ rcode = read_rom(device, generation, i, &q);
+ if (rcode != RCODE_COMPLETE)
+ return rcode;
if (i == 0 && q == 0)
- return REREAD_BIB_GONE;
+ /* inaccessible (see read_config_rom); retry later */
+ return RCODE_BUSY;
- if (q != device->config_rom[i])
- return REREAD_BIB_CHANGED;
+ if (q != device->config_rom[i]) {
+ *changed = true;
+ return RCODE_COMPLETE;
+ }
}
- return REREAD_BIB_UNCHANGED;
+ *changed = false;
+ return RCODE_COMPLETE;
}
static void fw_device_refresh(struct work_struct *work)
@@ -1101,23 +1111,14 @@ static void fw_device_refresh(struct work_struct *work)
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
- int node_id = device->node_id;
-
- switch (reread_config_rom(device, device->generation)) {
- case REREAD_BIB_ERROR:
- if (device->config_rom_retries < MAX_RETRIES / 2 &&
- atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
- device->config_rom_retries++;
- fw_schedule_device_work(device, RETRY_DELAY / 2);
-
- return;
- }
- goto give_up;
+ int ret, node_id = device->node_id;
+ bool changed;
- case REREAD_BIB_GONE:
- goto gone;
+ ret = reread_config_rom(device, device->generation, &changed);
+ if (ret != RCODE_COMPLETE)
+ goto failed_config_rom;
- case REREAD_BIB_UNCHANGED:
+ if (!changed) {
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
@@ -1126,9 +1127,6 @@ static void fw_device_refresh(struct work_struct *work)
fw_device_update(work);
device->config_rom_retries = 0;
goto out;
-
- case REREAD_BIB_CHANGED:
- break;
}
/*
@@ -1137,16 +1135,9 @@ static void fw_device_refresh(struct work_struct *work)
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
- if (read_config_rom(device, device->generation) < 0) {
- if (device->config_rom_retries < MAX_RETRIES &&
- atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
- device->config_rom_retries++;
- fw_schedule_device_work(device, RETRY_DELAY);
-
- return;
- }
- goto give_up;
- }
+ ret = read_config_rom(device, device->generation);
+ if (ret != RCODE_COMPLETE)
+ goto failed_config_rom;
fw_device_cdev_update(device);
create_units(device);
@@ -1163,9 +1154,16 @@ static void fw_device_refresh(struct work_struct *work)
device->config_rom_retries = 0;
goto out;
- give_up:
- fw_notice(card, "giving up on refresh of device %s\n",
- dev_name(&device->device));
+ failed_config_rom:
+ if (device->config_rom_retries < MAX_RETRIES &&
+ atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
+ device->config_rom_retries++;
+ fw_schedule_device_work(device, RETRY_DELAY);
+ return;
+ }
+
+ fw_notice(card, "giving up on refresh of device %s: %s\n",
+ dev_name(&device->device), fw_rcode_string(ret));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
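
read_config_rom() and reread_config_rom() now report status either as an RCODE_* value or as a negative errno, with RCODE_COMPLETE meaning success; callers compare against RCODE_COMPLETE rather than zero and only format bus-level codes through fw_rcode_string(). A small sketch of that convention:

static void my_log_rom_failure(struct fw_card *card, int ret)
{
	if (ret == RCODE_COMPLETE)
		return;				/* nothing to report */

	if (ret < 0)				/* local error, e.g. -ENOMEM */
		fw_notice(card, "config rom read failed, error %d\n", ret);
	else					/* bus-level result code */
		fw_notice(card, "config rom read failed: %s\n",
			  fw_rcode_string(ret));
}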
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index d1565828ae2c..8382e27e9a27 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -39,52 +39,73 @@
* Isochronous DMA context management
*/
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
- int page_count, enum dma_data_direction direction)
+int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
- int i, j;
- dma_addr_t address;
-
- buffer->page_count = page_count;
- buffer->direction = direction;
+ int i;
+ buffer->page_count = 0;
+ buffer->page_count_mapped = 0;
buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
GFP_KERNEL);
if (buffer->pages == NULL)
- goto out;
+ return -ENOMEM;
- for (i = 0; i < buffer->page_count; i++) {
+ for (i = 0; i < page_count; i++) {
buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (buffer->pages[i] == NULL)
- goto out_pages;
+ break;
+ }
+ buffer->page_count = i;
+ if (i < page_count) {
+ fw_iso_buffer_destroy(buffer, NULL);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
+ enum dma_data_direction direction)
+{
+ dma_addr_t address;
+ int i;
+
+ buffer->direction = direction;
+
+ for (i = 0; i < buffer->page_count; i++) {
address = dma_map_page(card->device, buffer->pages[i],
0, PAGE_SIZE, direction);
- if (dma_mapping_error(card->device, address)) {
- __free_page(buffer->pages[i]);
- goto out_pages;
- }
+ if (dma_mapping_error(card->device, address))
+ break;
+
set_page_private(buffer->pages[i], address);
}
+ buffer->page_count_mapped = i;
+ if (i < buffer->page_count)
+ return -ENOMEM;
return 0;
+}
- out_pages:
- for (j = 0; j < i; j++) {
- address = page_private(buffer->pages[j]);
- dma_unmap_page(card->device, address,
- PAGE_SIZE, direction);
- __free_page(buffer->pages[j]);
- }
- kfree(buffer->pages);
- out:
- buffer->pages = NULL;
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction)
+{
+ int ret;
+
+ ret = fw_iso_buffer_alloc(buffer, page_count);
+ if (ret < 0)
+ return ret;
+
+ ret = fw_iso_buffer_map_dma(buffer, card, direction);
+ if (ret < 0)
+ fw_iso_buffer_destroy(buffer, card);
- return -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL(fw_iso_buffer_init);
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
+int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
+ struct vm_area_struct *vma)
{
unsigned long uaddr;
int i, err;
@@ -107,15 +128,18 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
int i;
dma_addr_t address;
- for (i = 0; i < buffer->page_count; i++) {
+ for (i = 0; i < buffer->page_count_mapped; i++) {
address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
PAGE_SIZE, buffer->direction);
- __free_page(buffer->pages[i]);
}
+ for (i = 0; i < buffer->page_count; i++)
+ __free_page(buffer->pages[i]);
kfree(buffer->pages);
buffer->pages = NULL;
+ buffer->page_count = 0;
+ buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
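
After the split, fw_iso_buffer_init() is simply the two steps glued together, and fw_iso_buffer_destroy() copes with partially built buffers because page_count and page_count_mapped are tracked separately. A sketch of an in-kernel user that only needs the combined helper (the receive direction is an assumption):

#include <linux/firewire.h>
#include <linux/dma-mapping.h>

static int my_setup_iso_buffer(struct fw_card *card,
			       struct fw_iso_buffer *buffer, int pages)
{
	int ret;

	/* Allocates the pages and maps them for DMA in one call. */
	ret = fw_iso_buffer_init(buffer, card, pages, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	/* ... attach the buffer to an isochronous receive context ... */

	fw_iso_buffer_destroy(buffer, card);
	return 0;
}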
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index dea2dcc9310d..780708dc6e25 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -820,6 +820,15 @@ void fw_send_response(struct fw_card *card,
}
EXPORT_SYMBOL(fw_send_response);
+/**
+ * fw_get_request_speed() - returns speed at which the @request was received
+ */
+int fw_get_request_speed(struct fw_request *request)
+{
+ return request->response.speed;
+}
+EXPORT_SYMBOL(fw_get_request_speed);
+
static void handle_exclusive_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
@@ -994,6 +1003,32 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
}
EXPORT_SYMBOL(fw_core_handle_response);
+/**
+ * fw_rcode_string - convert a firewire result code to an error description
+ * @rcode: the result code
+ */
+const char *fw_rcode_string(int rcode)
+{
+ static const char *const names[] = {
+ [RCODE_COMPLETE] = "no error",
+ [RCODE_CONFLICT_ERROR] = "conflict error",
+ [RCODE_DATA_ERROR] = "data error",
+ [RCODE_TYPE_ERROR] = "type error",
+ [RCODE_ADDRESS_ERROR] = "address error",
+ [RCODE_SEND_ERROR] = "send error",
+ [RCODE_CANCELLED] = "timeout",
+ [RCODE_BUSY] = "busy",
+ [RCODE_GENERATION] = "bus reset",
+ [RCODE_NO_ACK] = "no ack",
+ };
+
+ if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
+ return names[rcode];
+ else
+ return "unknown";
+}
+EXPORT_SYMBOL(fw_rcode_string);
+
static const struct fw_address_region topology_map_region =
{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
.end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
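
fw_rcode_string() is a plain table lookup over the RCODE_* space built with designated initializers, so out-of-range or unnamed codes fall back to "unknown". Typical use is in log messages when a transaction completes with an error, for example:

static void my_complete(struct fw_card *card, int rcode)
{
	if (rcode != RCODE_COMPLETE)
		fw_notice(card, "request failed: %s\n", fw_rcode_string(rcode));
}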
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 9047f5547d98..515a42c786d0 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -3,6 +3,7 @@
#include <linux/compiler.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
@@ -120,21 +121,6 @@ int fw_compute_block_crc(__be32 *block);
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
-static inline struct fw_card *fw_card_get(struct fw_card *card)
-{
- kref_get(&card->kref);
-
- return card;
-}
-
-void fw_card_release(struct kref *kref);
-
-static inline void fw_card_put(struct fw_card *card)
-{
- kref_put(&card->kref, fw_card_release);
-}
-
-
/* -cdev */
extern const struct file_operations fw_device_ops;
@@ -169,7 +155,11 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* -iso */
-int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
+int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
+int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
+ enum dma_data_direction direction);
+int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
+ struct vm_area_struct *vma);
/* -topology */
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index a7c4422a688e..4ebfb2273672 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -693,6 +693,8 @@ static struct pci_device_id pci_table[] __devinitdata = {
{ } /* Terminating entry */
};
+MODULE_DEVICE_TABLE(pci, pci_table);
+
static struct pci_driver lynx_pci_driver = {
.name = driver_name,
.id_table = pci_table,
@@ -700,22 +702,8 @@ static struct pci_driver lynx_pci_driver = {
.remove = remove_card,
};
+module_pci_driver(lynx_pci_driver);
+
MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, pci_table);
-
-static int __init nosy_init(void)
-{
- return pci_register_driver(&lynx_pci_driver);
-}
-
-static void __exit nosy_cleanup(void)
-{
- pci_unregister_driver(&lynx_pci_driver);
-
- pr_info("Unloaded %s\n", driver_name);
-}
-
-module_init(nosy_init);
-module_exit(nosy_cleanup);
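
module_pci_driver() removes the hand-written init/exit stubs for modules whose only job is registering and unregistering a pci_driver; it expands to roughly the code it replaces here (a sketch of the expansion, with the generated names spelled out):

static int __init lynx_pci_driver_init(void)
{
	return pci_register_driver(&lynx_pci_driver);
}
module_init(lynx_pci_driver_init);

static void __exit lynx_pci_driver_exit(void)
{
	pci_unregister_driver(&lynx_pci_driver);
}
module_exit(lynx_pci_driver_exit);

The conversion only applies when init/exit contain nothing else, which is why nosy's "Unloaded" pr_info() is dropped rather than preserved.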
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 2b5460075a9f..c1af05e834b6 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1821,9 +1821,8 @@ static void bus_reset_work(struct work_struct *work)
{
struct fw_ohci *ohci =
container_of(work, struct fw_ohci, bus_reset_work);
- int self_id_count, i, j, reg;
- int generation, new_generation;
- unsigned long flags;
+ int self_id_count, generation, new_generation, i, j;
+ u32 reg;
void *free_rom = NULL;
dma_addr_t free_rom_bus = 0;
bool is_new_root;
@@ -1930,13 +1929,13 @@ static void bus_reset_work(struct work_struct *work)
}
/* FIXME: Document how the locking works. */
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
ohci->generation = -1; /* prevent AT packet queueing */
context_stop(&ohci->at_request_ctx);
context_stop(&ohci->at_response_ctx);
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
/*
* Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
@@ -1946,7 +1945,7 @@ static void bus_reset_work(struct work_struct *work)
at_context_flush(&ohci->at_request_ctx);
at_context_flush(&ohci->at_response_ctx);
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
ohci->generation = generation;
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
@@ -1990,7 +1989,7 @@ static void bus_reset_work(struct work_struct *work)
reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
if (free_rom)
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
@@ -2402,7 +2401,6 @@ static int ohci_set_config_rom(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci;
- unsigned long flags;
__be32 *next_config_rom;
dma_addr_t uninitialized_var(next_config_rom_bus);
@@ -2441,7 +2439,7 @@ static int ohci_set_config_rom(struct fw_card *card,
if (next_config_rom == NULL)
return -ENOMEM;
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
/*
* If there is not an already pending config_rom update,
@@ -2467,7 +2465,7 @@ static int ohci_set_config_rom(struct fw_card *card,
reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
/* If we didn't use the DMA allocation, delete it. */
if (next_config_rom != NULL)
@@ -2891,10 +2889,9 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
descriptor_callback_t uninitialized_var(callback);
u64 *uninitialized_var(channels);
u32 *uninitialized_var(mask), uninitialized_var(regs);
- unsigned long flags;
int index, ret = -EBUSY;
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
switch (type) {
case FW_ISO_CONTEXT_TRANSMIT:
@@ -2938,7 +2935,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
ret = -ENOSYS;
}
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
if (index < 0)
return ERR_PTR(ret);
@@ -2964,7 +2961,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
out_with_header:
free_page((unsigned long)ctx->header);
out:
- spin_lock_irqsave(&ohci->lock, flags);
+ spin_lock_irq(&ohci->lock);
switch (type) {
case FW_ISO_CONTEXT_RECEIVE:
@@ -2977,7 +2974,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
}
*mask |= 1 << index;
- spin_unlock_irqrestore(&ohci->lock, flags);
+ spin_unlock_irq(&ohci->lock);
return ERR_PTR(ret);
}
@@ -3789,6 +3786,8 @@ static struct pci_driver fw_ohci_pci_driver = {
#endif
};
+module_pci_driver(fw_ohci_pci_driver);
+
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");
@@ -3797,16 +3796,3 @@ MODULE_LICENSE("GPL");
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif
-
-static int __init fw_ohci_init(void)
-{
- return pci_register_driver(&fw_ohci_pci_driver);
-}
-
-static void __exit fw_ohci_cleanup(void)
-{
- pci_unregister_driver(&fw_ohci_pci_driver);
-}
-
-module_init(fw_ohci_init);
-module_exit(fw_ohci_cleanup);
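
The ohci.c locking changes rely on the fact that bus_reset_work() and the other converted paths run in process context with interrupts enabled, so saving and restoring the IRQ flags is unnecessary; spin_lock_irq()/spin_unlock_irq() disable and re-enable interrupts unconditionally. A minimal sketch of the pattern (the structure is illustrative):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_ctrl {
	spinlock_t lock;
	int generation;
	struct work_struct work;
};

static void my_work_handler(struct work_struct *work)
{
	struct my_ctrl *ctrl = container_of(work, struct my_ctrl, work);

	/* Process context, IRQs known to be enabled: no flags to save. */
	spin_lock_irq(&ctrl->lock);
	ctrl->generation = -1;
	spin_unlock_irq(&ctrl->lock);
}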
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b7e65d7eab64..1162d6b3bf85 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -207,9 +207,8 @@ static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
#define SBP2_MAX_CDB_SIZE 16
/*
- * The default maximum s/g segment size of a FireWire controller is
- * usually 0x10000, but SBP-2 only allows 0xffff. Since buffers have to
- * be quadlet-aligned, we set the length limit to 0xffff & ~3.
+ * The maximum SBP-2 data buffer size is 0xffff. We quadlet-align this
+ * for compatibility with earlier versions of this driver.
*/
#define SBP2_MAX_SEG_SIZE 0xfffc
@@ -1163,7 +1162,8 @@ static int sbp2_probe(struct device *dev)
shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
- if (scsi_add_host(shost, &unit->device) < 0)
+ if (scsi_add_host_with_dma(shost, &unit->device,
+ device->card->device) < 0)
goto fail_shost_put;
/* implicit directory ID */
@@ -1295,10 +1295,7 @@ static struct fw_driver sbp2_driver = {
static void sbp2_unmap_scatterlist(struct device *card_device,
struct sbp2_command_orb *orb)
{
- if (scsi_sg_count(orb->cmd))
- dma_unmap_sg(card_device, scsi_sglist(orb->cmd),
- scsi_sg_count(orb->cmd),
- orb->cmd->sc_data_direction);
+ scsi_dma_unmap(orb->cmd);
if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
dma_unmap_single(card_device, orb->page_table_bus,
@@ -1404,9 +1401,8 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
struct scatterlist *sg = scsi_sglist(orb->cmd);
int i, n;
- n = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
- orb->cmd->sc_data_direction);
- if (n == 0)
+ n = scsi_dma_map(orb->cmd);
+ if (n <= 0)
goto fail;
/*
@@ -1452,8 +1448,7 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
return 0;
fail_page_table:
- dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
- scsi_sg_count(orb->cmd), orb->cmd->sc_data_direction);
+ scsi_dma_unmap(orb->cmd);
fail:
return -ENOMEM;
}
@@ -1534,7 +1529,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
sdev->allow_restart = 1;
- /* SBP-2 requires quadlet alignment of the data buffers. */
+ /*
+ * SBP-2 does not require any alignment, but we set it anyway
+ * for compatibility with earlier versions of this driver.
+ */
blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
@@ -1568,8 +1566,6 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
- blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE);
-
return 0;
}
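
scsi_dma_map()/scsi_dma_unmap() wrap the dma_map_sg()/dma_unmap_sg() calls around the DMA device the SCSI host was registered with, which is why sbp2 now registers via scsi_add_host_with_dma(). A hedged sketch of the mapping step (helper name and error handling are illustrative):

#include <scsi/scsi_cmnd.h>

static int my_map_command(struct scsi_cmnd *cmd)
{
	int n;

	/*
	 * scsi_dma_map() returns the number of mapped segments, 0 when the
	 * command carries no data, or a negative value on failure.  SBP-2
	 * commands reaching this path always carry data, so <= 0 is treated
	 * as an error, mirroring sbp2_map_scatterlist() above.
	 */
	n = scsi_dma_map(cmd);
	if (n <= 0)
		return -ENOMEM;

	/* ... build the ORB page table from the n mapped segments ... */

	scsi_dma_unmap(cmd);
	return 0;
}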
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index eb80ba300452..542f0c04b695 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -2,6 +2,14 @@
# GPIO infrastructure and drivers
#
+config ARCH_HAVE_CUSTOM_GPIO_H
+ bool
+ help
+ Selecting this config option from the architecture Kconfig allows
+ the architecture to provide a custom asm/gpio.h implementation
+ overriding the default implementations. New uses of this are
+ strongly discouraged.
+
config ARCH_WANT_OPTIONAL_GPIOLIB
bool
help
@@ -37,6 +45,10 @@ menuconfig GPIOLIB
if GPIOLIB
+config OF_GPIO
+ def_bool y
+ depends on OF && !SPARC
+
config DEBUG_GPIO
bool "Debug GPIO calls"
depends on DEBUG_KERNEL
@@ -102,6 +114,14 @@ config GPIO_EP93XX
depends on ARCH_EP93XX
select GPIO_GENERIC
+config GPIO_MM_LANTIQ
+ bool "Lantiq Memory mapped GPIOs"
+ depends on LANTIQ && SOC_XWAY
+ help
+ This enables support for memory mapped GPIOs on the External Bus Unit
+ (EBU) found on Lantiq SoCs. The GPIOs are output only, as they are
+ created by attaching a 16-bit latch to the bus.
+
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -116,7 +136,7 @@ config GPIO_MPC8XXX
config GPIO_MSM_V1
tristate "Qualcomm MSM GPIO v1"
- depends on GPIOLIB && ARCH_MSM
+ depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
help
Say yes here to support the GPIO interface on ARM v6 based
Qualcomm MSM chips. Most of the pins on the MSM can be
@@ -155,6 +175,14 @@ config GPIO_PXA
help
Say yes here to support the PXA GPIO device
+config GPIO_STA2X11
+ bool "STA2x11/ConneXt GPIO support"
+ depends on MFD_STA2X11
+ select GENERIC_IRQ_CHIP
+ help
+ Say yes here to support the STA2x11/ConneXt GPIO device.
+ The GPIO module has 128 GPIO pins with alternate functions.
+
config GPIO_XILINX
bool "Xilinx GPIO support"
depends on PPC_OF || MICROBLAZE
@@ -168,13 +196,13 @@ config GPIO_VR41XX
Say yes here to support the NEC VR4100 series General-purpose I/O Uint
config GPIO_SCH
- tristate "Intel SCH/TunnelCreek GPIO"
+ tristate "Intel SCH/TunnelCreek/Centerton GPIO"
depends on PCI && X86
select MFD_CORE
select LPC_SCH
help
- Say yes here to support GPIO interface on Intel Poulsbo SCH
- or Intel Tunnel Creek processor.
+ Say yes here to support GPIO interface on Intel Poulsbo SCH,
+ Intel Tunnel Creek processor or Intel Centerton processor.
The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
powered by the core power rail and are turned off during sleep
modes (S3 and higher). The remaining four GPIOs are powered by
@@ -183,6 +211,22 @@ config GPIO_SCH
system from the Suspend-to-RAM state.
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
+ The Intel Centerton processor has a total of 30 GPIO pins.
+ Twenty-one are powered by the core power rail and 9 from the
+ suspend power supply.
+
+config GPIO_ICH
+ tristate "Intel ICH GPIO"
+ depends on PCI && X86
+ select MFD_CORE
+ select LPC_ICH
+ help
+ Say yes here to support the GPIO functionality of a number of Intel
+ ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
+ ICH9, ICH10, Series 5/3400 (eg Ibex Peak), Series 6/C200 (eg
+ Cougar Point), NM10 (Tiger Point), and 3100 (Whitmore Lake).
+
+ If unsure, say N.
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
@@ -249,7 +293,7 @@ config GPIO_MC9S08DZ60
Select this to enable the MC9S08DZ60 GPIO driver
config GPIO_PCA953X
- tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
+ tristate "PCA953x, PCA955x, PCA957x, TCA64xx, and MAX7310 I/O ports"
depends on I2C
help
Say yes here to provide access to several register-oriented
@@ -258,10 +302,11 @@ config GPIO_PCA953X
4 bits: pca9536, pca9537
- 8 bits: max7310, pca9534, pca9538, pca9554, pca9557,
- tca6408
+ 8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
+ pca9556, pca9557, pca9574, tca6408
- 16 bits: pca9535, pca9539, pca9555, tca6416
+ 16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
+ tca6416
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -294,6 +339,15 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
+config GPIO_RC5T583
+ bool "RICOH RC5T583 GPIO"
+ depends on MFD_RC5T583
+ help
+ Select this option to enable GPIO driver for the Ricoh RC5T583
+ chip family.
+ This driver provides the support for driving/reading the gpio pins
+ of RC5T583 device through standard gpio library.
+
config GPIO_SX150X
bool "Semtech SX150x I2C GPIO expander"
depends on I2C=y
@@ -312,6 +366,16 @@ config GPIO_STMPE
This enables support for the GPIOs found on the STMPE I/O
Expanders.
+config GPIO_STP_XWAY
+ bool "XWAY STP GPIOs"
+ depends on SOC_XWAY
+ help
+ This enables support for the Serial To Parallel (STP) unit found on
+ XWAY SoCs. The STP allows the SoC to drive a shift register cascade
+ of up to 24 bits. This peripheral is aimed at driving LEDs. Some of
+ the GPIOs/LEDs can be auto-updated by the SoC with DSL and PHY
+ status.
+
config GPIO_TC3589X
bool "TC3589X GPIOs"
depends on MFD_TC3589X
@@ -405,6 +469,7 @@ config GPIO_BT8XX
config GPIO_LANGWELL
bool "Intel Langwell/Penwell GPIO support"
depends on PCI && X86
+ select IRQ_DOMAIN
help
Say Y here to support Intel Langwell/Penwell GPIO.
@@ -520,4 +585,12 @@ config GPIO_TPS65910
help
Select this option to enable GPIO driver for the TPS65910
chip family.
+
+config GPIO_MSIC
+ bool "Intel MSIC mixed signal gpio support"
+ depends on MFD_INTEL_MSIC
+ help
+ Enable support for GPIO on Intel MSIC controllers found in
+ Intel MID devices.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 708ffb2165ea..0f55662002c3 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -3,6 +3,7 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o
+obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
+obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
@@ -31,8 +33,10 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
+obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
@@ -43,12 +47,15 @@ obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
+obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 8950f6261bbb..1077754f8289 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -71,6 +71,36 @@ int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
EXPORT_SYMBOL(devm_gpio_request);
/**
+ * devm_gpio_request_one - request a single GPIO with initial setup
+ * @dev: device to request for
+ * @gpio: the GPIO number
+ * @flags: GPIO configuration as specified by GPIOF_*
+ * @label: a literal description string of this GPIO
+ */
+int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label)
+{
+ unsigned *dr;
+ int rc;
+
+ dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = gpio_request_one(gpio, flags, label);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ *dr = gpio;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_gpio_request_one);
+
+/**
* devm_gpio_free - free an interrupt
* @dev: device to free gpio for
* @gpio: gpio to free
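A minimal, hypothetical consumer of the devm_gpio_request_one() helper added above might look like the sketch below; the probe function, GPIO number 42 and the "foo-reset" label are illustrative only, not part of this patch.

static int __devinit foo_probe(struct platform_device *pdev)
{
	int err;

	/* Request GPIO 42 as an output driven high; the managed core
	 * frees it automatically on probe failure or device unbind. */
	err = devm_gpio_request_one(&pdev->dev, 42,
				    GPIOF_OUT_INIT_HIGH, "foo-reset");
	if (err)
		return err;

	gpio_set_value(42, 0);		/* later, drive the line low */
	return 0;
}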
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 5ca4098ba092..e4cc7eb69bb2 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -328,17 +328,7 @@ static struct pci_driver bt8xxgpio_pci_driver = {
.resume = bt8xxgpio_resume,
};
-static int __init bt8xxgpio_init(void)
-{
- return pci_register_driver(&bt8xxgpio_pci_driver);
-}
-module_init(bt8xxgpio_init)
-
-static void __exit bt8xxgpio_exit(void)
-{
- pci_unregister_driver(&bt8xxgpio_pci_driver);
-}
-module_exit(bt8xxgpio_exit)
+module_pci_driver(bt8xxgpio_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Buesch");
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 776b772523e5..9fe5b8fe9be8 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -325,7 +325,7 @@ static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
void __iomem *dir = mmio_base + bank->dir;
int err;
- err = bgpio_init(bgc, dev, 1, data, NULL, NULL, dir, NULL, false);
+ err = bgpio_init(bgc, dev, 1, data, NULL, NULL, dir, NULL, 0);
if (err)
return err;
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index e38dd0c31973..82e2e4fe599e 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -364,7 +364,7 @@ EXPORT_SYMBOL_GPL(bgpio_remove);
int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
unsigned long sz, void __iomem *dat, void __iomem *set,
void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- bool big_endian)
+ unsigned long flags)
{
int ret;
@@ -385,7 +385,7 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, bgc, big_endian);
+ ret = bgpio_setup_accessors(dev, bgc, flags & BGPIOF_BIG_ENDIAN);
if (ret)
return ret;
@@ -394,6 +394,11 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
return ret;
bgc->data = bgc->read_reg(bgc->reg_dat);
+ if (bgc->gc.set == bgpio_set_set &&
+ !(flags & BGPIOF_UNREADABLE_REG_SET))
+ bgc->data = bgc->read_reg(bgc->reg_set);
+ if (bgc->reg_dir && !(flags & BGPIOF_UNREADABLE_REG_DIR))
+ bgc->dir = bgc->read_reg(bgc->reg_dir);
return ret;
}
@@ -449,7 +454,7 @@ static int __devinit bgpio_pdev_probe(struct platform_device *pdev)
void __iomem *dirout;
void __iomem *dirin;
unsigned long sz;
- bool be;
+ unsigned long flags = 0;
int err;
struct bgpio_chip *bgc;
struct bgpio_pdata *pdata = dev_get_platdata(dev);
@@ -480,13 +485,14 @@ static int __devinit bgpio_pdev_probe(struct platform_device *pdev)
if (err)
return err;
- be = !strcmp(platform_get_device_id(pdev)->name, "basic-mmio-gpio-be");
+ if (!strcmp(platform_get_device_id(pdev)->name, "basic-mmio-gpio-be"))
+ flags |= BGPIOF_BIG_ENDIAN;
bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
if (!bgc)
return -ENOMEM;
- err = bgpio_init(bgc, dev, sz, dat, set, clr, dirout, dirin, be);
+ err = bgpio_init(bgc, dev, sz, dat, set, clr, dirout, dirin, flags);
if (err)
return err;
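Callers that used to pass a big-endian bool to bgpio_init() now pass a flags word; a hedged sketch of a converted call, where dat/set/dir stand in for ioremap()ed register addresses and are not taken from this patch:

	err = bgpio_init(&bgc, dev, 4, dat, set, NULL, dir, NULL,
			 BGPIOF_BIG_ENDIAN | BGPIOF_UNREADABLE_REG_SET);
	if (err)
		return err;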
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
new file mode 100644
index 000000000000..b7c06517403d
--- /dev/null
+++ b/drivers/gpio/gpio-ich.c
@@ -0,0 +1,419 @@
+/*
+ * Intel ICH6-10, Series 5 and 6 GPIO driver
+ *
+ * Copyright (C) 2010 Extreme Engineering Solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define DRV_NAME "gpio_ich"
+
+/*
+ * GPIO register offsets in GPIO I/O space.
+ * Each chunk of 32 GPIOs is manipulated via its own USE_SELx, IO_SELx, and
+ * LVLx registers. Logic in the read/write functions takes a register and
+ * an absolute bit number and determines the proper register offset and bit
+ * number in that register. For example, to read the value of GPIO bit 50
+ * the code would access offset ichx_regs[2(=GPIO_LVL)][1(=50/32)],
+ * bit 18 (50%32).
+ */
+enum GPIO_REG {
+ GPIO_USE_SEL = 0,
+ GPIO_IO_SEL,
+ GPIO_LVL,
+};
+
+static const u8 ichx_regs[3][3] = {
+ {0x00, 0x30, 0x40}, /* USE_SEL[1-3] offsets */
+ {0x04, 0x34, 0x44}, /* IO_SEL[1-3] offsets */
+ {0x0c, 0x38, 0x48}, /* LVL[1-3] offsets */
+};
+
+#define ICHX_WRITE(val, reg, base_res) outl(val, (reg) + (base_res)->start)
+#define ICHX_READ(reg, base_res) inl((reg) + (base_res)->start)
+
+struct ichx_desc {
+ /* Max GPIO pins the chipset can have */
+ uint ngpio;
+
+ /* Whether the chipset has GPIO in GPE0_STS in the PM IO region */
+ bool uses_gpe0;
+
+ /* USE_SEL is bogus on some chipsets, eg 3100 */
+ u32 use_sel_ignore[3];
+
+ /* Some chipsets have quirks, let these use their own request/get */
+ int (*request)(struct gpio_chip *chip, unsigned offset);
+ int (*get)(struct gpio_chip *chip, unsigned offset);
+};
+
+static struct {
+ spinlock_t lock;
+ struct platform_device *dev;
+ struct gpio_chip chip;
+ struct resource *gpio_base; /* GPIO IO base */
+ struct resource *pm_base; /* Power Management IO base */
+ struct ichx_desc *desc; /* Pointer to chipset-specific description */
+ u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
+} ichx_priv;
+
+static int modparam_gpiobase = -1; /* dynamic */
+module_param_named(gpiobase, modparam_gpiobase, int, 0444);
+MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, "
+ "which is the default.");
+
+static int ichx_write_bit(int reg, unsigned nr, int val, int verify)
+{
+ unsigned long flags;
+ u32 data, tmp;
+ int reg_nr = nr / 32;
+ int bit = nr & 0x1f;
+ int ret = 0;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ if (val)
+ data |= 1 << bit;
+ else
+ data &= ~(1 << bit);
+ ICHX_WRITE(data, ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ tmp = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ if (verify && data != tmp)
+ ret = -EPERM;
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return ret;
+}
+
+static int ichx_read_bit(int reg, unsigned nr)
+{
+ unsigned long flags;
+ u32 data;
+ int reg_nr = nr / 32;
+ int bit = nr & 0x1f;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return data & (1 << bit) ? 1 : 0;
+}
+
+static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ /*
+ * Try setting pin as an input and verify it worked since many pins
+ * are output-only.
+ */
+ if (ichx_write_bit(GPIO_IO_SEL, nr, 1, 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ /* Set GPIO output value. */
+ ichx_write_bit(GPIO_LVL, nr, val, 0);
+
+ /*
+ * Try setting pin as an output and verify it worked since many pins
+ * are input-only.
+ */
+ if (ichx_write_bit(GPIO_IO_SEL, nr, 0, 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ichx_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+ return ichx_read_bit(GPIO_LVL, nr);
+}
+
+static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+ unsigned long flags;
+ u32 data;
+
+ /*
+ * GPI 0 - 15 need to be read from the power management registers on
+ * a ICH6/3100 bridge.
+ */
+ if (nr < 16) {
+ if (!ichx_priv.pm_base)
+ return -ENXIO;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ /* GPI 0 - 15 are latched, write 1 to clear */
+ ICHX_WRITE(1 << (16 + nr), 0, ichx_priv.pm_base);
+ data = ICHX_READ(0, ichx_priv.pm_base);
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return (data >> 16) & (1 << nr) ? 1 : 0;
+ } else {
+ return ichx_gpio_get(chip, nr);
+ }
+}
+
+static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+ /*
+ * Note we assume the BIOS properly set a bridge's USE value. Some
+ * chips (eg Intel 3100) have bogus USE values though, so first see if
+ * the chipset's USE value can be trusted for this specific bit.
+ * If it can't be trusted, assume that the pin can be used as a GPIO.
+ */
+ if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
+ return 1;
+
+ return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
+}
+
+static int ich6_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+ /*
+ * Fixups for bits 16 and 17 are necessary on the Intel ICH6/3100
+ * bridge as they are controlled by USE register bits 0 and 1. See
+ * "Table 704 GPIO_USE_SEL1 register" in the i3100 datasheet for
+ * additional info.
+ */
+ if (nr == 16 || nr == 17)
+ nr -= 16;
+
+ return ichx_gpio_request(chip, nr);
+}
+
+static void ichx_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
+{
+ ichx_write_bit(GPIO_LVL, nr, val, 0);
+}
+
+static void __devinit ichx_gpiolib_setup(struct gpio_chip *chip)
+{
+ chip->owner = THIS_MODULE;
+ chip->label = DRV_NAME;
+ chip->dev = &ichx_priv.dev->dev;
+
+ /* Allow chip-specific overrides of request()/get() */
+ chip->request = ichx_priv.desc->request ?
+ ichx_priv.desc->request : ichx_gpio_request;
+ chip->get = ichx_priv.desc->get ?
+ ichx_priv.desc->get : ichx_gpio_get;
+
+ chip->set = ichx_gpio_set;
+ chip->direction_input = ichx_gpio_direction_input;
+ chip->direction_output = ichx_gpio_direction_output;
+ chip->base = modparam_gpiobase;
+ chip->ngpio = ichx_priv.desc->ngpio;
+ chip->can_sleep = 0;
+ chip->dbg_show = NULL;
+}
+
+/* ICH6-based, 631xesb-based */
+static struct ichx_desc ich6_desc = {
+ /* Bridges using the ICH6 controller need fixups for GPIO 0 - 17 */
+ .request = ich6_gpio_request,
+ .get = ich6_gpio_get,
+
+ /* GPIO 0-15 are read in the GPE0_STS PM register */
+ .uses_gpe0 = true,
+
+ .ngpio = 50,
+};
+
+/* Intel 3100 */
+static struct ichx_desc i3100_desc = {
+ /*
+ * Bits 16,17, 20 of USE_SEL and bit 16 of USE_SEL2 always read 0 on
+ * the Intel 3100. See "Table 712. GPIO Summary Table" of 3100
+ * Datasheet for more info.
+ */
+ .use_sel_ignore = {0x00130000, 0x00010000, 0x0},
+
+ /* The 3100 needs fixups for GPIO 0 - 17 */
+ .request = ich6_gpio_request,
+ .get = ich6_gpio_get,
+
+ /* GPIO 0-15 are read in the GPE0_STS PM register */
+ .uses_gpe0 = true,
+
+ .ngpio = 50,
+};
+
+/* ICH7 and ICH8-based */
+static struct ichx_desc ich7_desc = {
+ .ngpio = 50,
+};
+
+/* ICH9-based */
+static struct ichx_desc ich9_desc = {
+ .ngpio = 61,
+};
+
+/* ICH10-based - Consumer/corporate versions have a different number of GPIOs */
+static struct ichx_desc ich10_cons_desc = {
+ .ngpio = 61,
+};
+static struct ichx_desc ich10_corp_desc = {
+ .ngpio = 72,
+};
+
+/* Intel 5 series, 6 series, 3400 series, and C200 series */
+static struct ichx_desc intel5_desc = {
+ .ngpio = 76,
+};
+
+static int __devinit ichx_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res_base, *res_pm;
+ int err;
+ struct lpc_ich_info *ich_info = pdev->dev.platform_data;
+
+ if (!ich_info)
+ return -ENODEV;
+
+ ichx_priv.dev = pdev;
+
+ switch (ich_info->gpio_version) {
+ case ICH_I3100_GPIO:
+ ichx_priv.desc = &i3100_desc;
+ break;
+ case ICH_V5_GPIO:
+ ichx_priv.desc = &intel5_desc;
+ break;
+ case ICH_V6_GPIO:
+ ichx_priv.desc = &ich6_desc;
+ break;
+ case ICH_V7_GPIO:
+ ichx_priv.desc = &ich7_desc;
+ break;
+ case ICH_V9_GPIO:
+ ichx_priv.desc = &ich9_desc;
+ break;
+ case ICH_V10CORP_GPIO:
+ ichx_priv.desc = &ich10_corp_desc;
+ break;
+ case ICH_V10CONS_GPIO:
+ ichx_priv.desc = &ich10_cons_desc;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
+ if (!res_base || !res_base->start || !res_base->end)
+ return -ENODEV;
+
+ if (!request_region(res_base->start, resource_size(res_base),
+ pdev->name))
+ return -EBUSY;
+
+ ichx_priv.gpio_base = res_base;
+
+ /*
+ * If necessary, determine the I/O address of ACPI/power management
+ * registers which are needed to read the GPE0 register for GPI pins
+ * 0 - 15 on some chipsets.
+ */
+ if (!ichx_priv.desc->uses_gpe0)
+ goto init;
+
+ res_pm = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPE0);
+ if (!res_pm) {
+ pr_warn("ACPI BAR is unavailable, GPI 0 - 15 unavailable\n");
+ goto init;
+ }
+
+ if (!request_region(res_pm->start, resource_size(res_pm),
+ pdev->name)) {
+ pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
+ goto init;
+ }
+
+ ichx_priv.pm_base = res_pm;
+
+init:
+ ichx_gpiolib_setup(&ichx_priv.chip);
+ err = gpiochip_add(&ichx_priv.chip);
+ if (err) {
+ pr_err("Failed to register GPIOs\n");
+ goto add_err;
+ }
+
+ pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
+ ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
+
+ return 0;
+
+add_err:
+ release_region(ichx_priv.gpio_base->start,
+ resource_size(ichx_priv.gpio_base));
+ if (ichx_priv.pm_base)
+ release_region(ichx_priv.pm_base->start,
+ resource_size(ichx_priv.pm_base));
+ return err;
+}
+
+static int __devexit ichx_gpio_remove(struct platform_device *pdev)
+{
+ int err;
+
+ err = gpiochip_remove(&ichx_priv.chip);
+ if (err) {
+ dev_err(&pdev->dev, "%s failed, %d\n",
+ "gpiochip_remove()", err);
+ return err;
+ }
+
+ release_region(ichx_priv.gpio_base->start,
+ resource_size(ichx_priv.gpio_base));
+ if (ichx_priv.pm_base)
+ release_region(ichx_priv.pm_base->start,
+ resource_size(ichx_priv.pm_base));
+
+ return 0;
+}
+
+static struct platform_driver ichx_gpio_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ },
+ .probe = ichx_gpio_probe,
+ .remove = __devexit_p(ichx_gpio_remove),
+};
+
+module_platform_driver(ichx_gpio_driver);
+
+MODULE_AUTHOR("Peter Tyser <ptyser@xes-inc.com>");
+MODULE_DESCRIPTION("GPIO interface for Intel ICH series");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:"DRV_NAME);
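To make the offset arithmetic described at the top of gpio-ich.c concrete, here is a small illustrative sketch; gpio_io_base is a stand-in for the driver's I/O resource start, not a symbol from this patch.

	/* Reading GPIO 50: it lives in the second bank of 32 pins. */
	unsigned nr = 50;
	int reg_nr = nr / 32;		/* 1: use the LVL2 register */
	int bit = nr & 0x1f;		/* 18: bit within LVL2      */
	u32 lvl = inl(gpio_io_base + ichx_regs[GPIO_LVL][reg_nr]);
	int value = (lvl >> bit) & 1;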
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index 00692e89ef87..a1c8754f52cf 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -36,6 +36,7 @@
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/irqdomain.h>
/*
* Langwell chip has 64 pins and thus there are 2 32bit registers to control
@@ -66,8 +67,8 @@ struct lnw_gpio {
struct gpio_chip chip;
void *reg_base;
spinlock_t lock;
- unsigned irq_base;
struct pci_dev *pdev;
+ struct irq_domain *domain;
};
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
@@ -176,13 +177,13 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
- return lnw->irq_base + offset;
+ return irq_create_mapping(lnw->domain, offset);
}
static int lnw_irq_type(struct irq_data *d, unsigned type)
{
struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
- u32 gpio = d->irq - lnw->irq_base;
+ u32 gpio = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
@@ -249,20 +250,55 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
/* check GPIO controller to check which pin triggered the interrupt */
for (base = 0; base < lnw->chip.ngpio; base += 32) {
gedr = gpio_reg(&lnw->chip, base, GEDR);
- pending = readl(gedr);
- while (pending) {
+ while ((pending = readl(gedr))) {
gpio = __ffs(pending);
mask = BIT(gpio);
- pending &= ~mask;
/* Clear before handling so we can't lose an edge */
writel(mask, gedr);
- generic_handle_irq(lnw->irq_base + base + gpio);
+ generic_handle_irq(irq_find_mapping(lnw->domain,
+ base + gpio));
}
}
chip->irq_eoi(data);
}
+static void lnw_irq_init_hw(struct lnw_gpio *lnw)
+{
+ void __iomem *reg;
+ unsigned base;
+
+ for (base = 0; base < lnw->chip.ngpio; base += 32) {
+ /* Clear the rising-edge detect register */
+ reg = gpio_reg(&lnw->chip, base, GRER);
+ writel(0, reg);
+ /* Clear the falling-edge detect register */
+ reg = gpio_reg(&lnw->chip, base, GFER);
+ writel(0, reg);
+ /* Clear the edge detect status register */
+ reg = gpio_reg(&lnw->chip, base, GEDR);
+ writel(~0, reg);
+ }
+}
+
+static int lnw_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct lnw_gpio *lnw = d->host_data;
+
+ irq_set_chip_and_handler_name(virq, &lnw_irqchip, handle_simple_irq,
+ "demux");
+ irq_set_chip_data(virq, lnw);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static const struct irq_domain_ops lnw_gpio_irq_ops = {
+ .map = lnw_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
#ifdef CONFIG_PM
static int lnw_gpio_runtime_resume(struct device *dev)
{
@@ -300,23 +336,22 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void *base;
- int i;
resource_size_t start, len;
struct lnw_gpio *lnw;
- u32 irq_base;
u32 gpio_base;
int retval = 0;
+ int ngpio = id->driver_data;
retval = pci_enable_device(pdev);
if (retval)
- goto done;
+ return retval;
retval = pci_request_regions(pdev, "langwell_gpio");
if (retval) {
dev_err(&pdev->dev, "error requesting resources\n");
goto err2;
}
- /* get the irq_base from bar1 */
+ /* get the gpio_base from bar1 */
start = pci_resource_start(pdev, 1);
len = pci_resource_len(pdev, 1);
base = ioremap_nocache(start, len);
@@ -324,28 +359,32 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "error mapping bar1\n");
goto err3;
}
- irq_base = *(u32 *)base;
gpio_base = *((u32 *)base + 1);
/* release the IO mapping, since we already get the info from bar1 */
iounmap(base);
/* get the register base from bar0 */
start = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
- base = ioremap_nocache(start, len);
+ base = devm_ioremap_nocache(&pdev->dev, start, len);
if (!base) {
dev_err(&pdev->dev, "error mapping bar0\n");
retval = -EFAULT;
goto err3;
}
- lnw = kzalloc(sizeof(struct lnw_gpio), GFP_KERNEL);
+ lnw = devm_kzalloc(&pdev->dev, sizeof(struct lnw_gpio), GFP_KERNEL);
if (!lnw) {
dev_err(&pdev->dev, "can't allocate langwell_gpio chip data\n");
retval = -ENOMEM;
- goto err4;
+ goto err3;
}
+
+ lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
+ &lnw_gpio_irq_ops, lnw);
+ if (!lnw->domain)
+ goto err3;
+
lnw->reg_base = base;
- lnw->irq_base = irq_base;
lnw->chip.label = dev_name(&pdev->dev);
lnw->chip.request = lnw_gpio_request;
lnw->chip.direction_input = lnw_gpio_direction_input;
@@ -354,38 +393,32 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->chip.set = lnw_gpio_set;
lnw->chip.to_irq = lnw_gpio_to_irq;
lnw->chip.base = gpio_base;
- lnw->chip.ngpio = id->driver_data;
+ lnw->chip.ngpio = ngpio;
lnw->chip.can_sleep = 0;
lnw->pdev = pdev;
pci_set_drvdata(pdev, lnw);
retval = gpiochip_add(&lnw->chip);
if (retval) {
dev_err(&pdev->dev, "langwell gpiochip_add error %d\n", retval);
- goto err5;
+ goto err3;
}
+
+ lnw_irq_init_hw(lnw);
+
irq_set_handler_data(pdev->irq, lnw);
irq_set_chained_handler(pdev->irq, lnw_irq_handler);
- for (i = 0; i < lnw->chip.ngpio; i++) {
- irq_set_chip_and_handler_name(i + lnw->irq_base, &lnw_irqchip,
- handle_simple_irq, "demux");
- irq_set_chip_data(i + lnw->irq_base, lnw);
- }
spin_lock_init(&lnw->lock);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
- goto done;
-err5:
- kfree(lnw);
-err4:
- iounmap(base);
+ return 0;
+
err3:
pci_release_regions(pdev);
err2:
pci_disable_device(pdev);
-done:
return retval;
}
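With the irq_domain conversion above, a consumer no longer needs to know an irq_base; it translates the GPIO and requests the interrupt as usual. A sketch with a hypothetical offset, handler and device:

	int irq = gpio_to_irq(gpio_base + 7);	/* lazily creates the mapping */
	if (irq < 0)
		return irq;
	ret = request_irq(irq, my_handler, IRQF_TRIGGER_RISING, "my-dev", dev);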
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 61c2d08d37b6..c2199beca98a 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -21,6 +21,9 @@
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <mach/hardware.h>
#include <mach/platform.h>
@@ -454,10 +457,57 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
},
};
+/* Empty now, can be removed later when mach-lpc32xx is finally switched over
+ * to DT support
+ */
void __init lpc32xx_gpio_init(void)
{
+}
+
+static int lpc32xx_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
+{
+ /* Is this the correct bank? */
+ u32 bank = gpiospec->args[0];
+ if ((bank >= ARRAY_SIZE(lpc32xx_gpiochip) ||
+ (gc != &lpc32xx_gpiochip[bank].chip)))
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[2];
+ return gpiospec->args[1];
+}
+
+static int __devinit lpc32xx_gpio_probe(struct platform_device *pdev)
+{
int i;
- for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++)
+ for (i = 0; i < ARRAY_SIZE(lpc32xx_gpiochip); i++) {
+ if (pdev->dev.of_node) {
+ lpc32xx_gpiochip[i].chip.of_xlate = lpc32xx_of_xlate;
+ lpc32xx_gpiochip[i].chip.of_gpio_n_cells = 3;
+ lpc32xx_gpiochip[i].chip.of_node = pdev->dev.of_node;
+ }
gpiochip_add(&lpc32xx_gpiochip[i].chip);
+ }
+
+ return 0;
}
+
+#ifdef CONFIG_OF
+static struct of_device_id lpc32xx_gpio_of_match[] __devinitdata = {
+ { .compatible = "nxp,lpc3220-gpio", },
+ { },
+};
+#endif
+
+static struct platform_driver lpc32xx_gpio_driver = {
+ .driver = {
+ .name = "lpc32xx-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lpc32xx_gpio_of_match),
+ },
+ .probe = lpc32xx_gpio_probe,
+};
+
+module_platform_driver(lpc32xx_gpio_driver);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index c5d83a8a91c2..0f425189de11 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -353,7 +353,7 @@ static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
chip->base + t, bank, t, label,
(mcp->cache[MCP_IODIR] & mask) ? "in " : "out",
(mcp->cache[MCP_GPIO] & mask) ? "hi" : "lo",
- (mcp->cache[MCP_GPPU] & mask) ? " " : "up");
+ (mcp->cache[MCP_GPPU] & mask) ? "up" : " ");
/* NOTE: ignoring the irq-related registers */
seq_printf(s, "\n");
}
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index f0febe5b8221..db01f151d41c 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -611,17 +611,7 @@ static struct pci_driver ioh_gpio_driver = {
.resume = ioh_gpio_resume
};
-static int __init ioh_gpio_pci_init(void)
-{
- return pci_register_driver(&ioh_gpio_driver);
-}
-module_init(ioh_gpio_pci_init);
-
-static void __exit ioh_gpio_pci_exit(void)
-{
- pci_unregister_driver(&ioh_gpio_driver);
-}
-module_exit(ioh_gpio_pci_exit);
+module_pci_driver(ioh_gpio_driver);
MODULE_DESCRIPTION("OKI SEMICONDUCTOR ML-IOH series GPIO Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
new file mode 100644
index 000000000000..2983dfbd0668
--- /dev/null
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -0,0 +1,158 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * By attaching hardware latches to the EBU it is possible to create output
+ * only GPIOs. This driver configures a special memory address; writing
+ * to it drives 16 bits out to the latches.
+ */
+
+#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
+#define LTQ_EBU_WP 0x80000000 /* write protect bit */
+
+struct ltq_mm {
+ struct of_mm_gpio_chip mmchip;
+ u16 shadow; /* shadow the latches state */
+};
+
+/**
+ * ltq_mm_apply() - write the shadow value to the ebu address.
+ * @chip: Pointer to our private data structure.
+ *
+ * Write the shadow value to the EBU to set the GPIOs. We need to take the
+ * global EBU lock to make sure that PCI/MTD don't break.
+ */
+static void ltq_mm_apply(struct ltq_mm *chip)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
+ __raw_writew(chip->shadow, chip->mmchip.regs);
+ ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+/**
+ * ltq_mm_set() - gpio_chip->set - set gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to the specified signal.
+ *
+ * Set the shadow value and call ltq_mm_apply.
+ */
+static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct ltq_mm *chip =
+ container_of(mm_gc, struct ltq_mm, mmchip);
+
+ if (value)
+ chip->shadow |= (1 << offset);
+ else
+ chip->shadow &= ~(1 << offset);
+ ltq_mm_apply(chip);
+}
+
+/**
+ * ltq_mm_dir_out() - gpio_chip->direction_output - set gpio direction.
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to the specified signal.
+ *
+ * Same as ltq_mm_set, always returns 0.
+ */
+static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
+{
+ ltq_mm_set(gc, offset, value);
+
+ return 0;
+}
+
+/**
+ * ltq_mm_save_regs() - Set initial values of GPIO pins
+ * @mm_gc: pointer to memory mapped GPIO chip structure
+ */
+static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct ltq_mm *chip =
+ container_of(mm_gc, struct ltq_mm, mmchip);
+
+ /* tell the ebu controller which memory address we will be using */
+ ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
+
+ ltq_mm_apply(chip);
+}
+
+static int ltq_mm_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct ltq_mm *chip;
+ const __be32 *shadow;
+ int ret = 0;
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ return -ENOENT;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->mmchip.gc.ngpio = 16;
+ chip->mmchip.gc.label = "gpio-mm-ltq";
+ chip->mmchip.gc.direction_output = ltq_mm_dir_out;
+ chip->mmchip.gc.set = ltq_mm_set;
+ chip->mmchip.save_regs = ltq_mm_save_regs;
+
+ /* store the shadow value if one was passed by the devicetree */
+ shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+ if (shadow)
+ chip->shadow = be32_to_cpu(*shadow);
+
+ ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
+ if (ret)
+ kfree(chip);
+ return ret;
+}
+
+static const struct of_device_id ltq_mm_match[] = {
+ { .compatible = "lantiq,gpio-mm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mm_match);
+
+static struct platform_driver ltq_mm_driver = {
+ .probe = ltq_mm_probe,
+ .driver = {
+ .name = "gpio-mm-ltq",
+ .owner = THIS_MODULE,
+ .of_match_table = ltq_mm_match,
+ },
+};
+
+static int __init ltq_mm_init(void)
+{
+ return platform_driver_register(&ltq_mm_driver);
+}
+
+subsys_initcall(ltq_mm_init);
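Because the EBU latches are write-only, every write from a consumer goes through the driver's 16-bit shadow word. A hypothetical user of one of these lines (chip_base and the "pcie-reset" label are made up) would simply do:

	/* gpio_direction_output() ends up in ltq_mm_dir_out() -> ltq_mm_set(),
	 * which updates the shadow and rewrites the whole word via the EBU. */
	gpio_request(chip_base + 2, "pcie-reset");
	gpio_direction_output(chip_base + 2, 1);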
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index e6568c19c939..5a1817eedd1b 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -163,7 +163,8 @@ static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
if (mask)
generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
32 - ffs(mask)));
- chip->irq_eoi(&desc->irq_data);
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
}
static void mpc8xxx_irq_unmask(struct irq_data *d)
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
new file mode 100644
index 000000000000..71a838f44501
--- /dev/null
+++ b/drivers/gpio/gpio-msic.c
@@ -0,0 +1,339 @@
+/*
+ * Intel Medfield MSIC GPIO driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Based on intel_pmic_gpio.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_msic.h>
+
+/* the offset for the mapping of global gpio pin to irq */
+#define MSIC_GPIO_IRQ_OFFSET 0x100
+
+#define MSIC_GPIO_DIR_IN 0
+#define MSIC_GPIO_DIR_OUT BIT(5)
+#define MSIC_GPIO_TRIG_FALL BIT(1)
+#define MSIC_GPIO_TRIG_RISE BIT(2)
+
+/* masks for msic gpio output GPIOxxxxCTLO registers */
+#define MSIC_GPIO_DIR_MASK BIT(5)
+#define MSIC_GPIO_DRV_MASK BIT(4)
+#define MSIC_GPIO_REN_MASK BIT(3)
+#define MSIC_GPIO_RVAL_MASK (BIT(2) | BIT(1))
+#define MSIC_GPIO_DOUT_MASK BIT(0)
+
+/* masks for msic gpio input GPIOxxxxCTLI registers */
+#define MSIC_GPIO_GLBYP_MASK BIT(5)
+#define MSIC_GPIO_DBNC_MASK (BIT(4) | BIT(3))
+#define MSIC_GPIO_INTCNT_MASK (BIT(2) | BIT(1))
+#define MSIC_GPIO_DIN_MASK BIT(0)
+
+#define MSIC_NUM_GPIO 24
+
+struct msic_gpio {
+ struct platform_device *pdev;
+ struct mutex buslock;
+ struct gpio_chip chip;
+ int irq;
+ unsigned irq_base;
+ unsigned long trig_change_mask;
+ unsigned trig_type;
+};
+
+/*
+ * MSIC has 24 gpios, 16 low voltage (1.2-1.8v) and 8 high voltage (3v).
+ * Both the high and low voltage GPIOs are divided into two banks.
+ * GPIOs are numbered with GPIO0LV0 as gpio_base in the following order:
+ * GPIO0LV0..GPIO0LV7: low voltage, bank 0, gpio_base
+ * GPIO1LV0..GPIO1LV7: low voltage, bank 1, gpio_base + 8
+ * GPIO0HV0..GPIO0HV3: high voltage, bank 0, gpio_base + 16
+ * GPIO1HV0..GPIO1HV3: high voltage, bank 1, gpio_base + 20
+ */
+
+static int msic_gpio_to_ireg(unsigned offset)
+{
+ if (offset >= MSIC_NUM_GPIO)
+ return -EINVAL;
+
+ if (offset < 8)
+ return INTEL_MSIC_GPIO0LV0CTLI - offset;
+ if (offset < 16)
+ return INTEL_MSIC_GPIO1LV0CTLI - offset + 8;
+ if (offset < 20)
+ return INTEL_MSIC_GPIO0HV0CTLI - offset + 16;
+
+ return INTEL_MSIC_GPIO1HV0CTLI - offset + 20;
+}
+
+static int msic_gpio_to_oreg(unsigned offset)
+{
+ if (offset >= MSIC_NUM_GPIO)
+ return -EINVAL;
+
+ if (offset < 8)
+ return INTEL_MSIC_GPIO0LV0CTLO - offset;
+ if (offset < 16)
+ return INTEL_MSIC_GPIO1LV0CTLO - offset + 8;
+ if (offset < 20)
+ return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
+
+ return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
+}
+
+static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ int reg;
+
+ reg = msic_gpio_to_oreg(offset);
+ if (reg < 0)
+ return reg;
+
+ return intel_msic_reg_update(reg, MSIC_GPIO_DIR_IN, MSIC_GPIO_DIR_MASK);
+}
+
+static int msic_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ int reg;
+ unsigned mask;
+
+ value = (!!value) | MSIC_GPIO_DIR_OUT;
+ mask = MSIC_GPIO_DIR_MASK | MSIC_GPIO_DOUT_MASK;
+
+ reg = msic_gpio_to_oreg(offset);
+ if (reg < 0)
+ return reg;
+
+ return intel_msic_reg_update(reg, value, mask);
+}
+
+static int msic_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ u8 r;
+ int ret;
+ int reg;
+
+ reg = msic_gpio_to_ireg(offset);
+ if (reg < 0)
+ return reg;
+
+ ret = intel_msic_reg_read(reg, &r);
+ if (ret < 0)
+ return ret;
+
+ return r & MSIC_GPIO_DIN_MASK;
+}
+
+static void msic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ int reg;
+
+ reg = msic_gpio_to_oreg(offset);
+ if (reg < 0)
+ return;
+
+ intel_msic_reg_update(reg, !!value, MSIC_GPIO_DOUT_MASK);
+}
+
+/*
+ * This is called from genirq with mg->buslock locked and
+ * irq_desc->lock held. We can not access the scu bus here, so we
+ * store the change and update in the bus_sync_unlock() function below
+ */
+static int msic_irq_type(struct irq_data *data, unsigned type)
+{
+ struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
+ u32 gpio = data->irq - mg->irq_base;
+
+ if (gpio >= mg->chip.ngpio)
+ return -EINVAL;
+
+ /* mark for which gpio the trigger changed, protected by buslock */
+ mg->trig_change_mask |= (1 << gpio);
+ mg->trig_type = type;
+
+ return 0;
+}
+
+static int msic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct msic_gpio *mg = container_of(chip, struct msic_gpio, chip);
+ return mg->irq_base + offset;
+}
+
+static void msic_bus_lock(struct irq_data *data)
+{
+ struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
+ mutex_lock(&mg->buslock);
+}
+
+static void msic_bus_sync_unlock(struct irq_data *data)
+{
+ struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
+ int offset;
+ int reg;
+ u8 trig = 0;
+
+ /* We can only get one change at a time as the buslock covers the
+ * entire transaction. The irq_desc->lock is dropped before we are
+ * called, but that is fine. */
+ if (mg->trig_change_mask) {
+ offset = __ffs(mg->trig_change_mask);
+
+ reg = msic_gpio_to_ireg(offset);
+ if (reg < 0)
+ goto out;
+
+ if (mg->trig_type & IRQ_TYPE_EDGE_RISING)
+ trig |= MSIC_GPIO_TRIG_RISE;
+ if (mg->trig_type & IRQ_TYPE_EDGE_FALLING)
+ trig |= MSIC_GPIO_TRIG_FALL;
+
+ intel_msic_reg_update(reg, trig, MSIC_GPIO_INTCNT_MASK);
+ mg->trig_change_mask = 0;
+ }
+out:
+ mutex_unlock(&mg->buslock);
+}
+
+/* Firmware does all the masking and unmasking for us, no masking here. */
+static void msic_irq_unmask(struct irq_data *data) { }
+
+static void msic_irq_mask(struct irq_data *data) { }
+
+static struct irq_chip msic_irqchip = {
+ .name = "MSIC-GPIO",
+ .irq_mask = msic_irq_mask,
+ .irq_unmask = msic_irq_unmask,
+ .irq_set_type = msic_irq_type,
+ .irq_bus_lock = msic_bus_lock,
+ .irq_bus_sync_unlock = msic_bus_sync_unlock,
+};
+
+static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
+ int i;
+ int bitnr;
+ u8 pin;
+ unsigned long pending = 0;
+
+ for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
+ intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin);
+ pending = pin;
+
+ if (pending) {
+ for_each_set_bit(bitnr, &pending, BITS_PER_BYTE)
+ generic_handle_irq(mg->irq_base +
+ (i * BITS_PER_BYTE) + bitnr);
+ }
+ }
+ chip->irq_eoi(data);
+}
+
+static int __devinit platform_msic_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_msic_gpio_pdata *pdata = dev->platform_data;
+ struct msic_gpio *mg;
+ int irq = platform_get_irq(pdev, 0);
+ int retval;
+ int i;
+
+ if (irq < 0) {
+ dev_err(dev, "no IRQ line\n");
+ return -EINVAL;
+ }
+
+ if (!pdata || !pdata->gpio_base) {
+ dev_err(dev, "incorrect or missing platform data\n");
+ return -EINVAL;
+ }
+
+ mg = kzalloc(sizeof(*mg), GFP_KERNEL);
+ if (!mg)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, mg);
+
+ mg->pdev = pdev;
+ mg->irq = irq;
+ mg->irq_base = pdata->gpio_base + MSIC_GPIO_IRQ_OFFSET;
+ mg->chip.label = "msic_gpio";
+ mg->chip.direction_input = msic_gpio_direction_input;
+ mg->chip.direction_output = msic_gpio_direction_output;
+ mg->chip.get = msic_gpio_get;
+ mg->chip.set = msic_gpio_set;
+ mg->chip.to_irq = msic_gpio_to_irq;
+ mg->chip.base = pdata->gpio_base;
+ mg->chip.ngpio = MSIC_NUM_GPIO;
+ mg->chip.can_sleep = 1;
+ mg->chip.dev = dev;
+
+ mutex_init(&mg->buslock);
+
+ retval = gpiochip_add(&mg->chip);
+ if (retval) {
+ dev_err(dev, "Adding MSIC gpio chip failed\n");
+ goto err;
+ }
+
+ for (i = 0; i < mg->chip.ngpio; i++) {
+ irq_set_chip_data(i + mg->irq_base, mg);
+ irq_set_chip_and_handler_name(i + mg->irq_base,
+ &msic_irqchip,
+ handle_simple_irq,
+ "demux");
+ }
+ irq_set_chained_handler(mg->irq, msic_gpio_irq_handler);
+ irq_set_handler_data(mg->irq, mg);
+
+ return 0;
+err:
+ kfree(mg);
+ return retval;
+}
+
+static struct platform_driver platform_msic_gpio_driver = {
+ .driver = {
+ .name = "msic_gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = platform_msic_gpio_probe,
+};
+
+static int __init platform_msic_gpio_init(void)
+{
+ return platform_driver_register(&platform_msic_gpio_driver);
+}
+
+subsys_initcall(platform_msic_gpio_init);
+
+MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Medfield MSIC GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index e79147634573..c89c4c1e668d 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -398,10 +398,12 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
writel(~0, port->base + GPIO_ISR);
if (mxc_gpio_hwtype == IMX21_GPIO) {
- /* setup one handler for all GPIO interrupts */
- if (pdev->id == 0)
- irq_set_chained_handler(port->irq,
- mx2_gpio_irq_handler);
+ /*
+ * Setup one handler for all GPIO interrupts. Actually setting
+ * the handler is needed only once, but doing it for every port
+ * is more robust and easier.
+ */
+ irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
} else {
/* setup one handler for each entry */
irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
@@ -417,7 +419,7 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
err = bgpio_init(&port->bgc, &pdev->dev, 4,
port->base + GPIO_PSR,
port->base + GPIO_DR, NULL,
- port->base + GPIO_GDIR, NULL, false);
+ port->base + GPIO_GDIR, NULL, 0);
if (err)
goto out_iounmap;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 385c58e8405b..39e495669961 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -25,23 +25,25 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/basic_mmio_gpio.h>
#include <linux/module.h>
-#include <mach/mxs.h>
#define MXS_SET 0x4
#define MXS_CLR 0x8
-#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
-#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
-#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
-#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
-#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
-#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
-#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
-#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
+#define PINCTRL_DOUT(p) ((is_imx23_gpio(p) ? 0x0500 : 0x0700) + (p->id) * 0x10)
+#define PINCTRL_DIN(p) ((is_imx23_gpio(p) ? 0x0600 : 0x0900) + (p->id) * 0x10)
+#define PINCTRL_DOE(p) ((is_imx23_gpio(p) ? 0x0700 : 0x0b00) + (p->id) * 0x10)
+#define PINCTRL_PIN2IRQ(p) ((is_imx23_gpio(p) ? 0x0800 : 0x1000) + (p->id) * 0x10)
+#define PINCTRL_IRQEN(p) ((is_imx23_gpio(p) ? 0x0900 : 0x1100) + (p->id) * 0x10)
+#define PINCTRL_IRQLEV(p) ((is_imx23_gpio(p) ? 0x0a00 : 0x1200) + (p->id) * 0x10)
+#define PINCTRL_IRQPOL(p) ((is_imx23_gpio(p) ? 0x0b00 : 0x1300) + (p->id) * 0x10)
+#define PINCTRL_IRQSTAT(p) ((is_imx23_gpio(p) ? 0x0c00 : 0x1400) + (p->id) * 0x10)
#define GPIO_INT_FALL_EDGE 0x0
#define GPIO_INT_LOW_LEV 0x1
@@ -52,14 +54,30 @@
#define irq_to_gpio(irq) ((irq) - MXS_GPIO_IRQ_START)
+enum mxs_gpio_id {
+ IMX23_GPIO,
+ IMX28_GPIO,
+};
+
struct mxs_gpio_port {
void __iomem *base;
int id;
int irq;
int virtual_irq_start;
struct bgpio_chip bgc;
+ enum mxs_gpio_id devid;
};
+static inline int is_imx23_gpio(struct mxs_gpio_port *port)
+{
+ return port->devid == IMX23_GPIO;
+}
+
+static inline int is_imx28_gpio(struct mxs_gpio_port *port)
+{
+ return port->devid == IMX28_GPIO;
+}
+
/* Note: This driver assumes 32 GPIOs are handled in one register */
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
@@ -89,21 +107,21 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
}
/* set level or edge */
- pin_addr = port->base + PINCTRL_IRQLEV(port->id);
+ pin_addr = port->base + PINCTRL_IRQLEV(port);
if (edge & GPIO_INT_LEV_MASK)
writel(pin_mask, pin_addr + MXS_SET);
else
writel(pin_mask, pin_addr + MXS_CLR);
/* set polarity */
- pin_addr = port->base + PINCTRL_IRQPOL(port->id);
+ pin_addr = port->base + PINCTRL_IRQPOL(port);
if (edge & GPIO_INT_POL_MASK)
writel(pin_mask, pin_addr + MXS_SET);
else
writel(pin_mask, pin_addr + MXS_CLR);
writel(1 << (gpio & 0x1f),
- port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+ port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
return 0;
}
@@ -117,8 +135,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
desc->irq_data.chip->irq_ack(&desc->irq_data);
- irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) &
- readl(port->base + PINCTRL_IRQEN(port->id));
+ irq_stat = readl(port->base + PINCTRL_IRQSTAT(port)) &
+ readl(port->base + PINCTRL_IRQEN(port));
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
@@ -164,8 +182,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = mxs_gpio_set_irq_type;
ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
- ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
- ct->regs.mask = PINCTRL_IRQEN(port->id);
+ ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+ ct->regs.mask = PINCTRL_IRQEN(port);
irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
}
@@ -179,60 +197,83 @@ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return port->virtual_irq_start + offset;
}
+static struct platform_device_id mxs_gpio_ids[] = {
+ {
+ .name = "imx23-gpio",
+ .driver_data = IMX23_GPIO,
+ }, {
+ .name = "imx28-gpio",
+ .driver_data = IMX28_GPIO,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_gpio_ids);
+
+static const struct of_device_id mxs_gpio_dt_ids[] = {
+ { .compatible = "fsl,imx23-gpio", .data = (void *) IMX23_GPIO, },
+ { .compatible = "fsl,imx28-gpio", .data = (void *) IMX28_GPIO, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
+
static int __devinit mxs_gpio_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_gpio_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *parent;
static void __iomem *base;
struct mxs_gpio_port *port;
struct resource *iores = NULL;
int err;
- port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL);
+ port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
- port->id = pdev->id;
+ if (np) {
+ port->id = of_alias_get_id(np, "gpio");
+ if (port->id < 0)
+ return port->id;
+ port->devid = (enum mxs_gpio_id) of_id->data;
+ } else {
+ port->id = pdev->id;
+ port->devid = pdev->id_entry->driver_data;
+ }
port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
/*
* map memory region only once, as all the gpio ports
* share the same one
*/
if (!base) {
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores) {
- err = -ENODEV;
- goto out_kfree;
- }
-
- if (!request_mem_region(iores->start, resource_size(iores),
- pdev->name)) {
- err = -EBUSY;
- goto out_kfree;
- }
-
- base = ioremap(iores->start, resource_size(iores));
- if (!base) {
- err = -ENOMEM;
- goto out_release_mem;
+ if (np) {
+ parent = of_get_parent(np);
+ base = of_iomap(parent, 0);
+ of_node_put(parent);
+ } else {
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_request_and_ioremap(&pdev->dev, iores);
}
+ if (!base)
+ return -EADDRNOTAVAIL;
}
port->base = base;
- port->irq = platform_get_irq(pdev, 0);
- if (port->irq < 0) {
- err = -EINVAL;
- goto out_iounmap;
- }
-
/*
* select the pin interrupt functionality but initially
* disable the interrupts
*/
- writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id));
- writel(0, port->base + PINCTRL_IRQEN(port->id));
+ writel(~0U, port->base + PINCTRL_PIN2IRQ(port));
+ writel(0, port->base + PINCTRL_IRQEN(port));
/* clear address has to be used to clear IRQSTAT bits */
- writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+ writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
/* gpio-mxs can be a generic irq chip */
mxs_gpio_init_gc(port);
@@ -242,41 +283,32 @@ static int __devinit mxs_gpio_probe(struct platform_device *pdev)
irq_set_handler_data(port->irq, port);
err = bgpio_init(&port->bgc, &pdev->dev, 4,
- port->base + PINCTRL_DIN(port->id),
- port->base + PINCTRL_DOUT(port->id), NULL,
- port->base + PINCTRL_DOE(port->id), NULL, false);
+ port->base + PINCTRL_DIN(port),
+ port->base + PINCTRL_DOUT(port), NULL,
+ port->base + PINCTRL_DOE(port), NULL, 0);
if (err)
- goto out_iounmap;
+ return err;
port->bgc.gc.to_irq = mxs_gpio_to_irq;
port->bgc.gc.base = port->id * 32;
err = gpiochip_add(&port->bgc.gc);
- if (err)
- goto out_bgpio_remove;
+ if (err) {
+ bgpio_remove(&port->bgc);
+ return err;
+ }
return 0;
-
-out_bgpio_remove:
- bgpio_remove(&port->bgc);
-out_iounmap:
- if (iores)
- iounmap(port->base);
-out_release_mem:
- if (iores)
- release_mem_region(iores->start, resource_size(iores));
-out_kfree:
- kfree(port);
- dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
- return err;
}
static struct platform_driver mxs_gpio_driver = {
.driver = {
.name = "gpio-mxs",
.owner = THIS_MODULE,
+ .of_match_table = mxs_gpio_dt_ids,
},
.probe = mxs_gpio_probe,
+ .id_table = mxs_gpio_ids,
};
static int __init mxs_gpio_init(void)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 4461540653a8..4fbc208c32cf 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -57,14 +57,10 @@ struct gpio_bank {
u16 irq;
int irq_base;
struct irq_domain *domain;
- u32 suspend_wakeup;
- u32 saved_wakeup;
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
struct gpio_regs context;
u32 saved_datain;
- u32 saved_fallingdetect;
- u32 saved_risingdetect;
u32 level_mask;
u32 toggle_mask;
spinlock_t lock;
@@ -178,12 +174,22 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
if (bank->dbck_enable_mask && !bank->dbck_enabled) {
clk_enable(bank->dbck);
bank->dbck_enabled = true;
+
+ __raw_writel(bank->dbck_enable_mask,
+ bank->base + bank->regs->debounce_en);
}
}
static inline void _gpio_dbck_disable(struct gpio_bank *bank)
{
if (bank->dbck_enable_mask && bank->dbck_enabled) {
+ /*
+ * Disable debounce before cutting its clock. If debounce is
+ * enabled but the clock is not, the GPIO module seems to be unable
+ * to detect events and generate interrupts, at least on OMAP3.
+ */
+ __raw_writel(0, bank->base + bank->regs->debounce_en);
+
clk_disable(bank->dbck);
bank->dbck_enabled = false;
}
@@ -516,11 +522,11 @@ static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
spin_lock_irqsave(&bank->lock, flags);
if (enable)
- bank->suspend_wakeup |= gpio_bit;
+ bank->context.wake_en |= gpio_bit;
else
- bank->suspend_wakeup &= ~gpio_bit;
+ bank->context.wake_en &= ~gpio_bit;
- __raw_writel(bank->suspend_wakeup, bank->base + bank->regs->wkup_en);
+ __raw_writel(bank->context.wake_en, bank->base + bank->regs->wkup_en);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -640,7 +646,6 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
u32 isr;
unsigned int gpio_irq, gpio_index;
struct gpio_bank *bank;
- u32 retrigger = 0;
int unmasked = 0;
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -677,8 +682,6 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
- isr |= retrigger;
- retrigger = 0;
if (!isr)
break;
@@ -789,8 +792,7 @@ static int omap_mpuio_suspend_noirq(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- bank->saved_wakeup = __raw_readl(mask_reg);
- __raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
+ __raw_writel(0xffff & ~bank->context.wake_en, mask_reg);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -805,7 +807,7 @@ static int omap_mpuio_resume_noirq(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- __raw_writel(bank->saved_wakeup, mask_reg);
+ __raw_writel(bank->context.wake_en, mask_reg);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -1089,7 +1091,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
bank->is_mpuio = pdata->is_mpuio;
bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
bank->loses_context = pdata->loses_context;
- bank->get_context_loss_count = pdata->get_context_loss_count;
bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
bank->chip.of_node = of_node_get(node);
@@ -1143,6 +1144,9 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
omap_gpio_chip_init(bank);
omap_gpio_show_rev(bank);
+ if (bank->loses_context)
+ bank->get_context_loss_count = pdata->get_context_loss_count;
+
pm_runtime_put(bank->dev);
list_add_tail(&bank->node, &omap_gpio_list);
@@ -1152,54 +1156,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_OMAP2PLUS
-#if defined(CONFIG_PM_SLEEP)
-static int omap_gpio_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_bank *bank = platform_get_drvdata(pdev);
- void __iomem *base = bank->base;
- void __iomem *wakeup_enable;
- unsigned long flags;
-
- if (!bank->mod_usage || !bank->loses_context)
- return 0;
-
- if (!bank->regs->wkup_en || !bank->suspend_wakeup)
- return 0;
-
- wakeup_enable = bank->base + bank->regs->wkup_en;
-
- spin_lock_irqsave(&bank->lock, flags);
- bank->saved_wakeup = __raw_readl(wakeup_enable);
- _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
- _gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-
-static int omap_gpio_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_bank *bank = platform_get_drvdata(pdev);
- void __iomem *base = bank->base;
- unsigned long flags;
-
- if (!bank->mod_usage || !bank->loses_context)
- return 0;
-
- if (!bank->regs->wkup_en || !bank->saved_wakeup)
- return 0;
-
- spin_lock_irqsave(&bank->lock, flags);
- _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
- _gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
- spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank);
@@ -1233,6 +1189,9 @@ static int omap_gpio_runtime_suspend(struct device *dev)
__raw_writel(wake_hi | bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
+ if (!bank->enabled_non_wakeup_gpios)
+ goto update_gpio_context_count;
+
if (bank->power_mode != OFF_MODE) {
bank->power_mode = 0;
goto update_gpio_context_count;
@@ -1244,11 +1203,9 @@ static int omap_gpio_runtime_suspend(struct device *dev)
*/
bank->saved_datain = __raw_readl(bank->base +
bank->regs->datain);
- l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
- l2 = __raw_readl(bank->base + bank->regs->risingdetect);
+ l1 = bank->context.fallingdetect;
+ l2 = bank->context.risingdetect;
- bank->saved_fallingdetect = l1;
- bank->saved_risingdetect = l2;
l1 &= ~bank->enabled_non_wakeup_gpios;
l2 &= ~bank->enabled_non_wakeup_gpios;
@@ -1290,16 +1247,10 @@ static int omap_gpio_runtime_resume(struct device *dev)
__raw_writel(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
- if (!bank->workaround_enabled) {
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
-
if (bank->get_context_loss_count) {
context_lost_cnt_after =
bank->get_context_loss_count(bank->dev);
- if (context_lost_cnt_after != bank->context_loss_count ||
- !context_lost_cnt_after) {
+ if (context_lost_cnt_after != bank->context_loss_count) {
omap_gpio_restore_context(bank);
} else {
spin_unlock_irqrestore(&bank->lock, flags);
@@ -1307,9 +1258,14 @@ static int omap_gpio_runtime_resume(struct device *dev)
}
}
- __raw_writel(bank->saved_fallingdetect,
+ if (!bank->workaround_enabled) {
+ spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+ }
+
+ __raw_writel(bank->context.fallingdetect,
bank->base + bank->regs->fallingdetect);
- __raw_writel(bank->saved_risingdetect,
+ __raw_writel(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
l = __raw_readl(bank->base + bank->regs->datain);
@@ -1326,14 +1282,15 @@ static int omap_gpio_runtime_resume(struct device *dev)
* No need to generate IRQs for the rising edge for gpio IRQs
* configured with falling edge only; and vice versa.
*/
- gen0 = l & bank->saved_fallingdetect;
+ gen0 = l & bank->context.fallingdetect;
gen0 &= bank->saved_datain;
- gen1 = l & bank->saved_risingdetect;
+ gen1 = l & bank->context.risingdetect;
gen1 &= ~(bank->saved_datain);
/* FIXME: Consider GPIO IRQs with level detections properly! */
- gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
+ gen = l & (~(bank->context.fallingdetect) &
+ ~(bank->context.risingdetect));
/* Consider all GPIO IRQs needed to be updated */
gen |= gen0 | gen1;
@@ -1343,14 +1300,14 @@ static int omap_gpio_runtime_resume(struct device *dev)
old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
old1 = __raw_readl(bank->base + bank->regs->leveldetect1);
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ if (!bank->regs->irqstatus_raw0) {
__raw_writel(old0 | gen, bank->base +
bank->regs->leveldetect0);
__raw_writel(old1 | gen, bank->base +
bank->regs->leveldetect1);
}
- if (cpu_is_omap44xx()) {
+ if (bank->regs->irqstatus_raw0) {
__raw_writel(old0 | l, bank->base +
bank->regs->leveldetect0);
__raw_writel(old1 | l, bank->base +
@@ -1429,14 +1386,11 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
}
#endif /* CONFIG_PM_RUNTIME */
#else
-#define omap_gpio_suspend NULL
-#define omap_gpio_resume NULL
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
#endif
static const struct dev_pm_ops gpio_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
NULL)
};
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d3f3e8f54561..1c313c710be3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -28,6 +28,8 @@
#define PCA953X_INVERT 2
#define PCA953X_DIRECTION 3
+#define REG_ADDR_AI 0x80
+
#define PCA957X_IN 0
#define PCA957X_INVRT 1
#define PCA957X_BKEN 2
@@ -63,15 +65,15 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
- /* NYET: { "tca6424", 24, }, */
+ { "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
{ }
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
struct pca953x_chip {
unsigned gpio_start;
- uint16_t reg_output;
- uint16_t reg_direction;
+ u32 reg_output;
+ u32 reg_direction;
struct mutex i2c_lock;
#ifdef CONFIG_GPIO_PCA953X_IRQ
@@ -89,12 +91,20 @@ struct pca953x_chip {
int chip_type;
};
-static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
+static int pca953x_write_reg(struct pca953x_chip *chip, int reg, u32 val)
{
int ret = 0;
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ else if (chip->gpio_chip.ngpio == 24) {
+ ret = i2c_smbus_write_word_data(chip->client,
+ (reg << 2) | REG_ADDR_AI,
+ val & 0xffff);
+ ret = i2c_smbus_write_byte_data(chip->client,
+ (reg << 2) + 2,
+ (val & 0xff0000) >> 16);
+ }
else {
switch (chip->chip_type) {
case PCA953X_TYPE:
@@ -121,12 +131,17 @@ static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
return 0;
}
-static int pca953x_read_reg(struct pca953x_chip *chip, int reg, uint16_t *val)
+static int pca953x_read_reg(struct pca953x_chip *chip, int reg, u32 *val)
{
int ret;
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_read_byte_data(chip->client, reg);
+ else if (chip->gpio_chip.ngpio == 24) {
+ ret = i2c_smbus_read_word_data(chip->client, reg << 2);
+ ret |= (i2c_smbus_read_byte_data(chip->client,
+ (reg << 2) + 2)<<16);
+ }
else
ret = i2c_smbus_read_word_data(chip->client, reg << 1);
@@ -135,14 +150,14 @@ static int pca953x_read_reg(struct pca953x_chip *chip, int reg, uint16_t *val)
return ret;
}
- *val = (uint16_t)ret;
+ *val = (u32)ret;
return 0;
}
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
- uint16_t reg_val;
+ u32 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -173,7 +188,7 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip;
- uint16_t reg_val;
+ u32 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -223,7 +238,7 @@ exit:
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip;
- uint16_t reg_val;
+ u32 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -253,7 +268,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip;
- uint16_t reg_val;
+ u32 reg_val;
int ret, offset = 0;
chip = container_of(gc, struct pca953x_chip, gpio_chip);
@@ -386,7 +401,7 @@ static struct irq_chip pca953x_irq_chip = {
static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
{
- uint16_t cur_stat;
+ u32 cur_stat;
uint16_t old_stat;
uint16_t pending;
uint16_t trigger;
@@ -449,6 +464,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
{
struct i2c_client *client = chip->client;
int ret, offset = 0;
+ u32 temporary;
if (irq_base != -1
&& (id->driver_data & PCA_INT)) {
@@ -462,7 +478,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
offset = PCA957X_IN;
break;
}
- ret = pca953x_read_reg(chip, offset, &chip->irq_stat);
+ ret = pca953x_read_reg(chip, offset, &temporary);
+ chip->irq_stat = temporary;
if (ret)
goto out_failed;
@@ -603,7 +620,7 @@ out:
static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
{
int ret;
- uint16_t val = 0;
+ u32 val = 0;
/* Let every port in proper state, that could save power */
pca953x_write_reg(chip, PCA957X_PUPD, 0x0);
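Aside: a minimal sketch of the 24-bit access pattern introduced above for the tca6424 — a 16-bit SMBus word read at the auto-increment address plus a byte read for the top bits. The helper name and standalone form are illustrative assumptions; only the (reg << 2) addressing mirrors the patch.

#include <linux/i2c.h>

/* Illustrative helper (not part of the patch): rebuild a 24-bit register
 * value from the two SMBus transfers used for the tca6424 above. */
static int example_read_reg24(struct i2c_client *client, int reg, u32 *val)
{
	int lo, hi;

	lo = i2c_smbus_read_word_data(client, reg << 2);	/* bits 0..15 */
	if (lo < 0)
		return lo;

	hi = i2c_smbus_read_byte_data(client, (reg << 2) + 2);	/* bits 16..23 */
	if (hi < 0)
		return hi;

	*val = (u32)lo | ((u32)hi << 16);
	return 0;
}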
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 2cd958e0b822..139ad3e20011 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -538,17 +538,7 @@ static struct pci_driver pch_gpio_driver = {
.resume = pch_gpio_resume
};
-static int __init pch_gpio_pci_init(void)
-{
- return pci_register_driver(&pch_gpio_driver);
-}
-module_init(pch_gpio_pci_init);
-
-static void __exit pch_gpio_pci_exit(void)
-{
- pci_unregister_driver(&pch_gpio_driver);
-}
-module_exit(pch_gpio_pci_exit);
+module_pci_driver(pch_gpio_driver);
MODULE_DESCRIPTION("PCH GPIO PCI Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
new file mode 100644
index 000000000000..08428bf17718
--- /dev/null
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -0,0 +1,180 @@
+/*
+ * GPIO driver for RICOH583 power management chip.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * Based on code
+ * Copyright (C) 2011 RICOH COMPANY,LTD
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/mfd/rc5t583.h>
+
+struct rc5t583_gpio {
+ struct gpio_chip gpio_chip;
+ struct rc5t583 *rc5t583;
+};
+
+static inline struct rc5t583_gpio *to_rc5t583_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct rc5t583_gpio, gpio_chip);
+}
+
+static int rc5t583_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+ uint8_t val = 0;
+ int ret;
+
+ ret = rc5t583_read(parent, RC5T583_GPIO_MON_IOIN, &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+ if (val)
+ rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+ else
+ rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+}
+
+static int rc5t583_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+ int ret;
+
+ ret = rc5t583_clear_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset));
+ if (ret < 0)
+ return ret;
+
+ /* Set pin to gpio mode */
+ return rc5t583_clear_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset));
+}
+
+static int rc5t583_gpio_dir_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+ int ret;
+
+ rc5t583_gpio_set(gc, offset, value);
+ ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset));
+ if (ret < 0)
+ return ret;
+
+ /* Set pin to gpio mode */
+ return rc5t583_clear_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset));
+}
+
+static int rc5t583_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+
+ if ((offset >= 0) && (offset < 8))
+ return rc5t583_gpio->rc5t583->irq_base +
+ RC5T583_IRQ_GPIO0 + offset;
+ return -EINVAL;
+}
+
+static void rc5t583_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+ struct rc5t583_gpio *rc5t583_gpio = to_rc5t583_gpio(gc);
+ struct device *parent = rc5t583_gpio->rc5t583->dev;
+
+ rc5t583_set_bits(parent, RC5T583_GPIO_PGSEL, BIT(offset));
+}
+
+static int __devinit rc5t583_gpio_probe(struct platform_device *pdev)
+{
+ struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
+ struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
+ struct rc5t583_gpio *rc5t583_gpio;
+
+ rc5t583_gpio = devm_kzalloc(&pdev->dev, sizeof(*rc5t583_gpio),
+ GFP_KERNEL);
+ if (!rc5t583_gpio) {
+ dev_warn(&pdev->dev, "Mem allocation for rc5t583_gpio failed\n");
+ return -ENOMEM;
+ }
+
+ rc5t583_gpio->gpio_chip.label = "gpio-rc5t583";
+ rc5t583_gpio->gpio_chip.owner = THIS_MODULE;
+ rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free;
+ rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input;
+ rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output;
+ rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set;
+ rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get;
+ rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq;
+ rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO;
+ rc5t583_gpio->gpio_chip.can_sleep = 1;
+ rc5t583_gpio->gpio_chip.dev = &pdev->dev;
+ rc5t583_gpio->gpio_chip.base = -1;
+ rc5t583_gpio->rc5t583 = rc5t583;
+
+ if (pdata && pdata->gpio_base)
+ rc5t583_gpio->gpio_chip.base = pdata->gpio_base;
+
+ platform_set_drvdata(pdev, rc5t583_gpio);
+
+ return gpiochip_add(&rc5t583_gpio->gpio_chip);
+}
+
+static int __devexit rc5t583_gpio_remove(struct platform_device *pdev)
+{
+ struct rc5t583_gpio *rc5t583_gpio = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&rc5t583_gpio->gpio_chip);
+}
+
+static struct platform_driver rc5t583_gpio_driver = {
+ .driver = {
+ .name = "rc5t583-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = rc5t583_gpio_probe,
+ .remove = __devexit_p(rc5t583_gpio_remove),
+};
+
+static int __init rc5t583_gpio_init(void)
+{
+ return platform_driver_register(&rc5t583_gpio_driver);
+}
+subsys_initcall(rc5t583_gpio_init);
+
+static void __exit rc5t583_gpio_exit(void)
+{
+ platform_driver_unregister(&rc5t583_gpio_driver);
+}
+module_exit(rc5t583_gpio_exit);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("GPIO interface for RC5T583");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rc5t583-gpio");
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index e991d9171961..b6453d0e44ad 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,6 +2454,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
},
}, {
.chip = {
+ .base = EXYNOS5_GPC4(0),
+ .ngpio = EXYNOS5_GPIO_C4_NR,
+ .label = "GPC4",
+ },
+ }, {
+ .chip = {
.base = EXYNOS5_GPD0(0),
.ngpio = EXYNOS5_GPIO_D0_NR,
.label = "GPD0",
@@ -2716,14 +2722,227 @@ static __init void exynos_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
}
#endif /* defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF) */
+static __init void exynos4_gpiolib_init(void)
+{
+#ifdef CONFIG_CPU_EXYNOS4210
+ struct samsung_gpio_chip *chip;
+ int i, nr_chips;
+ void __iomem *gpio_base1, *gpio_base2, *gpio_base3;
+ int group = 0;
+ void __iomem *gpx_base;
+
+ /* gpio part1 */
+ gpio_base1 = ioremap(EXYNOS4_PA_GPIO1, SZ_4K);
+ if (gpio_base1 == NULL) {
+ pr_err("unable to ioremap for gpio_base1\n");
+ goto err_ioremap1;
+ }
+
+ chip = exynos4_gpios_1;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_1);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO1, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_1,
+ nr_chips, gpio_base1);
+
+ /* gpio part2 */
+ gpio_base2 = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);
+ if (gpio_base2 == NULL) {
+ pr_err("unable to ioremap for gpio_base2\n");
+ goto err_ioremap2;
+ }
+
+ /* need to set base address for gpx */
+ chip = &exynos4_gpios_2[16];
+ gpx_base = gpio_base2 + 0xC00;
+ for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
+ chip->base = gpx_base;
+
+ chip = exynos4_gpios_2;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_2);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO2, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_2,
+ nr_chips, gpio_base2);
+
+ /* gpio part3 */
+ gpio_base3 = ioremap(EXYNOS4_PA_GPIO3, SZ_256);
+ if (gpio_base3 == NULL) {
+ pr_err("unable to ioremap for gpio_base3\n");
+ goto err_ioremap3;
+ }
+
+ chip = exynos4_gpios_3;
+ nr_chips = ARRAY_SIZE(exynos4_gpios_3);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO3, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos4_gpios_3,
+ nr_chips, gpio_base3);
+
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_S5P_GPIO_INT)
+ s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
+ s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
+#endif
+
+ return;
+
+err_ioremap3:
+ iounmap(gpio_base2);
+err_ioremap2:
+ iounmap(gpio_base1);
+err_ioremap1:
+ return;
+#endif /* CONFIG_CPU_EXYNOS4210 */
+}
+
+static __init void exynos5_gpiolib_init(void)
+{
+#ifdef CONFIG_SOC_EXYNOS5250
+ struct samsung_gpio_chip *chip;
+ int i, nr_chips;
+ void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
+ int group = 0;
+ void __iomem *gpx_base;
+
+ /* gpio part1 */
+ gpio_base1 = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
+ if (gpio_base1 == NULL) {
+ pr_err("unable to ioremap for gpio_base1\n");
+ goto err_ioremap1;
+ }
+
+ /* need to set base address for gpc4 */
+ exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+
+ /* need to set base address for gpx */
+ chip = &exynos5_gpios_1[21];
+ gpx_base = gpio_base1 + 0xC00;
+ for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
+ chip->base = gpx_base;
+
+ chip = exynos5_gpios_1;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_1);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO1, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_1,
+ nr_chips, gpio_base1);
+
+ /* gpio part2 */
+ gpio_base2 = ioremap(EXYNOS5_PA_GPIO2, SZ_4K);
+ if (gpio_base2 == NULL) {
+ pr_err("unable to ioremap for gpio_base2\n");
+ goto err_ioremap2;
+ }
+
+ chip = exynos5_gpios_2;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_2);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO2, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_2,
+ nr_chips, gpio_base2);
+
+ /* gpio part3 */
+ gpio_base3 = ioremap(EXYNOS5_PA_GPIO3, SZ_4K);
+ if (gpio_base3 == NULL) {
+ pr_err("unable to ioremap for gpio_base3\n");
+ goto err_ioremap3;
+ }
+
+ /* need to set base address for gpv */
+ exynos5_gpios_3[0].base = gpio_base3;
+ exynos5_gpios_3[1].base = gpio_base3 + 0x20;
+ exynos5_gpios_3[2].base = gpio_base3 + 0x60;
+ exynos5_gpios_3[3].base = gpio_base3 + 0x80;
+ exynos5_gpios_3[4].base = gpio_base3 + 0xC0;
+
+ chip = exynos5_gpios_3;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_3);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO3, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_3,
+ nr_chips, gpio_base3);
+
+ /* gpio part4 */
+ gpio_base4 = ioremap(EXYNOS5_PA_GPIO4, SZ_4K);
+ if (gpio_base4 == NULL) {
+ pr_err("unable to ioremap for gpio_base4\n");
+ goto err_ioremap4;
+ }
+
+ chip = exynos5_gpios_4;
+ nr_chips = ARRAY_SIZE(exynos5_gpios_4);
+
+ for (i = 0; i < nr_chips; i++, chip++) {
+ if (!chip->config) {
+ chip->config = &exynos_gpio_cfg;
+ chip->group = group++;
+ }
+ exynos_gpiolib_attach_ofnode(chip,
+ EXYNOS5_PA_GPIO4, i * 0x20);
+ }
+ samsung_gpiolib_add_4bit_chips(exynos5_gpios_4,
+ nr_chips, gpio_base4);
+ return;
+
+err_ioremap4:
+ iounmap(gpio_base3);
+err_ioremap3:
+ iounmap(gpio_base2);
+err_ioremap2:
+ iounmap(gpio_base1);
+err_ioremap1:
+ return;
+
+#endif /* CONFIG_SOC_EXYNOS5250 */
+}
+
/* TODO: cleanup soc_is_* */
static __init int samsung_gpiolib_init(void)
{
struct samsung_gpio_chip *chip;
int i, nr_chips;
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
- void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
-#endif
int group = 0;
samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
@@ -2789,202 +3008,15 @@ static __init int samsung_gpiolib_init(void)
s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
#endif
} else if (soc_is_exynos4210()) {
-#ifdef CONFIG_CPU_EXYNOS4210
- void __iomem *gpx_base;
-
- /* gpio part1 */
- gpio_base1 = ioremap(EXYNOS4_PA_GPIO1, SZ_4K);
- if (gpio_base1 == NULL) {
- pr_err("unable to ioremap for gpio_base1\n");
- goto err_ioremap1;
- }
-
- chip = exynos4_gpios_1;
- nr_chips = ARRAY_SIZE(exynos4_gpios_1);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS4_PA_GPIO1, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_1,
- nr_chips, gpio_base1);
-
- /* gpio part2 */
- gpio_base2 = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);
- if (gpio_base2 == NULL) {
- pr_err("unable to ioremap for gpio_base2\n");
- goto err_ioremap2;
- }
-
- /* need to set base address for gpx */
- chip = &exynos4_gpios_2[16];
- gpx_base = gpio_base2 + 0xC00;
- for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
- chip->base = gpx_base;
-
- chip = exynos4_gpios_2;
- nr_chips = ARRAY_SIZE(exynos4_gpios_2);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS4_PA_GPIO2, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_2,
- nr_chips, gpio_base2);
-
- /* gpio part3 */
- gpio_base3 = ioremap(EXYNOS4_PA_GPIO3, SZ_256);
- if (gpio_base3 == NULL) {
- pr_err("unable to ioremap for gpio_base3\n");
- goto err_ioremap3;
- }
-
- chip = exynos4_gpios_3;
- nr_chips = ARRAY_SIZE(exynos4_gpios_3);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS4_PA_GPIO3, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos4_gpios_3,
- nr_chips, gpio_base3);
-
-#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_S5P_GPIO_INT)
- s5p_register_gpioint_bank(IRQ_GPIO_XA, 0, IRQ_GPIO1_NR_GROUPS);
- s5p_register_gpioint_bank(IRQ_GPIO_XB, IRQ_GPIO1_NR_GROUPS, IRQ_GPIO2_NR_GROUPS);
-#endif
-
-#endif /* CONFIG_CPU_EXYNOS4210 */
+ exynos4_gpiolib_init();
} else if (soc_is_exynos5250()) {
-#ifdef CONFIG_SOC_EXYNOS5250
- void __iomem *gpx_base;
-
- /* gpio part1 */
- gpio_base1 = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
- if (gpio_base1 == NULL) {
- pr_err("unable to ioremap for gpio_base1\n");
- goto err_ioremap1;
- }
-
- /* need to set base address for gpx */
- chip = &exynos5_gpios_1[20];
- gpx_base = gpio_base1 + 0xC00;
- for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
- chip->base = gpx_base;
-
- chip = exynos5_gpios_1;
- nr_chips = ARRAY_SIZE(exynos5_gpios_1);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS5_PA_GPIO1, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos5_gpios_1,
- nr_chips, gpio_base1);
-
- /* gpio part2 */
- gpio_base2 = ioremap(EXYNOS5_PA_GPIO2, SZ_4K);
- if (gpio_base2 == NULL) {
- pr_err("unable to ioremap for gpio_base2\n");
- goto err_ioremap2;
- }
-
- chip = exynos5_gpios_2;
- nr_chips = ARRAY_SIZE(exynos5_gpios_2);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS5_PA_GPIO2, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos5_gpios_2,
- nr_chips, gpio_base2);
-
- /* gpio part3 */
- gpio_base3 = ioremap(EXYNOS5_PA_GPIO3, SZ_4K);
- if (gpio_base3 == NULL) {
- pr_err("unable to ioremap for gpio_base3\n");
- goto err_ioremap3;
- }
-
- /* need to set base address for gpv */
- exynos5_gpios_3[0].base = gpio_base3;
- exynos5_gpios_3[1].base = gpio_base3 + 0x20;
- exynos5_gpios_3[2].base = gpio_base3 + 0x60;
- exynos5_gpios_3[3].base = gpio_base3 + 0x80;
- exynos5_gpios_3[4].base = gpio_base3 + 0xC0;
-
- chip = exynos5_gpios_3;
- nr_chips = ARRAY_SIZE(exynos5_gpios_3);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS5_PA_GPIO3, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos5_gpios_3,
- nr_chips, gpio_base3);
-
- /* gpio part4 */
- gpio_base4 = ioremap(EXYNOS5_PA_GPIO4, SZ_4K);
- if (gpio_base4 == NULL) {
- pr_err("unable to ioremap for gpio_base4\n");
- goto err_ioremap4;
- }
-
- chip = exynos5_gpios_4;
- nr_chips = ARRAY_SIZE(exynos5_gpios_4);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &exynos_gpio_cfg;
- chip->group = group++;
- }
- exynos_gpiolib_attach_ofnode(chip,
- EXYNOS5_PA_GPIO4, i * 0x20);
- }
- samsung_gpiolib_add_4bit_chips(exynos5_gpios_4,
- nr_chips, gpio_base4);
-#endif /* CONFIG_SOC_EXYNOS5250 */
+ exynos5_gpiolib_init();
} else {
WARN(1, "Unknown SoC in gpio-samsung, no GPIOs added\n");
return -ENODEV;
}
return 0;
-
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
-err_ioremap4:
- iounmap(gpio_base3);
-err_ioremap3:
- iounmap(gpio_base2);
-err_ioremap2:
- iounmap(gpio_base1);
-err_ioremap1:
- return -ENOMEM;
-#endif
}
core_initcall(samsung_gpiolib_init);
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 8cadf4d683a8..424dce8e3f30 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -232,6 +232,14 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
sch_gpio_resume.ngpio = 9;
break;
+ case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
+ sch_gpio_core.base = 0;
+ sch_gpio_core.ngpio = 21;
+
+ sch_gpio_resume.base = 21;
+ sch_gpio_resume.ngpio = 9;
+ break;
+
default:
return -ENODEV;
}
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 031e5d24837d..9d9891f7a607 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -224,7 +224,7 @@ static int __devinit sdv_gpio_probe(struct pci_dev *pdev,
ret = bgpio_init(&sd->bgpio, &pdev->dev, 4,
sd->gpio_pub_base + GPINR, sd->gpio_pub_base + GPOUTR,
- NULL, sd->gpio_pub_base + GPOER, NULL, false);
+ NULL, sd->gpio_pub_base + GPOER, NULL, 0);
if (ret)
goto unmap;
sd->bgpio.gc.ngpio = SDV_NUM_PUB_GPIOS;
@@ -282,17 +282,7 @@ static struct pci_driver sdv_gpio_driver = {
.remove = sdv_gpio_remove,
};
-static int __init sdv_gpio_init(void)
-{
- return pci_register_driver(&sdv_gpio_driver);
-}
-module_init(sdv_gpio_init);
-
-static void __exit sdv_gpio_exit(void)
-{
- pci_unregister_driver(&sdv_gpio_driver);
-}
-module_exit(sdv_gpio_exit);
+module_pci_driver(sdv_gpio_driver);
MODULE_AUTHOR("Hans J. Koch <hjk@linutronix.de>");
MODULE_DESCRIPTION("GPIO interface for Intel Sodaville SoCs");
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
new file mode 100644
index 000000000000..6064fb376e11
--- /dev/null
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -0,0 +1,436 @@
+/*
+ * STMicroelectronics ConneXt (STA2X11) GPIO driver
+ *
+ * Copyright 2012 ST Microelectronics (Alessandro Rubini)
+ * Based on gpio-ml-ioh.c, Copyright 2010 OKI Semiconductors Ltd.
+ * Also based on previous sta2x11 work, Copyright 2011 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+struct gsta_regs {
+ u32 dat; /* 0x00 */
+ u32 dats;
+ u32 datc;
+ u32 pdis;
+ u32 dir; /* 0x10 */
+ u32 dirs;
+ u32 dirc;
+ u32 unused_1c;
+ u32 afsela; /* 0x20 */
+ u32 unused_24[7];
+ u32 rimsc; /* 0x40 */
+ u32 fimsc;
+ u32 is;
+ u32 ic;
+};
+
+struct gsta_gpio {
+ spinlock_t lock;
+ struct device *dev;
+ void __iomem *reg_base;
+ struct gsta_regs __iomem *regs[GSTA_NR_BLOCKS];
+ struct gpio_chip gpio;
+ int irq_base;
+ /* FIXME: save the whole config here (AF, ...) */
+ unsigned irq_type[GSTA_NR_GPIO];
+};
+
+static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
+{
+ return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+}
+
+static inline u32 __bit(int nr)
+{
+ return 1U << (nr % GSTA_GPIO_PER_BLOCK);
+}
+
+/*
+ * gpio methods
+ */
+
+static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ if (val)
+ writel(bit, &regs->dats);
+ else
+ writel(bit, &regs->datc);
+}
+
+static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ return readl(&regs->dat) & bit;
+}
+
+static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ writel(bit, &regs->dirs);
+ /* Data register after direction, otherwise pullup/down is selected */
+ if (val)
+ writel(bit, &regs->dats);
+ else
+ writel(bit, &regs->datc);
+ return 0;
+}
+
+static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ writel(bit, &regs->dirc);
+ return 0;
+}
+
+static int gsta_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ return chip->irq_base + offset;
+}
+
+static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
+{
+ struct gpio_chip *gpio = &chip->gpio;
+
+ /*
+ * ARCH_NR_GPIOS is currently 256 and dynamic allocation starts
+ * from the end. However, for compatibility, we need the first
+ * ConneXt device to start from gpio 0: it's the main chipset
+ * on most boards so documents and drivers assume gpio0..gpio127
+ */
+ static int gpio_base;
+
+ gpio->label = dev_name(chip->dev);
+ gpio->owner = THIS_MODULE;
+ gpio->direction_input = gsta_gpio_direction_input;
+ gpio->get = gsta_gpio_get;
+ gpio->direction_output = gsta_gpio_direction_output;
+ gpio->set = gsta_gpio_set;
+ gpio->dbg_show = NULL;
+ gpio->base = gpio_base;
+ gpio->ngpio = GSTA_NR_GPIO;
+ gpio->can_sleep = 0;
+ gpio->to_irq = gsta_gpio_to_irq;
+
+ /*
+ * After the first device, turn to dynamic gpio numbers.
+ * For example, with ARCH_NR_GPIOS = 256 we can fit two cards
+ */
+ if (!gpio_base)
+ gpio_base = -1;
+}
+
+/*
+ * Special method: alternate functions and pullup/pulldown. This is only
+ * invoked on startup to configure gpio's according to platform data.
+ * FIXME : this functionality shall be managed (and exported to other drivers)
+ * via the pin control subsystem.
+ */
+static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
+{
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ unsigned long flags;
+ u32 bit = __bit(nr);
+ u32 val;
+ int err = 0;
+
+ pr_info("%s: %p %i %i\n", __func__, chip, nr, cfg);
+
+ if (cfg == PINMUX_TYPE_NONE)
+ return;
+
+ /* Alternate function or not? */
+ spin_lock_irqsave(&chip->lock, flags);
+ val = readl(&regs->afsela);
+ if (cfg == PINMUX_TYPE_FUNCTION)
+ val |= bit;
+ else
+ val &= ~bit;
+ writel(val | bit, &regs->afsela);
+ if (cfg == PINMUX_TYPE_FUNCTION) {
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+ }
+
+ /* not alternate function: set details */
+ switch (cfg) {
+ case PINMUX_TYPE_OUTPUT_LOW:
+ writel(bit, &regs->dirs);
+ writel(bit, &regs->datc);
+ break;
+ case PINMUX_TYPE_OUTPUT_HIGH:
+ writel(bit, &regs->dirs);
+ writel(bit, &regs->dats);
+ break;
+ case PINMUX_TYPE_INPUT:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) | bit;
+ writel(val, &regs->pdis);
+ break;
+ case PINMUX_TYPE_INPUT_PULLUP:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) & ~bit;
+ writel(val, &regs->pdis);
+ writel(bit, &regs->dats);
+ break;
+ case PINMUX_TYPE_INPUT_PULLDOWN:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) & ~bit;
+ writel(val, &regs->pdis);
+ writel(bit, &regs->datc);
+ break;
+ default:
+ err = 1;
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+ if (err)
+ pr_err("%s: chip %p, pin %i, cfg %i is invalid\n",
+ __func__, chip, nr, cfg);
+}
+
+/*
+ * Irq methods
+ */
+
+static void gsta_irq_disable(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct gsta_gpio *chip = gc->private;
+ int nr = data->irq - chip->irq_base;
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ if (chip->irq_type[nr] & IRQ_TYPE_EDGE_RISING) {
+ val = readl(&regs->rimsc) & ~bit;
+ writel(val, &regs->rimsc);
+ }
+ if (chip->irq_type[nr] & IRQ_TYPE_EDGE_FALLING) {
+ val = readl(&regs->fimsc) & ~bit;
+ writel(val, &regs->fimsc);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+}
+
+static void gsta_irq_enable(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct gsta_gpio *chip = gc->private;
+ int nr = data->irq - chip->irq_base;
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+ u32 val;
+ int type;
+ unsigned long flags;
+
+ type = chip->irq_type[nr];
+
+ spin_lock_irqsave(&chip->lock, flags);
+ val = readl(&regs->rimsc);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ writel(val | bit, &regs->rimsc);
+ else
+ writel(val & ~bit, &regs->rimsc);
+ val = readl(&regs->fimsc);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ writel(val | bit, &regs->fimsc);
+ else
+ writel(val & ~bit, &regs->fimsc);
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+}
+
+static int gsta_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct gsta_gpio *chip = gc->private;
+ int nr = d->irq - chip->irq_base;
+
+ /* We only support edge interrupts */
+ if (!(type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) {
+ pr_debug("%s: unsupported type 0x%x\n", __func__, type);
+ return -EINVAL;
+ }
+
+ chip->irq_type[nr] = type; /* used for enable/disable */
+
+ gsta_irq_enable(d);
+ return 0;
+}
+
+static irqreturn_t gsta_gpio_handler(int irq, void *dev_id)
+{
+ struct gsta_gpio *chip = dev_id;
+ struct gsta_regs __iomem *regs;
+ u32 is;
+ int i, nr, base;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+ regs = chip->regs[i];
+ base = chip->irq_base + i * GSTA_GPIO_PER_BLOCK;
+ while ((is = readl(&regs->is))) {
+ nr = __ffs(is);
+ irq = base + nr;
+ generic_handle_irq(irq);
+ writel(1 << nr, &regs->ic);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+static __devinit void gsta_alloc_irq_chip(struct gsta_gpio *chip)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
+ chip->reg_base, handle_simple_irq);
+ gc->private = chip;
+ ct = gc->chip_types;
+
+ ct->chip.irq_set_type = gsta_irq_type;
+ ct->chip.irq_disable = gsta_irq_disable;
+ ct->chip.irq_enable = gsta_irq_enable;
+
+ /* FIXME: this makes at most 32 interrupts. Request 0 by now */
+ irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+
+ /* Set up all 128 interrupts: code from setup_generic_chip */
+ {
+ struct irq_chip_type *ct = gc->chip_types;
+ int i, j;
+ for (j = 0; j < GSTA_NR_GPIO; j++) {
+ i = chip->irq_base + j;
+ irq_set_chip_and_handler(i, &ct->chip, ct->handler);
+ irq_set_chip_data(i, gc);
+ irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ }
+ gc->irq_cnt = i - gc->irq_base;
+ }
+}
+
+/* The platform device used here is instantiated by the MFD device */
+static int __devinit gsta_probe(struct platform_device *dev)
+{
+ int i, err;
+ struct pci_dev *pdev;
+ struct sta2x11_gpio_pdata *gpio_pdata;
+ struct gsta_gpio *chip;
+ struct resource *res;
+
+ pdev = *(struct pci_dev **)(dev->dev.platform_data);
+ gpio_pdata = dev_get_platdata(&pdev->dev);
+
+ if (gpio_pdata == NULL)
+ dev_err(&dev->dev, "no gpio config\n");
+ pr_debug("gpio config: %p\n", gpio_pdata);
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+
+ chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
+ chip->dev = &dev->dev;
+ chip->reg_base = devm_request_and_ioremap(&dev->dev, res);
+
+ for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+ chip->regs[i] = chip->reg_base + i * 4096;
+ /* disable all irqs */
+ writel(0, &chip->regs[i]->rimsc);
+ writel(0, &chip->regs[i]->fimsc);
+ writel(~0, &chip->regs[i]->ic);
+ }
+ spin_lock_init(&chip->lock);
+ gsta_gpio_setup(chip);
+ if (gpio_pdata)
+ for (i = 0; i < GSTA_NR_GPIO; i++)
+ gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+
+ /* 384 was used in previous code: be compatible for other drivers */
+ err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
+ if (err < 0) {
+ dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n",
+ -err);
+ return err;
+ }
+ chip->irq_base = err;
+ gsta_alloc_irq_chip(chip);
+
+ err = request_irq(pdev->irq, gsta_gpio_handler,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (err < 0) {
+ dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n",
+ -err);
+ goto err_free_descs;
+ }
+
+ err = gpiochip_add(&chip->gpio);
+ if (err < 0) {
+ dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
+ -err);
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(dev, chip);
+ return 0;
+
+err_free_irq:
+ free_irq(pdev->irq, chip);
+err_free_descs:
+ irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
+ return err;
+}
+
+static struct platform_driver sta2x11_gpio_platform_driver = {
+ .driver = {
+ .name = "sta2x11-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = gsta_probe,
+};
+
+module_platform_driver(sta2x11_gpio_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
new file mode 100644
index 000000000000..e35096bf3cfb
--- /dev/null
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -0,0 +1,301 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * The Serial To Parallel (STP) is found on MIPS-based Lantiq SoCs. It is a
+ * peripheral controller used to drive external shift register cascades. At most
+ * 3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
+ * to drive the 2 LSBs of the cascade automatically.
+ */
+
+/* control register 0 */
+#define XWAY_STP_CON0 0x00
+/* control register 1 */
+#define XWAY_STP_CON1 0x04
+/* data register 0 */
+#define XWAY_STP_CPU0 0x08
+/* data register 1 */
+#define XWAY_STP_CPU1 0x0C
+/* access register */
+#define XWAY_STP_AR 0x10
+
+/* software or hardware update select bit */
+#define XWAY_STP_CON_SWU BIT(31)
+
+/* automatic update rates */
+#define XWAY_STP_2HZ 0
+#define XWAY_STP_4HZ BIT(23)
+#define XWAY_STP_8HZ BIT(24)
+#define XWAY_STP_10HZ (BIT(24) | BIT(23))
+#define XWAY_STP_SPEED_MASK (0xf << 23)
+
+/* clock source for automatic update */
+#define XWAY_STP_UPD_FPI BIT(31)
+#define XWAY_STP_UPD_MASK (BIT(31) | BIT(30))
+
+/* let the adsl core drive the 2 LSBs */
+#define XWAY_STP_ADSL_SHIFT 24
+#define XWAY_STP_ADSL_MASK 0x3
+
+/* 2 groups of 3 bits can be driven by the phys */
+#define XWAY_STP_PHY_MASK 0x3
+#define XWAY_STP_PHY1_SHIFT 27
+#define XWAY_STP_PHY2_SHIFT 15
+
+/* STP has 3 groups of 8 bits */
+#define XWAY_STP_GROUP0 BIT(0)
+#define XWAY_STP_GROUP1 BIT(1)
+#define XWAY_STP_GROUP2 BIT(2)
+#define XWAY_STP_GROUP_MASK (0x7)
+
+/* Edge configuration bits */
+#define XWAY_STP_FALLING BIT(26)
+#define XWAY_STP_EDGE_MASK BIT(26)
+
+#define xway_stp_r32(m, reg) __raw_readl(m + reg)
+#define xway_stp_w32(m, val, reg) __raw_writel(val, m + reg)
+#define xway_stp_w32_mask(m, clear, set, reg) \
+ ltq_w32((ltq_r32(m + reg) & ~(clear)) | (set), \
+ m + reg)
+
+struct xway_stp {
+ struct gpio_chip gc;
+ void __iomem *virt;
+ u32 edge; /* rising or falling edge triggered shift register */
+ u16 shadow; /* shadow the shift registers state */
+ u8 groups; /* we can drive 1-3 groups of 8bit each */
+ u8 dsl; /* the 2 LSBs can be driven by the dsl core */
+ u8 phy1; /* 3 bits can be driven by phy1 */
+ u8 phy2; /* 3 bits can be driven by phy2 */
+ u8 reserved; /* mask out the hw driven bits in gpio_request */
+};
+
+/**
+ * xway_stp_set() - gpio_chip->set - set gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ * @val: Value to be written to specified signal.
+ *
+ * Set the shadow value and call ltq_ebu_apply.
+ */
+static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ struct xway_stp *chip =
+ container_of(gc, struct xway_stp, gc);
+
+ if (val)
+ chip->shadow |= BIT(gpio);
+ else
+ chip->shadow &= ~BIT(gpio);
+ xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
+ xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+}
+
+/**
+ * xway_stp_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ * @val: Value to be written to specified signal.
+ *
+ * Same as xway_stp_set, always returns 0.
+ */
+static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ xway_stp_set(gc, gpio, val);
+
+ return 0;
+}
+
+/**
+ * xway_stp_request() - gpio_chip->request
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ *
+ * We mask out the HW driven pins
+ */
+static int xway_stp_request(struct gpio_chip *gc, unsigned gpio)
+{
+ struct xway_stp *chip =
+ container_of(gc, struct xway_stp, gc);
+
+ if ((gpio < 8) && (chip->reserved & BIT(gpio))) {
+ dev_err(gc->dev, "GPIO %d is driven by hardware\n", gpio);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * xway_stp_hw_init() - Configure the STP unit and enable the clock gate
+ * @virt: pointer to the remapped register range
+ */
+static int xway_stp_hw_init(struct xway_stp *chip)
+{
+ /* sane defaults */
+ xway_stp_w32(chip->virt, 0, XWAY_STP_AR);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CPU0);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CPU1);
+ xway_stp_w32(chip->virt, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CON1);
+
+ /* apply edge trigger settings for the shift register */
+ xway_stp_w32_mask(chip->virt, XWAY_STP_EDGE_MASK,
+ chip->edge, XWAY_STP_CON0);
+
+ /* apply led group settings */
+ xway_stp_w32_mask(chip->virt, XWAY_STP_GROUP_MASK,
+ chip->groups, XWAY_STP_CON1);
+
+ /* tell the hardware which pins are controlled by the dsl modem */
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_ADSL_MASK << XWAY_STP_ADSL_SHIFT,
+ chip->dsl << XWAY_STP_ADSL_SHIFT,
+ XWAY_STP_CON0);
+
+ /* tell the hardware which pins are controlled by the phys */
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY1_SHIFT,
+ chip->phy1 << XWAY_STP_PHY1_SHIFT,
+ XWAY_STP_CON0);
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY2_SHIFT,
+ chip->phy2 << XWAY_STP_PHY2_SHIFT,
+ XWAY_STP_CON1);
+
+ /* mask out the hw driven bits in gpio_request */
+ chip->reserved = (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl;
+
+ /*
+ * if we have pins that are driven by hw, we need to tell the stp what
+ * clock to use as a timer.
+ */
+ if (chip->reserved)
+ xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
+ XWAY_STP_UPD_FPI, XWAY_STP_CON1);
+
+ return 0;
+}
+
+static int __devinit xway_stp_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ const __be32 *shadow, *groups, *dsl, *phy;
+ struct xway_stp *chip;
+ struct clk *clk;
+ int ret = 0;
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request STP resource\n");
+ return -ENOENT;
+ }
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->virt = devm_request_and_ioremap(&pdev->dev, res);
+ if (!chip->virt) {
+ dev_err(&pdev->dev, "failed to remap STP memory\n");
+ return -ENOMEM;
+ }
+ chip->gc.dev = &pdev->dev;
+ chip->gc.label = "stp-xway";
+ chip->gc.direction_output = xway_stp_dir_out;
+ chip->gc.set = xway_stp_set;
+ chip->gc.request = xway_stp_request;
+ chip->gc.base = -1;
+ chip->gc.owner = THIS_MODULE;
+
+ /* store the shadow value if one was passed by the devicetree */
+ shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+ if (shadow)
+ chip->shadow = be32_to_cpu(*shadow);
+
+ /* find out which gpio groups should be enabled */
+ groups = of_get_property(pdev->dev.of_node, "lantiq,groups", NULL);
+ if (groups)
+ chip->groups = be32_to_cpu(*groups) & XWAY_STP_GROUP_MASK;
+ else
+ chip->groups = XWAY_STP_GROUP0;
+ chip->gc.ngpio = fls(chip->groups) * 8;
+
+ /* find out which gpios are controlled by the dsl core */
+ dsl = of_get_property(pdev->dev.of_node, "lantiq,dsl", NULL);
+ if (dsl)
+ chip->dsl = be32_to_cpu(*dsl) & XWAY_STP_ADSL_MASK;
+
+ /* find out which gpios are controlled by the phys */
+ if (of_machine_is_compatible("lantiq,ar9") ||
+ of_machine_is_compatible("lantiq,gr9") ||
+ of_machine_is_compatible("lantiq,vr9")) {
+ phy = of_get_property(pdev->dev.of_node, "lantiq,phy1", NULL);
+ if (phy)
+ chip->phy1 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+ phy = of_get_property(pdev->dev.of_node, "lantiq,phy2", NULL);
+ if (phy)
+ chip->phy2 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+ }
+
+ /* check which edge trigger we should use, default to a falling edge */
+ if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
+ chip->edge = XWAY_STP_FALLING;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+ clk_enable(clk);
+
+ ret = xway_stp_hw_init(chip);
+ if (!ret)
+ ret = gpiochip_add(&chip->gc);
+
+ if (!ret)
+ dev_info(&pdev->dev, "Init done\n");
+
+ return ret;
+}
+
+static const struct of_device_id xway_stp_match[] = {
+ { .compatible = "lantiq,gpio-stp-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_stp_match);
+
+static struct platform_driver xway_stp_driver = {
+ .probe = xway_stp_probe,
+ .driver = {
+ .name = "gpio-stp-xway",
+ .owner = THIS_MODULE,
+ .of_match_table = xway_stp_match,
+ },
+};
+
+int __init xway_stp_init(void)
+{
+ return platform_driver_register(&xway_stp_driver);
+}
+
+subsys_initcall(xway_stp_init);
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 7eef648a3351..11f29c82253c 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -18,14 +18,27 @@
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
+
+struct tps65910_gpio {
+ struct gpio_chip gpio_chip;
+ struct tps65910 *tps65910;
+};
+
+static inline struct tps65910_gpio *to_tps65910_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tps65910_gpio, gpio_chip);
+}
static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
- uint8_t val;
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
+ unsigned int val;
- tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
+ tps65910_reg_read(tps65910, TPS65910_GPIO0 + offset, &val);
if (val & GPIO_STS_MASK)
return 1;
@@ -36,83 +49,173 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
if (value)
- tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_SET_MASK);
else
- tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_SET_MASK);
}
static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
/* Set the initial value */
tps65910_gpio_set(gc, offset, value);
- return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ return tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
}
static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
- return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ return tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
}
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
+#ifdef CONFIG_OF
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+ struct tps65910 *tps65910, int chip_ngpio)
{
+ struct tps65910_board *tps65910_board = tps65910->of_plat_data;
+ unsigned int prop_array[TPS6591X_MAX_NUM_GPIO];
+ int ngpio = min(chip_ngpio, TPS6591X_MAX_NUM_GPIO);
int ret;
- struct tps65910_board *board_data;
+ int idx;
+
+ tps65910_board->gpio_base = -1;
+ ret = of_property_read_u32_array(tps65910->dev->of_node,
+ "ti,en-gpio-sleep", prop_array, ngpio);
+ if (ret < 0) {
+ dev_dbg(dev, "ti,en-gpio-sleep not specified\n");
+ return tps65910_board;
+ }
- if (!gpio_base)
- return;
+ for (idx = 0; idx < ngpio; idx++)
+ tps65910_board->en_gpio_sleep[idx] = (prop_array[idx] != 0);
- tps65910->gpio.owner = THIS_MODULE;
- tps65910->gpio.label = tps65910->i2c_client->name;
- tps65910->gpio.dev = tps65910->dev;
- tps65910->gpio.base = gpio_base;
+ return tps65910_board;
+}
+#else
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+ struct tps65910 *tps65910, int chip_ngpio)
+{
+ return NULL;
+}
+#endif
+
+static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
+ struct tps65910_gpio *tps65910_gpio;
+ int ret;
+ int i;
+
+ tps65910_gpio = devm_kzalloc(&pdev->dev,
+ sizeof(*tps65910_gpio), GFP_KERNEL);
+ if (!tps65910_gpio) {
+ dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n");
+ return -ENOMEM;
+ }
+
+ tps65910_gpio->tps65910 = tps65910;
+
+ tps65910_gpio->gpio_chip.owner = THIS_MODULE;
+ tps65910_gpio->gpio_chip.label = tps65910->i2c_client->name;
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
- tps65910->gpio.ngpio = TPS65910_NUM_GPIO;
+ tps65910_gpio->gpio_chip.ngpio = TPS65910_NUM_GPIO;
break;
case TPS65911:
- tps65910->gpio.ngpio = TPS65911_NUM_GPIO;
+ tps65910_gpio->gpio_chip.ngpio = TPS65911_NUM_GPIO;
break;
default:
- return;
+ return -EINVAL;
+ }
+ tps65910_gpio->gpio_chip.can_sleep = 1;
+ tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
+ tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
+ tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
+ tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
+ tps65910_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
+#endif
+ if (pdata && pdata->gpio_base)
+ tps65910_gpio->gpio_chip.base = pdata->gpio_base;
+ else
+ tps65910_gpio->gpio_chip.base = -1;
+
+ if (!pdata && tps65910->dev->of_node)
+ pdata = tps65910_parse_dt_for_gpio(&pdev->dev, tps65910,
+ tps65910_gpio->gpio_chip.ngpio);
+
+ if (!pdata)
+ goto skip_init;
+
+ /* Configure sleep control for gpios if provided */
+ for (i = 0; i < tps65910_gpio->gpio_chip.ngpio; ++i) {
+ if (!pdata->en_gpio_sleep[i])
+ continue;
+
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
+ if (ret < 0)
+ dev_warn(tps65910->dev,
+ "GPIO Sleep setting failed with err %d\n", ret);
}
- tps65910->gpio.can_sleep = 1;
-
- tps65910->gpio.direction_input = tps65910_gpio_input;
- tps65910->gpio.direction_output = tps65910_gpio_output;
- tps65910->gpio.set = tps65910_gpio_set;
- tps65910->gpio.get = tps65910_gpio_get;
-
- /* Configure sleep control for gpios */
- board_data = dev_get_platdata(tps65910->dev);
- if (board_data) {
- int i;
- for (i = 0; i < tps65910->gpio.ngpio; ++i) {
- if (board_data->en_gpio_sleep[i]) {
- ret = tps65910_set_bits(tps65910,
- TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
- if (ret < 0)
- dev_warn(tps65910->dev,
- "GPIO Sleep setting failed\n");
- }
- }
+
+skip_init:
+ ret = gpiochip_add(&tps65910_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
}
- ret = gpiochip_add(&tps65910->gpio);
+ platform_set_drvdata(pdev, tps65910_gpio);
+
+ return ret;
+}
+
+static int __devexit tps65910_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
- if (ret)
- dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
+ return gpiochip_remove(&tps65910_gpio->gpio_chip);
}
+
+static struct platform_driver tps65910_gpio_driver = {
+ .driver.name = "tps65910-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = tps65910_gpio_probe,
+ .remove = __devexit_p(tps65910_gpio_remove),
+};
+
+static int __init tps65910_gpio_init(void)
+{
+ return platform_driver_register(&tps65910_gpio_driver);
+}
+subsys_initcall(tps65910_gpio_init);
+
+static void __exit tps65910_gpio_exit(void)
+{
+ platform_driver_unregister(&tps65910_gpio_driver);
+}
+module_exit(tps65910_gpio_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("GPIO interface for TPS65910/TPS6511 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65910-gpio");
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index deb949e75ec1..e56a2165641c 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -102,10 +102,8 @@ static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
- if (!wm831x->irq_base)
- return -EINVAL;
-
- return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
+ return irq_create_mapping(wm831x->irq_domain,
+ WM831X_IRQ_GPIO_1 + offset);
}
static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 92ea5350dfe9..aa61ad2fcaaa 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+ if (value)
+ value = WM8994_GPN_LVL;
+
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
- WM8994_GPN_DIR, 0);
+ WM8994_GPN_DIR | WM8994_GPN_LVL, value);
}
static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/of/gpio.c b/drivers/gpio/gpiolib-of.c
index bf984b6dc477..d18068a9f3ec 100644
--- a/drivers/of/gpio.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -15,11 +15,39 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
+/* Private data structure for of_gpiochip_is_match */
+struct gg_data {
+ enum of_gpio_flags *flags;
+ struct of_phandle_args gpiospec;
+
+ int out_gpio;
+};
+
+/* Private function for resolving node pointer to gpio_chip */
+static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+{
+ struct gg_data *gg_data = data;
+ int ret;
+
+ if ((gc->of_node != gg_data->gpiospec.np) ||
+ (gc->of_gpio_n_cells != gg_data->gpiospec.args_count) ||
+ (!gc->of_xlate))
+ return false;
+
+ ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
+ if (ret < 0)
+ return false;
+
+ gg_data->out_gpio = ret + gc->base;
+ return true;
+}
+
/**
* of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API
* @np: device node to get GPIO from
@@ -34,46 +62,25 @@
int of_get_named_gpio_flags(struct device_node *np, const char *propname,
int index, enum of_gpio_flags *flags)
{
+ struct gg_data gg_data = { .flags = flags, .out_gpio = -ENODEV };
int ret;
- struct gpio_chip *gc;
- struct of_phandle_args gpiospec;
+
+ /* .of_xlate might decide to not fill in the flags, so clear it. */
+ if (flags)
+ *flags = 0;
ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
- &gpiospec);
+ &gg_data.gpiospec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
- goto err0;
- }
-
- gc = of_node_to_gpiochip(gpiospec.np);
- if (!gc) {
- pr_debug("%s: gpio controller %s isn't registered\n",
- np->full_name, gpiospec.np->full_name);
- ret = -ENODEV;
- goto err1;
- }
-
- if (gpiospec.args_count != gc->of_gpio_n_cells) {
- pr_debug("%s: wrong #gpio-cells for %s\n",
- np->full_name, gpiospec.np->full_name);
- ret = -EINVAL;
- goto err1;
+ return -EINVAL;
}
- /* .xlate might decide to not fill in the flags, so clear it. */
- if (flags)
- *flags = 0;
-
- ret = gc->of_xlate(gc, &gpiospec, flags);
- if (ret < 0)
- goto err1;
+ gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
- ret += gc->base;
-err1:
- of_node_put(gpiospec.np);
-err0:
+ of_node_put(gg_data.gpiospec.np);
pr_debug("%s exited with status %d\n", __func__, ret);
- return ret;
+ return gg_data.out_gpio;
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
@@ -227,14 +234,3 @@ void of_gpiochip_remove(struct gpio_chip *chip)
if (chip->of_node)
of_node_put(chip->of_node);
}
-
-/* Private function for resolving node pointer to gpio_chip */
-static int of_gpiochip_is_match(struct gpio_chip *chip, const void *data)
-{
- return chip->of_node == data;
-}
-
-struct gpio_chip *of_node_to_gpiochip(struct device_node *np)
-{
- return gpiochip_find(np, of_gpiochip_is_match);
-}
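Aside: the of_gpiochip_find_and_xlate() helper above is just the generic gpiochip_find() iterator driven by a stateful match callback; a minimal sketch of the same idiom, here matching on the chip label (all names are illustrative, not part of the patch):

#include <linux/gpio.h>
#include <linux/string.h>

/* Illustrative match callback: gpiochip_find() stops at the first chip
 * for which this returns non-zero. */
static int example_match_label(struct gpio_chip *chip, void *data)
{
	const char *label = data;

	return chip->label && !strcmp(chip->label, label);
}

static struct gpio_chip *example_find_by_label(const char *label)
{
	return gpiochip_find((void *)label, example_match_label);
}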
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5a75510d66bb..120b2a0e3167 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1093,7 +1093,7 @@ unlock:
if (status)
goto fail;
- pr_info("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
+ pr_debug("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
chip->base, chip->base + chip->ngpio - 1,
chip->label ? : "generic");
@@ -1154,9 +1154,9 @@ EXPORT_SYMBOL_GPL(gpiochip_remove);
* non-zero, this function will return to the caller and not iterate over any
* more gpio_chips.
*/
-struct gpio_chip *gpiochip_find(const void *data,
+struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip,
- const void *data))
+ void *data))
{
struct gpio_chip *chip = NULL;
unsigned long flags;
@@ -1302,8 +1302,18 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
if (err)
- gpio_free(gpio);
+ goto free_gpio;
+
+ if (flags & GPIOF_EXPORT) {
+ err = gpio_export(gpio, flags & GPIOF_EXPORT_CHANGEABLE);
+ if (err)
+ goto free_gpio;
+ }
+
+ return 0;
+ free_gpio:
+ gpio_free(gpio);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
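Aside: with the change above, a consumer can request, configure and sysfs-export a line in one call, and the line is released again if the export step fails; a hedged usage sketch (the GPIO number and label are placeholders, GPIOF_OUT_INIT_HIGH and GPIOF_EXPORT assumed to be defined in linux/gpio.h alongside GPIOF_EXPORT_CHANGEABLE):

#include <linux/gpio.h>

static int example_claim_and_export(void)
{
	/* Drive GPIO 42 high and expose it under /sys/class/gpio;
	 * 42 and the label are illustrative only. */
	return gpio_request_one(42, GPIOF_OUT_INIT_HIGH | GPIOF_EXPORT,
				"example-led");
}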
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e354bc0b052a..23120c00a881 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -186,3 +186,9 @@ source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"
source "drivers/gpu/drm/udl/Kconfig"
+
+source "drivers/gpu/drm/ast/Kconfig"
+
+source "drivers/gpu/drm/mgag200/Kconfig"
+
+source "drivers/gpu/drm/cirrus/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c20da5bda355..f65f65ed0ddf 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
+obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
@@ -42,4 +44,5 @@ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_AST) += ast/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
new file mode 100644
index 000000000000..a277b1257888
--- /dev/null
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -0,0 +1,16 @@
+config DRM_AST
+ tristate "AST server chips"
+ depends on DRM && PCI && EXPERIMENTAL
+ select DRM_TTM
+ select FB_SYS_COPYAREA
+ select FB_SYS_FILLRECT
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ Say yes for the experimental AST GPU driver. Do not enable
+ this driver without a working userspace -modesetting driver
+ and a version of the AST X.org driver that knows to fail if
+ KMS is bound to the device. These GPUs are commonly found
+ in server chipsets.
+
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
new file mode 100644
index 000000000000..8df4f284ee24
--- /dev/null
+++ b/drivers/gpu/drm/ast/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
+
+obj-$(CONFIG_DRM_AST) := ast.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
new file mode 100644
index 000000000000..cc04539c0ff3
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -0,0 +1,144 @@
+#ifndef AST_DRAM_TABLES_H
+#define AST_DRAM_TABLES_H
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xFFFFFFFF },
+ { 0x0004, 0x00000089 },
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ { 0xFFFF, 0xFFFFFFFF }
+};
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ { 0x0004, 0x00000585 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ { 0xffff, 0xffffffff },
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ { 0x0004, 0x00000489 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ { 0xffff, 0xffffffff },
+};
+
+#endif
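/*
 * Illustrative sketch, not part of the patch: these tables are walked by the
 * POST code (ast_post.c, listed in the Makefile above but not shown here).
 * The loop below is only an assumption about the intended shape -- each
 * {index, data} pair programs the DRAM controller until the 0xffff
 * terminator; the 0xff00 entries are assumed to be wait/delay markers rather
 * than register writes, and the 0x10000 register base is inferred from
 * ast_get_dram_info().
 */
static void example_apply_dram_table(struct ast_private *ast,
				     const struct ast_dramstruct *tbl)
{
	for (; tbl->index != 0xffff; tbl++) {
		if (tbl->index == 0xff00) {
			mdelay(tbl->data & 0xff);	/* assumed delay marker */
			continue;
		}
		ast_write32(ast, 0x10000 + tbl->index, tbl->data);
	}
}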
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
new file mode 100644
index 000000000000..d0c4574ef49c
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_drv.h"
+
+int ast_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, ast_modeset, int, 0400);
+
+#define PCI_VENDOR_ASPEED 0x1a03
+
+static struct drm_driver driver;
+
+#define AST_VGA_DEVICE(id, info) { \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
+ .class_mask = 0xff0000, \
+ .vendor = PCI_VENDOR_ASPEED, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+ AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+ /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
+ {0, 0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+ast_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+
+
+static int ast_drm_freeze(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(dev->pdev);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 1);
+ console_unlock();
+ return 0;
+}
+
+static int ast_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ ast_post_gpu(dev);
+
+ drm_mode_config_reset(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ return error;
+}
+
+static int ast_drm_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ ret = ast_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int ast_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ int error;
+
+ error = ast_drm_freeze(ddev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+static int ast_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_resume(ddev);
+}
+
+static int ast_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ if (!ddev || !ddev->dev_private)
+ return -ENODEV;
+ return ast_drm_freeze(ddev);
+
+}
+
+static int ast_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_thaw(ddev);
+}
+
+static int ast_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ return ast_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops ast_pm_ops = {
+ .suspend = ast_pm_suspend,
+ .resume = ast_pm_resume,
+ .freeze = ast_pm_freeze,
+ .thaw = ast_pm_thaw,
+ .poweroff = ast_pm_poweroff,
+ .restore = ast_pm_resume,
+};
+
+static struct pci_driver ast_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = ast_pci_probe,
+ .remove = ast_pci_remove,
+ .driver.pm = &ast_pm_ops,
+};
+
+static const struct file_operations ast_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = ast_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+ .dev_priv_size = 0,
+
+ .load = ast_driver_load,
+ .unload = ast_driver_unload,
+
+ .fops = &ast_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_init_object = ast_gem_init_object,
+ .gem_free_object = ast_gem_free_object,
+ .dumb_create = ast_dumb_create,
+ .dumb_map_offset = ast_dumb_mmap_offset,
+ .dumb_destroy = ast_dumb_destroy,
+
+};
+
+static int __init ast_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && ast_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (ast_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &ast_pci_driver);
+}
+static void __exit ast_exit(void)
+{
+ drm_pci_exit(&driver, &ast_pci_driver);
+}
+
+module_init(ast_init);
+module_exit(ast_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
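/*
 * Illustrative note, not part of the patch: ast_init() above bails out when
 * the VGA console has been forced into text mode unless modesetting is
 * explicitly requested, so loading can be controlled from the command line,
 * e.g. (hypothetical invocations):
 *
 *	modprobe ast modeset=1	# force KMS on despite a forced text console
 *	modprobe ast modeset=0	# refuse to load (-EINVAL)
 */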
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
new file mode 100644
index 000000000000..d4af9edcbb97
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#ifndef __AST_DRV_H__
+#define __AST_DRV_H__
+
+#include "drm_fb_helper.h"
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#define DRIVER_AUTHOR "Dave Airlie"
+
+#define DRIVER_NAME "ast"
+#define DRIVER_DESC "AST"
+#define DRIVER_DATE "20120228"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define PCI_CHIP_AST2000 0x2000
+#define PCI_CHIP_AST2100 0x2010
+#define PCI_CHIP_AST1180 0x1180
+
+
+enum ast_chip {
+ AST2000,
+ AST2100,
+ AST1100,
+ AST2200,
+ AST2150,
+ AST2300,
+ AST1180,
+};
+
+#define AST_DRAM_512Mx16 0
+#define AST_DRAM_1Gx16 1
+#define AST_DRAM_512Mx32 2
+#define AST_DRAM_1Gx32 3
+#define AST_DRAM_2Gx16 6
+#define AST_DRAM_4Gx16 7
+
+struct ast_fbdev;
+
+struct ast_private {
+ struct drm_device *dev;
+
+ void __iomem *regs;
+ void __iomem *ioregs;
+
+ enum ast_chip chip;
+ bool vga2_clone;
+ uint32_t dram_bus_width;
+ uint32_t dram_type;
+ uint32_t mclk;
+ uint32_t vram_size;
+
+ struct ast_fbdev *fbdev;
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+ struct drm_gem_object *cursor_cache;
+ uint64_t cursor_cache_gpu_addr;
+ struct ttm_bo_kmap_obj cache_kmap;
+ int next_cursor;
+};
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags);
+int ast_driver_unload(struct drm_device *dev);
+
+struct ast_gem_object;
+
+#define AST_IO_AR_PORT_WRITE (0x40)
+#define AST_IO_MISC_PORT_WRITE (0x42)
+#define AST_IO_SEQ_PORT (0x44)
+#define AST_DAC_INDEX_READ (0x3c7)
+#define AST_IO_DAC_INDEX_WRITE (0x48)
+#define AST_IO_DAC_DATA (0x49)
+#define AST_IO_GR_PORT (0x4E)
+#define AST_IO_CRTC_PORT (0x54)
+#define AST_IO_INPUT_STATUS1_READ (0x5A)
+#define AST_IO_MISC_PORT_READ (0x4C)
+
+#define __ast_read(x) \
+static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->regs + reg); \
+return val;\
+}
+
+__ast_read(8);
+__ast_read(16);
+__ast_read(32)
+
+#define __ast_io_read(x) \
+static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->ioregs + reg); \
+return val;\
+}
+
+__ast_io_read(8);
+__ast_io_read(16);
+__ast_io_read(32);
+
+#define __ast_write(x) \
+static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->regs + reg);\
+ }
+
+__ast_write(8);
+__ast_write(16);
+__ast_write(32);
+
+#define __ast_io_write(x) \
+static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->ioregs + reg);\
+ }
+
+__ast_io_write(8);
+__ast_io_write(16);
+#undef __ast_io_write
+
+static inline void ast_set_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t val)
+{
+ ast_io_write16(ast, base, ((u16)val << 8) | index);
+}
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val);
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index);
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask);
+
+static inline void ast_open_key(struct ast_private *ast)
+{
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+}
+
+#define AST_VIDMEM_SIZE_8M 0x00800000
+#define AST_VIDMEM_SIZE_16M 0x01000000
+#define AST_VIDMEM_SIZE_32M 0x02000000
+#define AST_VIDMEM_SIZE_64M 0x04000000
+#define AST_VIDMEM_SIZE_128M 0x08000000
+
+#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+
+struct ast_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+ struct drm_connector base;
+ struct ast_i2c_chan *i2c;
+};
+
+struct ast_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ struct drm_gem_object *cursor_bo;
+ uint64_t cursor_addr;
+ int cursor_width, cursor_height;
+ u8 offset_x, offset_y;
+};
+
+struct ast_encoder {
+ struct drm_encoder base;
+};
+
+struct ast_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct ast_fbdev {
+ struct drm_fb_helper helper;
+ struct ast_framebuffer afb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+};
+
+#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+#define to_ast_connector(x) container_of(x, struct ast_connector, base)
+#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
+#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
+
+struct ast_vbios_stdtable {
+ u8 misc;
+ u8 seq[4];
+ u8 crtc[25];
+ u8 ar[20];
+ u8 gr[9];
+};
+
+struct ast_vbios_enhtable {
+ u32 ht;
+ u32 hde;
+ u32 hfp;
+ u32 hsync;
+ u32 vt;
+ u32 vde;
+ u32 vfp;
+ u32 vsync;
+ u32 dclk_index;
+ u32 flags;
+ u32 refresh_rate;
+ u32 refresh_rate_index;
+ u32 mode_id;
+};
+
+struct ast_vbios_dclk_info {
+ u8 param1;
+ u8 param2;
+ u8 param3;
+};
+
+struct ast_vbios_mode_info {
+ struct ast_vbios_stdtable *std_table;
+ struct ast_vbios_enhtable *enh_table;
+};
+
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int ast_fbdev_init(struct drm_device *dev);
+void ast_fbdev_fini(struct drm_device *dev);
+void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+
+struct ast_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
+
+static inline struct ast_bo *
+ast_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct ast_bo, bo);
+}
+
+
+#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
+
+#define AST_MM_ALIGN_SHIFT 4
+#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+
+extern int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+extern int ast_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
+extern int ast_gem_init_object(struct drm_gem_object *obj);
+extern void ast_gem_free_object(struct drm_gem_object *obj);
+extern int ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+int ast_mm_init(struct ast_private *ast);
+void ast_mm_fini(struct ast_private *ast);
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo);
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int ast_bo_unpin(struct ast_bo *bo);
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait);
+void ast_bo_unreserve(struct ast_bo *bo);
+void ast_ttm_placement(struct ast_bo *bo, int domain);
+int ast_bo_push_sysram(struct ast_bo *bo);
+int ast_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* ast post */
+void ast_post_gpu(struct drm_device *dev);
+#endif
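/*
 * Illustrative note, not part of the patch: the __ast_write()/__ast_read()
 * macros above stamp out plain MMIO accessors, e.g. __ast_write(8) expands
 * roughly to
 *
 *	static inline void ast_write8(struct ast_private *ast, u32 reg, u8 val)
 *	{
 *		iowrite8(val, ast->regs + reg);
 *	}
 *
 * while the VGA-style indexed registers go through the index/data helpers,
 * as in ast_open_key() above:
 *
 *	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
 */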
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
new file mode 100644
index 000000000000..2fc8e9e860b1
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "ast_drv.h"
+
+static void ast_dirty_update(struct ast_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = afbdev->afb.obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ ast_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_bo_unreserve(bo);
+}
+
+static void ast_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void ast_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ ast_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void ast_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ ast_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops astfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = ast_fillrect,
+ .fb_copyarea = ast_copyarea,
+ .fb_imageblit = ast_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int astfb_create_object(struct ast_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = ast_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int astfb_create(struct ast_fbdev *afbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ int size, ret;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct ast_bo *bo = NULL;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_ast_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->par = afbdev;
+
+ ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
+ if (ret)
+ goto out;
+
+ afbdev->sysram = sysram;
+ afbdev->size = size;
+
+ fb = &afbdev->afb.base;
+ afbdev->helper.fb = fb;
+ afbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "astdrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &astfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+
+ return 0;
+out:
+ return ret;
+}
+
+static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ ast_crtc->lut_r[regno] = red >> 8;
+ ast_crtc->lut_g[regno] = green >> 8;
+ ast_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ *red = ast_crtc->lut_r[regno] << 8;
+ *green = ast_crtc->lut_g[regno] << 8;
+ *blue = ast_crtc->lut_b[regno] << 8;
+}
+
+static int ast_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = astfb_create(afbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+ .gamma_set = ast_fb_gamma_set,
+ .gamma_get = ast_fb_gamma_get,
+ .fb_probe = ast_find_or_create_single,
+};
+
+static void ast_fbdev_destroy(struct drm_device *dev,
+ struct ast_fbdev *afbdev)
+{
+ struct fb_info *info;
+ struct ast_framebuffer *afb = &afbdev->afb;
+ if (afbdev->helper.fbdev) {
+ info = afbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (afb->obj) {
+ drm_gem_object_unreference_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&afbdev->helper);
+
+ vfree(afbdev->sysram);
+ drm_framebuffer_cleanup(&afb->base);
+}
+
+int ast_fbdev_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_fbdev *afbdev;
+ int ret;
+
+ afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
+ if (!afbdev)
+ return -ENOMEM;
+
+ ast->fbdev = afbdev;
+ afbdev->helper.funcs = &ast_fb_helper_funcs;
+ ret = drm_fb_helper_init(dev, &afbdev->helper,
+ 1, 1);
+ if (ret) {
+ kfree(afbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+ drm_fb_helper_initial_config(&afbdev->helper, 32);
+ return 0;
+}
+
+void ast_fbdev_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ ast_fbdev_destroy(dev, ast->fbdev);
+ kfree(ast->fbdev);
+ ast->fbdev = NULL;
+}
+
+void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ fb_set_suspend(ast->fbdev->helper.fbdev, state);
+}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
new file mode 100644
index 000000000000..95ae55b8214b
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+
+
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_dram_tables.h"
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val)
+{
+ u8 tmp;
+ ast_io_write8(ast, base, index);
+ tmp = (ast_io_read8(ast, base + 1) & mask) | val;
+ ast_set_index_reg(ast, base, index, tmp);
+}
+
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1);
+ return ret;
+}
+
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1) & mask;
+ return ret;
+}
+
+
+static int ast_detect_chip(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (dev->pdev->device == PCI_CHIP_AST1180) {
+ ast->chip = AST1100;
+ DRM_INFO("AST 1180 detected\n");
+ } else {
+ if (dev->pdev->revision >= 0x20) {
+ ast->chip = AST2300;
+ DRM_INFO("AST 2300 detected\n");
+ } else if (dev->pdev->revision >= 0x10) {
+ uint32_t data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ data = ast_read32(ast, 0x1207c);
+ switch (data & 0x0300) {
+ case 0x0200:
+ ast->chip = AST1100;
+ DRM_INFO("AST 1100 detected\n");
+ break;
+ case 0x0100:
+ ast->chip = AST2200;
+ DRM_INFO("AST 2200 detected\n");
+ break;
+ case 0x0000:
+ ast->chip = AST2150;
+ DRM_INFO("AST 2150 detected\n");
+ break;
+ default:
+ ast->chip = AST2100;
+ DRM_INFO("AST 2100 detected\n");
+ break;
+ }
+ ast->vga2_clone = false;
+ } else {
+ ast->chip = AST2000;
+ DRM_INFO("AST 2000 detected\n");
+ }
+ }
+ return 0;
+}
+
+static int ast_get_dram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, data2;
+ uint32_t denum, num, div, ref_pll;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ data = ast_read32(ast, 0x10004);
+
+ if (data & 0x400)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
+
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
+ case 3:
+ div = 0x4;
+ break;
+ case 2:
+ case 1:
+ div = 0x2;
+ break;
+ default:
+ div = 0x1;
+ break;
+ }
+ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+ return 0;
+}
+
+uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t dclk, jreg;
+ uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficency = 500;
+
+ dram_bus_width = ast->dram_bus_width;
+ mclk = ast->mclk;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150 ||
+ ast->dram_bus_width == 16)
+ dram_efficency = 600;
+ else if (ast->chip == AST2300)
+ dram_efficency = 400;
+
+ dram_bandwidth = mclk * dram_bus_width * 2 / 8;
+ actual_dram_bandwidth = dram_bandwidth * dram_efficency / 1000;
+
+ if (ast->chip == AST1180)
+ dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+ else {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if ((jreg & 0x08) && (ast->chip == AST2000))
+ dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
+ else if ((jreg & 0x08) && (bpp == 8))
+ dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
+ else
+ dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+ }
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST1180) {
+ if (dclk > 200)
+ dclk = 200;
+ } else {
+ if (dclk > 165)
+ dclk = 165;
+ }
+
+ return dclk;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
+ if (ast_fb->obj)
+ drm_gem_object_unreference_unlocked(ast_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ return -EINVAL;
+}
+
+static const struct drm_framebuffer_funcs ast_fb_funcs = {
+ .destroy = ast_user_framebuffer_destroy,
+ .create_handle = ast_user_framebuffer_create_handle,
+};
+
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ ast_fb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+ast_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
+ if (!ast_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(ast_fb);
+ return ERR_PTR(ret);
+ }
+ return &ast_fb->base;
+}
+
+static const struct drm_mode_config_funcs ast_mode_funcs = {
+ .fb_create = ast_user_framebuffer_create,
+};
+
+static u32 ast_get_vram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ ast_open_key(ast);
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
+ switch (jreg & 3) {
+ case 0: return AST_VIDMEM_SIZE_8M;
+ case 1: return AST_VIDMEM_SIZE_16M;
+ case 2: return AST_VIDMEM_SIZE_32M;
+ case 3: return AST_VIDMEM_SIZE_64M;
+ }
+ return AST_VIDMEM_DEFAULT_SIZE;
+}
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct ast_private *ast;
+ int ret = 0;
+
+ ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
+ if (!ast)
+ return -ENOMEM;
+
+ dev->dev_private = ast;
+ ast->dev = dev;
+
+ ast->regs = pci_iomap(dev->pdev, 1, 0);
+ if (!ast->regs) {
+ ret = -EIO;
+ goto out_free;
+ }
+ ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+ if (!ast->ioregs) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ ast_detect_chip(dev);
+
+ if (ast->chip != AST1180) {
+ ast_get_dram_info(dev);
+ ast->vram_size = ast_get_vram_info(dev);
+ DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ goto out_free;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&ast_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST1180) {
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 2048;
+ } else {
+ dev->mode_config.max_width = 1600;
+ dev->mode_config.max_height = 1200;
+ }
+
+ ret = ast_mode_init(dev);
+ if (ret)
+ goto out_free;
+
+ ret = ast_fbdev_init(dev);
+ if (ret)
+ goto out_free;
+
+ return 0;
+out_free:
+ kfree(ast);
+ dev->dev_private = NULL;
+ return ret;
+}
+
+int ast_driver_unload(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_mode_fini(dev);
+ ast_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ ast_mm_fini(ast);
+ pci_iounmap(dev->pdev, ast->ioregs);
+ pci_iounmap(dev->pdev, ast->regs);
+ kfree(ast);
+ return 0;
+}
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct ast_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = ast_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ast_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int ast_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int ast_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void ast_bo_unref(struct ast_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+void ast_gem_free_object(struct drm_gem_object *obj)
+{
+ struct ast_bo *ast_bo = gem_to_ast_bo(obj);
+
+ if (!ast_bo)
+ return;
+ ast_bo_unref(&ast_bo);
+}
+
+
+static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+int
+ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct ast_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_ast_bo(obj);
+ *offset = ast_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
+
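/*
 * Illustrative arithmetic, not part of the patch, for ast_dumb_create()
 * above with made-up mode numbers: a 1024x768, 32 bpp dumb buffer gets
 *
 *	pitch = 1024 * ((32 + 7) / 8) = 1024 * 4 = 4096 bytes per line
 *	size  = 4096 * 768           = 3145728 bytes (3 MiB)
 *
 * and ast_gem_create() then rounds the size up to a PAGE_SIZE multiple
 * before backing it with a TTM buffer object.
 */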
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
new file mode 100644
index 000000000000..65f9d231af14
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -0,0 +1,1160 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "ast_drv.h"
+
+#include "ast_tables.h"
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y);
+
+static inline void ast_load_palette_index(struct ast_private *ast,
+ u8 index, u8 red, u8 green,
+ u8 blue)
+{
+ ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, red);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, green);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, blue);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+}
+
+static void ast_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < 256; i++)
+ ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
+ ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+}
+
+static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+ u32 hborder, vborder;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
+ color_index = VGAModeIndex - 1;
+ break;
+ case 16:
+ vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
+ color_index = HiCModeIndex;
+ break;
+ case 24:
+ case 32:
+ vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
+ color_index = TrueCModeIndex;
+ break;
+ default:
+ return false;
+ }
+
+ switch (crtc->mode.crtc_hdisplay) {
+ case 640:
+ vbios_mode->enh_table = &res_640x480[refresh_rate_index];
+ break;
+ case 800:
+ vbios_mode->enh_table = &res_800x600[refresh_rate_index];
+ break;
+ case 1024:
+ vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
+ break;
+ case 1280:
+ if (crtc->mode.crtc_vdisplay == 800)
+ vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
+ break;
+ case 1440:
+ vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
+ break;
+ case 1600:
+ vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+ break;
+ case 1680:
+ vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
+ break;
+ case 1920:
+ if (crtc->mode.crtc_vdisplay == 1080)
+ vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
+ break;
+ default:
+ return false;
+ }
+
+ refresh_rate = drm_mode_vrefresh(mode);
+ while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
+ vbios_mode->enh_table++;
+ if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
+ (vbios_mode->enh_table->refresh_rate == 0xff)) {
+ vbios_mode->enh_table--;
+ break;
+ }
+ }
+
+ hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
+ vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+
+ adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
+ adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
+ adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
+ adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp;
+ adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp +
+ vbios_mode->enh_table->hsync);
+
+ adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
+ adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
+ adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
+ adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp;
+ adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp +
+ vbios_mode->enh_table->vsync);
+
+ refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+ mode_id = vbios_mode->enh_table->mode_id;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ }
+
+ return true;
+
+
+}
+static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_stdtable *stdtable;
+ u32 i;
+ u8 jreg;
+
+ stdtable = vbios_mode->std_table;
+
+ jreg = stdtable->misc;
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+
+ /* Set SEQ */
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
+ for (i = 0; i < 4; i++) {
+ jreg = stdtable->seq[i];
+ if (!i)
+ jreg |= 0x20;
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
+ }
+
+ /* Set CRTC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+ for (i = 0; i < 25; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+
+ /* set AR */
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ for (i = 0; i < 20; i++) {
+ jreg = stdtable->ar[i];
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
+ }
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
+
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
+
+ /* Set GR */
+ for (i = 0; i < 9; i++)
+ ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
+}
+
+static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
+ u16 temp;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+
+ temp = (mode->crtc_htotal >> 3) - 5;
+ if (temp & 0x100)
+ jregAC |= 0x01; /* HT D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
+
+ temp = (mode->crtc_hdisplay >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x04; /* HDE D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
+
+ temp = (mode->crtc_hblank_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x10; /* HBS D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
+
+ temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
+ if (temp & 0x20)
+ jreg05 |= 0x80; /* HBE D[5] */
+ if (temp & 0x40)
+ jregAD |= 0x01; /* HBE D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
+
+ temp = (mode->crtc_hsync_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x40; /* HRS D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
+
+ temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f;
+ if (temp & 0x20)
+ jregAD |= 0x04; /* HRE D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+
+ /* vert timings */
+ temp = (mode->crtc_vtotal) - 2;
+ if (temp & 0x100)
+ jreg07 |= 0x01;
+ if (temp & 0x200)
+ jreg07 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x01;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
+
+ temp = (mode->crtc_vsync_start) - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x04;
+ if (temp & 0x200)
+ jreg07 |= 0x80;
+ if (temp & 0x400)
+ jregAE |= 0x08;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
+
+ temp = (mode->crtc_vsync_end - 1) & 0x3f;
+ if (temp & 0x10)
+ jregAE |= 0x20;
+ if (temp & 0x20)
+ jregAE |= 0x40;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
+
+ temp = mode->crtc_vdisplay - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x02;
+ if (temp & 0x200)
+ jreg07 |= 0x40;
+ if (temp & 0x400)
+ jregAE |= 0x02;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
+
+ temp = mode->crtc_vblank_start - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x08;
+ if (temp & 0x200)
+ jreg09 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
+
+ temp = mode->crtc_vblank_end - 1;
+ if (temp & 0x100)
+ jregAE |= 0x10;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
+}
+
+static void ast_set_offset_reg(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ u16 offset;
+
+ offset = crtc->fb->pitches[0] >> 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
+}
+
+static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_vbios_dclk_info *clk_info;
+
+ clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
+ (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4));
+}
+
+static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ jregA0 = 0x70;
+ jregA3 = 0x01;
+ jregA8 = 0x00;
+ break;
+ case 15:
+ case 16:
+ jregA0 = 0x70;
+ jregA3 = 0x04;
+ jregA8 = 0x02;
+ break;
+ case 32:
+ jregA0 = 0x70;
+ jregA3 = 0x08;
+ jregA8 = 0x02;
+ break;
+ }
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+
+ /* Set Threshold */
+ if (ast->chip == AST2300) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
+ } else if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
+ }
+}
+
+void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
+ jreg |= (vbios_mode->enh_table->flags & SyncNN);
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+}
+
+bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 addr;
+
+ addr = offset >> 2;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
+
+}
+
+static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ if (ast->chip == AST1180)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+ ast_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+ break;
+ }
+}
+
+static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/* ast is different - we will force move buffers out of VRAM */
+static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ struct ast_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ ast_fb = to_ast_framebuffer(fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ ast_bo_push_sysram(bo);
+ ast_bo_unreserve(bo);
+ }
+
+ ast_fb = to_ast_framebuffer(crtc->fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ ast_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&ast->fbdev->afb == ast_fb) {
+		/* if pushing the console fb in, kmap it so fbcon can draw into it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ ast_bo_unreserve(bo);
+
+ ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int ast_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_mode_info vbios_mode;
+ bool ret;
+ if (ast->chip == AST1180) {
+ DRM_ERROR("AST 1180 modesetting not supported\n");
+ return -EINVAL;
+ }
+
+ ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
+ if (ret == false)
+ return -EINVAL;
+ ast_open_key(ast);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+
+ ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_offset_reg(crtc);
+ ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+
+ ast_crtc_mode_set_base(crtc, x, y, old_fb);
+
+ return 0;
+}
+
+static void ast_crtc_disable(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_prepare(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_commit(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+}
+
+
+static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+ .dpms = ast_crtc_dpms,
+ .mode_fixup = ast_crtc_mode_fixup,
+ .mode_set = ast_crtc_mode_set,
+ .mode_set_base = ast_crtc_mode_set_base,
+	.disable = ast_crtc_disable,
+	.load_lut = ast_crtc_load_lut,
+	.prepare = ast_crtc_prepare,
+	.commit = ast_crtc_commit,
+};
+
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int end = (start + size > 256) ? 256 : start + size, i;
+
+ /* userspace palettes are always correct as is */
+ for (i = start; i < end; i++) {
+ ast_crtc->lut_r[i] = red[i] >> 8;
+ ast_crtc->lut_g[i] = green[i] >> 8;
+ ast_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ ast_crtc_load_lut(crtc);
+}
+
+
+static void ast_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs ast_crtc_funcs = {
+ .cursor_set = ast_cursor_set,
+ .cursor_move = ast_cursor_move,
+ .reset = ast_crtc_reset,
+ .set_config = drm_crtc_helper_set_config,
+ .gamma_set = ast_crtc_gamma_set,
+ .destroy = ast_crtc_destroy,
+};
+
+int ast_crtc_init(struct drm_device *dev)
+{
+ struct ast_crtc *crtc;
+ int i;
+
+ crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+ drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
+
+ for (i = 0; i < 256; i++) {
+ crtc->lut_r[i] = i;
+ crtc->lut_g[i] = i;
+ crtc->lut_b[i] = i;
+ }
+ return 0;
+}
+
+static void ast_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+
+static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+
+static const struct drm_encoder_funcs ast_enc_funcs = {
+ .destroy = ast_encoder_destroy,
+};
+
+static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool ast_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void ast_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void ast_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void ast_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+
+static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
+ .dpms = ast_encoder_dpms,
+ .mode_fixup = ast_mode_fixup,
+ .prepare = ast_encoder_prepare,
+ .commit = ast_encoder_commit,
+ .mode_set = ast_encoder_mode_set,
+};
+
+int ast_encoder_init(struct drm_device *dev)
+{
+ struct ast_encoder *ast_encoder;
+
+ ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
+ if (!ast_encoder)
+ return -ENOMEM;
+
+ drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
+
+ ast_encoder->base.possible_crtcs = 1;
+ return 0;
+}
+
+static int ast_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+		return ret;
+ } else
+ drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+ return 0;
+}
+
+static int ast_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void ast_connector_destroy(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ ast_i2c_destroy(ast_connector->i2c);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+ast_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
+ .mode_valid = ast_mode_valid,
+ .get_modes = ast_get_modes,
+ .best_encoder = ast_best_single_encoder,
+};
+
+static const struct drm_connector_funcs ast_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = ast_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = ast_connector_destroy,
+};
+
+int ast_connector_init(struct drm_device *dev)
+{
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
+ if (!ast_connector)
+ return -ENOMEM;
+
+ connector = &ast_connector->base;
+ drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &ast_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ast_connector->i2c = ast_i2c_create(dev);
+ if (!ast_connector->i2c)
+ DRM_ERROR("failed to add ddc bus for connector\n");
+
+ return 0;
+}
+
+/* allocate cursor cache and pin at start of VRAM */
+int ast_cursor_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ int size;
+ int ret;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+
+ size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+
+ ret = ast_gem_create(dev, size, true, &obj);
+ if (ret)
+ return ret;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ goto fail;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ ast_bo_unreserve(bo);
+ if (ret)
+ goto fail;
+
+ /* kmap the object */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
+ if (ret)
+ goto fail;
+
+ ast->cursor_cache = obj;
+ ast->cursor_cache_gpu_addr = gpu_addr;
+	DRM_INFO("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+ return 0;
+fail:
+ return ret;
+}
+
+void ast_cursor_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ ttm_bo_kunmap(&ast->cache_kmap);
+ drm_gem_object_unreference_unlocked(ast->cursor_cache);
+}
+
+int ast_mode_init(struct drm_device *dev)
+{
+ ast_cursor_init(dev);
+ ast_crtc_init(dev);
+ ast_encoder_init(dev);
+ ast_connector_init(dev);
+ return 0;
+}
+
+void ast_mode_fini(struct drm_device *dev)
+{
+ ast_cursor_fini(dev);
+}
+
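+/*
+ * DDC is bit-banged through CRTC index register 0xb7: bits 4/5 appear to
+ * read back SCL/SDA while bits 0/2 drive them; the set_* helpers below
+ * retry until the written value is latched.
+ */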
+static int get_clock(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+ return val & 1 ? 1 : 0;
+}
+
+static int get_data(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+ return val & 1 ? 1 : 0;
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+ struct ast_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "AST i2c bit bus");
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 20;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = set_data;
+ i2c->bit.setscl = set_clock;
+ i2c->bit.getsda = get_data;
+ i2c->bit.getscl = get_clock;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register bit i2c\n");
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+}
+
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+void ast_show_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg;
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+void ast_hide_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
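+/*
+ * Convert a 32bpp ARGB cursor image into the 2-byte-per-pixel hardware
+ * cursor format (only the top nibble of each channel is kept, i.e.
+ * roughly ARGB4444) and accumulate a checksum over the written data.
+ */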
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+{
+ union {
+ u32 ul;
+ u8 b[4];
+ } srcdata32[2], data32;
+ union {
+ u16 us;
+ u8 b[2];
+ } data16;
+ u32 csum = 0;
+ s32 alpha_dst_delta, last_alpha_dst_delta;
+ u8 *srcxor, *dstxor;
+ int i, j;
+ u32 per_pixel_copy, two_pixel_copy;
+
+ alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
+ last_alpha_dst_delta = alpha_dst_delta - (width << 1);
+
+ srcxor = src;
+ dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
+ per_pixel_copy = width & 1;
+ two_pixel_copy = width >> 1;
+
+ for (j = 0; j < height; j++) {
+ for (i = 0; i < two_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+			data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+			data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+ writel(data32.ul, dstxor);
+ csum += data32.ul;
+
+ dstxor += 4;
+ srcxor += 8;
+
+ }
+
+ for (i = 0; i < per_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ writew(data16.us, dstxor);
+ csum += (u32)data16.us;
+
+ dstxor += 2;
+ srcxor += 4;
+ }
+ dstxor += last_alpha_dst_delta;
+ }
+ return csum;
+}
+
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+ u32 csum;
+ int ret;
+ struct ttm_bo_kmap_obj uobj_map;
+ u8 *src, *dst;
+ bool src_isiomem, dst_isiomem;
+ if (!handle) {
+ ast_hide_cursor(crtc);
+ return 0;
+ }
+
+ if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+ return -ENOENT;
+ }
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ goto fail;
+
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+	if (ret) {
+		ast_bo_unreserve(bo);
+		goto fail;
+	}
+
+ src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+ dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+
+ if (src_isiomem == true)
+ DRM_ERROR("src cursor bo should be in main memory\n");
+ if (dst_isiomem == false)
+ DRM_ERROR("dst bo should be in VRAM\n");
+
+ dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+
+ /* do data transfer to cursor cache */
+ csum = copy_cursor_image(src, dst, width, height);
+
+ /* write checksum + signature */
+ ttm_bo_kunmap(&uobj_map);
+ ast_bo_unreserve(bo);
+ {
+ u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+ /* set pattern offset */
+ gpu_addr = ast->cursor_cache_gpu_addr;
+ gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+ gpu_addr >>= 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+ }
+ ast_crtc->cursor_width = width;
+ ast_crtc->cursor_height = height;
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+	ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
+
+ ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
+
+ ast_show_cursor(crtc);
+
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct ast_private *ast = crtc->dev->dev_private;
+ int x_offset, y_offset;
+ u8 *sig;
+
+ sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(x, sig + AST_HWC_SIGNATURE_X);
+ writel(y, sig + AST_HWC_SIGNATURE_Y);
+
+ x_offset = ast_crtc->offset_x;
+ y_offset = ast_crtc->offset_y;
+ if (x < 0) {
+ x_offset = (-x) + ast_crtc->offset_x;
+ x = 0;
+ }
+
+ if (y < 0) {
+ y_offset = (-y) + ast_crtc->offset_y;
+ y = 0;
+ }
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
+
+ /* dummy write to fire HWC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
new file mode 100644
index 000000000000..6edbee63b0cb
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -0,0 +1,1780 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include "drmP.h"
+#include "ast_drv.h"
+
+#include "ast_dram_tables.h"
+
+static void ast_init_dram_2300(struct drm_device *dev);
+
+static void
+ast_enable_vga(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_io_write8(ast, 0x43, 0x01);
+ ast_io_write8(ast, 0x42, 0x01);
+}
+
+#if 0 /* will use later */
+static bool
+ast_is_vga_enabled(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 ch;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ch = ast_io_read8(ast, 0x43);
+ if (ch) {
+ ast_open_key(ast);
+ ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+ return ch & 0x04;
+ }
+ }
+ return 0;
+}
+#endif
+
+static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
+
+static void
+ast_set_def_ext_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x8f; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
+
+ if (ast->chip == AST2300) {
+ if (dev->pdev->revision >= 0x20)
+ ext_reg_info = extreginfo_ast2300;
+ else
+ ext_reg_info = extreginfo_ast2300a0;
+ } else
+ ext_reg_info = extreginfo;
+
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+	/* ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ if (ast->chip == AST2300)
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
+}
+
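+/*
+ * mindwm/moutdwm access SoC registers indirectly: 0xf004 selects the
+ * 64KB window base and the access then goes through the aperture at
+ * 0x10000 (interpretation inferred from the register usage below).
+ */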
+static inline u32 mindwm(struct ast_private *ast, u32 r)
+{
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+}
+
+static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
+{
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+}
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
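+/*
+ * Start the memory controller's burst test (MCR 0x70), wait for it to
+ * complete and return the fail bit, or 0xffffffff on timeout.
+ */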
+static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+#if 0 /* unused in DDX driver - here for completeness */
+static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+#endif
+
+static int cbrtest_ast2150(struct ast_private *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_private *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
+
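+/*
+ * Sweep the DLL delay, run the calibration pattern test at each step,
+ * and program a point roughly 7/16 into the passing window.
+ */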
+static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
+ dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+ }
+ if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
+static void ast_init_dram_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2000) {
+ dram_reg_info = ast2000_dram_table_data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+ } else {/* AST2100/1100 */
+			if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ }
+
+ while (dram_reg_info->index != 0xffff) {
+ if (dram_reg_info->index == 0xff00) {/* delay fn */
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+ data = dram_reg_info->data;
+ if (ast->dram_type == AST_DRAM_1Gx16)
+ data = 0x00000d89;
+ else if (ast->dram_type == AST_DRAM_1Gx32)
+ data = 0x00000c8d;
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else
+ ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+ if (data == 0x5061) { /* 266Mhz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ switch (ast->chip) {
+ case AST2000:
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ break;
+ case AST1100:
+ case AST2100:
+ case AST2200:
+ case AST2150:
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+void ast_post_gpu(struct drm_device *dev)
+{
+ u32 reg;
+ struct ast_private *ast = dev->dev_private;
+
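+	/* enable PCI I/O and memory space decoding (PCI_COMMAND bits 0-1) */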
+ pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
+ reg |= 0x3;
+ pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+
+ ast_enable_vga(dev);
+ ast_open_key(ast);
+ ast_set_def_ext_reg(dev);
+
+ if (ast->chip == AST2300)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
+#if 0 /* unused in DDX, included for completeness */
+static int mmc_test_burst(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000) {
+ return 0;
+ }
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 1;
+}
+#endif
+
+static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+#if 0 /* Unused in DDX here for completeness */
+static int mmc_test_single(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000)
+ return 0;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return 1;
+}
+#endif
+
+static int mmc_test_single2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+static int cbr_test(struct ast_private *ast)
+{
+ u32 data;
+ int i;
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ if ((data = cbr_test(ast)) != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_private *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ if ((data = cbr_test2(ast)) != 0) {
+ data2 &= data;
+				if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+#if 0 /* unused in DDX - added for completeness */
+static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+ gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
+ gold_sadj[1] = gold_sadj[0] >> 8;
+ gold_sadj[0] = gold_sadj[0] & 0xff;
+ gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
+ gold_sadj[1] = gold_sadj[0];
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[1] >= dlli) {
+ dlli = (gold_sadj[1] - dlli) >> 1;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[1]) >> 1;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI */
+#endif
+
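+/*
+ * Per-lane DQS input delay tuning: sweep the DQSI delay, record each of
+ * the 16 lanes' passing windows, and derive the per-lane settings that
+ * are written to 0x1E6E0080/0x1E6E0084.
+ */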
+static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (passcnt != 16) {
+ goto FINETUNE_START;
+ }
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L */
+
+static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ gold_sadj[1] = 0xFF;
+ for (cnt = 0; cnt < 8; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ if (gold_sadj[0] < dllmin[cnt]) {
+ gold_sadj[0] = dllmin[cnt];
+ }
+ if (gold_sadj[1] > dllmax[cnt]) {
+ gold_sadj[1] = dllmax[cnt];
+ }
+ }
+ }
+ gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+ gold_sadj[1] = mindwm(ast, 0x1E6E0080);
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ data2 = gold_sadj[1] & 0x7;
+ gold_sadj[1] >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 3) {
+ data2 = (data2 + dlli) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 4) {
+ data2 = (data2 - dlli) & 0x7;
+ }
+ }
+ }
+ data |= data2 << 21;
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ gold_sadj[0] = 0x0;
+ gold_sadj[1] = 0xFF;
+ for (cnt = 8; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ if (gold_sadj[0] < dllmin[cnt]) {
+ gold_sadj[0] = dllmin[cnt];
+ }
+ if (gold_sadj[1] > dllmax[cnt]) {
+ gold_sadj[1] = dllmax[cnt];
+ }
+ }
+ }
+ gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+ gold_sadj[1] = mindwm(ast, 0x1E6E0084);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ data2 = gold_sadj[1] & 0x7;
+ gold_sadj[1] >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 3) {
+ data2 = (data2 + dlli) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 4) {
+ data2 = (data2 - dlli) & 0x7;
+ }
+ }
+ }
+ data |= data2 << 21;
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L2 */
+
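+/*
+ * Final DQSI DLL calibration: after the per-lane fine tuning, sweep the
+ * global DLL setting, centre it in the passing window for both channels
+ * and latch the result.
+ */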
+static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
+
+
+ finetuneDQI_L(ast, param);
+ finetuneDQI_L2(ast, param);
+
+CBR_START2:
+ dllmin[0] = dllmin[1] = 0xff;
+ dllmax[0] = dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli) {
+ dllmin[0] = dlli;
+ }
+ if (dllmax[0] < dlli) {
+ dllmax[0] = dlli;
+ }
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli) {
+ dllmin[1] = dlli;
+ }
+ if (dllmax[1] < dlli) {
+ dllmax[1] = dlli;
+ }
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
+
+ data = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
+ data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
+ moutdwm(ast, 0x1E6E0018, data2);
+ moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
+
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+} /* CBRDLL2 */
+
+static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+	/* Get trap info */
+ trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ break;
+ default:
+ case 396:
+ moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+	} /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+
+}
+
+static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2;
+
+ moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ moutdwm(ast, 0x1E6E0018, 0x00000100);
+ moutdwm(ast, 0x1E6E0024, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ moutdwm(ast, 0x1E6E0004, param->dram_config);
+ moutdwm(ast, 0x1E6E0008, 0x90040f);
+ moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ moutdwm(ast, 0x1E6E0080, 0x00000000);
+ moutdwm(ast, 0x1E6E0084, 0x00000000);
+ moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ moutdwm(ast, 0x1E6E0018, 0x4040A170);
+ moutdwm(ast, 0x1E6E0018, 0x20402370);
+ moutdwm(ast, 0x1E6E0038, 0x00000000);
+ moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ moutdwm(ast, 0x1E6E0044, 0x22222222);
+ moutdwm(ast, 0x1E6E0048, 0x22222222);
+ moutdwm(ast, 0x1E6E004C, 0x00000002);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+ moutdwm(ast, 0x1E6E0054, 0);
+ moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0074, 0x00000000);
+ moutdwm(ast, 0x1E6E0078, 0x00000000);
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
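+	/* nudge MADJ/SADJ until the MCLK2X phase readback is within range */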
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ data = mindwm(ast, 0x1E6E0018) | 0xC00;
+ moutdwm(ast, 0x1E6E0018, data);
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000005);
+ moutdwm(ast, 0x1E6E0028, 0x00000007);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x300;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Wait DQI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0080);
+ } while (!(data & 0x40000000));
+ /* Wait DQSI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0020);
+ } while (!(data & 0x00000800));
+ /* Calibrate the DQSI delay */
+ cbr_dll2(ast, param);
+
+ moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+}
+
+static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+	/* Get trap info */
+ trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ break;
+ default:
+ case 396:
+ moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
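+ /* Bits [3:2] of the DRAM config select the VGA memory aperture (8/16/32/64 MB),
+ * as read off the values ORed in below. */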
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
+static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2;
+
+ moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ moutdwm(ast, 0x1E6E0018, 0x00000100);
+ moutdwm(ast, 0x1E6E0024, 0x00000000);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ moutdwm(ast, 0x1E6E0004, param->dram_config);
+ moutdwm(ast, 0x1E6E0008, 0x90040f);
+ moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ moutdwm(ast, 0x1E6E0080, 0x00000000);
+ moutdwm(ast, 0x1E6E0084, 0x00000000);
+ moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ moutdwm(ast, 0x1E6E0018, 0x4040A130);
+ moutdwm(ast, 0x1E6E0018, 0x20402330);
+ moutdwm(ast, 0x1E6E0038, 0x00000000);
+ moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ moutdwm(ast, 0x1E6E0044, 0x88848466);
+ moutdwm(ast, 0x1E6E0048, 0x44440008);
+ moutdwm(ast, 0x1E6E004C, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+ moutdwm(ast, 0x1E6E0054, 0);
+ moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0074, 0x00000000);
+ moutdwm(ast, 0x1E6E0078, 0x00000000);
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
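+ /*
+ * Calibration retry loop (a reading of the code, not vendor-documented):
+ * while the DLL status in MCR1C[15:8] looks bad, step the master clock
+ * delay adjust (MCR64) by 4, recompute the slave delay in MCR68, re-latch
+ * the PLL and reset the DLL, then re-run the calibration. Give up once
+ * the adjustment would exceed madj_max.
+ */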
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ data = mindwm(ast, 0x1E6E0018) | 0xC00;
+ moutdwm(ast, 0x1E6E0018, data);
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000005);
+ moutdwm(ast, 0x1E6E0028, 0x00000007);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x500;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ moutdwm(ast, 0x1E6E0034, data | 0x3);
+ moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Wait DQI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0080);
+ } while (!(data & 0x40000000));
+ /* Wait DQSI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0020);
+ } while (!(data & 0x00000800));
+ /* Calibrate the DQSI delay */
+ cbr_dll2(ast, param);
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
+static void ast_init_dram_2300(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if ((reg & 0x80) == 0) {/* vga only */
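+ /*
+ * The writes below appear to follow the usual ASPEED unlock sequence:
+ * 0xf004/0xf000 open the P2A window onto the 0x1e6e0000 block, the key
+ * 0x1688a8a8 unlocks the SCU (window offset 0x12000) and 0xfc600309
+ * unlocks the DRAM controller (offset 0x10000); each register reads
+ * back 1 once unlocked.
+ */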
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x1);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ param.dram_type = AST_DDR3;
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ param.dram_chipid = ast->dram_type;
+ param.dram_freq = ast->mclk;
+ param.vram_size = ast->vram_size;
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = mindwm(ast, 0x1e6e2040);
+ moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
new file mode 100644
index 000000000000..95fa6aba26bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_TABLES_H
+#define AST_TABLES_H
+
+/* Std. Table Index Definition */
+#define TextModeIndex 0
+#define EGAModeIndex 1
+#define VGAModeIndex 2
+#define HiCModeIndex 3
+#define TrueCModeIndex 4
+
+#define Charx8Dot 0x00000001
+#define HalfDCLK 0x00000002
+#define DoubleScanMode 0x00000004
+#define LineCompareOff 0x00000008
+#define SyncPP 0x00000000
+#define SyncPN 0x00000040
+#define SyncNP 0x00000080
+#define SyncNN 0x000000C0
+#define HBorder 0x00000020
+#define VBorder 0x00000010
+#define WideScreenMode 0x00000100
+
+
+/* DCLK Index */
+#define VCLK25_175 0x00
+#define VCLK28_322 0x01
+#define VCLK31_5 0x02
+#define VCLK36 0x03
+#define VCLK40 0x04
+#define VCLK49_5 0x05
+#define VCLK50 0x06
+#define VCLK56_25 0x07
+#define VCLK65 0x08
+#define VCLK75 0x09
+#define VCLK78_75 0x0A
+#define VCLK94_5 0x0B
+#define VCLK108 0x0C
+#define VCLK135 0x0D
+#define VCLK157_5 0x0E
+#define VCLK162 0x0F
+/* #define VCLK193_25 0x10 */
+#define VCLK154 0x10
+#define VCLK83_5 0x11
+#define VCLK106_5 0x12
+#define VCLK146_25 0x13
+#define VCLK148_5 0x14
+
+static struct ast_vbios_dclk_info dclk_table[] = {
+ {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
+ {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
+ {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
+ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+};
+
+static struct ast_vbios_stdtable vbios_stdtable[] = {
+ /* MD_2_3_400 */
+ {
+ 0x67,
+ {0x00,0x03,0x00,0x02},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f,
+ 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00,
+ 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x0c,0x00,0x0f,0x08},
+ {0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00,
+ 0xff}
+ },
+ /* Mode12/ExtEGATable */
+ {
+ 0xe3,
+ {0x01,0x0f,0x00,0x06},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x01,0x00,0x0f,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtVGATable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtHiCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtTrueCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+};
+
+static struct ast_vbios_enhtable res_640x480[] = {
+ { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
+ { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
+ { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
+ (SyncNN | Charx8Dot) , 75, 3, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
+ (SyncNN | Charx8Dot) , 85, 4, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
+ (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
+};
+
+static struct ast_vbios_enhtable res_800x600[] = {
+ {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
+ (SyncPP | Charx8Dot), 56, 1, 0x30 },
+ {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 2, 0x30 },
+ {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
+ (SyncPP | Charx8Dot), 72, 3, 0x30 },
+ {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 4, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 5, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
+};
+
+
+static struct ast_vbios_enhtable res_1024x768[] = {
+ {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
+ (SyncNN | Charx8Dot), 60, 1, 0x31 },
+ {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
+ (SyncNN | Charx8Dot), 70, 2, 0x31 },
+ {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 3, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 4, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
+};
+
+static struct ast_vbios_enhtable res_1280x1024[] = {
+ {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x32 },
+ {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 2, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 85, 3, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
+};
+
+static struct ast_vbios_enhtable res_1600x1200[] = {
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x33 },
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
+};
+
+static struct ast_vbios_enhtable res_1920x1200[] = {
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
+ (SyncNP | Charx8Dot), 60, 1, 0x34 },
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* end */
+ (SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
+};
+
+/* 16:10 */
+static struct ast_vbios_enhtable res_1280x800[] = {
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
+
+};
+
+static struct ast_vbios_enhtable res_1440x900[] = {
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
+};
+
+static struct ast_vbios_enhtable res_1680x1050[] = {
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* end */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
+};
+
+/* HDTV */
+static struct ast_vbios_enhtable res_1920x1080[] = {
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* end */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
+};
+#endif
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
new file mode 100644
index 000000000000..6cf2adea66bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct ast_private *
+ast_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct ast_private, ttm.bdev);
+}
+
+static int
+ast_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+ast_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int ast_ttm_global_init(struct ast_private *ast)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &ast->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &ast_ttm_mem_global_init;
+ global_ref->release = &ast_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ ast->ttm.bo_global_ref.mem_glob =
+ ast->ttm.mem_global_ref.object;
+ global_ref = &ast->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+ast_ttm_global_release(struct ast_private *ast)
+{
+ if (ast->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ ast->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct ast_bo *bo;
+
+ bo = container_of(tbo, struct ast_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &ast_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct ast_bo *astbo = ast_bo(bo);
+
+ if (!ast_ttm_bo_is_ast_bo(bo))
+ return;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
+ *pl = astbo->placement;
+}
+
+static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ast_private *ast = ast_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int ast_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void ast_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func ast_tt_backend_func = {
+ .destroy = &ast_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &ast_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver ast_bo_driver = {
+ .ttm_tt_create = ast_ttm_tt_create,
+ .ttm_tt_populate = ast_ttm_tt_populate,
+ .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
+ .init_mem_type = ast_bo_init_mem_type,
+ .evict_flags = ast_bo_evict_flags,
+ .move = ast_bo_move,
+ .verify_access = ast_bo_verify_access,
+ .io_mem_reserve = &ast_ttm_io_mem_reserve,
+ .io_mem_free = &ast_ttm_io_mem_free,
+};
+
+int ast_mm_init(struct ast_private *ast)
+{
+ int ret;
+ struct drm_device *dev = ast->dev;
+ struct ttm_bo_device *bdev = &ast->ttm.bdev;
+
+ ret = ast_ttm_global_init(ast);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&ast->ttm.bdev,
+ ast->ttm.bo_global_ref.ref.object,
+ &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ ast->vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void ast_mm_fini(struct ast_private *ast)
+{
+ struct drm_device *dev = ast->dev;
+ ttm_bo_device_release(&ast->ttm.bdev);
+
+ ast_ttm_global_release(ast);
+
+ if (ast->fb_mtrr >= 0) {
+ drm_mtrr_del(ast->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ ast->fb_mtrr = -1;
+ }
+}
+
+void ast_ttm_placement(struct ast_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void ast_bo_unreserve(struct ast_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_bo *astbo;
+ size_t acc_size;
+ int ret;
+
+ astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
+ if (!astbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &astbo->gem, size);
+ if (ret) {
+ kfree(astbo);
+ return ret;
+ }
+
+ astbo->gem.driver_private = NULL;
+ astbo->bo.bdev = &ast->ttm.bdev;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
+ sizeof(struct ast_bo));
+
+ ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
+ ttm_bo_type_device, &astbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, ast_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pastbo = astbo;
+ return 0;
+}
+
+static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ /* already pinned: just bump the count and report the offset */
+ return 0;
+ }
+
+ ast_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ return 0;
+}
+
+int ast_bo_unpin(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ast_bo_push_sysram(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
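+/*
+ * Offsets below DRM_FILE_PAGE_OFFSET are legacy DRM mappings and go to
+ * drm_mmap(); anything above is backed by a TTM object and is handed to
+ * ttm_bo_mmap().
+ */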
+int ast_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct ast_private *ast;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ ast = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
new file mode 100644
index 000000000000..fc154dd75296
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -0,0 +1,12 @@
+config DRM_CIRRUS_QEMU
+ tristate "Cirrus driver for QEMU emulated device"
+ depends on DRM && PCI && EXPERIMENTAL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ This is a KMS driver for the Cirrus device emulated by QEMU.
+ It is *NOT* intended for real Cirrus hardware. It requires
+ the modesetting userspace X.org driver.
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
new file mode 100644
index 000000000000..69ffe7006d55
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+cirrus-y := cirrus_main.o cirrus_mode.o \
+ cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
+
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
new file mode 100644
index 000000000000..7053140c6596
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2012 Red Hat <mjg@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "cirrus_drv.h"
+
+int cirrus_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, cirrus_modeset, int, 0400);
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+
+static struct drm_driver driver;
+
+/* only bind to the cirrus chip in qemu */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
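+ /* match only subsystem 0x1af4:0x1100 (the Red Hat/QEMU ID), not real 5446 cards */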
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+ 0, 0 },
+ {0,}
+};
+
+
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+ kfree(ap);
+}
+
+static int __devinit
+cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ cirrus_kick_out_firmware_fb(pdev);
+
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations cirrus_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = cirrus_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+};
+static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+ .load = cirrus_driver_load,
+ .unload = cirrus_driver_unload,
+ .fops = &cirrus_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+ .gem_init_object = cirrus_gem_init_object,
+ .gem_free_object = cirrus_gem_free_object,
+ .dumb_create = cirrus_dumb_create,
+ .dumb_map_offset = cirrus_dumb_mmap_offset,
+ .dumb_destroy = cirrus_dumb_destroy,
+};
+
+static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && cirrus_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (cirrus_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+ drm_pci_exit(&driver, &cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
new file mode 100644
index 000000000000..64ea597cb6d3
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#ifndef __CIRRUS_DRV_H__
+#define __CIRRUS_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu Cirrus emulation"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define CIRRUSFB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(SEQ_INDEX, reg); \
+ WREG8(SEQ_DATA, v); \
+ } while (0) \
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(CRT_INDEX, reg); \
+ WREG8(CRT_DATA, v); \
+ } while (0) \
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+ } while (0) \
+
+/*
+ * Cirrus has a "hidden" DAC register that can be accessed by writing to
+ * the pixel mask register to reset the state, then reading from the register
+ * four times. The next write will then pass to the DAC
+ */
+#define VGA_DAC_MASK 0x6
+
+#define WREG_HDR(v) \
+ do { \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ WREG8(VGA_DAC_MASK, v); \
+ } while (0) \
+
+
+#define CIRRUS_MAX_FB_HEIGHT 4096
+#define CIRRUS_MAX_FB_WIDTH 4096
+
+#define CIRRUS_DPMS_CLEARED (-1)
+
+#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
+#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
+#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
+
+struct cirrus_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct cirrus_fbdev;
+struct cirrus_mode_info {
+ bool mode_config_initialized;
+ struct cirrus_crtc *crtc;
+ /* pointer to fbdev info structure */
+ struct cirrus_fbdev *gfbdev;
+};
+
+struct cirrus_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+struct cirrus_connector {
+ struct drm_connector base;
+};
+
+struct cirrus_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct cirrus_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+};
+
+struct cirrus_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ struct cirrus_mc mc;
+ struct cirrus_mode_info mode_info;
+
+ int num_crtc;
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+ bool mm_inited;
+};
+
+
+struct cirrus_fbdev {
+ struct drm_fb_helper helper;
+ struct cirrus_framebuffer gfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+};
+
+struct cirrus_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
+
+static inline struct cirrus_bo *
+cirrus_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct cirrus_bo, bo);
+}
+
+
+#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+ /* cirrus_mode.c */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+
+ /* cirrus_main.c */
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags);
+void cirrus_device_fini(struct cirrus_device *cdev);
+int cirrus_gem_init_object(struct drm_gem_object *obj);
+void cirrus_gem_free_object(struct drm_gem_object *obj);
+int cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int cirrus_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+ /* cirrus_display.c */
+int cirrus_modeset_init(struct cirrus_device *cdev);
+void cirrus_modeset_fini(struct cirrus_device *cdev);
+
+ /* cirrus_fbdev.c */
+int cirrus_fbdev_init(struct cirrus_device *cdev);
+void cirrus_fbdev_fini(struct cirrus_device *cdev);
+
+
+
+ /* cirrus_irq.c */
+void cirrus_driver_irq_preinstall(struct drm_device *dev);
+int cirrus_driver_irq_postinstall(struct drm_device *dev);
+void cirrus_driver_irq_uninstall(struct drm_device *dev);
+irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+
+ /* cirrus_kms.c */
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
+int cirrus_driver_unload(struct drm_device *dev);
+extern struct drm_ioctl_desc cirrus_ioctls[];
+extern int cirrus_max_ioctl;
+
+int cirrus_mm_init(struct cirrus_device *cirrus);
+void cirrus_mm_fini(struct cirrus_device *cirrus);
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo);
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait);
+void cirrus_bo_unreserve(struct cirrus_bo *bo);
+int cirrus_bo_push_sysram(struct cirrus_bo *bo);
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
new file mode 100644
index 000000000000..9a276a536992
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "cirrus_drv.h"
+
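+/*
+ * The fbdev console draws into a vmalloc'ed shadow buffer (gfbdev->sysram);
+ * cirrus_dirty_update() copies the touched rectangle from that shadow into
+ * the VRAM object with memcpy_toio().
+ */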
+static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct cirrus_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = afbdev->gfb.obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ cirrus_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_bo_unreserve(bo);
+}
+
+static void cirrus_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void cirrus_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void cirrus_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+
+static struct fb_ops cirrusfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cirrus_fillrect,
+ .fb_copyarea = cirrus_copyarea,
+ .fb_imageblit = cirrus_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (bpp > 24)
+ return -EINVAL;
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = cirrus_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = gfbdev->helper.dev;
+ struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct cirrus_bo *bo = NULL;
+ int size, ret;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_cirrus_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = gfbdev;
+
+ ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ gfbdev->sysram = sysram;
+ gfbdev->size = size;
+
+ fb = &gfbdev->gfb.base;
+ if (!fb) {
+ DRM_INFO("fb is NULL\n");
+ return -EINVAL;
+ }
+
+ /* setup helper */
+ gfbdev->helper.fb = fb;
+ gfbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "cirrusdrmfb");
+
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &cirrusfb_ops;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+ info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = cdev->mc.vram_size;
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+ DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
+ DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
+ DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
+
+ return 0;
+out_iounmap:
+ return ret;
+}
+
+static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size
+ *sizes)
+{
+ struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = cirrusfb_create(gfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int cirrus_fbdev_destroy(struct drm_device *dev,
+ struct cirrus_fbdev *gfbdev)
+{
+ struct fb_info *info;
+ struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+
+ if (gfbdev->helper.fbdev) {
+ info = gfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (gfb->obj) {
+ drm_gem_object_unreference_unlocked(gfb->obj);
+ gfb->obj = NULL;
+ }
+
+ vfree(gfbdev->sysram);
+ drm_fb_helper_fini(&gfbdev->helper);
+ drm_framebuffer_cleanup(&gfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+ .gamma_set = cirrus_crtc_fb_gamma_set,
+ .gamma_get = cirrus_crtc_fb_gamma_get,
+ .fb_probe = cirrus_fb_find_or_create_single,
+};
+
+int cirrus_fbdev_init(struct cirrus_device *cdev)
+{
+ struct cirrus_fbdev *gfbdev;
+ int ret;
+ int bpp_sel = 24;
+
+ /*bpp_sel = 8;*/
+ gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
+ if (!gfbdev)
+ return -ENOMEM;
+
+ cdev->mode_info.gfbdev = gfbdev;
+ gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
+ cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+ if (ret) {
+ kfree(gfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+ drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+
+ return 0;
+}
+
+void cirrus_fbdev_fini(struct cirrus_device *cdev)
+{
+ if (!cdev->mode_info.gfbdev)
+ return;
+
+ cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
+ kfree(cdev->mode_info.gfbdev);
+ cdev->mode_info.gfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
new file mode 100644
index 000000000000..e3c122578417
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "cirrus_drv.h"
+
+
+static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
+ if (cirrus_fb->obj)
+ drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
+ .destroy = cirrus_user_framebuffer_destroy,
+ .create_handle = cirrus_user_framebuffer_create_handle,
+};
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+cirrus_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ int ret;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /* cirrus can't handle > 24bpp framebuffers at all */
+ if (bpp > 24)
+ return ERR_PTR(-EINVAL);
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
+ if (!cirrus_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(cirrus_fb);
+ return ERR_PTR(ret);
+ }
+ return &cirrus_fb->base;
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_funcs = {
+ .fb_create = cirrus_user_framebuffer_create,
+};
+
+/* Unmap the framebuffer from the core and release the memory */
+static void cirrus_vram_fini(struct cirrus_device *cdev)
+{
+ iounmap(cdev->rmmio);
+ cdev->rmmio = NULL;
+ if (cdev->mc.vram_base)
+ release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int cirrus_vram_init(struct cirrus_device *cdev)
+{
+ /* BAR 0 is VRAM */
+ cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
+ /* We have 4MB of VRAM */
+ cdev->mc.vram_size = 4 * 1024 * 1024;
+
+ if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
+ "cirrusdrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Our emulated hardware has two sets of memory. One is video RAM and can
+ * simply be used as a linear framebuffer - the other provides mmio access
+ * to the display registers. The latter can also be accessed via IO port
+ * access, but we map the range and use mmio to program them instead
+ */
+
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev, uint32_t flags)
+{
+ int ret;
+
+ cdev->dev = ddev;
+ cdev->flags = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ cdev->num_crtc = 1;
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
+ cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
+
+ if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
+ "cirrusdrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
+
+ if (cdev->rmmio == NULL)
+ return -ENOMEM;
+
+ ret = cirrus_vram_init(cdev);
+ if (ret) {
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_device_fini(struct cirrus_device *cdev)
+{
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ cirrus_vram_fini(cdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct cirrus_device *cdev;
+ int r;
+
+ cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
+ if (cdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)cdev;
+
+ r = cirrus_device_init(cdev, dev, dev->pdev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+
+ r = cirrus_mm_init(cdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+
+ r = cirrus_modeset_init(cdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+
+ dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+out:
+ if (r)
+ cirrus_driver_unload(dev);
+ return r;
+}
+
+int cirrus_driver_unload(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+
+ if (cdev == NULL)
+ return 0;
+ cirrus_modeset_fini(cdev);
+ cirrus_mm_fini(cdev);
+ cirrus_device_fini(cdev);
+ kfree(cdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct cirrus_bo *cirrusbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &cirrusbo->gem;
+ return 0;
+}
+
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = cirrus_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int cirrus_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int cirrus_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void cirrus_bo_unref(struct cirrus_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void cirrus_gem_free_object(struct drm_gem_object *obj)
+{
+ struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
+
+ if (!cirrus_bo)
+ return;
+ cirrus_bo_unref(&cirrus_bo);
+}
+
+
+static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct cirrus_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_cirrus_bo(obj);
+ *offset = cirrus_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
new file mode 100644
index 000000000000..100f6308c509
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -0,0 +1,629 @@
+
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include <video/cirrus.h>
+
+#include "cirrus_drv.h"
+
+#define CIRRUS_LUT_SIZE 256
+
+#define PALETTE_INDEX 0x8
+#define PALETTE_DATA 0x9
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(PALETTE_INDEX, i);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
+ }
+}
+
+/*
+ * The DRM core requires DPMS functions. On this emulated hardware they
+ * amount to blanking the screen and gating the syncs via SR01 and GR0E.
+ */
+
+static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ u8 sr01, gr0e;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ sr01 = 0x00;
+ gr0e = 0x00;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ sr01 = 0x20;
+ gr0e = 0x02;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ sr01 = 0x20;
+ gr0e = 0x04;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ sr01 = 0x20;
+ gr0e = 0x06;
+ break;
+ default:
+ return;
+ }
+
+ WREG8(SEQ_INDEX, 0x1);
+ sr01 |= RREG8(SEQ_DATA) & ~0x20;
+ WREG_SEQ(0x1, sr01);
+
+ WREG8(GFX_INDEX, 0xe);
+ gr0e |= RREG8(GFX_DATA) & ~0x06;
+ WREG_GFX(0xe, gr0e);
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ u32 addr;
+ u8 tmp;
+
+ addr = offset >> 2;
+ WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+
+ WREG8(CRT_INDEX, 0x1b);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0xf2;
+ tmp |= (addr >> 16) & 0x01;
+ tmp |= (addr >> 15) & 0x0c;
+ WREG_CRT(0x1b, tmp);
+ WREG8(CRT_INDEX, 0x1d);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0x7f;
+ tmp |= (addr >> 12) & 0x80;
+ WREG_CRT(0x1d, tmp);
+}
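
As a rough illustration of the bitfield split above, the following standalone sketch recomputes the register values cirrus_set_start_address() programs for a given byte offset; the offset used is an arbitrary example, not a value taken from the driver.

/* Standalone sketch: mirror cirrus_set_start_address()'s register split. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t offset = 0x12345;	/* hypothetical scanout byte offset */
	uint32_t addr = offset >> 2;	/* the driver programs a dword address */

	printf("CR0C (start high) = 0x%02x\n", (addr >> 8) & 0xff);
	printf("CR0D (start low)  = 0x%02x\n", addr & 0xff);
	printf("CR1B extra bits   = 0x%02x\n",
	       ((addr >> 16) & 0x01) | ((addr >> 15) & 0x0c));
	printf("CR1D extra bit    = 0x%02x\n", (addr >> 12) & 0x80);
	return 0;
}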
+
+/* cirrus is different - we will force move buffers out of VRAM */
+static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ struct cirrus_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ cirrus_fb = to_cirrus_framebuffer(fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ cirrus_bo_push_sysram(bo);
+ cirrus_bo_unreserve(bo);
+ }
+
+ cirrus_fb = to_cirrus_framebuffer(crtc->fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ cirrus_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+ /* if pushing the fbcon framebuffer, kmap it so the console can keep drawing */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ cirrus_bo_unreserve(bo);
+
+ cirrus_set_start_address(crtc, (u32)gpu_addr);
+ return 0;
+}
+
+static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int hsyncstart, hsyncend, htotal, hdispend;
+ int vtotal, vdispend;
+ int tmp;
+ int sr07 = 0, hdr = 0;
+
+ htotal = mode->htotal / 8;
+ hsyncend = mode->hsync_end / 8;
+ hsyncstart = mode->hsync_start / 8;
+ hdispend = mode->hdisplay / 8;
+
+ vtotal = mode->vtotal;
+ vdispend = mode->vdisplay;
+
+ vdispend -= 1;
+ vtotal -= 2;
+
+ htotal -= 5;
+ hdispend -= 1;
+ hsyncstart += 1;
+ hsyncend += 1;
+
+ WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
+ WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
+ WREG_CRT(VGA_CRTC_H_DISP, hdispend);
+ WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
+ WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
+ WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
+ WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+ tmp = 0x40;
+ if ((vdispend + 1) & 512)
+ tmp |= 0x20;
+ WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
+
+ /*
+ * Overflow bits for values that don't fit in the standard registers
+ */
+ tmp = 16;
+ if (vtotal & 256)
+ tmp |= 1;
+ if (vdispend & 256)
+ tmp |= 2;
+ if ((vdispend + 1) & 256)
+ tmp |= 8;
+ if (vtotal & 512)
+ tmp |= 32;
+ if (vdispend & 512)
+ tmp |= 64;
+ WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
+
+ tmp = 0;
+
+ /* More overflow bits */
+
+ if ((htotal + 5) & 64)
+ tmp |= 16;
+ if ((htotal + 5) & 128)
+ tmp |= 32;
+ if (vtotal & 256)
+ tmp |= 64;
+ if (vtotal & 512)
+ tmp |= 128;
+
+ WREG_CRT(CL_CRT1A, tmp);
+
+ /* Disable Hercules/CGA compatibility */
+ WREG_CRT(VGA_CRTC_MODE, 0x03);
+
+ WREG8(SEQ_INDEX, 0x7);
+ sr07 = RREG8(SEQ_DATA);
+ sr07 &= 0xe0;
+ hdr = 0;
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ sr07 |= 0x11;
+ break;
+ case 16:
+ sr07 |= 0xc1;
+ hdr = 0xc0;
+ break;
+ case 24:
+ sr07 |= 0x15;
+ hdr = 0xc5;
+ break;
+ case 32:
+ sr07 |= 0x19;
+ hdr = 0xc5;
+ break;
+ default:
+ return -1;
+ }
+
+ WREG_SEQ(0x7, sr07);
+
+ /* Program the pitch */
+ tmp = crtc->fb->pitches[0] / 8;
+ WREG_CRT(VGA_CRTC_OFFSET, tmp);
+
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ tmp = 0x22;
+ tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
+ tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
+ WREG_CRT(0x1b, tmp);
+
+ /* Enable high-colour modes */
+ WREG_GFX(VGA_GFX_MODE, 0x40);
+
+ /* And set graphics mode */
+ WREG_GFX(VGA_GFX_MISC, 0x01);
+
+ WREG_HDR(hdr);
+ cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ return 0;
+}
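
To make the fixed adjustments above concrete, here is a small standalone sketch that derives the character-clock values the way cirrus_crtc_mode_set() does; the 1024x768@60 timings are assumed typical DMT numbers, used purely as example input.

/* Standalone sketch: derive CRTC values as cirrus_crtc_mode_set() does. */
#include <stdio.h>

int main(void)
{
	/* assumed 1024x768@60 DMT timings (example input only) */
	int hdisplay = 1024, hsync_start = 1048, hsync_end = 1184, htotal = 1344;
	int vdisplay = 768, vtotal = 806;

	/* horizontal values are programmed in character clocks (pixels / 8) */
	int ht  = htotal / 8 - 5;
	int hd  = hdisplay / 8 - 1;
	int hss = hsync_start / 8 + 1;
	int hse = hsync_end / 8 + 1;

	/* vertical values are in scanlines, with the same fixed adjustments */
	int vt = vtotal - 2;
	int vd = vdisplay - 1;

	printf("H total/disp/sync: %d %d %d-%d\n", ht, hd, hss, hse);
	printf("V total/disp: %d %d (low bytes 0x%02x 0x%02x, high bits via overflow)\n",
	       vt, vd, vt & 0xff, vd & 0xff);
	return 0;
}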
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * blank the outputs via DPMS while programming, to avoid showing
+ * intermediate stages, but that's not relevant to us
+ */
+static void cirrus_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ int i;
+
+ if (size != CIRRUS_LUT_SIZE)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = red[i];
+ cirrus_crtc->lut_g[i] = green[i];
+ cirrus_crtc->lut_b[i] = blue[i];
+ }
+ cirrus_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void cirrus_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(cirrus_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs cirrus_crtc_funcs = {
+ .gamma_set = cirrus_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = cirrus_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
+ .dpms = cirrus_crtc_dpms,
+ .mode_fixup = cirrus_crtc_mode_fixup,
+ .mode_set = cirrus_crtc_mode_set,
+ .mode_set_base = cirrus_crtc_mode_set_base,
+ .prepare = cirrus_crtc_prepare,
+ .commit = cirrus_crtc_commit,
+ .load_lut = cirrus_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void cirrus_crtc_init(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+ struct cirrus_crtc *cirrus_crtc;
+ int i;
+
+ cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
+ (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (cirrus_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
+ cdev->mode_info.crtc = cirrus_crtc;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = i;
+ cirrus_crtc->lut_g[i] = i;
+ cirrus_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ cirrus_crtc->lut_r[regno] = red;
+ cirrus_crtc->lut_g[regno] = green;
+ cirrus_crtc->lut_b[regno] = blue;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ *red = cirrus_crtc->lut_r[regno];
+ *green = cirrus_crtc->lut_g[regno];
+ *blue = cirrus_crtc->lut_b[regno];
+}
+
+
+static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+ return;
+}
+
+static void cirrus_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void cirrus_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void cirrus_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(cirrus_encoder);
+}
+
+static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
+ .dpms = cirrus_encoder_dpms,
+ .mode_fixup = cirrus_encoder_mode_fixup,
+ .mode_set = cirrus_encoder_mode_set,
+ .prepare = cirrus_encoder_prepare,
+ .commit = cirrus_encoder_commit,
+};
+
+static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
+ .destroy = cirrus_encoder_destroy,
+};
+
+static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct cirrus_encoder *cirrus_encoder;
+
+ cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
+ if (!cirrus_encoder)
+ return NULL;
+
+ encoder = &cirrus_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+int cirrus_vga_get_modes(struct drm_connector *connector)
+{
+ /* Just add a static list of modes */
+ drm_add_modes_noedid(connector, 640, 480);
+ drm_add_modes_noedid(connector, 800, 600);
+ drm_add_modes_noedid(connector, 1024, 768);
+ drm_add_modes_noedid(connector, 1280, 1024);
+
+ return 4;
+}
+
+static int cirrus_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Any mode we've added is valid */
+ return MODE_OK;
+}
+
+struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* return the encoder bound to the first encoder id, if any */
+ if (enc_id) {
+ obj =
+ drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status cirrus_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void cirrus_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+ .get_modes = cirrus_vga_get_modes,
+ .mode_valid = cirrus_vga_mode_valid,
+ .best_encoder = cirrus_connector_best_encoder,
+};
+
+struct drm_connector_funcs cirrus_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cirrus_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cirrus_connector_destroy,
+};
+
+static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct cirrus_connector *cirrus_connector;
+
+ cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
+ if (!cirrus_connector)
+ return NULL;
+
+ connector = &cirrus_connector->base;
+
+ drm_connector_init(dev, connector,
+ &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+
+ return connector;
+}
+
+
+int cirrus_modeset_init(struct cirrus_device *cdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ drm_mode_config_init(cdev->dev);
+ cdev->mode_info.mode_config_initialized = true;
+
+ cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
+ cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
+
+ cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
+ cdev->dev->mode_config.preferred_depth = 24;
+ /* don't prefer a shadow on virt GPU */
+ cdev->dev->mode_config.prefer_shadow = 0;
+
+ cirrus_crtc_init(cdev->dev);
+
+ encoder = cirrus_encoder_init(cdev->dev);
+ if (!encoder) {
+ DRM_ERROR("cirrus_encoder_init failed\n");
+ return -1;
+ }
+
+ connector = cirrus_vga_init(cdev->dev);
+ if (!connector) {
+ DRM_ERROR("cirrus_vga_init failed\n");
+ return -1;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = cirrus_fbdev_init(cdev);
+ if (ret) {
+ DRM_ERROR("cirrus_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_modeset_fini(struct cirrus_device *cdev)
+{
+ cirrus_fbdev_fini(cdev);
+
+ if (cdev->mode_info.mode_config_initialized) {
+ drm_mode_config_cleanup(cdev->dev);
+ cdev->mode_info.mode_config_initialized = false;
+ }
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
new file mode 100644
index 000000000000..50e170f879de
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "cirrus_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct cirrus_device *
+cirrus_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct cirrus_device, ttm.bdev);
+}
+
+static int
+cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &cirrus->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &cirrus_ttm_mem_global_init;
+ global_ref->release = &cirrus_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ cirrus->ttm.bo_global_ref.mem_glob =
+ cirrus->ttm.mem_global_ref.object;
+ global_ref = &cirrus->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+cirrus_ttm_global_release(struct cirrus_device *cirrus)
+{
+ if (cirrus->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ cirrus->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct cirrus_bo *bo;
+
+ bo = container_of(tbo, struct cirrus_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &cirrus_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+ if (!cirrus_ttm_bo_is_cirrus_bo(bo))
+ return;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
+ *pl = cirrusbo->placement;
+}
+
+static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct cirrus_device *cirrus = cirrus_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
+
+static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int cirrus_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func cirrus_tt_backend_func = {
+ .destroy = &cirrus_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &cirrus_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver cirrus_bo_driver = {
+ .ttm_tt_create = cirrus_ttm_tt_create,
+ .ttm_tt_populate = cirrus_ttm_tt_populate,
+ .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
+ .init_mem_type = cirrus_bo_init_mem_type,
+ .evict_flags = cirrus_bo_evict_flags,
+ .move = cirrus_bo_move,
+ .verify_access = cirrus_bo_verify_access,
+ .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
+ .io_mem_free = &cirrus_ttm_io_mem_free,
+};
+
+int cirrus_mm_init(struct cirrus_device *cirrus)
+{
+ int ret;
+ struct drm_device *dev = cirrus->dev;
+ struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
+
+ ret = cirrus_ttm_global_init(cirrus);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&cirrus->ttm.bdev,
+ cirrus->ttm.bo_global_ref.ref.object,
+ &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ cirrus->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ cirrus->mm_inited = true;
+ return 0;
+}
+
+void cirrus_mm_fini(struct cirrus_device *cirrus)
+{
+ struct drm_device *dev = cirrus->dev;
+
+ if (!cirrus->mm_inited)
+ return;
+
+ ttm_bo_device_release(&cirrus->ttm.bdev);
+
+ cirrus_ttm_global_release(cirrus);
+
+ if (cirrus->fb_mtrr >= 0) {
+ drm_mtrr_del(cirrus->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ cirrus->fb_mtrr = -1;
+ }
+}
+
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void cirrus_bo_unreserve(struct cirrus_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+ struct cirrus_bo *cirrusbo;
+ size_t acc_size;
+ int ret;
+
+ cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
+ if (!cirrusbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
+ if (ret) {
+ kfree(cirrusbo);
+ return ret;
+ }
+
+ cirrusbo->gem.driver_private = NULL;
+ cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
+ sizeof(struct cirrus_bo));
+
+ ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
+ ttm_bo_type_device, &cirrusbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, cirrus_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pcirrusbo = cirrusbo;
+ return 0;
+}
+
+static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+ /* already pinned: just bump the count, do not re-validate */
+ return 0;
+ }
+
+ cirrus_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+ return 0;
+}
+
+int cirrus_bo_unpin(struct cirrus_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int cirrus_bo_push_sysram(struct cirrus_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct cirrus_device *cirrus;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ cirrus = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 4b8653b932f9..08758e061478 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -98,3 +98,26 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
+
+void
+drm_clflush_virt_range(char *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+ if (cpu_has_clflush) {
+ char *end = addr + length;
+ mb();
+ for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+ clflush(addr);
+ clflush(end - 1);
+ mb();
+ return;
+ }
+
+ if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+ printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+ printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+ WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 325365f6d355..affa629589ac 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -85,11 +85,12 @@ again:
mutex_lock(&dev->struct_mutex);
ret = idr_get_new_above(&dev->ctx_idr, NULL,
DRM_RESERVED_CONTEXTS, &new_id);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- goto again;
- }
mutex_unlock(&dev->struct_mutex);
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ret;
+
return new_id;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index c79870a75c2f..08a7aa722d6b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -227,7 +227,7 @@ static int drm_mode_object_get(struct drm_device *dev,
again:
if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Ran out memory getting a mode number\n");
- return -EINVAL;
+ return -ENOMEM;
}
mutex_lock(&dev->mode_config.idr_mutex);
@@ -235,6 +235,8 @@ again:
mutex_unlock(&dev->mode_config.idr_mutex);
if (ret == -EAGAIN)
goto again;
+ else if (ret)
+ return ret;
obj->id = new_id;
obj->type = obj_type;
@@ -361,7 +363,7 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
* @funcs: callbacks for the new CRTC
*
* LOCKING:
- * Caller must hold mode config lock.
+ * Takes mode_config lock.
*
* Inits a new object created as base part of an driver crtc object.
*
@@ -382,6 +384,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
goto out;
+ crtc->base.properties = &crtc->properties;
+
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
@@ -481,6 +485,7 @@ int drm_connector_init(struct drm_device *dev,
if (ret)
goto out;
+ connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
connector->connector_type = connector_type;
@@ -603,6 +608,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (ret)
goto out;
+ plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
@@ -1422,11 +1428,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
}
connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- props_count++;
- }
- }
+ props_count = connector->properties.count;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
@@ -1479,21 +1481,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
copied = 0;
prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
+ for (i = 0; i < connector->properties.count; i++) {
+ if (put_user(connector->properties.ids[i],
+ prop_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
- if (put_user(connector->property_values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ if (put_user(connector->properties.values[i],
+ prop_values + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
out_resp->count_props = props_count;
@@ -1830,7 +1830,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
- int ret = 0;
+ int ret;
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -2102,7 +2102,7 @@ int drm_mode_addfb(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
- DRM_ERROR("could not create framebuffer\n");
+ DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
@@ -2116,7 +2116,7 @@ out:
return ret;
}
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
{
uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
@@ -2185,6 +2185,47 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
}
}
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+ int ret, hsub, vsub, num_planes, i;
+
+ ret = format_check(r);
+ if (ret) {
+ DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+ return ret;
+ }
+
+ hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+ num_planes = drm_format_num_planes(r->pixel_format);
+
+ if (r->width == 0 || r->width % hsub) {
+ DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+ return -EINVAL;
+ }
+
+ if (r->height == 0 || r->height % vsub) {
+ DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = r->width / (i != 0 ? hsub : 1);
+
+ if (!r->handles[i]) {
+ DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ return -EINVAL;
+ }
+
+ if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+ DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
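As a worked example of the pitch rule enforced above, the sketch below recomputes the per-plane minimums for a 1920x1080 NV12 buffer, using the subsampling and bytes-per-pixel values that the format helpers added later in this patch report for NV12; the dimensions are illustrative.

/* Standalone sketch: minimum pitches framebuffer_check() allows for NV12. */
#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;	/* example dimensions */
	unsigned int hsub = 2, vsub = 2;		/* NV12 chroma is 2x2 subsampled */
	unsigned int cpp[2] = { 1, 2 };			/* Y plane, interleaved CbCr plane */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int plane_width = width / (i ? hsub : 1);
		printf("plane %u: minimum pitch %u bytes\n", i, cpp[i] * plane_width);
	}
	printf("width %u must be a multiple of %u, height %u a multiple of %u\n",
	       width, hsub, height, vsub);
	return 0;
}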
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @inode: inode from the ioctl
@@ -2208,33 +2249,31 @@ int drm_mode_addfb2(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+ DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+ DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
return -EINVAL;
}
- ret = format_check(r);
- if (ret) {
- DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+ ret = framebuffer_check(r);
+ if (ret)
return ret;
- }
mutex_lock(&dev->mode_config.mutex);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
- DRM_ERROR("could not create framebuffer\n");
+ DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
@@ -2365,7 +2404,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2564,7 +2603,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
struct drm_display_mode *mode;
struct drm_mode_object *obj;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2618,7 +2657,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
struct drm_connector *connector;
struct drm_display_mode mode;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
- int ret = 0;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2710,6 +2749,34 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
}
EXPORT_SYMBOL(drm_property_create_enum);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values)
+{
+ struct drm_property *property;
+ int i, ret;
+
+ flags |= DRM_MODE_PROP_BITMASK;
+
+ property = drm_property_create(dev, flags, name, num_values);
+ if (!property)
+ return NULL;
+
+ for (i = 0; i < num_values; i++) {
+ ret = drm_property_add_enum(property, i,
+ props[i].type,
+ props[i].name);
+ if (ret) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+ }
+
+ return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
+
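A minimal sketch of how a driver could use the new bitmask helper, assuming a hypothetical "supported-features" property; the property name and values are invented for illustration, while drm_property_create_bitmask() and drm_object_attach_property() are the helpers introduced in this patch.

/* Hypothetical driver code: create and attach a bitmask property. */
static const struct drm_prop_enum_list example_features[] = {
	{ 0, "feature-a" },
	{ 1, "feature-b" },
	{ 2, "feature-c" },
};

static void example_attach_feature_prop(struct drm_device *dev,
					struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_property_create_bitmask(dev, 0, "supported-features",
					   example_features,
					   ARRAY_SIZE(example_features));
	if (!prop)
		return;

	/* start with no feature bits set */
	drm_object_attach_property(&plane->base, prop, 0);
}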
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
@@ -2734,7 +2801,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
{
struct drm_property_enum *prop_enum;
- if (!(property->flags & DRM_MODE_PROP_ENUM))
+ if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+ if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
@@ -2778,60 +2852,78 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-int drm_connector_attach_property(struct drm_connector *connector,
+void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val)
{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == 0) {
- connector->property_ids[i] = property->base.id;
- connector->property_values[i] = init_val;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ drm_object_attach_property(&connector->base, property, init_val);
}
EXPORT_SYMBOL(drm_connector_attach_property);
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
+ return drm_object_property_set_value(&connector->base, property, value);
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t *val)
+{
+ return drm_object_property_get_value(&connector->base, property, val);
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+{
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ WARN(1, "Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
+ }
+
+ obj->properties->ids[count] = property->base.id;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- connector->property_values[i] = value;
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ obj->properties->values[i] = val;
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_set_value);
+EXPORT_SYMBOL(drm_object_property_set_value);
-int drm_connector_property_get_value(struct drm_connector *connector,
+int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- *val = connector->property_values[i];
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ *val = obj->properties->values[i];
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_get_value);
+EXPORT_SYMBOL(drm_object_property_get_value);
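
A small hedged sketch of the new generic property helpers in use: a hypothetical "example-level" range property is attached to a CRTC and read back. drm_property_create_range() already exists; the attach/get helpers are the ones added here.

/* Hypothetical driver code: attach a range property to a CRTC and read it. */
static void example_crtc_property(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_property *prop;
	uint64_t val;

	prop = drm_property_create_range(dev, 0, "example-level", 0, 100);
	if (!prop)
		return;

	drm_object_attach_property(&crtc->base, prop, 50);

	if (!drm_object_property_get_value(&crtc->base, prop, &val))
		DRM_DEBUG_KMS("example-level starts at %llu\n",
			      (unsigned long long)val);
}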
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -2862,7 +2954,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_ENUM) {
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
@@ -2887,7 +2979,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
out_resp->count_values = value_count;
- if (property->flags & DRM_MODE_PROP_ENUM) {
+ if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
@@ -3009,7 +3101,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
- int ret = 0, size;
+ int ret, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -3033,75 +3125,202 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+static bool drm_property_change_is_valid(struct drm_property *property,
+ uint64_t value)
+{
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else if (property->flags & DRM_MODE_PROP_BITMASK) {
+ int i;
+ uint64_t valid_mask = 0;
+ for (i = 0; i < property->num_values; i++)
+ valid_mask |= (1ULL << property->values[i]);
+ return !(value & ~valid_mask);
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+ }
+}
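
For the bitmask branch above, the following standalone sketch shows the valid_mask arithmetic for a hypothetical property whose registered enum values are 0, 1 and 2.

/* Standalone sketch: bitmask validation as in drm_property_change_is_valid(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t values[] = { 0, 1, 2 };	/* bit positions added via drm_property_add_enum() */
	uint64_t valid_mask = 0;
	uint64_t request = 0x5;			/* bits 0 and 2 requested */
	unsigned int i;

	for (i = 0; i < 3; i++)
		valid_mask |= 1ULL << values[i];

	printf("valid_mask = 0x%llx, request 0x%llx is %s\n",
	       (unsigned long long)valid_mask, (unsigned long long)request,
	       (request & ~valid_mask) ? "rejected" : "accepted");
	return 0;
}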
+
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_connector_set_property *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_property *property;
- struct drm_connector *connector;
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
int ret = -EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int)value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ drm_connector_property_set_value(connector, property, value);
+ return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->set_property)
+ ret = crtc->funcs->set_property(crtc, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (plane->funcs->set_property)
+ ret = plane->funcs->set_property(plane, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
int i;
+ int copied = 0;
+ int props_count = 0;
+ uint32_t __user *props_ptr;
+ uint64_t __user *prop_values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = -EINVAL;
goto out;
}
- connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == out_resp->prop_id)
- break;
+ props_count = obj->properties->count;
+
+ /* This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it. */
+ if ((arg->count_props >= props_count) && props_count) {
+ copied = 0;
+ props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ prop_values_ptr = (uint64_t __user *)(unsigned long)
+ (arg->prop_values_ptr);
+ for (i = 0; i < props_count; i++) {
+ if (put_user(obj->properties->ids[i],
+ props_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(obj->properties->values[i],
+ prop_values_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
}
+ arg->count_props = props_count;
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
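The two-pass convention described in the comment above looks roughly like this from userspace; this is a hedged sketch that assumes the uapi struct and ioctl number added by this series are visible through the installed drm headers, with error handling kept minimal.

/* Userspace sketch: query an object's properties with two ioctl passes. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int get_object_properties(int fd, uint32_t obj_id, uint32_t obj_type)
{
	struct drm_mode_obj_get_properties arg;
	uint32_t *ids;
	uint64_t *values;

	memset(&arg, 0, sizeof(arg));
	arg.obj_id = obj_id;
	arg.obj_type = obj_type;

	/* first pass: count_props comes back filled in */
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
		return -1;

	ids = calloc(arg.count_props, sizeof(*ids));
	values = calloc(arg.count_props, sizeof(*values));
	if (!ids || !values)
		goto fail;

	arg.props_ptr = (uintptr_t)ids;
	arg.prop_values_ptr = (uintptr_t)values;

	/* second pass: the kernel copies ids and values into our buffers */
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
		goto fail;

	/* ... use ids[] / values[] ... */
	free(ids);
	free(values);
	return 0;

fail:
	free(ids);
	free(values);
	return -1;
}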
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_mode_object *prop_obj;
+ struct drm_property *property;
+ int ret = -EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
- if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj)
+ goto out;
+ if (!arg_obj->properties)
goto out;
- }
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
+ for (i = 0; i < arg_obj->properties->count; i++)
+ if (arg_obj->properties->ids[i] == arg->prop_id)
+ break;
+
+ if (i == arg_obj->properties->count)
goto out;
- }
- property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ prop_obj = drm_mode_object_find(dev, arg->prop_id,
+ DRM_MODE_OBJECT_PROPERTY);
+ if (!prop_obj)
goto out;
+ property = obj_to_property(prop_obj);
- if (property->flags & DRM_MODE_PROP_RANGE) {
- if (out_resp->value < property->values[0])
- goto out;
+ if (!drm_property_change_is_valid(property, arg->value))
+ goto out;
- if (out_resp->value > property->values[1])
- goto out;
- } else {
- int found = 0;
- for (i = 0; i < property->num_values; i++) {
- if (property->values[i] == out_resp->value) {
- found = 1;
- break;
- }
- }
- if (!found) {
- goto out;
- }
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+ break;
}
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int) out_resp->value);
- ret = 0;
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, out_resp->value);
-
- /* store the property value if successful */
- if (!ret)
- drm_connector_property_set_value(connector, property, out_resp->value);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
@@ -3173,6 +3392,11 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
}
crtc = obj_to_crtc(obj);
+ if (crtc->funcs->gamma_set == NULL) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
ret = -EINVAL;
@@ -3468,3 +3692,140 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
}
}
EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
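
Taken together, the helpers above let a driver size each plane of a buffer without per-format special cases; the sketch below is a hypothetical helper, shown only to illustrate how the new functions compose.

/* Hypothetical driver helper built on the format helpers added above. */
static size_t example_plane_size(uint32_t format, int plane,
				 unsigned int width, unsigned int height)
{
	unsigned int hsub = drm_format_horz_chroma_subsampling(format);
	unsigned int vsub = drm_format_vert_chroma_subsampling(format);
	unsigned int w, h;

	if (plane >= drm_format_num_planes(format))
		return 0;

	/* chroma planes are subsampled in both directions */
	w = plane ? width / hsub : width;
	h = plane ? height / vsub : height;

	/* one line is cpp * plane width; the plane holds h such lines */
	return (size_t)drm_format_plane_cpp(format, plane) * w * h;
}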
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 81118893264c..3252e7067d8b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -518,7 +518,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
- int ret = 0;
+ int ret;
int i;
DRM_DEBUG_KMS("\n");
@@ -1023,36 +1023,3 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-
-/**
- * drm_format_num_planes - get the number of planes for format
- * @format: pixel format (DRM_FORMAT_*)
- *
- * RETURNS:
- * The number of planes used by the specified pixel format.
- */
-int drm_format_num_planes(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_num_planes);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6116e3b75393..8a9d0792e4ec 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -163,7 +163,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a18b0df8285..a8743c399e83 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/export.h>
+#include <linux/module.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
@@ -66,6 +66,8 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -81,7 +83,7 @@ struct detailed_mode_closure {
#define LEVEL_CVT 3
static struct edid_quirk {
- char *vendor;
+ char vendor[4];
int product_id;
u32 quirks;
} edid_quirk_list[] = {
@@ -120,6 +122,9 @@ static struct edid_quirk {
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
/*** DDC fetch and block validation ***/
@@ -144,21 +149,28 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
}
EXPORT_SYMBOL(drm_edid_header_is_valid);
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+ "Minimum number of valid EDID header bytes (0-8, default 6)");
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
-bool drm_edid_block_valid(u8 *raw_edid)
+bool drm_edid_block_valid(u8 *raw_edid, int block)
{
int i;
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
- if (raw_edid[0] == 0x00) {
+ if (edid_fixup > 8 || edid_fixup < 0)
+ edid_fixup = 6;
+
+ if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
- else if (score >= 6) {
+ else if (score >= edid_fixup) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
@@ -219,7 +231,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false;
for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
return false;
return true;
@@ -299,7 +311,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block))
+ if (drm_edid_block_valid(block, 0))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@@ -324,7 +336,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
valid_extensions++;
break;
}
@@ -486,23 +498,47 @@ static void edid_fixup_preferred(struct drm_connector *connector,
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+ return (mode->htotal - mode->hdisplay == 160) &&
+ (mode->hsync_end - mode->hdisplay == 80) &&
+ (mode->hsync_end - mode->hsync_start == 32) &&
+ (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh)
+ int hsize, int vsize, int fresh,
+ bool rb)
{
- struct drm_display_mode *mode = NULL;
int i;
for (i = 0; i < drm_num_dmt_modes; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
- if (hsize == ptr->hdisplay &&
- vsize == ptr->vdisplay &&
- fresh == drm_mode_vrefresh(ptr)) {
- /* get the expected default mode */
- mode = drm_mode_duplicate(dev, ptr);
- break;
- }
+ if (hsize != ptr->hdisplay)
+ continue;
+ if (vsize != ptr->vdisplay)
+ continue;
+ if (fresh != drm_mode_vrefresh(ptr))
+ continue;
+ if (rb != mode_is_rb(ptr))
+ continue;
+
+ return drm_mode_duplicate(dev, ptr);
}
- return mode;
+
+ return NULL;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
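
With the extra rb argument, callers that know whether the monitor supports reduced blanking can try the RB variant first and fall back to the normal DMT entry; this condensed sketch mirrors what drm_mode_std() below now does and is only illustrative.

/* Illustrative caller: prefer the reduced-blanking DMT entry when supported. */
static struct drm_display_mode *example_pick_dmt(struct drm_device *dev,
						 struct edid *edid,
						 int hsize, int vsize, int fresh)
{
	struct drm_display_mode *mode = NULL;

	if (drm_monitor_supports_rb(edid))
		mode = drm_mode_find_dmt(dev, hsize, vsize, fresh, true);
	if (!mode)
		mode = drm_mode_find_dmt(dev, hsize, vsize, fresh, false);

	return mode;	/* NULL if the timing is not in the DMT table */
}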
@@ -574,7 +610,7 @@ static bool
drm_monitor_supports_rb(struct edid *edid)
{
if (edid->revision >= 4) {
- bool ret;
+ bool ret = false;
drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
return ret;
}
@@ -731,10 +767,17 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
}
/* check whether it can be found in default mode table */
- mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
+ if (drm_monitor_supports_rb(edid)) {
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+ true);
+ if (mode)
+ return mode;
+ }
+ mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
if (mode)
return mode;
+ /* okay, generate it */
switch (timing_level) {
case LEVEL_DMT:
break;
@@ -748,6 +791,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ if (!mode)
+ return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
@@ -852,12 +897,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
+
+ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
+ goto set_size;
+ }
+
mode = drm_mode_create(dev);
if (!mode)
return NULL;
- mode->type = DRM_MODE_TYPE_DRIVER;
-
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
@@ -881,8 +933,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_do_interlace_quirk(mode, pt);
- drm_mode_set_name(mode);
-
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -892,6 +942,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
@@ -905,16 +956,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->height_mm = edid->height_cm * 10;
}
- return mode;
-}
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
-static bool
-mode_is_rb(const struct drm_display_mode *mode)
-{
- return (mode->htotal - mode->hdisplay == 160) &&
- (mode->hsync_end - mode->hdisplay == 80) &&
- (mode->hsync_end - mode->hsync_start == 32) &&
- (mode->vsync_start - mode->vdisplay == 3);
+ return mode;
}
static bool
@@ -994,12 +1039,26 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
return true;
}
-/*
- * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
- * need to account for them.
- */
+static bool valid_inferred_mode(const struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+ bool ok = false;
+
+ list_for_each_entry(m, &connector->probed_modes, head) {
+ if (mode->hdisplay == m->hdisplay &&
+ mode->vdisplay == m->vdisplay &&
+ drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+ return false; /* duplicated */
+ if (mode->hdisplay <= m->hdisplay &&
+ mode->vdisplay <= m->vdisplay)
+ ok = true;
+ }
+ return ok;
+}
+
static int
-drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
@@ -1007,7 +1066,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -1019,17 +1079,112 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
}
+/* fix up 1366x768 mode from 1368x768;
+ * GTF/CVT can't express a width of 1366, which isn't divisible by 8
+ */
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
+}
+
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < num_extra_modes; i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ bool rb = drm_monitor_supports_rb(edid);
+
+ for (i = 0; i < num_extra_modes; i++) {
+ const struct minimode *m = &extra_modes[i];
+ newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+ if (!newmode)
+ return modes;
+
+ fixup_mode_1366x768(newmode);
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
+ drm_mode_destroy(dev, newmode);
+ continue;
+ }
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ return modes;
+}
+
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
- int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+ struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ return;
- if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+ closure->modes += drm_dmt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+
+ if (!version_greater(closure->edid, 1, 1))
+ return; /* GTF not defined yet */
+
+ switch (range->flags) {
+ case 0x02: /* secondary gtf, XXX could do more */
+ case 0x00: /* default gtf */
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->edid,
timing);
+ break;
+ case 0x04: /* cvt, only in 1.4+ */
+ if (!version_greater(closure->edid, 1, 3))
+ break;
+
+ closure->modes += drm_cvt_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+ break;
+ case 0x01: /* just the ranges, no formula */
+ default:
+ break;
+ }
}
static int
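The 1366x768 special case above is needed because GTF and CVT compute horizontal timings in 8-pixel character cells, so asking for a 1366-wide mode yields 1368; fixup_mode_1366x768() then trims the two extra pixels and moves the sync pulse one pixel earlier. A minimal sketch of the effect, assuming dev is a valid struct drm_device pointer (the numbers only show the rounding, not a complete timing set):

	struct drm_display_mode *m = drm_gtf_mode(dev, 1366, 768, 60, 0, 0);

	if (m) {
		/* GTF rounded the width up to 1368; undo that */
		fixup_mode_1366x768(m);
		/* m->hdisplay is now 1366 and the mode name was regenerated */
		drm_mode_destroy(dev, m);
	}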
@@ -1062,8 +1217,8 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
mode = drm_mode_find_dmt(connector->dev,
est3_modes[m].w,
est3_modes[m].h,
- est3_modes[m].r
- /*, est3_modes[m].rb */);
+ est3_modes[m].r,
+ est3_modes[m].rb);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
@@ -1312,6 +1467,8 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
+#define EDID_CEA_YCRCB444 (1 << 5)
+#define EDID_CEA_YCRCB422 (1 << 4)
/**
* Search EDID for CEA extension block.
@@ -1666,13 +1823,29 @@ static void drm_add_display_info(struct edid *edid,
info->bpc = 0;
info->color_formats = 0;
- /* Only defined for 1.4 with digital displays */
- if (edid->revision < 4)
+ if (edid->revision < 3)
return;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return;
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (edid_ext) {
+ info->cea_rev = edid_ext[1];
+
+ /* The existence of a CEA block should imply RGB support */
+ info->color_formats = DRM_COLOR_FORMAT_RGB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid_ext[3] & EDID_CEA_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ }
+
+ /* Only defined for 1.4 with digital displays */
+ if (edid->revision < 4)
+ return;
+
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
info->bpc = 6;
@@ -1698,18 +1871,11 @@ static void drm_add_display_info(struct edid *edid,
break;
}
- info->color_formats = DRM_COLOR_FORMAT_RGB444;
- if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
- info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
- if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
- info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
-
- /* Get data from CEA blocks if present */
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- return;
-
- info->cea_rev = edid_ext[1];
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
/**
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index da9acba2dd6c..66d4a28ad5a2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -173,7 +173,7 @@ static int edid_load(struct drm_connector *connector, char *name,
}
memcpy(edid, fwdata, fwsize);
- if (!drm_edid_block_valid(edid)) {
+ if (!drm_edid_block_valid(edid, 0)) {
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
kfree(edid);
@@ -185,7 +185,7 @@ static int edid_load(struct drm_connector *connector, char *name,
if (i != valid_extensions + 1)
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
edid + i * EDID_LENGTH, EDID_LENGTH);
- if (drm_edid_block_valid(edid + i * EDID_LENGTH))
+ if (drm_edid_block_valid(edid + i * EDID_LENGTH, i))
valid_extensions++;
}
@@ -220,18 +220,18 @@ int drm_load_edid_firmware(struct drm_connector *connector)
{
char *connector_name = drm_get_connector_name(connector);
char *edidname = edid_firmware, *last, *colon;
- int ret = 0;
+ int ret;
if (*edidname == '\0')
- return ret;
+ return 0;
colon = strchr(edidname, ':');
if (colon != NULL) {
if (strncmp(connector_name, edidname, colon - edidname))
- return ret;
+ return 0;
edidname = colon + 1;
if (*edidname == '\0')
- return ret;
+ return 0;
}
last = edidname + strlen(edidname) - 1;
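For context, the string being parsed here is the edid_firmware module parameter in the form [<connector>:]<path>, where <path> is resolved through the firmware loader. A typical boot-time use (the drm_kms_helper prefix, connector name and file name below are illustrative assumptions, not taken from this patch) would look like:

	drm_kms_helper.edid_firmware=DVI-I-1:edid/1280x1024.bin

With the connector prefix present, only the named connector gets the override; without it, the firmware EDID is applied to every connector.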
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index a91ffb117220..ff98a7eb38dd 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -30,7 +30,6 @@
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
- * But the mode with Reduced blank feature is deleted.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
@@ -81,6 +80,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@120Hz RB */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+ 880, 960, 0, 600, 603, 607, 636, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
@@ -106,10 +109,18 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@120Hz RB */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+ 1104, 1184, 0, 768, 771, 775, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 790, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
@@ -122,6 +133,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@120Hz RB */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+ 1360, 1440, 0, 768, 771, 778, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@60Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 823, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
@@ -134,6 +153,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@120Hz RB */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+ 1360, 1440, 0, 800, 803, 809, 847, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
@@ -142,6 +165,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@120Hz RB */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+ 1360, 1440, 0, 960, 963, 967, 1017, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
@@ -154,22 +181,42 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@120Hz RB */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+ 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
+ /* 1360x768@120Hz RB */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+ 1440, 1520, 0, 768, 771, 776, 813, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@75Hz */
+ /* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@85Hz */
+ /* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@120Hz RB */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+ 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x900@60Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 926, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
@@ -182,6 +229,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@120Hz RB */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+ 1520, 1600, 0, 900, 903, 909, 953, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
@@ -202,6 +253,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@120Hz RB */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+ 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1680x1050@60Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
@@ -214,15 +273,23 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@120Hz RB */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+ 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1729x1344@75Hz */
+ /* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
+ /* 1792x1344@120Hz RB */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+ 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -230,6 +297,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@120Hz RB */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+ 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1200@60Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
@@ -242,6 +317,10 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@120Hz RB */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+ 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
@@ -250,6 +329,14 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@120Hz RB */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+ 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2560x1600@60Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
@@ -262,6 +349,11 @@ static const struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@120Hz RB */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+ 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+
};
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -320,12 +412,14 @@ static const struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
-static const struct {
+struct minimode {
short w;
short h;
short r;
short rb;
-} est3_modes[] = {
+};
+
+static const struct minimode est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
@@ -377,288 +471,304 @@ static const struct {
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
-static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+static const int num_est3_modes = ARRAY_SIZE(est3_modes);
+
+static const struct minimode extra_modes[] = {
+ { 1024, 576, 60, 0 },
+ { 1366, 768, 60, 0 },
+ { 1600, 900, 60, 0 },
+ { 1680, 945, 60, 0 },
+ { 1920, 1080, 60, 0 },
+ { 2048, 1152, 60, 0 },
+ { 2048, 1536, 60, 0 },
+};
+static const int num_extra_modes = ARRAY_SIZE(extra_modes);
/*
* Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode edid_cea_modes[] = {
- /* 640x480@60Hz */
+ /* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@60Hz */
+ /* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@60Hz */
+ /* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x720@60Hz */
+ /* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080i@60Hz */
+ /* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@60Hz */
+ /* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@60Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x240@60Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x240@60Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x480i@60Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x480i@60Hz */
+ /* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x240@60Hz */
+ /* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x240@60Hz */
+ /* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480@60Hz */
+ /* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480@60Hz */
+ /* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080@60Hz */
+ /* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x576@50Hz */
+ /* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@50Hz */
+ /* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x720@50Hz */
+ /* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080i@50Hz */
+ /* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@50Hz */
+ /* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@50Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x288@50Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x288@50Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576i@50Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x576i@50Hz */
+ /* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 2880x288@50Hz */
+ /* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x288@50Hz */
+ /* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576@50Hz */
+ /* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576@50Hz */
+ /* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080@50Hz */
+ /* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@24Hz */
+ /* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@25Hz */
+ /* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@30Hz */
+ /* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2880x480@60Hz */
+ /* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x480@60Hz */
+ /* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576@50Hz */
+ /* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 2880x576@50Hz */
+ /* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080i@50Hz */
+ /* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1920x1080i@100Hz */
+ /* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@100Hz */
+ /* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x576@100Hz */
+ /* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@100Hz */
+ /* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@100Hz */
+ /* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@100Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1920x1080i@120Hz */
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_DBLCLK) },
+ /* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@120Hz */
+ /* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x480@120Hz */
+ /* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@120Hz */
+ /* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480i@120Hz */
+ /* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@120Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 720x576@200Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x576@200Hz */
+ /* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x576i@200Hz */
+ /* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x576i@200Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 720x480@240Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 720x480@240Hz */
+ /* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1440x480i@240 */
+ /* 58 - 1440x480i@240 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1440x480i@240 */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 59 - 1440x480i@240 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1280x720@24Hz */
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
+ /* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x720@25Hz */
+ /* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x720@30Hz */
+ /* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@120Hz */
+ /* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1080@100Hz */
+ /* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
-static const int drm_num_cea_modes =
- sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
+static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a0d6e894d97c..5683b7fdd746 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -136,6 +136,9 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
uint16_t *r_base, *g_base, *b_base;
+ if (crtc->funcs->gamma_set == NULL)
+ return;
+
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
@@ -383,7 +386,6 @@ int drm_fb_helper_init(struct drm_device *dev,
int crtc_count, int max_conn_count)
{
struct drm_crtc *crtc;
- int ret = 0;
int i;
fb_helper->dev = dev;
@@ -408,10 +410,8 @@ int drm_fb_helper_init(struct drm_device *dev,
sizeof(struct drm_connector *),
GFP_KERNEL);
- if (!fb_helper->crtc_info[i].mode_set.connectors) {
- ret = -ENOMEM;
+ if (!fb_helper->crtc_info[i].mode_set.connectors)
goto out_free;
- }
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
@@ -1083,7 +1083,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
/* try and find a 1024x768 mode on each connector */
can_clone = true;
- dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
+ dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
for (i = 0; i < fb_helper->connector_count; i++) {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 83114b5e3cee..d58e69da1fb5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -201,6 +201,19 @@ free:
}
EXPORT_SYMBOL(drm_gem_object_alloc);
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+ if (obj->import_attach) {
+ drm_prime_remove_imported_buf_handle(&filp->prime,
+ obj->import_attach->dmabuf);
+ }
+ if (obj->export_dma_buf) {
+ drm_prime_remove_imported_buf_handle(&filp->prime,
+ obj->export_dma_buf);
+ }
+}
+
/**
* Removes the mapping from handle to filp for this object.
*/
@@ -233,9 +246,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- if (obj->import_attach)
- drm_prime_remove_imported_buf_handle(&filp->prime,
- obj->import_attach->dmabuf);
+ drm_gem_remove_prime_handles(obj, filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
@@ -272,8 +283,7 @@ again:
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
-
- if (ret != 0)
+ else if (ret)
return ret;
drm_gem_object_handle_reference(obj);
@@ -329,7 +339,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
struct drm_local_map *map;
- int ret = 0;
+ int ret;
/* Set the object up for mmap'ing */
list = &obj->map_list;
@@ -456,8 +466,7 @@ again:
if (ret == -EAGAIN)
goto again;
-
- if (ret != 0)
+ else if (ret)
goto err;
/* Allocate a reference for the name table. */
@@ -532,9 +541,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- if (obj->import_attach)
- drm_prime_remove_imported_buf_handle(&file_priv->prime,
- obj->import_attach->dmabuf);
+ drm_gem_remove_prime_handles(obj, file_priv);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
@@ -628,7 +635,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
drm_gem_object_reference(obj);
mutex_lock(&obj->dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(obj->dev, vma);
mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@@ -639,7 +646,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = obj->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
+ drm_vm_close_locked(obj->dev, vma);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
}
@@ -712,7 +719,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
*/
drm_gem_object_reference(obj);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index cf85155da2a0..64a62c697313 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -283,6 +283,10 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
+ case DRM_CAP_PRIME:
+ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+ req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+ break;
default:
return -EINVAL;
}
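Userspace can probe the new capability through libdrm's drmGetCap(); a hedged sketch (assuming a libdrm/drm.h recent enough to define DRM_CAP_PRIME and the two flag bits, error handling trimmed):

#include <stdint.h>
#include <xf86drm.h>

/* returns nonzero when the device can both import and export PRIME buffers */
static int supports_prime_both_ways(int fd)
{
	uint64_t cap = 0;

	if (drmGetCap(fd, DRM_CAP_PRIME, &cap) != 0)
		return 0;	/* older kernel: the cap is simply unknown */

	return (cap & DRM_PRIME_CAP_IMPORT) && (cap & DRM_PRIME_CAP_EXPORT);
}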
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c869436e238a..c798eeae0a03 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -189,7 +189,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
if (dev->num_crtcs == 0)
return;
- del_timer(&dev->vblank_disable_timer);
+ del_timer_sync(&dev->vblank_disable_timer);
vblank_disable_fn((unsigned long)dev);
@@ -310,7 +310,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
*/
int drm_irq_install(struct drm_device *dev)
{
- int ret = 0;
+ int ret;
unsigned long sh_flags = 0;
char *irqname;
@@ -731,7 +731,7 @@ EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
{
- int ret = 0;
+ int ret;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
@@ -1031,18 +1031,15 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int ret = 0;
unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
- goto out;
+ return 0;
crtc = modeset->crtc;
- if (crtc >= dev->num_crtcs) {
- ret = -EINVAL;
- goto out;
- }
+ if (crtc >= dev->num_crtcs)
+ return -EINVAL;
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
@@ -1052,12 +1049,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
drm_vblank_post_modeset(dev, crtc);
break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out:
- return ret;
+ return 0;
}
static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
@@ -1154,7 +1149,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
- int ret = 0;
+ int ret;
unsigned int flags, seq, crtc, high_crtc;
if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index c79c713eeba0..521152041691 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -331,7 +331,7 @@ static int drm_notifier(void *priv)
void drm_idlelock_take(struct drm_lock_data *lock_data)
{
- int ret = 0;
+ int ret;
spin_lock_bh(&lock_data->spinlock);
lock_data->kernel_waiters++;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1bdf2b54eaf6..f546ff98a114 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -68,6 +68,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
{
struct drm_gem_object *obj;
void *buf;
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj)
@@ -100,6 +101,17 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
obj->export_dma_buf = buf;
*prime_fd = dma_buf_fd(buf, flags);
}
+	/* if we've exported this buffer then cheat and add it to the import list
+	 * so we get the correct handle back
+	 */
+ ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ obj->export_dma_buf, handle);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
+ }
+
mutex_unlock(&file_priv->prime.lock);
return 0;
}
@@ -227,6 +239,42 @@ out:
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
+/*
+ * Export an sg table into an array of pages and addresses; this is currently
+ * required by the TTM driver in order to do correct fault handling.
+ */
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages)
+{
+ unsigned count;
+ struct scatterlist *sg;
+ struct page *page;
+ u32 len, offset;
+ int pg_index;
+ dma_addr_t addr;
+
+ pg_index = 0;
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg->length;
+ offset = sg->offset;
+ page = sg_page(sg);
+ addr = sg_dma_address(sg);
+
+ while (len > 0) {
+ if (WARN_ON(pg_index >= max_pages))
+ return -1;
+ pages[pg_index] = page;
+ if (addrs)
+ addrs[pg_index] = addr;
+
+ page++;
+ addr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ pg_index++;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
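A hedged sketch of how a TTM-backed importer might consume this helper; imported_sgt, num_pages and the two arrays below are placeholders for driver-local state, not names from this patch:

	struct page **pages;
	dma_addr_t *dma_addrs;
	int ret;

	pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
	dma_addrs = kcalloc(num_pages, sizeof(*dma_addrs), GFP_KERNEL);
	if (!pages || !dma_addrs) {
		kfree(pages);
		kfree(dma_addrs);
		return -ENOMEM;
	}

	/* expand the sg table so TTM can resolve faults on individual pages */
	ret = drm_prime_sg_to_page_addr_arrays(imported_sgt, pages,
					       dma_addrs, num_pages);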
/* helper function to cleanup a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index aa454f80e109..21bcd4a555d8 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -122,11 +122,10 @@ again:
ret = idr_get_new_above(&drm_minors_idr, NULL,
base, &new_id);
mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN)
goto again;
- } else if (ret) {
+ else if (ret)
return ret;
- }
if (new_id >= limit) {
idr_remove(&drm_minors_idr, new_id);
@@ -211,7 +210,7 @@ EXPORT_SYMBOL(drm_master_put);
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- int ret = 0;
+ int ret;
if (file_priv->is_master)
return 0;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 5a7bd51fc3d8..45cf1dd3eb9c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -347,17 +347,17 @@ static struct bin_attribute edid_attr = {
};
/**
- * drm_sysfs_connector_add - add an connector to sysfs
+ * drm_sysfs_connector_add - add a connector to sysfs
* @connector: connector to add
*
- * Create an connector device in sysfs, along with its associated connector
+ * Create a connector device in sysfs, along with its associated connector
* properties (so far, connection status, dpms, mode list & edid) and
* generate a hotplug event so userspace knows there's a new connector
* available.
*
* Note:
- * This routine should only be called *once* for each DRM minor registered.
- * A second call for an already registered device will trigger the BUG_ON
+ * This routine should only be called *once* for each registered connector.
+ * A second call for an already registered connector will trigger the BUG_ON
* below.
*/
int drm_sysfs_connector_add(struct drm_connector *connector)
@@ -366,7 +366,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
int attr_cnt = 0;
int opt_cnt = 0;
int i;
- int ret = 0;
+ int ret;
/* We shouldn't get called more than once for the same connector */
BUG_ON(device_is_registered(&connector->kdev));
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 149561818349..961ee08927fe 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -406,10 +406,9 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
-void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -430,14 +429,13 @@ static void drm_vm_open(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
-void drm_vm_close_locked(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -467,7 +465,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
+ drm_vm_close_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
@@ -519,7 +517,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
@@ -670,7 +668,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 3343ac437fe5..7f5096763b7d 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_DMABUF
+ bool "EXYNOS DRM DMABUF"
+ depends on DRM_EXYNOS
+ help
+	  Choose this option if you want to use the DMABUF feature for DRM.
+
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C
@@ -27,3 +33,9 @@ config DRM_EXYNOS_VIDI
depends on DRM_EXYNOS
help
Choose this option if you want to use Exynos VIDI for DRM.
+
+config DRM_EXYNOS_G2D
+ bool "Exynos DRM G2D"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 9e0bff8badf9..eb651ca8e2a8 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,10 +8,12 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
exynos_ddc.o exynos_hdmiphy.o \
exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index de8d2090bce3..b3cb0a69fbf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -35,7 +35,7 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
dma_addr_t start_addr;
- unsigned int npages, page_size, i = 0;
+ unsigned int npages, i = 0;
struct scatterlist *sgl;
int ret = 0;
@@ -53,13 +53,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
if (buf->size >= SZ_1M) {
npages = buf->size >> SECTION_SHIFT;
- page_size = SECTION_SIZE;
+ buf->page_size = SECTION_SIZE;
} else if (buf->size >= SZ_64K) {
npages = buf->size >> 16;
- page_size = SZ_64K;
+ buf->page_size = SZ_64K;
} else {
npages = buf->size >> PAGE_SHIFT;
- page_size = PAGE_SIZE;
+ buf->page_size = PAGE_SIZE;
}
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
@@ -96,9 +96,9 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
while (i < npages) {
buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], page_size, 0);
+ sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
sg_dma_address(sgl) = start_addr;
- start_addr += page_size;
+ start_addr += buf->page_size;
sgl = sg_next(sgl);
i++;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 3486ffed0bf0..4afb625128d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -105,6 +105,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
overlay->fb_y = pos->fb_y;
overlay->fb_width = fb->width;
overlay->fb_height = fb->height;
+ overlay->src_width = pos->src_w;
+ overlay->src_height = pos->src_h;
overlay->bpp = fb->bits_per_pixel;
overlay->pitch = fb->pitches[0];
overlay->pixel_format = fb->pixel_format;
@@ -153,6 +155,8 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
pos.crtc_y = 0;
pos.crtc_w = fb->width - crtc->x;
pos.crtc_h = fb->height - crtc->y;
+ pos.src_w = pos.crtc_w;
+ pos.src_h = pos.crtc_h;
return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 25f72a62cb88..16b8e2195a0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -42,6 +42,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
* - the unit is screen coordinates.
* @fb_y: offset y on a framebuffer to be displayed
* - the unit is screen coordinates.
+ * @src_w: width of source area to be displayed from a framebuffer.
+ * @src_h: height of source area to be displayed from a framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_w: width of hardware screen.
@@ -50,6 +52,8 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
struct exynos_drm_crtc_pos {
unsigned int fb_x;
unsigned int fb_y;
+ unsigned int src_w;
+ unsigned int src_h;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_w;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
new file mode 100644
index 000000000000..274909271c36
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -0,0 +1,272 @@
+/* exynos_drm_dmabuf.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
+ unsigned int page_size)
+{
+ struct sg_table *sgt = NULL;
+ struct scatterlist *sgl;
+ int i, ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ goto out;
+
+ ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
+ if (ret)
+ goto err_free_sgt;
+
+ if (page_size < PAGE_SIZE)
+ page_size = PAGE_SIZE;
+
+ for_each_sg(sgt->sgl, sgl, nr_pages, i)
+ sg_set_page(sgl, pages[i], page_size, 0);
+
+ return sgt;
+
+err_free_sgt:
+ kfree(sgt);
+ sgt = NULL;
+out:
+ return NULL;
+}
+
+static struct sg_table *
+ exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+ struct drm_device *dev = gem_obj->base.dev;
+ struct exynos_drm_gem_buf *buf;
+ struct sg_table *sgt = NULL;
+ unsigned int npages;
+ int nents;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+ mutex_lock(&dev->struct_mutex);
+
+ buf = gem_obj->buffer;
+
+ /* there should always be pages allocated. */
+ if (!buf->pages) {
+ DRM_ERROR("pages is null.\n");
+ goto err_unlock;
+ }
+
+ npages = buf->size / buf->page_size;
+
+ sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
+ nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+ DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
+ npages, buf->size, buf->page_size);
+
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return sgt;
+}
+
+static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = NULL;
+}
+
+static void exynos_dmabuf_release(struct dma_buf *dmabuf)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+ /*
+ * exynos_dmabuf_release() is called when the dma-buf file's
+ * f_count drops to zero, so drop the gem object reference that
+ * drm_prime_handle_to_fd() took when this buffer was exported.
+ */
+ if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
+ exynos_gem_obj->base.export_dma_buf = NULL;
+
+ /*
+ * drop this gem object refcount to release allocated buffer
+ * and resources.
+ */
+ drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
+ }
+}
+
+static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num,
+ void *addr)
+{
+ /* TODO */
+}
+
+static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ /* TODO */
+
+ return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+ /* TODO */
+}
+
+static struct dma_buf_ops exynos_dmabuf_ops = {
+ .map_dma_buf = exynos_gem_map_dma_buf,
+ .unmap_dma_buf = exynos_gem_unmap_dma_buf,
+ .kmap = exynos_gem_dmabuf_kmap,
+ .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
+ .kunmap = exynos_gem_dmabuf_kunmap,
+ .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
+ .release = exynos_dmabuf_release,
+};
+
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+ struct drm_gem_object *obj, int flags)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+ exynos_gem_obj->base.size, 0600);
+}
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buffer;
+ struct page *page;
+ int ret, i = 0;
+
+ DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+ /* is this one of own objects? */
+ if (dma_buf->ops == &exynos_dmabuf_ops) {
+ struct drm_gem_object *obj;
+
+ exynos_gem_obj = dma_buf->priv;
+ obj = &exynos_gem_obj->base;
+
+ /* is it from our device? */
+ if (obj->dev == drm_dev) {
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+ attach = dma_buf_attach(dma_buf, drm_dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(-EINVAL);
+
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_buf_detach;
+ }
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+ ret = -ENOMEM;
+ goto err_unmap_attach;
+ }
+
+ buffer->pages = kzalloc(sizeof(page) * sgt->nents, GFP_KERNEL);
+ if (!buffer->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ ret = -ENOMEM;
+ goto err_free_buffer;
+ }
+
+ exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
+ if (!exynos_gem_obj) {
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+
+ sgl = sgt->sgl;
+ buffer->dma_addr = sg_dma_address(sgl);
+
+ while (i < sgt->nents) {
+ buffer->pages[i] = sg_page(sgl);
+ buffer->size += sg_dma_len(sgl);
+ sgl = sg_next(sgl);
+ i++;
+ }
+
+ exynos_gem_obj->buffer = buffer;
+ buffer->sgt = sgt;
+ exynos_gem_obj->base.import_attach = attach;
+
+ DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+ buffer->size);
+
+ return &exynos_gem_obj->base;
+
+err_free_pages:
+ kfree(buffer->pages);
+ buffer->pages = NULL;
+err_free_buffer:
+ kfree(buffer);
+ buffer = NULL;
+err_unmap_attach:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+err_buf_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
+MODULE_LICENSE("GPL");
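
The exporter above only comes into play once userspace asks for a PRIME round trip. Below is a minimal userspace sketch of that flow, not part of the patch: the device node path and the GEM handle value are assumptions, while struct drm_prime_handle and the DRM_IOCTL_PRIME_* ioctls come from the generic DRM uapi rather than from this file.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_prime_handle args = { .handle = 1 };	/* assumed GEM handle */
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed exynos node */

	if (fd < 0)
		return 1;

	/* GEM handle -> dma-buf fd; ends up in exynos_dmabuf_prime_export() */
	if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
		return 1;
	printf("exported dma-buf fd %d\n", args.fd);

	/* dma-buf fd -> GEM handle; ends up in exynos_dmabuf_prime_import() */
	if (ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) < 0)
		return 1;
	printf("imported back as GEM handle %u\n", args.handle);
	return 0;
}
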
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
new file mode 100644
index 000000000000..662a8f98ccdb
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -0,0 +1,39 @@
+/* exynos_drm_dmabuf.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_DMABUF_H_
+#define _EXYNOS_DRM_DMABUF_H_
+
+#ifdef CONFIG_DRM_EXYNOS_DMABUF
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+ struct drm_gem_object *obj, int flags);
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+ struct dma_buf *dma_buf);
+#else
+#define exynos_dmabuf_prime_export NULL
+#define exynos_dmabuf_prime_import NULL
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a6819b5f8428..d6de2e07fa03 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -39,6 +39,8 @@
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
+#include "exynos_drm_dmabuf.h"
+#include "exynos_drm_g2d.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -147,8 +149,17 @@ static int exynos_drm_unload(struct drm_device *dev)
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
+ struct drm_exynos_file_private *file_priv;
+
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ drm_prime_init_file_private(&file->prime);
+ file->driver_priv = file_priv;
+
return exynos_drm_subdrv_open(dev, file);
}
@@ -170,6 +181,7 @@ static void exynos_drm_preclose(struct drm_device *dev,
e->base.destroy(&e->base);
}
}
+ drm_prime_destroy_file_private(&file->prime);
spin_unlock_irqrestore(&dev->event_lock, flags);
exynos_drm_subdrv_close(dev, file);
@@ -193,7 +205,7 @@ static void exynos_drm_lastclose(struct drm_device *dev)
exynos_drm_fbdev_restore_mode(dev);
}
-static struct vm_operations_struct exynos_drm_gem_vm_ops = {
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.fault = exynos_drm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -207,10 +219,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
+ exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
+ exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
+ exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
+ exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -224,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
- DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+ DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.open = exynos_drm_open,
@@ -241,6 +261,10 @@ static struct drm_driver exynos_drm_driver = {
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
.dumb_destroy = exynos_drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = exynos_dmabuf_prime_export,
+ .gem_prime_import = exynos_dmabuf_prime_import,
.ioctls = exynos_ioctls,
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
@@ -307,6 +331,12 @@ static int __init exynos_drm_init(void)
goto out_vidi;
#endif
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ ret = platform_driver_register(&g2d_driver);
+ if (ret < 0)
+ goto out_g2d;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
goto out;
@@ -314,6 +344,11 @@ static int __init exynos_drm_init(void)
return 0;
out:
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ platform_driver_unregister(&g2d_driver);
+out_g2d:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_VIDI
out_vidi:
platform_driver_unregister(&vidi_driver);
@@ -341,6 +376,10 @@ static void __exit exynos_drm_exit(void)
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_G2D
+ platform_driver_unregister(&g2d_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&exynos_drm_common_hdmi_driver);
platform_driver_unregister(&mixer_driver);
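
The module init/exit changes above extend an increasingly long goto-unwind chain, where each optional sub-driver is guarded by its own Kconfig symbol and its unwinding label sits inside the same #ifdef. Stripped of the exynos specifics, the pattern reduces to the hedged sketch below; all driver and config names are illustrative.

#include <linux/init.h>
#include <linux/platform_device.h>

/* stand-ins for the exynos sub-drivers; purely illustrative */
extern struct platform_driver first_driver, optional_driver, core_driver;

static int __init example_init(void)
{
	int ret;

	ret = platform_driver_register(&first_driver);
	if (ret < 0)
		return ret;

#ifdef CONFIG_EXAMPLE_OPTIONAL
	ret = platform_driver_register(&optional_driver);
	if (ret < 0)
		goto err_first;
#endif

	ret = platform_driver_register(&core_driver);
	if (ret < 0)
		goto err_optional;

	return 0;

err_optional:
#ifdef CONFIG_EXAMPLE_OPTIONAL
	platform_driver_unregister(&optional_driver);
err_first:
#endif
	platform_driver_unregister(&first_driver);
	return ret;
}
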
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 1d814175cd49..c82c90c443e7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -77,6 +77,8 @@ struct exynos_drm_overlay_ops {
* - the unit is screen coordinates.
* @fb_width: width of a framebuffer.
* @fb_height: height of a framebuffer.
+ * @src_width: width of a partial image to be displayed from framebuffer.
+ * @src_height: height of a partial image to be displayed from framebuffer.
* @crtc_x: offset x on hardware screen.
* @crtc_y: offset y on hardware screen.
* @crtc_width: window width to be displayed (hardware screen).
@@ -108,6 +110,8 @@ struct exynos_drm_overlay {
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_height;
+ unsigned int src_width;
+ unsigned int src_height;
unsigned int crtc_x;
unsigned int crtc_y;
unsigned int crtc_width;
@@ -205,6 +209,18 @@ struct exynos_drm_manager {
struct exynos_drm_display_ops *display_ops;
};
+struct exynos_drm_g2d_private {
+ struct device *dev;
+ struct list_head inuse_cmdlist;
+ struct list_head event_list;
+ struct list_head gem_list;
+ unsigned int gem_nr;
+};
+
+struct drm_exynos_file_private {
+ struct exynos_drm_g2d_private *g2d_priv;
+};
+
/*
* Exynos drm private structure.
*/
@@ -287,4 +303,5 @@ extern struct platform_driver hdmi_driver;
extern struct platform_driver mixer_driver;
extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
+extern struct platform_driver g2d_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 6e9ac7bd1dcf..23d5ad379f86 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
manager_ops->commit(manager->dev);
}
-static struct drm_crtc *
-exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
- return encoder->crtc;
-}
-
static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
.dpms = exynos_drm_encoder_dpms,
.mode_fixup = exynos_drm_encoder_mode_fixup,
.mode_set = exynos_drm_encoder_mode_set,
.prepare = exynos_drm_encoder_prepare,
.commit = exynos_drm_encoder_commit,
- .get_crtc = exynos_drm_encoder_get_crtc,
};
static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c38c8f468fa3..4ccfe4328fab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -51,11 +51,22 @@ struct exynos_drm_fb {
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ unsigned int i;
DRM_DEBUG_KMS("%s\n", __FILE__);
drm_framebuffer_cleanup(fb);
+ for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+ struct drm_gem_object *obj;
+
+ if (exynos_fb->exynos_gem_obj[i] == NULL)
+ continue;
+
+ obj = &exynos_fb->exynos_gem_obj[i]->base;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
kfree(exynos_fb);
exynos_fb = NULL;
}
@@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- drm_gem_object_unreference_unlocked(obj);
-
fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb))
+ if (IS_ERR(fb)) {
+ drm_gem_object_unreference_unlocked(obj);
return fb;
+ }
exynos_fb = to_exynos_fb(fb);
nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- drm_gem_object_unreference_unlocked(obj);
-
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
@@ -191,7 +200,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(fb_helper);
}
-static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
+static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 3ecb30d93552..50823756cdea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -31,10 +31,10 @@
static inline int exynos_drm_format_num_buffers(uint32_t format)
{
switch (format) {
- case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12:
case DRM_FORMAT_NV12MT:
return 2;
- case DRM_FORMAT_YUV420M:
+ case DRM_FORMAT_YUV420:
return 3;
default:
return 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
new file mode 100644
index 000000000000..d2d88f22a037
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "drmP.h"
+#include "exynos_drm.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+
+#define G2D_HW_MAJOR_VER 4
+#define G2D_HW_MINOR_VER 1
+
+/* valid register range settable from user: 0x0104 ~ 0x0880 */
+#define G2D_VALID_START 0x0104
+#define G2D_VALID_END 0x0880
+
+/* general registers */
+#define G2D_SOFT_RESET 0x0000
+#define G2D_INTEN 0x0004
+#define G2D_INTC_PEND 0x000C
+#define G2D_DMA_SFR_BASE_ADDR 0x0080
+#define G2D_DMA_COMMAND 0x0084
+#define G2D_DMA_STATUS 0x008C
+#define G2D_DMA_HOLD_CMD 0x0090
+
+/* command registers */
+#define G2D_BITBLT_START 0x0100
+
+/* registers for base address */
+#define G2D_SRC_BASE_ADDR 0x0304
+#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
+#define G2D_DST_BASE_ADDR 0x0404
+#define G2D_DST_PLANE2_BASE_ADDR 0x0418
+#define G2D_PAT_BASE_ADDR 0x0500
+#define G2D_MSK_BASE_ADDR 0x0520
+
+/* G2D_SOFT_RESET */
+#define G2D_SFRCLEAR (1 << 1)
+#define G2D_R (1 << 0)
+
+/* G2D_INTEN */
+#define G2D_INTEN_ACF (1 << 3)
+#define G2D_INTEN_UCF (1 << 2)
+#define G2D_INTEN_GCF (1 << 1)
+#define G2D_INTEN_SCF (1 << 0)
+
+/* G2D_INTC_PEND */
+#define G2D_INTP_ACMD_FIN (1 << 3)
+#define G2D_INTP_UCMD_FIN (1 << 2)
+#define G2D_INTP_GCMD_FIN (1 << 1)
+#define G2D_INTP_SCMD_FIN (1 << 0)
+
+/* G2D_DMA_COMMAND */
+#define G2D_DMA_HALT (1 << 2)
+#define G2D_DMA_CONTINUE (1 << 1)
+#define G2D_DMA_START (1 << 0)
+
+/* G2D_DMA_STATUS */
+#define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
+#define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
+#define G2D_DMA_DONE (1 << 0)
+#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
+
+/* G2D_DMA_HOLD_CMD */
+#define G2D_USET_HOLD (1 << 2)
+#define G2D_LIST_HOLD (1 << 1)
+#define G2D_BITBLT_HOLD (1 << 0)
+
+/* G2D_BITBLT_START */
+#define G2D_START_CASESEL (1 << 2)
+#define G2D_START_NHOLT (1 << 1)
+#define G2D_START_BITBLT (1 << 0)
+
+#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
+#define G2D_CMDLIST_NUM 64
+#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
+#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
+
+/* cmdlist data structure */
+struct g2d_cmdlist {
+ u32 head;
+ u32 data[G2D_CMDLIST_DATA_NUM];
+ u32 last; /* last data offset */
+};
+
+struct drm_exynos_pending_g2d_event {
+ struct drm_pending_event base;
+ struct drm_exynos_g2d_event event;
+};
+
+struct g2d_gem_node {
+ struct list_head list;
+ unsigned int handle;
+};
+
+struct g2d_cmdlist_node {
+ struct list_head list;
+ struct g2d_cmdlist *cmdlist;
+ unsigned int gem_nr;
+ dma_addr_t dma_addr;
+
+ struct drm_exynos_pending_g2d_event *event;
+};
+
+struct g2d_runqueue_node {
+ struct list_head list;
+ struct list_head run_cmdlist;
+ struct list_head event_list;
+ struct completion complete;
+ int async;
+};
+
+struct g2d_data {
+ struct device *dev;
+ struct clk *gate_clk;
+ struct resource *regs_res;
+ void __iomem *regs;
+ int irq;
+ struct workqueue_struct *g2d_workq;
+ struct work_struct runqueue_work;
+ struct exynos_drm_subdrv subdrv;
+ bool suspended;
+
+ /* cmdlist */
+ struct g2d_cmdlist_node *cmdlist_node;
+ struct list_head free_cmdlist;
+ struct mutex cmdlist_mutex;
+ dma_addr_t cmdlist_pool;
+ void *cmdlist_pool_virt;
+
+ /* runqueue */
+ struct g2d_runqueue_node *runqueue_node;
+ struct list_head runqueue;
+ struct mutex runqueue_mutex;
+ struct kmem_cache *runqueue_slab;
+};
+
+static int g2d_init_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+ struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ int nr;
+ int ret;
+
+ g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
+ &g2d->cmdlist_pool, GFP_KERNEL);
+ if (!g2d->cmdlist_pool_virt) {
+ dev_err(dev, "failed to allocate dma memory\n");
+ return -ENOMEM;
+ }
+
+ node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ dev_err(dev, "failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
+ node[nr].cmdlist =
+ g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
+ node[nr].dma_addr =
+ g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
+
+ list_add_tail(&node[nr].list, &g2d->free_cmdlist);
+ }
+
+ return 0;
+
+err:
+ dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
+ return ret;
+}
+
+static void g2d_fini_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+
+ kfree(g2d->cmdlist_node);
+ dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
+}
+
+static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
+{
+ struct device *dev = g2d->dev;
+ struct g2d_cmdlist_node *node;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ if (list_empty(&g2d->free_cmdlist)) {
+ dev_err(dev, "there is no free cmdlist\n");
+ mutex_unlock(&g2d->cmdlist_mutex);
+ return NULL;
+ }
+
+ node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
+ list);
+ list_del_init(&node->list);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ return node;
+}
+
+static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
+{
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_move_tail(&node->list, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+}
+
+static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+ struct g2d_cmdlist_node *node)
+{
+ struct g2d_cmdlist_node *lnode;
+
+ if (list_empty(&g2d_priv->inuse_cmdlist))
+ goto add_to_list;
+
+ /* link the previous cmdlist to the base address of the new one */
+ lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+ struct g2d_cmdlist_node, list);
+ lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
+
+add_to_list:
+ list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+
+ if (node->event)
+ list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+}
+
+static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct g2d_cmdlist_node *node)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_cmdlist *cmdlist = node->cmdlist;
+ dma_addr_t *addr;
+ int offset;
+ int i;
+
+ for (i = 0; i < node->gem_nr; i++) {
+ struct g2d_gem_node *gem_node;
+
+ gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+ if (!gem_node) {
+ dev_err(g2d_priv->dev, "failed to allocate gem node\n");
+ return -ENOMEM;
+ }
+
+ offset = cmdlist->last - (i * 2 + 1);
+ gem_node->handle = cmdlist->data[offset];
+
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
+ file);
+ if (IS_ERR(addr)) {
+ node->gem_nr = i;
+ kfree(gem_node);
+ return PTR_ERR(addr);
+ }
+
+ cmdlist->data[offset] = *addr;
+ list_add_tail(&gem_node->list, &g2d_priv->gem_list);
+ g2d_priv->gem_nr++;
+ }
+
+ return 0;
+}
+
+static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
+ struct drm_file *file,
+ unsigned int nr)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_gem_node *node, *n;
+
+ list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
+ if (!nr)
+ break;
+
+ exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
+ list_del_init(&node->list);
+ kfree(node);
+ nr--;
+ }
+}
+
+static void g2d_dma_start(struct g2d_data *g2d,
+ struct g2d_runqueue_node *runqueue_node)
+{
+ struct g2d_cmdlist_node *node =
+ list_first_entry(&runqueue_node->run_cmdlist,
+ struct g2d_cmdlist_node, list);
+
+ pm_runtime_get_sync(g2d->dev);
+ clk_enable(g2d->gate_clk);
+
+ /* interrupt enable */
+ writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
+ g2d->regs + G2D_INTEN);
+
+ writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
+ writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
+}
+
+static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
+{
+ struct g2d_runqueue_node *runqueue_node;
+
+ if (list_empty(&g2d->runqueue))
+ return NULL;
+
+ runqueue_node = list_first_entry(&g2d->runqueue,
+ struct g2d_runqueue_node, list);
+ list_del_init(&runqueue_node->list);
+ return runqueue_node;
+}
+
+static void g2d_free_runqueue_node(struct g2d_data *g2d,
+ struct g2d_runqueue_node *runqueue_node)
+{
+ if (!runqueue_node)
+ return;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+}
+
+static void g2d_exec_runqueue(struct g2d_data *g2d)
+{
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+ if (g2d->runqueue_node)
+ g2d_dma_start(g2d, g2d->runqueue_node);
+}
+
+static void g2d_runqueue_worker(struct work_struct *work)
+{
+ struct g2d_data *g2d = container_of(work, struct g2d_data,
+ runqueue_work);
+
+
+ mutex_lock(&g2d->runqueue_mutex);
+ clk_disable(g2d->gate_clk);
+ pm_runtime_put_sync(g2d->dev);
+
+ complete(&g2d->runqueue_node->complete);
+ if (g2d->runqueue_node->async)
+ g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+
+ if (g2d->suspended)
+ g2d->runqueue_node = NULL;
+ else
+ g2d_exec_runqueue(g2d);
+ mutex_unlock(&g2d->runqueue_mutex);
+}
+
+static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
+{
+ struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
+ struct drm_exynos_pending_g2d_event *e;
+ struct timeval now;
+ unsigned long flags;
+
+ if (list_empty(&runqueue_node->event_list))
+ return;
+
+ e = list_first_entry(&runqueue_node->event_list,
+ struct drm_exynos_pending_g2d_event, base.link);
+
+ do_gettimeofday(&now);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.cmdlist_no = cmdlist_no;
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
+{
+ struct g2d_data *g2d = dev_id;
+ u32 pending;
+
+ pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
+ if (pending)
+ writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
+
+ if (pending & G2D_INTP_GCMD_FIN) {
+ u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
+
+ cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
+ G2D_DMA_LIST_DONE_COUNT_OFFSET;
+
+ g2d_finish_event(g2d, cmdlist_no);
+
+ writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
+ if (!(pending & G2D_INTP_ACMD_FIN)) {
+ writel_relaxed(G2D_DMA_CONTINUE,
+ g2d->regs + G2D_DMA_COMMAND);
+ }
+ }
+
+ if (pending & G2D_INTP_ACMD_FIN)
+ queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
+ return IRQ_HANDLED;
+}
+
+static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+ int nr, bool for_addr)
+{
+ int reg_offset;
+ int index;
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ index = cmdlist->last - 2 * (i + 1);
+ reg_offset = cmdlist->data[index] & ~0xfffff000;
+
+ if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
+ goto err;
+ if (reg_offset % 4)
+ goto err;
+
+ switch (reg_offset) {
+ case G2D_SRC_BASE_ADDR:
+ case G2D_SRC_PLANE2_BASE_ADDR:
+ case G2D_DST_BASE_ADDR:
+ case G2D_DST_PLANE2_BASE_ADDR:
+ case G2D_PAT_BASE_ADDR:
+ case G2D_MSK_BASE_ADDR:
+ if (!for_addr)
+ goto err;
+ break;
+ default:
+ if (for_addr)
+ goto err;
+ break;
+ }
+ }
+
+ return 0;
+
+err:
+ dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+ return -EINVAL;
+}
+
+/* ioctl functions */
+int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_g2d_get_ver *ver = data;
+
+ ver->major = G2D_HW_MAJOR_VER;
+ ver->minor = G2D_HW_MINOR_VER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
+
+int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev = g2d_priv->dev;
+ struct g2d_data *g2d;
+ struct drm_exynos_g2d_set_cmdlist *req = data;
+ struct drm_exynos_g2d_cmd *cmd;
+ struct drm_exynos_pending_g2d_event *e;
+ struct g2d_cmdlist_node *node;
+ struct g2d_cmdlist *cmdlist;
+ unsigned long flags;
+ int size;
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ node = g2d_get_cmdlist(g2d);
+ if (!node)
+ return -ENOMEM;
+
+ node->event = NULL;
+
+ if (req->event_type != G2D_EVENT_NOT) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (file->event_space < sizeof(e->event)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ ret = -ENOMEM;
+ goto err;
+ }
+ file->event_space -= sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ e = kzalloc(sizeof(*node->event), GFP_KERNEL);
+ if (!e) {
+ dev_err(dev, "failed to allocate event\n");
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ e->event.base.type = DRM_EXYNOS_G2D_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = req->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ node->event = e;
+ }
+
+ cmdlist = node->cmdlist;
+
+ cmdlist->last = 0;
+
+ /*
+ * If the SFR registers are not cleared, the cmdlist inherits register
+ * values from the previous cmdlist. The G2D hardware executes the SFR
+ * clear command and the command right after it at the same time, so
+ * that following command is ignored and execution resumes correctly
+ * only from the command after that; a dummy command therefore has to
+ * be placed directly after the SFR clear command.
+ */
+ cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
+ cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
+ cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
+ cmdlist->data[cmdlist->last++] = 0;
+
+ if (node->event) {
+ cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
+ cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
+ }
+
+ /* check the cmdlist size; the last two entries are reserved for G2D_BITBLT_START */
+ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+ if (size > G2D_CMDLIST_DATA_NUM) {
+ dev_err(dev, "cmdlist size is too big\n");
+ ret = -EINVAL;
+ goto err_free_event;
+ }
+
+ cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;
+
+ if (copy_from_user(cmdlist->data + cmdlist->last,
+ (void __user *)cmd,
+ sizeof(*cmd) * req->cmd_nr)) {
+ ret = -EFAULT;
+ goto err_free_event;
+ }
+ cmdlist->last += req->cmd_nr * 2;
+
+ ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+ if (ret < 0)
+ goto err_free_event;
+
+ node->gem_nr = req->cmd_gem_nr;
+ if (req->cmd_gem_nr) {
+ struct drm_exynos_g2d_cmd *cmd_gem;
+
+ cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+
+ if (copy_from_user(cmdlist->data + cmdlist->last,
+ (void __user *)cmd_gem,
+ sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+ ret = -EFAULT;
+ goto err_free_event;
+ }
+ cmdlist->last += req->cmd_gem_nr * 2;
+
+ ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+ if (ret < 0)
+ goto err_free_event;
+
+ ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+ if (ret < 0)
+ goto err_unmap;
+ }
+
+ cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
+ cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
+
+ /* head */
+ cmdlist->head = cmdlist->last / 2;
+
+ /* tail */
+ cmdlist->data[cmdlist->last] = 0;
+
+ g2d_add_cmdlist_to_inuse(g2d_priv, node);
+
+ return 0;
+
+err_unmap:
+ g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+err_free_event:
+ if (node->event) {
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ kfree(node->event);
+ }
+err:
+ g2d_put_cmdlist(g2d, node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
+
+int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev = g2d_priv->dev;
+ struct g2d_data *g2d;
+ struct drm_exynos_g2d_exec *req = data;
+ struct g2d_runqueue_node *runqueue_node;
+ struct list_head *run_cmdlist;
+ struct list_head *event_list;
+
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
+ if (!runqueue_node) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+ run_cmdlist = &runqueue_node->run_cmdlist;
+ event_list = &runqueue_node->event_list;
+ INIT_LIST_HEAD(run_cmdlist);
+ INIT_LIST_HEAD(event_list);
+ init_completion(&runqueue_node->complete);
+ runqueue_node->async = req->async;
+
+ list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
+ list_splice_init(&g2d_priv->event_list, event_list);
+
+ if (list_empty(run_cmdlist)) {
+ dev_err(dev, "there is no inuse cmdlist\n");
+ kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+ return -EPERM;
+ }
+
+ mutex_lock(&g2d->runqueue_mutex);
+ list_add_tail(&runqueue_node->list, &g2d->runqueue);
+ if (!g2d->runqueue_node)
+ g2d_exec_runqueue(g2d);
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ if (runqueue_node->async)
+ goto out;
+
+ wait_for_completion(&runqueue_node->complete);
+ g2d_free_runqueue_node(g2d, runqueue_node);
+
+out:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
+
+static int g2d_open(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv;
+
+ g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
+ if (!g2d_priv) {
+ dev_err(dev, "failed to allocate g2d private data\n");
+ return -ENOMEM;
+ }
+
+ g2d_priv->dev = dev;
+ file_priv->g2d_priv = g2d_priv;
+
+ INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
+ INIT_LIST_HEAD(&g2d_priv->event_list);
+ INIT_LIST_HEAD(&g2d_priv->gem_list);
+
+ return 0;
+}
+
+static void g2d_close(struct drm_device *drm_dev, struct device *dev,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct g2d_data *g2d;
+ struct g2d_cmdlist_node *node, *n;
+
+ if (!dev)
+ return;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return;
+
+ mutex_lock(&g2d->cmdlist_mutex);
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_move_tail(&node->list, &g2d->free_cmdlist);
+ mutex_unlock(&g2d->cmdlist_mutex);
+
+ g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+
+ kfree(file_priv->g2d_priv);
+}
+
+static int __devinit g2d_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct g2d_data *g2d;
+ struct exynos_drm_subdrv *subdrv;
+ int ret;
+
+ g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
+ if (!g2d) {
+ dev_err(dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
+ sizeof(struct g2d_runqueue_node), 0, 0, NULL);
+ if (!g2d->runqueue_slab) {
+ ret = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ g2d->dev = dev;
+
+ g2d->g2d_workq = create_singlethread_workqueue("g2d");
+ if (!g2d->g2d_workq) {
+ dev_err(dev, "failed to create workqueue\n");
+ ret = -EINVAL;
+ goto err_destroy_slab;
+ }
+
+ INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
+ INIT_LIST_HEAD(&g2d->free_cmdlist);
+ INIT_LIST_HEAD(&g2d->runqueue);
+
+ mutex_init(&g2d->cmdlist_mutex);
+ mutex_init(&g2d->runqueue_mutex);
+
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0)
+ goto err_destroy_workqueue;
+
+ g2d->gate_clk = clk_get(dev, "fimg2d");
+ if (IS_ERR(g2d->gate_clk)) {
+ dev_err(dev, "failed to get gate clock\n");
+ ret = PTR_ERR(g2d->gate_clk);
+ goto err_fini_cmdlist;
+ }
+
+ pm_runtime_enable(dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get I/O memory\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ g2d->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(dev));
+ if (!g2d->regs_res) {
+ dev_err(dev, "failed to request I/O memory\n");
+ ret = -ENOENT;
+ goto err_put_clk;
+ }
+
+ g2d->regs = ioremap(res->start, resource_size(res));
+ if (!g2d->regs) {
+ dev_err(dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto err_release_res;
+ }
+
+ g2d->irq = platform_get_irq(pdev, 0);
+ if (g2d->irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ ret = g2d->irq;
+ goto err_unmap_base;
+ }
+
+ ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
+ if (ret < 0) {
+ dev_err(dev, "irq request failed\n");
+ goto err_unmap_base;
+ }
+
+ platform_set_drvdata(pdev, g2d);
+
+ subdrv = &g2d->subdrv;
+ subdrv->dev = dev;
+ subdrv->open = g2d_open;
+ subdrv->close = g2d_close;
+
+ ret = exynos_drm_subdrv_register(subdrv);
+ if (ret < 0) {
+ dev_err(dev, "failed to register drm g2d device\n");
+ goto err_free_irq;
+ }
+
+ dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
+ G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+
+ return 0;
+
+err_free_irq:
+ free_irq(g2d->irq, g2d);
+err_unmap_base:
+ iounmap(g2d->regs);
+err_release_res:
+ release_resource(g2d->regs_res);
+ kfree(g2d->regs_res);
+err_put_clk:
+ pm_runtime_disable(dev);
+ clk_put(g2d->gate_clk);
+err_fini_cmdlist:
+ g2d_fini_cmdlist(g2d);
+err_destroy_workqueue:
+ destroy_workqueue(g2d->g2d_workq);
+err_destroy_slab:
+ kmem_cache_destroy(g2d->runqueue_slab);
+err_free_mem:
+ kfree(g2d);
+ return ret;
+}
+
+static int __devexit g2d_remove(struct platform_device *pdev)
+{
+ struct g2d_data *g2d = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&g2d->runqueue_work);
+ exynos_drm_subdrv_unregister(&g2d->subdrv);
+ free_irq(g2d->irq, g2d);
+
+ while (g2d->runqueue_node) {
+ g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+ g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+ }
+
+ iounmap(g2d->regs);
+ release_resource(g2d->regs_res);
+ kfree(g2d->regs_res);
+
+ pm_runtime_disable(&pdev->dev);
+ clk_put(g2d->gate_clk);
+
+ g2d_fini_cmdlist(g2d);
+ destroy_workqueue(g2d->g2d_workq);
+ kmem_cache_destroy(g2d->runqueue_slab);
+ kfree(g2d);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int g2d_suspend(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ mutex_lock(&g2d->runqueue_mutex);
+ g2d->suspended = true;
+ mutex_unlock(&g2d->runqueue_mutex);
+
+ while (g2d->runqueue_node)
+ /* FIXME: good range? */
+ usleep_range(500, 1000);
+
+ flush_work_sync(&g2d->runqueue_work);
+
+ return 0;
+}
+
+static int g2d_resume(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ g2d->suspended = false;
+ g2d_exec_runqueue(g2d);
+
+ return 0;
+}
+#endif
+
+SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+
+struct platform_driver g2d_driver = {
+ .probe = g2d_probe,
+ .remove = __devexit_p(g2d_remove),
+ .driver = {
+ .name = "s5p-g2d",
+ .owner = THIS_MODULE,
+ .pm = &g2d_pm_ops,
+ },
+};
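
Seen from userspace, a G2D job is two ioctls: SET_CMDLIST queues (register offset, value) pairs, with base-address registers passed separately as (offset, GEM handle) pairs so g2d_get_cmdlist_gem() can swap the handles for device addresses, and EXEC pushes the queued cmdlists onto the runqueue. A hedged sketch follows; the ioctl macros, the header path and the (offset, data) layout of struct drm_exynos_g2d_cmd are assumed from the exynos uapi header, which is not part of this hunk, and the 0x0124 register write is purely illustrative.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>	/* assumed uapi header providing the G2D structs */

/* illustrative submission: one plain SFR write plus one GEM-backed address */
static int g2d_submit(int fd, uint32_t dst_handle)
{
	struct drm_exynos_g2d_cmd cmds[] = {
		{ .offset = 0x0124, .data = 0 },		/* any non-address SFR in 0x0104..0x0880 */
	};
	struct drm_exynos_g2d_cmd gem_cmds[] = {
		{ .offset = 0x0404, .data = dst_handle },	/* G2D_DST_BASE_ADDR takes a GEM handle */
	};
	struct drm_exynos_g2d_set_cmdlist set = {
		.cmd		= (uint64_t)(uintptr_t)cmds,
		.cmd_nr		= 1,
		.cmd_gem	= (uint64_t)(uintptr_t)gem_cmds,
		.cmd_gem_nr	= 1,
		.event_type	= G2D_EVENT_NOT,	/* no completion event */
	};
	struct drm_exynos_g2d_exec exec = { .async = 0 };	/* wait for completion */

	if (ioctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &set) < 0)
		return -1;
	return ioctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
}
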
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
new file mode 100644
index 000000000000..1a9c7ca8c15b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+extern int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+#else
+static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1dffa8359f88..5c8b683029ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -66,6 +66,22 @@ static int check_gem_flags(unsigned int flags)
return 0;
}
+static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
+ struct vm_area_struct *vma)
+{
+ DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+ /* non-cacheable by default. */
+ if (obj->flags & EXYNOS_BO_CACHABLE)
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ else if (obj->flags & EXYNOS_BO_WC)
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+}
+
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
if (!IS_NONCONTIG_BUFFER(flags)) {
@@ -80,7 +96,7 @@ out:
return roundup(size, PAGE_SIZE);
}
-static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
struct inode *inode;
@@ -180,6 +196,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
}
npages = obj->size >> PAGE_SHIFT;
+ buf->page_size = PAGE_SIZE;
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
@@ -262,24 +279,24 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
struct drm_gem_object *obj;
+ struct exynos_drm_gem_buf *buf;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!exynos_gem_obj)
- return;
-
obj = &exynos_gem_obj->base;
+ buf = exynos_gem_obj->buffer;
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
- exynos_gem_obj->buffer->pages)
+ if (!buf->pages)
+ return;
+
+ if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
exynos_drm_gem_put_pages(obj);
else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
- exynos_gem_obj->buffer);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
- exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
+ exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
if (obj->map_list.map)
@@ -292,7 +309,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
exynos_gem_obj = NULL;
}
-static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -493,8 +510,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vma->vm_flags |= (VM_IO | VM_RESERVED);
- /* in case of direct mapping, always having non-cachable attribute */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ update_vm_cache_attr(exynos_gem_obj, vma);
vm_size = usize = vma->vm_end - vma->vm_start;
@@ -588,6 +604,32 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_exynos_gem_info *args = data;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ args->flags = exynos_gem_obj->flags;
+ args->size = exynos_gem_obj->size;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -597,8 +639,17 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+ buf = exynos_gem_obj->buffer;
+
+ if (obj->import_attach)
+ drm_prime_gem_destroy(obj, buf->sgt);
+
exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
@@ -638,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
int ret = 0;
@@ -659,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- if (!exynos_gem_obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+ if (!obj->map_list.map) {
+ ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
@@ -724,6 +772,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -735,8 +785,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
+ obj = vma->vm_private_data;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_gem_flags(exynos_gem_obj->flags);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ drm_gem_free_mmap_offset(obj);
+ return ret;
+ }
+
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
+ update_vm_cache_attr(exynos_gem_obj, vma);
+
return ret;
}
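
Together with the cache-attribute handling above, the new EXYNOS_GEM_GET ioctl lets userspace read back a buffer's allocation flags and size for an existing handle. A small hedged sketch of a caller follows; the ioctl macro name and header path are assumptions based on the exynos uapi naming, and only the drm_exynos_gem_info fields (handle, flags, size) are visible in this patch.

#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>	/* assumed uapi header */

static void dump_bo_info(int fd, unsigned int handle)
{
	struct drm_exynos_gem_info info = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_GET, &info) < 0)	/* assumed macro name */
		return;

	/* flags carry the EXYNOS_BO_* allocation bits, size is in bytes */
	printf("bo %u: flags 0x%x, size %llu\n",
	       handle, info.flags, (unsigned long long)info.size);
}
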
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 4ed842039505..14d038b6cb02 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -40,6 +40,7 @@
* device address with IOMMU.
* @sgt: sg table to transfer page data.
* @pages: contain all pages to allocated memory region.
+ * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
*/
struct exynos_drm_gem_buf {
@@ -47,6 +48,7 @@ struct exynos_drm_gem_buf {
dma_addr_t dma_addr;
struct sg_table *sgt;
struct page **pages;
+ unsigned long page_size;
unsigned long size;
};
@@ -74,9 +76,15 @@ struct exynos_drm_gem_obj {
unsigned int flags;
};
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+/* create a private gem object and initialize it. */
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+ unsigned long size);
+
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
@@ -119,6 +127,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* get buffer information to memory region allocated by gem. */
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 3424463676e0..5d9d2c2f8f3f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -37,6 +37,8 @@ struct drm_hdmi_context {
struct exynos_drm_subdrv subdrv;
struct exynos_drm_hdmi_context *hdmi_ctx;
struct exynos_drm_hdmi_context *mixer_ctx;
+
+ bool enabled[MIXER_WIN_NR];
};
void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
@@ -189,23 +191,34 @@ static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
DRM_DEBUG_KMS("%s\n", __FILE__);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (hdmi_ops && hdmi_ops->disable)
- hdmi_ops->disable(ctx->hdmi_ctx->ctx);
- break;
- default:
- DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
- break;
+ if (mixer_ops && mixer_ops->dpms)
+ mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
+
+ if (hdmi_ops && hdmi_ops->dpms)
+ hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_apply(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ for (i = 0; i < MIXER_WIN_NR; i++) {
+ if (!ctx->enabled[i])
+ continue;
+ if (mixer_ops && mixer_ops->win_commit)
+ mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
}
+
+ if (hdmi_ops && hdmi_ops->commit)
+ hdmi_ops->commit(ctx->hdmi_ctx->ctx);
}
static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.dpms = drm_hdmi_dpms,
+ .apply = drm_hdmi_apply,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
@@ -228,21 +241,37 @@ static void drm_mixer_mode_set(struct device *subdrv_dev,
static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (win < 0 || win >= MIXER_WIN_NR) {
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
+ return;
+ }
+
if (mixer_ops && mixer_ops->win_commit)
- mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+ mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);
+
+ ctx->enabled[win] = true;
}
static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (win < 0 || win >= MIXER_WIN_NR) {
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
+ return;
+ }
+
if (mixer_ops && mixer_ops->win_disable)
- mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+ mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);
+
+ ctx->enabled[win] = false;
}
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
@@ -335,25 +364,6 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
return 0;
}
-static int hdmi_runtime_suspend(struct device *dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
{
struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -372,6 +382,5 @@ struct platform_driver exynos_drm_common_hdmi_driver = {
.driver = {
.name = "exynos-drm-hdmi",
.owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index f3ae192c8dcf..bd8126996e52 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -26,6 +26,9 @@
#ifndef _EXYNOS_DRM_HDMI_H_
#define _EXYNOS_DRM_HDMI_H_
+#define MIXER_WIN_NR 3
+#define MIXER_DEFAULT_WIN 0
+
/*
* exynos hdmi common context structure.
*
@@ -54,13 +57,14 @@ struct exynos_hdmi_ops {
void (*get_max_resol)(void *ctx, unsigned int *width,
unsigned int *height);
void (*commit)(void *ctx);
- void (*disable)(void *ctx);
+ void (*dpms)(void *ctx, int mode);
};
struct exynos_mixer_ops {
/* manager */
int (*enable_vblank)(void *ctx, int pipe);
void (*disable_vblank)(void *ctx);
+ void (*dpms)(void *ctx, int mode);
/* overlay */
void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index f92fe4c6174a..c4c6525d4653 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -41,8 +41,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
container_of(plane, struct exynos_plane, base);
struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
struct exynos_drm_crtc_pos pos;
- unsigned int x = src_x >> 16;
- unsigned int y = src_y >> 16;
int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -53,10 +51,12 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
pos.crtc_w = crtc_w;
pos.crtc_h = crtc_h;
- pos.fb_x = x;
- pos.fb_y = y;
+ /* considering 16.16 fixed point of source values */
+ pos.fb_x = src_x >> 16;
+ pos.fb_y = src_y >> 16;
+ pos.src_w = src_w >> 16;
+ pos.src_h = src_h >> 16;
- /* TODO: scale feature */
ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
if (ret < 0)
return ret;
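
The source rectangle handed to update_plane is in 16.16 fixed point, which is why the conversion above shifts right by 16 to recover whole pixels. A short standalone illustration (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1280.5 expressed in 16.16 fixed point: integer part in the upper 16 bits */
	uint32_t src_w_fixed = (1280u << 16) | (1u << 15);

	/* the driver's ">> 16" keeps only the integer pixel count */
	printf("%u\n", src_w_fixed >> 16);	/* prints 1280 */
	return 0;
}
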
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index b00353876458..a137e9e39a33 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -57,18 +57,16 @@ struct hdmi_resources {
struct hdmi_context {
struct device *dev;
struct drm_device *drm_dev;
- struct fb_videomode *default_timing;
- unsigned int is_v13:1;
- unsigned int default_win;
- unsigned int default_bpp;
- bool hpd_handle;
- bool enabled;
+ bool hpd;
+ bool powered;
+ bool is_v13;
+ bool dvi_mode;
+ struct mutex hdmi_mutex;
struct resource *regs_res;
void __iomem *regs;
- unsigned int irq;
- struct workqueue_struct *wq;
- struct work_struct hotplug_work;
+ unsigned int external_irq;
+ unsigned int internal_irq;
struct i2c_client *ddc_port;
struct i2c_client *hdmiphy_port;
@@ -78,6 +76,9 @@ struct hdmi_context {
struct hdmi_resources res;
void *parent_ctx;
+
+ void (*cfg_hpd)(bool external);
+ int (*get_hpd)(void);
};
/* HDMI Version 1.3 */
@@ -361,6 +362,13 @@ static const u8 hdmiphy_conf27_027[32] = {
0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
};
+static const u8 hdmiphy_conf74_176[32] = {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
+ 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+};
+
static const u8 hdmiphy_conf74_25[32] = {
0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
@@ -750,6 +758,63 @@ static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
},
};
+static const struct hdmi_preset_conf hdmi_conf_1080p30 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v2_blank = {0x65, 0x04},
+ .v1_blank = {0x2d, 0x00},
+ .v_line = {0x65, 0x04},
+ .h_line = {0x98, 0x08},
+ .hsync_pol = {0x00},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f0 = {0xff, 0xff},
+ .v_blank_f1 = {0xff, 0xff},
+ .h_sync_start = {0x56, 0x00},
+ .h_sync_end = {0x82, 0x00},
+ .v_sync_line_bef_2 = {0x09, 0x00},
+ .v_sync_line_bef_1 = {0x04, 0x00},
+ .v_sync_line_aft_2 = {0xff, 0xff},
+ .v_sync_line_aft_1 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_2 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_1 = {0xff, 0xff},
+ .v_blank_f2 = {0xff, 0xff},
+ .v_blank_f3 = {0xff, 0xff},
+ .v_blank_f4 = {0xff, 0xff},
+ .v_blank_f5 = {0xff, 0xff},
+ .v_sync_line_aft_3 = {0xff, 0xff},
+ .v_sync_line_aft_4 = {0xff, 0xff},
+ .v_sync_line_aft_5 = {0xff, 0xff},
+ .v_sync_line_aft_6 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_3 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_4 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_5 = {0xff, 0xff},
+ .v_sync_line_aft_pxl_6 = {0xff, 0xff},
+ .vact_space_1 = {0xff, 0xff},
+ .vact_space_2 = {0xff, 0xff},
+ .vact_space_3 = {0xff, 0xff},
+ .vact_space_4 = {0xff, 0xff},
+ .vact_space_5 = {0xff, 0xff},
+ .vact_space_6 = {0xff, 0xff},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x00, 0x00, /* vact_st3 */
+ 0x00, 0x00, /* vact_st4 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ 0x00, /* 3d FP */
+ },
+};
+
static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
.core = {
.h_blank = {0xd0, 0x02},
@@ -864,6 +929,7 @@ static const struct hdmi_conf hdmi_confs[] = {
{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
{ 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
};
@@ -1194,12 +1260,8 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
static bool hdmi_is_connected(void *ctx)
{
struct hdmi_context *hdata = ctx;
- u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
-
- if (val)
- return true;
- return false;
+ return hdata->hpd;
}
static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
@@ -1215,10 +1277,12 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
if (raw_edid) {
+ hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
* EDID_LENGTH, len));
- DRM_DEBUG_KMS("width[%d] x height[%d]\n",
- raw_edid->width_cm, raw_edid->height_cm);
+ DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ raw_edid->width_cm, raw_edid->height_cm);
} else {
return -ENODEV;
}
@@ -1289,28 +1353,6 @@ static int hdmi_check_timing(void *ctx, void *timing)
return hdmi_v14_check_timing(check_timing);
}
-static int hdmi_display_power_on(void *ctx, int mode)
-{
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- DRM_DEBUG_KMS("hdmi [on]\n");
- break;
- case DRM_MODE_DPMS_STANDBY:
- break;
- case DRM_MODE_DPMS_SUSPEND:
- break;
- case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("hdmi [off]\n");
- break;
- default:
- break;
- }
-
- return 0;
-}
-
static void hdmi_set_acr(u32 freq, u8 *acr)
{
u32 n, cts;
@@ -1463,10 +1505,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
{
- u32 mod;
-
- mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
- if (mod & HDMI_DVI_MODE_EN)
+ if (hdata->dvi_mode)
return;
hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
@@ -1478,9 +1517,6 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
{
u32 reg;
- /* disable hpd handle for drm */
- hdata->hpd_handle = false;
-
if (hdata->is_v13)
reg = HDMI_V13_CORE_RSTOUT;
else
@@ -1491,16 +1527,10 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
mdelay(10);
hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
mdelay(10);
-
- /* enable hpd handle for drm */
- hdata->hpd_handle = true;
}
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- /* disable hpd handle for drm */
- hdata->hpd_handle = false;
-
/* enable HPD interrupts */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1514,6 +1544,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
/* disable bluescreen */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+ if (hdata->dvi_mode) {
+ /* choose DVI mode */
+ hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+ HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
+ hdmi_reg_writeb(hdata, HDMI_CON_2,
+ HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
+ }
+
if (hdata->is_v13) {
/* choose bluescreen (fecal) color */
hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
@@ -1535,9 +1573,6 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
}
-
- /* enable hpd handle for drm */
- hdata->hpd_handle = true;
}
static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
@@ -1890,8 +1925,11 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
hdmiphy_conf_reset(hdata);
hdmiphy_conf_apply(hdata);
+ mutex_lock(&hdata->hdmi_mutex);
hdmi_conf_reset(hdata);
hdmi_conf_init(hdata);
+ mutex_unlock(&hdata->hdmi_mutex);
+
hdmi_audio_init(hdata);
/* setting core registers */
@@ -1971,20 +2009,86 @@ static void hdmi_commit(void *ctx)
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmi_conf_apply(hdata);
+}
+
+static void hdmi_poweron(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (hdata->powered) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+
+ hdata->powered = true;
+
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(true);
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ pm_runtime_get_sync(hdata->dev);
+
+ regulator_bulk_enable(res->regul_count, res->regul_bulk);
+ clk_enable(res->hdmiphy);
+ clk_enable(res->hdmi);
+ clk_enable(res->sclk_hdmi);
+}
+
+static void hdmi_poweroff(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&hdata->hdmi_mutex);
+ if (!hdata->powered)
+ goto out;
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ /*
+ * The TV power domain needs any condition of hdmiphy to turn off and
+ * its reset state seems to meet the condition.
+ */
+ hdmiphy_conf_reset(hdata);
+
+ clk_disable(res->sclk_hdmi);
+ clk_disable(res->hdmi);
+ clk_disable(res->hdmiphy);
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+
+ pm_runtime_put_sync(hdata->dev);
- hdata->enabled = true;
+ mutex_lock(&hdata->hdmi_mutex);
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(false);
+
+ hdata->powered = false;
+
+out:
+ mutex_unlock(&hdata->hdmi_mutex);
}
-static void hdmi_disable(void *ctx)
+static void hdmi_dpms(void *ctx, int mode)
{
struct hdmi_context *hdata = ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (hdata->enabled) {
- hdmi_audio_control(hdata, false);
- hdmiphy_conf_reset(hdata);
- hdmi_conf_reset(hdata);
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ hdmi_poweron(hdata);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ hdmi_poweroff(hdata);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
}
}
@@ -1993,30 +2097,35 @@ static struct exynos_hdmi_ops hdmi_ops = {
.is_connected = hdmi_is_connected,
.get_edid = hdmi_get_edid,
.check_timing = hdmi_check_timing,
- .power_on = hdmi_display_power_on,
/* manager */
.mode_fixup = hdmi_mode_fixup,
.mode_set = hdmi_mode_set,
.get_max_resol = hdmi_get_max_resol,
.commit = hdmi_commit,
- .disable = hdmi_disable,
+ .dpms = hdmi_dpms,
};
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-static void hdmi_hotplug_func(struct work_struct *work)
+static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
{
- struct hdmi_context *hdata =
- container_of(work, struct hdmi_context, hotplug_work);
- struct exynos_drm_hdmi_context *ctx =
- (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
+ struct exynos_drm_hdmi_context *ctx = arg;
+ struct hdmi_context *hdata = ctx->ctx;
+
+ if (!hdata->get_hpd)
+ goto out;
+
+ mutex_lock(&hdata->hdmi_mutex);
+ hdata->hpd = hdata->get_hpd();
+ mutex_unlock(&hdata->hdmi_mutex);
- drm_helper_hpd_irq_event(ctx->drm_dev);
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+out:
+ return IRQ_HANDLED;
}
-static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
{
struct exynos_drm_hdmi_context *ctx = arg;
struct hdmi_context *hdata = ctx->ctx;
@@ -2025,19 +2134,28 @@ static irqreturn_t hdmi_irq_handler(int irq, void *arg)
intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
/* clearing flags for HPD plug/unplug */
if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
- DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
+ DRM_DEBUG_KMS("unplugged\n");
hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_UNPLUG);
}
if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
- DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
+ DRM_DEBUG_KMS("plugged\n");
hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
HDMI_INTC_FLAG_HPD_PLUG);
}
- if (ctx->drm_dev && hdata->hpd_handle)
- queue_work(hdata->wq, &hdata->hotplug_work);
+ mutex_lock(&hdata->hdmi_mutex);
+ hdata->hpd = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
+ if (hdata->powered && hdata->hpd) {
+ mutex_unlock(&hdata->hdmi_mutex);
+ goto out;
+ }
+ mutex_unlock(&hdata->hdmi_mutex);
+
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+out:
return IRQ_HANDLED;
}
@@ -2131,68 +2249,6 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
return 0;
}
-static void hdmi_resource_poweron(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- /* turn HDMI power on */
- regulator_bulk_enable(res->regul_count, res->regul_bulk);
- /* power-on hdmi physical interface */
- clk_enable(res->hdmiphy);
- /* turn clocks on */
- clk_enable(res->hdmi);
- clk_enable(res->sclk_hdmi);
-
- hdmiphy_conf_reset(hdata);
- hdmi_conf_reset(hdata);
- hdmi_conf_init(hdata);
- hdmi_audio_init(hdata);
-}
-
-static void hdmi_resource_poweroff(struct hdmi_context *hdata)
-{
- struct hdmi_resources *res = &hdata->res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- /* turn clocks off */
- clk_disable(res->sclk_hdmi);
- clk_disable(res->hdmi);
- /* power-off hdmiphy */
- clk_disable(res->hdmiphy);
- /* turn HDMI power off */
- regulator_bulk_disable(res->regul_count, res->regul_bulk);
-}
-
-static int hdmi_runtime_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __func__);
-
- hdmi_resource_poweroff(ctx->ctx);
-
- return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __func__);
-
- hdmi_resource_poweron(ctx->ctx);
-
- return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
- .runtime_suspend = hdmi_runtime_suspend,
- .runtime_resume = hdmi_runtime_resume,
-};
-
static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2237,15 +2293,16 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mutex_init(&hdata->hdmi_mutex);
+
drm_hdmi_ctx->ctx = (void *)hdata;
hdata->parent_ctx = (void *)drm_hdmi_ctx;
platform_set_drvdata(pdev, drm_hdmi_ctx);
hdata->is_v13 = pdata->is_v13;
- hdata->default_win = pdata->default_win;
- hdata->default_timing = &pdata->timing;
- hdata->default_bpp = pdata->bpp;
+ hdata->cfg_hpd = pdata->cfg_hpd;
+ hdata->get_hpd = pdata->get_hpd;
hdata->dev = dev;
ret = hdmi_resources_init(hdata);
@@ -2294,41 +2351,49 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- DRM_ERROR("get interrupt resource failed.\n");
- ret = -ENXIO;
+ hdata->external_irq = platform_get_irq_byname(pdev, "external_irq");
+ if (hdata->external_irq < 0) {
+ DRM_ERROR("failed to get platform irq\n");
+ ret = hdata->external_irq;
goto err_hdmiphy;
}
- /* create workqueue and hotplug work */
- hdata->wq = alloc_workqueue("exynos-drm-hdmi",
- WQ_UNBOUND | WQ_NON_REENTRANT, 1);
- if (hdata->wq == NULL) {
- DRM_ERROR("Failed to create workqueue.\n");
- ret = -ENOMEM;
+ hdata->internal_irq = platform_get_irq_byname(pdev, "internal_irq");
+ if (hdata->internal_irq < 0) {
+ DRM_ERROR("failed to get platform internal irq\n");
+ ret = hdata->internal_irq;
goto err_hdmiphy;
}
- INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
- /* register hpd interrupt */
- ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
- drm_hdmi_ctx);
+ ret = request_threaded_irq(hdata->external_irq, NULL,
+ hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hdmi_external", drm_hdmi_ctx);
if (ret) {
- DRM_ERROR("request interrupt failed.\n");
- goto err_workqueue;
+ DRM_ERROR("failed to register hdmi internal interrupt\n");
+ goto err_hdmiphy;
+ }
+
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(false);
+
+ ret = request_threaded_irq(hdata->internal_irq, NULL,
+ hdmi_internal_irq_thread, IRQF_ONESHOT,
+ "hdmi_internal", drm_hdmi_ctx);
+ if (ret) {
+ DRM_ERROR("failed to register hdmi internal interrupt\n");
+ goto err_free_irq;
}
- hdata->irq = res->start;
/* register specific callbacks to common hdmi. */
exynos_hdmi_ops_register(&hdmi_ops);
- hdmi_resource_poweron(hdata);
+ pm_runtime_enable(dev);
return 0;
-err_workqueue:
- destroy_workqueue(hdata->wq);
+err_free_irq:
+ free_irq(hdata->external_irq, drm_hdmi_ctx);
err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
@@ -2348,18 +2413,15 @@ err_data:
static int __devexit hdmi_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
struct hdmi_context *hdata = ctx->ctx;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- hdmi_resource_poweroff(hdata);
+ pm_runtime_disable(dev);
- disable_irq(hdata->irq);
- free_irq(hdata->irq, hdata);
-
- cancel_work_sync(&hdata->hotplug_work);
- destroy_workqueue(hdata->wq);
+ free_irq(hdata->internal_irq, hdata);
hdmi_resources_cleanup(hdata);
@@ -2378,12 +2440,43 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int hdmi_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+
+ disable_irq(hdata->internal_irq);
+ disable_irq(hdata->external_irq);
+
+ hdata->hpd = false;
+ if (ctx->drm_dev)
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+
+ hdmi_poweroff(hdata);
+
+ return 0;
+}
+
+static int hdmi_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+ struct hdmi_context *hdata = ctx->ctx;
+
+ enable_irq(hdata->external_irq);
+ enable_irq(hdata->internal_irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = __devexit_p(hdmi_remove),
.driver = {
.name = "exynos4-hdmi",
.owner = THIS_MODULE,
- .pm = &hdmi_pm_ops,
+ .pm = &hdmi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e15438c01129..e2147a2ddcec 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,9 +37,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
-#define MIXER_WIN_NR 3
-#define MIXER_DEFAULT_WIN 0
-
#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
struct hdmi_win_data {
@@ -57,13 +54,14 @@ struct hdmi_win_data {
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_height;
+ unsigned int src_width;
+ unsigned int src_height;
unsigned int mode_width;
unsigned int mode_height;
unsigned int scan_flags;
};
struct mixer_resources {
- struct device *dev;
int irq;
void __iomem *mixer_regs;
void __iomem *vp_regs;
@@ -76,10 +74,13 @@ struct mixer_resources {
};
struct mixer_context {
- unsigned int irq;
+ struct device *dev;
int pipe;
bool interlace;
+ bool powered;
+ u32 int_en;
+ struct mutex mixer_mutex;
struct mixer_resources mixer_res;
struct hdmi_win_data win_data[MIXER_WIN_NR];
};
@@ -352,10 +353,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
struct hdmi_win_data *win_data;
- unsigned int full_width, full_height, width, height;
unsigned int x_ratio, y_ratio;
- unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
- unsigned int mode_width, mode_height;
unsigned int buf_num;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
@@ -367,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
switch (win_data->pixel_format) {
case DRM_FORMAT_NV12MT:
tiled_mode = true;
- case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12:
crcb_mode = false;
buf_num = 2;
break;
@@ -382,21 +380,9 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
return;
}
- full_width = win_data->fb_width;
- full_height = win_data->fb_height;
- width = win_data->crtc_width;
- height = win_data->crtc_height;
- mode_width = win_data->mode_width;
- mode_height = win_data->mode_height;
-
/* scaling feature: (src << 16) / dst */
- x_ratio = (width << 16) / width;
- y_ratio = (height << 16) / height;
-
- src_x_offset = win_data->fb_x;
- src_y_offset = win_data->fb_y;
- dst_x_offset = win_data->crtc_x;
- dst_y_offset = win_data->crtc_y;
+ x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
+ y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
if (buf_num == 2) {
luma_addr[0] = win_data->dma_addr;
@@ -404,7 +390,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
} else {
luma_addr[0] = win_data->dma_addr;
chroma_addr[0] = win_data->dma_addr
- + (full_width * full_height);
+ + (win_data->fb_width * win_data->fb_height);
}
if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@@ -413,8 +399,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
- luma_addr[1] = luma_addr[0] + full_width;
- chroma_addr[1] = chroma_addr[0] + full_width;
+ luma_addr[1] = luma_addr[0] + win_data->fb_width;
+ chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
}
} else {
ctx->interlace = false;
@@ -435,26 +421,26 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
- VP_IMG_VSIZE(full_height));
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+ VP_IMG_VSIZE(win_data->fb_height));
/* chroma height has to be reduced by 2 to avoid chroma distortions */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
- VP_IMG_VSIZE(full_height / 2));
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+ VP_IMG_VSIZE(win_data->fb_height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, width);
- vp_reg_write(res, VP_SRC_HEIGHT, height);
+ vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
+ vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
vp_reg_write(res, VP_SRC_H_POSITION,
- VP_SRC_H_POSITION_VAL(src_x_offset));
- vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
+ VP_SRC_H_POSITION_VAL(win_data->fb_x));
+ vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
- vp_reg_write(res, VP_DST_WIDTH, width);
- vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
+ vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
+ vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
if (ctx->interlace) {
- vp_reg_write(res, VP_DST_HEIGHT, height / 2);
- vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
+ vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, height);
- vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
+ vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
+ vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
}
vp_reg_write(res, VP_H_RATIO, x_ratio);
@@ -468,8 +454,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, mode_height);
- mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_scan(ctx, win_data->mode_height);
+ mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
@@ -484,10 +470,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
struct hdmi_win_data *win_data;
- unsigned int full_width, width, height;
unsigned int x_ratio, y_ratio;
unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
- unsigned int mode_width, mode_height;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
@@ -510,26 +494,17 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
fmt = ARGB8888;
}
- dma_addr = win_data->dma_addr;
- full_width = win_data->fb_width;
- width = win_data->crtc_width;
- height = win_data->crtc_height;
- mode_width = win_data->mode_width;
- mode_height = win_data->mode_height;
-
/* 2x scaling feature */
x_ratio = 0;
y_ratio = 0;
- src_x_offset = win_data->fb_x;
- src_y_offset = win_data->fb_y;
dst_x_offset = win_data->crtc_x;
dst_y_offset = win_data->crtc_y;
/* converting dma address base and source offset */
- dma_addr = dma_addr
- + (src_x_offset * win_data->bpp >> 3)
- + (src_y_offset * full_width * win_data->bpp >> 3);
+ dma_addr = win_data->dma_addr
+ + (win_data->fb_x * win_data->bpp >> 3)
+ + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
src_x_offset = 0;
src_y_offset = 0;
@@ -546,10 +521,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
- mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
- val = MXR_GRP_WH_WIDTH(width);
- val |= MXR_GRP_WH_HEIGHT(height);
+ val = MXR_GRP_WH_WIDTH(win_data->crtc_width);
+ val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -567,8 +542,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* set buffer address to mixer */
mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, mode_height);
- mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_scan(ctx, win_data->mode_height);
+ mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
mixer_cfg_layer(ctx, win, true);
mixer_run(ctx);
@@ -591,6 +566,118 @@ static void vp_win_reset(struct mixer_context *ctx)
WARN(tries == 0, "failed to reset Video Processor\n");
}
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+ /* set output in RGB888 mode */
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+ /* 16 beat burst in DMA */
+ mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* setting default layer priority: layer1 > layer0 > video
+ * because typical usage scenario would be
+ * layer1 - OSD
+ * layer0 - framebuffer
+ * video - video overlay
+ */
+ val = MXR_LAYER_CFG_GRP1_VAL(3);
+ val |= MXR_LAYER_CFG_GRP0_VAL(2);
+ val |= MXR_LAYER_CFG_VP_VAL(1);
+ mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+ /* setting background color */
+ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+ /* setting graphical layers */
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
+ val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* the same configuration for both layers */
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+ /* setting video layers */
+ val = MXR_GRP_CFG_ALPHA_VAL(0);
+ mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
+ /* configuration of Video Processor Registers */
+ vp_win_reset(ctx);
+ vp_default_filter(res);
+
+ /* disable all layers */
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ pm_runtime_get_sync(ctx->dev);
+
+ clk_enable(res->mixer);
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+
+ mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+ mixer_win_reset(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mutex_lock(&ctx->mixer_mutex);
+ if (!ctx->powered)
+ goto out;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+ clk_disable(res->mixer);
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+
+ pm_runtime_put_sync(ctx->dev);
+
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+
+out:
+ mutex_unlock(&ctx->mixer_mutex);
+}
+
static int mixer_enable_vblank(void *ctx, int pipe)
{
struct mixer_context *mixer_ctx = ctx;
@@ -618,6 +705,27 @@ static void mixer_disable_vblank(void *ctx)
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
+static void mixer_dpms(void *ctx, int mode)
+{
+ struct mixer_context *mixer_ctx = ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ mixer_poweron(mixer_ctx);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ mixer_poweroff(mixer_ctx);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
static void mixer_win_mode_set(void *ctx,
struct exynos_drm_overlay *overlay)
{
@@ -643,7 +751,7 @@ static void mixer_win_mode_set(void *ctx,
win = MIXER_DEFAULT_WIN;
if (win < 0 || win > MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ DRM_ERROR("mixer window[%d] is wrong\n", win);
return;
}
@@ -665,6 +773,8 @@ static void mixer_win_mode_set(void *ctx,
win_data->fb_y = overlay->fb_y;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
+ win_data->src_width = overlay->src_width;
+ win_data->src_height = overlay->src_height;
win_data->mode_width = overlay->mode_width;
win_data->mode_height = overlay->mode_height;
@@ -672,44 +782,26 @@ static void mixer_win_mode_set(void *ctx,
win_data->scan_flags = overlay->scan_flag;
}
-static void mixer_win_commit(void *ctx, int zpos)
+static void mixer_win_commit(void *ctx, int win)
{
struct mixer_context *mixer_ctx = ctx;
- int win = zpos;
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win > MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
- return;
- }
-
if (win > 1)
vp_video_buffer(mixer_ctx, win);
else
mixer_graph_buffer(mixer_ctx, win);
}
-static void mixer_win_disable(void *ctx, int zpos)
+static void mixer_win_disable(void *ctx, int win)
{
struct mixer_context *mixer_ctx = ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
unsigned long flags;
- int win = zpos;
DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
- if (win == DEFAULT_ZPOS)
- win = MIXER_DEFAULT_WIN;
-
- if (win < 0 || win > MIXER_WIN_NR) {
- DRM_ERROR("overlay plane[%d] is wrong\n", win);
- return;
- }
-
spin_lock_irqsave(&res->reg_slock, flags);
mixer_vsync_set_update(mixer_ctx, false);
@@ -723,6 +815,7 @@ static struct exynos_mixer_ops mixer_ops = {
/* manager */
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
+ .dpms = mixer_dpms,
/* overlay */
.win_mode_set = mixer_win_mode_set,
@@ -773,7 +866,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
struct mixer_context *ctx = drm_hdmi_ctx->ctx;
struct mixer_resources *res = &ctx->mixer_res;
- u32 val, val_base;
+ u32 val, base, shadow;
spin_lock(&res->reg_slock);
@@ -784,12 +877,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
if (val & MXR_INT_STATUS_VSYNC) {
/* interlace scan need to check shadow register */
if (ctx->interlace) {
- val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
- if (ctx->win_data[0].dma_addr != val_base)
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ if (base != shadow)
goto out;
- val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
- if (ctx->win_data[1].dma_addr != val_base)
+ base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ if (base != shadow)
goto out;
}
@@ -811,117 +906,6 @@ out:
return IRQ_HANDLED;
}
-static void mixer_win_reset(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
- unsigned long flags;
- u32 val; /* value stored to register */
-
- spin_lock_irqsave(&res->reg_slock, flags);
- mixer_vsync_set_update(ctx, false);
-
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
-
- /* set output in RGB888 mode */
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
-
- /* 16 beat burst in DMA */
- mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
- MXR_STATUS_BURST_MASK);
-
- /* setting default layer priority: layer1 > layer0 > video
- * because typical usage scenario would be
- * layer1 - OSD
- * layer0 - framebuffer
- * video - video overlay
- */
- val = MXR_LAYER_CFG_GRP1_VAL(3);
- val |= MXR_LAYER_CFG_GRP0_VAL(2);
- val |= MXR_LAYER_CFG_VP_VAL(1);
- mixer_reg_write(res, MXR_LAYER_CFG, val);
-
- /* setting background color */
- mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
- mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
- mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
-
- /* setting graphical layers */
-
- val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
- val |= MXR_GRP_CFG_WIN_BLEND_EN;
- val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
-
- /* the same configuration for both layers */
- mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
- val |= MXR_GRP_CFG_BLEND_PRE_MUL;
- val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
- mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
-
- /* configuration of Video Processor Registers */
- vp_win_reset(ctx);
- vp_default_filter(res);
-
- /* disable all layers */
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
-
- mixer_vsync_set_update(ctx, true);
- spin_unlock_irqrestore(&res->reg_slock, flags);
-}
-
-static void mixer_resource_poweron(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- clk_enable(res->mixer);
- clk_enable(res->vp);
- clk_enable(res->sclk_mixer);
-
- mixer_win_reset(ctx);
-}
-
-static void mixer_resource_poweroff(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
- clk_disable(res->mixer);
- clk_disable(res->vp);
- clk_disable(res->sclk_mixer);
-}
-
-static int mixer_runtime_resume(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
-
- DRM_DEBUG_KMS("resume - start\n");
-
- mixer_resource_poweron(ctx->ctx);
-
- return 0;
-}
-
-static int mixer_runtime_suspend(struct device *dev)
-{
- struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
-
- DRM_DEBUG_KMS("suspend - start\n");
-
- mixer_resource_poweroff(ctx->ctx);
-
- return 0;
-}
-
-static const struct dev_pm_ops mixer_pm_ops = {
- .runtime_suspend = mixer_runtime_suspend,
- .runtime_resume = mixer_runtime_resume,
-};
-
static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct platform_device *pdev)
{
@@ -931,7 +915,6 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
struct resource *res;
int ret;
- mixer_res->dev = dev;
spin_lock_init(&mixer_res->reg_slock);
mixer_res->mixer = clk_get(dev, "mixer");
@@ -1027,7 +1010,6 @@ fail:
clk_put(mixer_res->vp);
if (!IS_ERR_OR_NULL(mixer_res->mixer))
clk_put(mixer_res->mixer);
- mixer_res->dev = NULL;
return ret;
}
@@ -1035,7 +1017,6 @@ static void mixer_resources_cleanup(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
- disable_irq(res->irq);
free_irq(res->irq, ctx);
iounmap(res->vp_regs);
@@ -1064,6 +1045,9 @@ static int __devinit mixer_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ mutex_init(&ctx->mixer_mutex);
+
+ ctx->dev = &pdev->dev;
drm_hdmi_ctx->ctx = (void *)ctx;
platform_set_drvdata(pdev, drm_hdmi_ctx);
@@ -1076,7 +1060,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
/* register specific callback point to common hdmi. */
exynos_mixer_ops_register(&mixer_ops);
- mixer_resource_poweron(ctx);
+ pm_runtime_enable(dev);
return 0;
@@ -1095,12 +1079,27 @@ static int mixer_remove(struct platform_device *pdev)
dev_info(dev, "remove successful\n");
- mixer_resource_poweroff(ctx);
+ pm_runtime_disable(&pdev->dev);
+
mixer_resources_cleanup(ctx);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int mixer_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+ struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+ mixer_poweroff(ctx);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+
struct platform_driver mixer_driver = {
.driver = {
.name = "s5p-mixer",
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 3c04bea842ce..9cc7c5e9718c 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -138,14 +138,16 @@
#define HDMI_ASP_MASK (1 << 2)
#define HDMI_EN (1 << 0)
+/* HDMI_CON_2 */
+#define HDMI_VID_PREAMBLE_DIS (1 << 5)
+#define HDMI_GUARD_BAND_DIS (1 << 1)
+
/* HDMI_PHY_STATUS */
#define HDMI_PHY_STATUS_READY (1 << 0)
/* HDMI_MODE_SEL */
#define HDMI_MODE_HDMI_EN (1 << 1)
#define HDMI_MODE_DVI_EN (1 << 0)
-#define HDMI_DVI_MODE_EN (1)
-#define HDMI_DVI_MODE_DIS (0)
#define HDMI_MODE_MASK (3 << 0)
/* HDMI_TG_CMD */
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 1583982917ce..abfa2a93f0d0 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -1,7 +1,7 @@
#
# KMS driver for the GMA500
#
-ccflags-y += -Iinclude/drm
+ccflags-y += -I$(srctree)/include/drm
gma500_gfx-y += gem_glue.o \
accel_2d.o \
@@ -12,7 +12,6 @@ gma500_gfx-y += gem_glue.o \
intel_bios.o \
intel_i2c.o \
intel_gmbus.o \
- intel_opregion.o \
mmu.o \
power.o \
psb_drv.o \
@@ -25,6 +24,8 @@ gma500_gfx-y += gem_glue.o \
psb_device.o \
mid_bios.o
+gma500_gfx-$(CONFIG_ACPI) += opregion.o
+
gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
cdv_intel_crt.o \
cdv_intel_display.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index a54cc738926a..b7e7b49d8f62 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -49,13 +49,15 @@ static void cdv_disable_vga(struct drm_device *dev)
static int cdv_output_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+
+ drm_mode_create_scaling_mode_property(dev);
+
cdv_disable_vga(dev);
cdv_intel_crt_init(dev, &dev_priv->mode_dev);
cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
- /* These bits indicate HDMI not SDVO on CDV, but we don't yet support
- the HDMI interface */
+ /* These bits indicate HDMI not SDVO on CDV */
if (REG_READ(SDVOB) & SDVO_DETECTED)
cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
if (REG_READ(SDVOC) & SDVO_DETECTED)
@@ -66,76 +68,75 @@ static int cdv_output_init(struct drm_device *dev)
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
/*
- * Poulsbo Backlight Interfaces
+ * Cedartrail Backlight Interfaces
*/
-#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
-#define BLC_PWM_FREQ_CALC_CONSTANT 32
-#define MHz 1000000
-
-#define PSB_BLC_PWM_PRECISION_FACTOR 10
-#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
-#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
-
-#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
-#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
-
-static int cdv_brightness;
static struct backlight_device *cdv_backlight_device;
-static int cdv_get_brightness(struct backlight_device *bd)
+static int cdv_backlight_combination_mode(struct drm_device *dev)
{
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return cdv_brightness;
+ return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
}
-
-static int cdv_backlight_setup(struct drm_device *dev)
+static u32 cdv_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- unsigned long core_clock;
- /* u32 bl_max_freq; */
- /* unsigned long value; */
- u16 bl_max_freq;
- uint32_t value;
- uint32_t blc_pwm_precision_factor;
-
- /* get bl_max_freq and pol from dev_priv*/
- if (!dev_priv->lvds_bl) {
- dev_err(dev->dev, "Has no valid LVDS backlight info\n");
- return -ENOENT;
+ u32 max = REG_READ(BLC_PWM_CTL);
+
+ if (max == 0) {
+ DRM_DEBUG_KMS("LVDS Panel PWM value is 0!\n");
+ /* i915 does this, I believe which means that we should not
+ * smash PWM control as firmware will take control of it. */
+ return 1;
}
- bl_max_freq = dev_priv->lvds_bl->freq;
- blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
- core_clock = dev_priv->core_freq;
+ max >>= 16;
+ if (cdv_backlight_combination_mode(dev))
+ max *= 0xff;
+ return max;
+}
+
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
- value *= blc_pwm_precision_factor;
- value /= bl_max_freq;
- value /= blc_pwm_precision_factor;
+ if (cdv_backlight_combination_mode(dev)) {
+ u8 lbpc;
- if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
- value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
- return -ERANGE;
- else {
- /* FIXME */
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ val *= lbpc;
}
- return 0;
+ return (val * 100)/cdv_get_max_backlight(dev);
+
}
static int cdv_set_brightness(struct backlight_device *bd)
{
+ struct drm_device *dev = bl_get_data(bd);
int level = bd->props.brightness;
+ u32 blc_pwm_ctl;
/* Percentage 1-100% being valid */
if (level < 1)
level = 1;
- /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
- cdv_brightness = level;
+ level *= cdv_get_max_backlight(dev);
+ level /= 100;
+
+ if (cdv_backlight_combination_mode(dev)) {
+ u32 max = cdv_get_max_backlight(dev);
+ u8 lbpc;
+
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+
+ pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+ }
+
+ blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
return 0;
}
@@ -147,7 +148,6 @@ static const struct backlight_ops cdv_ops = {
static int cdv_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- int ret;
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -159,14 +159,8 @@ static int cdv_backlight_init(struct drm_device *dev)
if (IS_ERR(cdv_backlight_device))
return PTR_ERR(cdv_backlight_device);
- ret = cdv_backlight_setup(dev);
- if (ret < 0) {
- backlight_device_unregister(cdv_backlight_device);
- cdv_backlight_device = NULL;
- return ret;
- }
- cdv_backlight_device->props.brightness = 100;
- cdv_backlight_device->props.max_brightness = 100;
+ cdv_backlight_device->props.brightness =
+ cdv_get_brightness(cdv_backlight_device);
backlight_update_status(cdv_backlight_device);
dev_priv->backlight_device = cdv_backlight_device;
return 0;
@@ -238,6 +232,19 @@ static void cdv_init_pm(struct drm_device *dev)
dev_err(dev->dev, "GPU: power management timed out.\n");
}
+static void cdv_errata(struct drm_device *dev)
+{
+ /* Disable bonus launch.
+ * CPU and GPU compete for memory, and the display misses updates and
+ * flickers. Worst with dual core, dual displays.
+ *
+ * Fixes were done to Win 7 gfx driver to disable a feature called
+ * Bonus Launch to work around the issue, by degrading
+ * performance.
+ */
+ CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+}
+
/**
* cdv_save_display_registers - save registers lost on suspend
* @dev: our DRM device
@@ -251,7 +258,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
- dev_info(dev->dev, "Saving GPU registers.\n");
+ dev_dbg(dev->dev, "Saving GPU registers.\n");
pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
@@ -355,7 +362,7 @@ static int cdv_restore_display_registers(struct drm_device *dev)
REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);
/* Fix arbitration bug */
- CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+ cdv_errata(dev);
drm_mode_config_reset(dev);
@@ -447,13 +454,106 @@ static void cdv_get_core_freq(struct drm_device *dev)
}
}
+static void cdv_hotplug_work_func(struct work_struct *work)
+{
+ struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
+ hotplug_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
+
+/* The core driver has received a hotplug IRQ. We are in IRQ context
+ so extract the needed information and kick off queued processing */
+
+static int cdv_hotplug_event(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ schedule_work(&dev_priv->hotplug_work);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ return 1;
+}
+
+static void cdv_hotplug_enable(struct drm_device *dev, bool on)
+{
+ if (on) {
+ u32 hotplug = REG_READ(PORT_HOTPLUG_EN);
+ hotplug |= HDMIB_HOTPLUG_INT_EN | HDMIC_HOTPLUG_INT_EN |
+ HDMID_HOTPLUG_INT_EN | CRT_HOTPLUG_INT_EN;
+ REG_WRITE(PORT_HOTPLUG_EN, hotplug);
+ } else {
+ REG_WRITE(PORT_HOTPLUG_EN, 0);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ }
+}
+
+/* Cedarview */
+static const struct psb_offset cdv_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .dpll_md = DPLL_A_MD,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .dpll_md = DPLL_B_MD,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
static int cdv_chip_setup(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
+
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = cdv_regmap;
cdv_get_core_freq(dev);
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
- REG_WRITE(PORT_HOTPLUG_EN, 0);
- REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ cdv_hotplug_enable(dev, false);
return 0;
}
@@ -464,13 +564,19 @@ const struct psb_ops cdv_chip_ops = {
.accel_2d = 0,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0) | (1 << 1),
+ .lvds_mask = (1 << 1),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = cdv_chip_setup,
+ .errata = cdv_errata,
.crtc_helper = &cdv_intel_helper_funcs,
.crtc_funcs = &cdv_intel_crtc_funcs,
.output_init = cdv_output_init,
+ .hotplug = cdv_hotplug_event,
+ .hotplug_enable = cdv_hotplug_enable,
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
.backlight_init = cdv_backlight_init,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index a71a6cd95bdd..187422018601 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -67,8 +67,6 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
- int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -77,18 +75,9 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_LOW;
/* The max clock for CDV is 355 instead of 400 */
- max_clock = 355000;
- if (mode->clock > max_clock)
+ if (mode->clock > 355000)
return MODE_CLOCK_HIGH;
- if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
- return MODE_PANEL;
-
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -156,13 +145,7 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
u32 hotplug_en;
int i, tries = 0, ret = false;
- u32 adpa_orig;
-
- /* disable the DAC when doing the hotplug detection */
-
- adpa_orig = REG_READ(ADPA);
-
- REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+ u32 orig;
/*
* On a CDV, the CRT detect sequence needs to be done twice
@@ -170,7 +153,7 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
*/
tries = 2;
- hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+ orig = hotplug_en = REG_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
@@ -195,8 +178,11 @@ static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
CRT_HOTPLUG_MONITOR_NONE)
ret = true;
- /* Restore the saved ADPA */
- REG_WRITE(ADPA, adpa_orig);
+ /* clear the interrupt we just generated, if any */
+ REG_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+ /* and put the bits back */
+ REG_WRITE(PORT_HOTPLUG_EN, orig);
return ret;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index be8455919b33..c3e9a0f701df 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -216,22 +216,22 @@ static void cdv_sb_reset(struct drm_device *dev)
*/
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
- struct cdv_intel_clock_t *clock)
+ struct cdv_intel_clock_t *clock, bool is_lvds)
{
- struct psb_intel_crtc *psb_crtc =
- to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_crtc->pipe;
u32 m, n_vco, p;
int ret = 0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int ref_sfr = (pipe == 0) ? SB_REF_DPLLA : SB_REF_DPLLB;
u32 ref_value;
+ u32 lane_reg, lane_value;
cdv_sb_reset(dev);
- if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
- DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
- return -EBUSY;
- }
+ REG_WRITE(dpll_reg, DPLL_SYNCLOCK_ENABLE | DPLL_VGA_MODE_DIS);
+
+ udelay(100);
/* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
ref_value = 0x68A701;
@@ -241,6 +241,35 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
/* We don't know what the other fields of these regs are, so
* leave them in place.
*/
+ /*
+ * The BIT 14:13 of 0x8010/0x8030 is used to select the ref clk
+ * for the pipe A/B. Display spec 1.06 has wrong definition.
+ * Correct definition is like below:
+ *
+ * refclka means using the clock from the same PLL
+ *
+ * if DPLLA sets 01 and DPLLB sets 01, they use clock from their pll
+ *
+ * if DPLLA sets 01 and DPLLB sets 02, both use clk from DPLLA
+ *
+ */
+ ret = cdv_sb_read(dev, ref_sfr, &ref_value);
+ if (ret)
+ return ret;
+ ref_value &= ~(REF_CLK_MASK);
+
+ /* use DPLL_A for pipeB on CRT/HDMI */
+ if (pipe == 1 && !is_lvds) {
+ DRM_DEBUG_KMS("use DPLLA for pipe B\n");
+ ref_value |= REF_CLK_DPLLA;
+ } else {
+ DRM_DEBUG_KMS("use their DPLL for pipe A/B\n");
+ ref_value |= REF_CLK_DPLL;
+ }
+ ret = cdv_sb_write(dev, ref_sfr, ref_value);
+ if (ret)
+ return ret;
+
ret = cdv_sb_read(dev, SB_M(pipe), &m);
if (ret)
return ret;
@@ -307,36 +336,29 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
return ret;
- /* always Program the Lane Register for the Pipe A*/
- if (pipe == 0) {
- /* Program the Lane0/1 for HDMI B */
- u32 lane_reg, lane_value;
-
- lane_reg = PSB_LANE0;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE1;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- /* Program the Lane2/3 for HDMI C */
- lane_reg = PSB_LANE2;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
-
- lane_reg = PSB_LANE3;
- cdv_sb_read(dev, lane_reg, &lane_value);
- lane_value &= ~(LANE_PLL_MASK);
- lane_value |= LANE_PLL_ENABLE;
- cdv_sb_write(dev, lane_reg, lane_value);
- }
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+ cdv_sb_write(dev, lane_reg, lane_value);
return 0;
}
@@ -480,14 +502,12 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -509,9 +529,9 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -533,15 +553,15 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
ret = -EINVAL;
goto psb_intel_pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev,
"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
@@ -553,6 +573,199 @@ psb_intel_pipe_set_base_exit:
return ret;
}
+#define FIFO_PIPEA (1 << 0)
+#define FIFO_PIPEB (1 << 1)
+
+static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
+{
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = NULL;
+
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+ if (crtc->fb == NULL || !psb_intel_crtc->active)
+ return false;
+ return true;
+}
+
+static bool cdv_intel_single_pipe_active(struct drm_device *dev)
+{
+ uint32_t pipe_enabled = 0;
+
+ if (cdv_intel_pipe_enabled(dev, 0))
+ pipe_enabled |= FIFO_PIPEA;
+
+ if (cdv_intel_pipe_enabled(dev, 1))
+ pipe_enabled |= FIFO_PIPEB;
+
+
+ DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
+
+ if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
+ return true;
+ else
+ return false;
+}
+
+static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (psb_intel_crtc->pipe != 1)
+ return false;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS)
+ return true;
+ }
+
+ return false;
+}
+
+static void cdv_intel_disable_self_refresh(struct drm_device *dev)
+{
+ if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
+
+ /* Disable self-refresh before adjust WM */
+ REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
+ REG_READ(FW_BLC_SELF);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ /* Cedarview workaround to write the overlay plane, which forces it to leave
+ * MAX_FIFO state.
+ */
+ REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
+ REG_READ(OV_OVADD);
+
+ cdv_intel_wait_for_vblank(dev);
+ }
+
+}
+
+static void cdv_intel_update_watermark(struct drm_device *dev, struct drm_crtc *crtc)
+{
+
+ if (cdv_intel_single_pipe_active(dev)) {
+ u32 fw;
+
+ fw = REG_READ(DSPFW1);
+ fw &= ~DSP_FIFO_SR_WM_MASK;
+ fw |= (0x7e << DSP_FIFO_SR_WM_SHIFT);
+ fw &= ~CURSOR_B_FIFO_WM_MASK;
+ fw |= (0x4 << CURSOR_B_FIFO_WM_SHIFT);
+ REG_WRITE(DSPFW1, fw);
+
+ fw = REG_READ(DSPFW2);
+ fw &= ~CURSOR_A_FIFO_WM_MASK;
+ fw |= (0x6 << CURSOR_A_FIFO_WM_SHIFT);
+ fw &= ~DSP_PLANE_C_FIFO_WM_MASK;
+ fw |= (0x8 << DSP_PLANE_C_FIFO_WM_SHIFT);
+ REG_WRITE(DSPFW2, fw);
+
+ REG_WRITE(DSPFW3, 0x36000000);
+
+ /* ignore FW4 */
+
+ if (is_pipeb_lvds(dev, crtc)) {
+ REG_WRITE(DSPFW5, 0x00040330);
+ } else {
+ fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
+ (4 << DSP_PLANE_A_FIFO_WM1_SHIFT) |
+ (3 << CURSOR_B_FIFO_WM1_SHIFT) |
+ (4 << CURSOR_FIFO_SR_WM1_SHIFT);
+ REG_WRITE(DSPFW5, fw);
+ }
+
+ REG_WRITE(DSPFW6, 0x10);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ /* enable self-refresh for single pipe active */
+ REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ REG_READ(FW_BLC_SELF);
+ cdv_intel_wait_for_vblank(dev);
+
+ } else {
+
+ /* HW team suggested values... */
+ REG_WRITE(DSPFW1, 0x3f880808);
+ REG_WRITE(DSPFW2, 0x0b020202);
+ REG_WRITE(DSPFW3, 0x24000000);
+ REG_WRITE(DSPFW4, 0x08030202);
+ REG_WRITE(DSPFW5, 0x01010101);
+ REG_WRITE(DSPFW6, 0x1d0);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ cdv_intel_disable_self_refresh(dev);
+
+ }
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int palreg = PALETTE_A;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ switch (psb_intel_crtc->pipe) {
+ case 0:
+ break;
+ case 1:
+ palreg = PALETTE_B;
+ break;
+ case 2:
+ palreg = PALETTE_C;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ dev_priv->regs.pipe[0].palette[i] =
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
/**
* Sets the power management mode of the pipe and plane.
*
@@ -562,62 +775,80 @@ psb_intel_pipe_set_base_exit:
static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
*/
+ cdv_intel_disable_self_refresh(dev);
+
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
+ if (psb_intel_crtc->active)
+ return;
+
+ psb_intel_crtc->active = true;
+
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Jim Bish - switch plan and pipe per scott */
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
udelay(150);
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+ temp = REG_READ(map->status);
+ temp &= ~(0xFFFF);
+ temp |= PIPE_FIFO_UNDERRUN;
+ REG_WRITE(map->status, temp);
+ REG_READ(map->status);
- psb_intel_crtc_load_lut(crtc);
+ cdv_intel_update_watermark(dev, crtc);
+ cdv_intel_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ psb_intel_crtc->crtc_enable = true;
break;
case DRM_MODE_DPMS_OFF:
+ if (!psb_intel_crtc->active)
+ return;
+
+ psb_intel_crtc->active = false;
+
/* Give the overlay scaler a chance to disable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
@@ -627,14 +858,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Jim Bish - changed pipe/plane here as well. */
+ drm_vblank_off(dev, pipe);
/* Wait for vblank for the disable to take effect */
cdv_intel_wait_for_vblank(dev);
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for vblank for the disable to take effect. */
@@ -643,23 +875,25 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
udelay(150);
+ cdv_intel_update_watermark(dev, crtc);
+ psb_intel_crtc->crtc_enable = false;
break;
}
/*Set FIFO Watermarks*/
@@ -709,21 +943,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct cdv_intel_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
@@ -757,13 +980,18 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- refclk = 96000;
-
- /* Hack selection about ref clk for CRT */
- /* Select 27MHz as the reference clk for HDMI */
- if (is_crt || is_hdmi)
+ if (dev_priv->dplla_96mhz)
+ /* low-end sku, 96/100 MHz */
+ refclk = 96000;
+ else
+ /* high-end sku, 27/100 MHz */
refclk = 27000;
+ if (is_lvds && dev_priv->lvds_use_ssc) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("Use SSC reference clock %d Mhz\n", dev_priv->lvds_ssc_freq);
+ }
+
drm_mode_debug_printmodeline(adjusted_mode);
ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
@@ -779,18 +1007,17 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
}
- dpll |= PLL_REF_INPUT_DREFCLK;
+/* dpll |= PLL_REF_INPUT_DREFCLK; */
dpll |= DPLL_SYNCLOCK_ENABLE;
- dpll |= DPLL_VGA_MODE_DIS;
- if (is_lvds)
+/* if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
- dpll |= DPLLB_MODE_DAC_SERIAL;
+ dpll |= DPLLB_MODE_DAC_SERIAL; */
/* dpll |= (2 << 11); */
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -803,10 +1030,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
dspcntr |= DISPLAY_PLANE_ENABLE;
pipeconf |= PIPEACONF_ENABLE;
- REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(map->dpll);
- cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
udelay(150);
@@ -848,48 +1075,48 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- REG_WRITE(dpll_reg,
- (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll,
+ (REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
- if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+ if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
dev_err(dev->dev, "Failed to get DPLL lock\n");
return -EBUSY;
}
{
int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
}
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
cdv_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
{
@@ -903,58 +1130,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
return 0;
}
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
- int i;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
- return;
-
- switch (psb_intel_crtc->pipe) {
- case 0:
- break;
- case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- if (gma_power_begin(dev, false)) {
- for (i = 0; i < 256; i++) {
- REG_WRITE(palreg + 4 * i,
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]));
- }
- gma_power_end(dev);
- } else {
- for (i = 0; i < 256; i++) {
- dev_priv->regs.psb.save_palette_a[i] =
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]);
- }
-
- }
-}
/**
* Save HW states of giving crtc
@@ -962,11 +1137,10 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
static void cdv_intel_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -975,25 +1149,25 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
return;
}
- crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
- crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
- crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
- crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
- crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
- crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
- crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
- crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
- crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
- crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
- crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
- crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
- crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
/*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
- crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
- crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+ crtc_state->saveDSPBASE = REG_READ(map->base);
DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
crtc_state->saveDSPCNTR,
@@ -1014,7 +1188,7 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
crtc_state->saveDSPBASE
);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
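A recurring change in this patch: the per-pipe register ternaries (pipe A vs. pipe B) are replaced by a lookup through dev_priv->regmap[pipe]. A rough sketch of what such a per-pipe offset map could contain, inferred only from the fields used in these hunks; the real struct psb_offset lives in the driver headers and may differ:

	/* Hypothetical sketch: field names taken from their uses in this diff */
	struct psb_offset_sketch {
		u32 fp0, fp1, dpll, dpll_md;		/* PLL and divisor registers */
		u32 conf, src, status;			/* pipe config/source/status */
		u32 htotal, hblank, hsync;		/* horizontal timings */
		u32 vtotal, vblank, vsync;		/* vertical timings */
		u32 cntr, base, stride, size, pos;	/* display plane */
		u32 linoff, tileoff, surf;		/* plane offsets (Medfield paths) */
		u32 palette;				/* gamma LUT base */
	};
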
@@ -1025,12 +1199,10 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private * dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -1041,23 +1213,23 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
DRM_DEBUG(
"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
- REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
- REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
- REG_READ(pipeA ? FPA0 : FPB0),
- REG_READ(pipeA ? FPA1 : FPB1),
- REG_READ(pipeA ? DPLL_A : DPLL_B),
- REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
- REG_READ(pipeA ? HBLANK_A : HBLANK_B),
- REG_READ(pipeA ? HSYNC_A : HSYNC_B),
- REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
- REG_READ(pipeA ? VBLANK_A : VBLANK_B),
- REG_READ(pipeA ? VSYNC_A : VSYNC_B),
- REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
- REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
- REG_READ(pipeA ? DSPAPOS : DSPBPOS),
- REG_READ(pipeA ? DSPABASE : DSPBBASE)
- );
+ REG_READ(map->cntr),
+ REG_READ(map->conf),
+ REG_READ(map->src),
+ REG_READ(map->fp0),
+ REG_READ(map->fp1),
+ REG_READ(map->dpll),
+ REG_READ(map->htotal),
+ REG_READ(map->hblank),
+ REG_READ(map->hsync),
+ REG_READ(map->vtotal),
+ REG_READ(map->vblank),
+ REG_READ(map->vsync),
+ REG_READ(map->stride),
+ REG_READ(map->size),
+ REG_READ(map->pos),
+ REG_READ(map->base)
+ );
DRM_DEBUG(
"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
@@ -1077,51 +1249,51 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
crtc_state->saveDSPSIZE,
crtc_state->saveDSPPOS,
crtc_state->saveDSPBASE
- );
+ );
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(pipeA ? DPLL_A : DPLL_B,
- crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
DRM_DEBUG("write dpll: %x\n",
- REG_READ(pipeA ? DPLL_A : DPLL_B));
+ REG_READ(map->dpll));
udelay(150);
}
- REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
- REG_READ(pipeA ? FPA0 : FPB0);
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
- REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
- REG_READ(pipeA ? FPA1 : FPB1);
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
- REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
udelay(150);
- REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
- REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
- REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
- REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
- REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
- REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
- REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
- REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
- REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
- REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
- REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
cdv_intel_wait_for_vblank(dev);
- REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
cdv_intel_wait_for_vblank(dev);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
@@ -1296,35 +1468,30 @@ static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct cdv_intel_clock_t clock;
bool is_lvds;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
- dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = REG_READ(map->fp0);
else
- fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
- dpll = (pipe == 0) ?
- dev_priv->regs.psb.saveDPLL_A :
- dev_priv->regs.psb.saveDPLL_B;
-
+ dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA0 :
- dev_priv->regs.psb.saveFPB0;
+ fp = p->fp0;
else
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA1 :
- dev_priv->regs.psb.saveFPB1;
+ fp = p->fp1;
is_lvds = (pipe == 1) &&
(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
@@ -1382,32 +1549,26 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
struct drm_display_mode *mode;
int htot;
int hsync;
int vtot;
int vsync;
- struct drm_psb_private *dev_priv = dev->dev_private;
if (gma_power_begin(dev, false)) {
- htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
- htot = (pipe == 0) ?
- dev_priv->regs.psb.saveHTOTAL_A :
- dev_priv->regs.psb.saveHTOTAL_B;
- hsync = (pipe == 0) ?
- dev_priv->regs.psb.saveHSYNC_A :
- dev_priv->regs.psb.saveHSYNC_B;
- vtot = (pipe == 0) ?
- dev_priv->regs.psb.saveVTOTAL_A :
- dev_priv->regs.psb.saveVTOTAL_B;
- vsync = (pipe == 0) ?
- dev_priv->regs.psb.saveVSYNC_A :
- dev_priv->regs.psb.saveVSYNC_B;
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 8d5269555005..88b59d4a7b7f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -242,8 +242,6 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
static int cdv_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
-
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
@@ -257,11 +255,6 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8359c1a3f45f..ff5b58eb878c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,6 +356,8 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
+ encoder->crtc);
u32 pfit_control;
/*
@@ -377,6 +379,8 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
else
pfit_control = 0;
+ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -552,10 +556,60 @@ static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
drm_encoder_cleanup(encoder);
}
-const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
.destroy = cdv_intel_lvds_enc_destroy,
};
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return true; if not, return false.
+ * If no child devices were parsed from the VBT, assume that the LVDS
+ * is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ u8 *i2c_pin)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ struct child_device_config *child = dev_priv->child_dev + i;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new and the old identifiers for
+ * compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (child->i2c_pin)
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
/**
* cdv_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -576,6 +630,13 @@ void cdv_intel_lvds_init(struct drm_device *dev,
struct drm_psb_private *dev_priv = dev->dev_private;
u32 lvds;
int pipe;
+ u8 pin;
+
+ pin = GMBUS_PORT_PANEL;
+ if (!lvds_is_present_in_vbt(dev, &pin)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ return;
+ }
psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
GFP_KERNEL);
@@ -710,6 +771,19 @@ void cdv_intel_lvds_init(struct drm_device *dev,
goto failed_find;
}
+ /* setup PWM */
+ {
+ u32 pwm;
+
+ pwm = REG_READ(BLC_PWM_CTL2);
+ if (pipe == 1)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ pwm |= PWM_ENABLE;
+ REG_WRITE(BLC_PWM_CTL2, pwm);
+ }
+
out:
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8ea202f1ba50..5732b5702e1c 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -153,7 +153,7 @@ static void psbfb_vm_close(struct vm_area_struct *vma)
{
}
-static struct vm_operations_struct psbfb_vm_ops = {
+static const struct vm_operations_struct psbfb_vm_ops = {
.fault = psbfb_vm_fault,
.open = psbfb_vm_open,
.close = psbfb_vm_close
@@ -408,6 +408,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
return -ENOMEM;
}
+ memset(dev_priv->vram_addr + backing->offset, 0, size);
+
mutex_lock(&dev->struct_mutex);
info = framebuffer_alloc(0, device);
@@ -453,8 +455,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->fix.ypanstep = 0;
/* Accessed stolen memory directly */
- info->screen_base = (char *)dev_priv->vram_addr +
- backing->offset;
+ info->screen_base = dev_priv->vram_addr + backing->offset;
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
@@ -475,7 +476,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- dev_info(dev->dev, "allocated %dx%d fb\n",
+ dev_dbg(dev->dev, "allocated %dx%d fb\n",
psbfb->base.width, psbfb->base.height);
mutex_unlock(&dev->struct_mutex);
@@ -543,9 +544,25 @@ static int psbfb_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+ struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
int new_fb = 0;
+ int bytespp;
int ret;
+ bytespp = sizes->surface_bpp / 8;
+ if (bytespp == 3) /* no 24bit packed */
+ bytespp = 4;
+
+ /* If the mode will not fit in 32 bpp then switch to 16 bpp to get
+ a console at full resolution. The X mode setting server will
+ allocate its own 32 bpp GEM framebuffer */
+ if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
+ dev_priv->vram_stolen_size) {
+ sizes->surface_bpp = 16;
+ sizes->surface_depth = 16;
+ }
+
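As a rough worked example of the check above (hypothetical numbers): a 2048x1536 console at 32 bpp needs ALIGN(2048 * 4, 64) * 1536 = 12 MiB, so with only 8 MiB of stolen memory the fbdev console falls back to 16 bpp (about 6 MiB); the X server can still allocate its own 32 bpp GEM framebuffer later.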
if (!helper->fb) {
ret = psbfb_create(psb_fbdev, sizes);
if (ret)
@@ -555,7 +572,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
return new_fb;
}
-struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.gamma_set = psbfb_gamma_set,
.gamma_get = psbfb_gamma_get,
.fb_probe = psbfb_probe,
@@ -732,10 +749,7 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_SDVO);
break;
case INTEL_OUTPUT_LVDS:
- if (IS_MRST(dev))
- crtc_mask = (1 << 0);
- else
- crtc_mask = (1 << 1);
+ crtc_mask = dev_priv->ops->lvds_mask;
clone_mask = (1 << INTEL_OUTPUT_LVDS);
break;
case INTEL_OUTPUT_MIPI:
@@ -747,10 +761,7 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_MIPI2);
break;
case INTEL_OUTPUT_HDMI:
- if (IS_MFLD(dev))
- crtc_mask = (1 << 1);
- else
- crtc_mask = (1 << 0);
+ crtc_mask = dev_priv->ops->hdmi_mask;
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
}
@@ -771,7 +782,7 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.funcs = (void *) &psb_mode_funcs;
+ dev->mode_config.funcs = &psb_mode_funcs;
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2*/
@@ -786,15 +797,23 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 2048;
psb_setup_outputs(dev);
+
+ if (dev_priv->ops->errata)
+ dev_priv->ops->errata(dev);
+
+ dev_priv->modeset = true;
}
void psb_modeset_cleanup(struct drm_device *dev)
{
- mutex_lock(&dev->struct_mutex);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->modeset) {
+ mutex_lock(&dev->struct_mutex);
- drm_kms_helper_poll_fini(dev);
- psb_fbdev_fini(dev);
- drm_mode_config_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ psb_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 9fbb86868e2e..fc7d144bc2d3 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -124,6 +124,8 @@ static int psb_gem_create(struct drm_file *file,
dev_err(dev->dev, "GEM init failed for %lld\n", size);
return -ENOMEM;
}
+ /* Limit the object to 32bit mappings */
+ mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
/* Give the object a handle so we can carry it more easily */
ret = drm_gem_handle_create(file, &r->gem, &handle);
if (ret) {
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index c6465b40090f..04a371aceb34 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -39,6 +39,10 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
uint32_t mask = PSB_PTE_VALID;
+ /* Ensure we explode rather than put an invalid low mapping of
+ a high mapping page into the gtt */
+ BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
+
if (type & PSB_MMU_CACHED_MEMORY)
mask |= PSB_PTE_CACHED;
if (type & PSB_MMU_RO_MEMORY)
@@ -57,7 +61,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
* Given a gtt_range object return the GTT offset of the page table
* entries for this gtt_range
*/
-static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long offset;
@@ -78,7 +82,8 @@ static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
*/
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
struct page **pages;
int i;
@@ -93,7 +98,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
pages = r->pages;
/* Make sure changes are visible to the GPU */
- set_pages_array_uc(pages, r->npage);
+ set_pages_array_wc(pages, r->npage);
/* Write our page entries into the GTT itself */
for (i = r->roll; i < r->npage; i++) {
@@ -122,7 +127,8 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
int i;
WARN_ON(r->stolen);
@@ -148,7 +154,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
*/
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
- u32 *gtt_slot, pte;
+ u32 __iomem *gtt_slot;
+ u32 pte;
int i;
if (roll >= r->npage) {
@@ -409,8 +416,6 @@ int psb_gtt_init(struct drm_device *dev, int resume)
unsigned long stolen_size, vram_stolen_size;
unsigned i, num_pages;
unsigned pfn_base;
- uint32_t vram_pages;
- uint32_t dvmt_mode = 0;
struct psb_gtt *pg;
int ret = 0;
@@ -483,13 +488,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
stolen_size = vram_stolen_size;
- printk(KERN_INFO "Stolen memory information\n");
- printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
- printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
- vram_stolen_size/1024);
- dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
- printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
- (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+ dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
+ dev_priv->stolen_base, vram_stolen_size / 1024);
if (resume && (gtt_pages != pg->gtt_pages) &&
(stolen_size != pg->stolen_size)) {
@@ -525,8 +525,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
*/
pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
- vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
- printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+ num_pages = vram_stolen_size >> PAGE_SHIFT;
+ dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
pte = psb_gtt_mask_pte(pfn_base + i, 0);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d4d0c5b8bf91..973d7f6d66b7 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -26,6 +26,8 @@
#include "psb_intel_reg.h"
#include "intel_bios.h"
+#define SLAVE_ADDR1 0x70
+#define SLAVE_ADDR2 0x72
static void *find_section(struct bdb_header *bdb, int section_id)
{
@@ -52,6 +54,16 @@ static void *find_section(struct bdb_header *bdb, int section_id)
return NULL;
}
+static u16
+get_blocksize(void *p)
+{
+ u16 *block_ptr, block_size;
+
+ block_ptr = (u16 *)((char *)p - 2);
+ block_size = *block_ptr;
+ return block_size;
+}
+
static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
struct lvds_dvo_timing *dvo_timing)
{
@@ -75,6 +87,16 @@ static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->clock = dvo_timing->clock * 10;
panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ if (dvo_timing->hsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dvo_timing->vsync_positive)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -217,6 +239,180 @@ static void parse_general_features(struct drm_psb_private *dev_priv,
}
}
+static void
+parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct sdvo_device_mapping *p_mapping;
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+ return;
+ }
+ /* Check whether the size of the child device meets the requirements.
+ * If the child device size obtained from the general definition block
+ * differs from sizeof(struct child_device_config), skip the parsing
+ * of the SDVO device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* Different child dev size. Ignore it. */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ if (p_child->slave_addr != SLAVE_ADDR1 &&
+ p_child->slave_addr != SLAVE_ADDR2) {
+ /*
+ * If the slave address is neither 0x70 nor 0x72,
+ * it is not a SDVO device. Skip it.
+ */
+ continue;
+ }
+ if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ p_child->dvo_port != DEVICE_PORT_DVOC) {
+ /* skip the incorrect SDVO port */
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ continue;
+ }
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ p_child->slave_addr,
+ (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
+ p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+ if (!p_mapping->initialized) {
+ p_mapping->dvo_port = p_child->dvo_port;
+ p_mapping->slave_addr = p_child->slave_addr;
+ p_mapping->dvo_wiring = p_child->dvo_wiring;
+ p_mapping->ddc_pin = p_child->ddc_pin;
+ p_mapping->i2c_pin = p_child->i2c_pin;
+ p_mapping->initialized = 1;
+ DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ p_mapping->dvo_port,
+ p_mapping->slave_addr,
+ p_mapping->dvo_wiring,
+ p_mapping->ddc_pin,
+ p_mapping->i2c_pin);
+ } else {
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
+ }
+ if (p_child->slave2_addr) {
+ /* Maybe this is an SDVO device with multiple inputs,
+ * and the mapping info is not added. */
+ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ " is an SDVO device with multiple inputs.\n");
+ }
+ count++;
+ }
+
+ if (!count) {
+ /* No SDVO device info is found */
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ }
+ return;
+}
+
+
+static void
+parse_driver_features(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_driver_features *driver;
+
+ driver = find_section(bdb, BDB_DRIVER_FEATURES);
+ if (!driver)
+ return;
+
+ /* This bit selects whether a 96 MHz reference is used for DPLL_A or not */
+ if (driver->primary_lfp_id)
+ dev_priv->dplla_96mhz = true;
+ else
+ dev_priv->dplla_96mhz = false;
+}
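The dplla_96mhz flag set here is consumed by the Cedarview mode-set path earlier in this diff: when it is set, cdv_intel_crtc_mode_set() uses a 96 MHz reference clock for the DPLL, otherwise 27 MHz, with an LVDS SSC frequency override when lvds_use_ssc is set.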
+
+static void
+parse_device_mapping(struct drm_psb_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ return;
+ }
+ /* Check whether the size of the child device meets the requirements.
+ * If the child device size obtained from the general definition block
+ * differs from sizeof(struct child_device_config), skip the parsing
+ * of the device info.
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* Different child dev size. Ignore it. */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child devices that are present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ return;
+ }
+ dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child devices\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
+
+
/**
* psb_intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -236,38 +432,54 @@ bool psb_intel_init_bios(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
struct vbt_header *vbt = NULL;
- struct bdb_header *bdb;
- u8 __iomem *bios;
+ struct bdb_header *bdb = NULL;
+ u8 __iomem *bios = NULL;
size_t size;
int i;
- bios = pci_map_rom(pdev, &size);
- if (!bios)
- return -1;
+ /* XXX Should this validation be moved to intel_opregion.c? */
+ if (dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+ vbt->signature);
+ bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+ } else
+ dev_priv->opregion.vbt = NULL;
+ }
- /* Scour memory looking for the VBT signature */
- for (i = 0; i + 4 < size; i++) {
- if (!memcmp(bios + i, "$VBT", 4)) {
- vbt = (struct vbt_header *)(bios + i);
- break;
+ if (bdb == NULL) {
+ bios = pci_map_rom(pdev, &size);
+ if (!bios)
+ return -1;
+
+ /* Scour memory looking for the VBT signature */
+ for (i = 0; i + 4 < size; i++) {
+ if (!memcmp(bios + i, "$VBT", 4)) {
+ vbt = (struct vbt_header *)(bios + i);
+ break;
+ }
}
- }
- if (!vbt) {
- dev_err(dev->dev, "VBT signature missing\n");
- pci_unmap_rom(pdev, bios);
- return -1;
+ if (!vbt) {
+ dev_err(dev->dev, "VBT signature missing\n");
+ pci_unmap_rom(pdev, bios);
+ return -1;
+ }
+ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
}
- bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
-
- /* Grab useful general definitions */
+ /* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
+ parse_driver_features(dev_priv, bdb);
parse_lfp_panel_data(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
+ parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
parse_backlight_data(dev_priv, bdb);
- pci_unmap_rom(pdev, bios);
+ if (bios)
+ pci_unmap_rom(pdev, bios);
return 0;
}
@@ -278,26 +490,8 @@ bool psb_intel_init_bios(struct drm_device *dev)
void psb_intel_destroy_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_display_mode *sdvo_lvds_vbt_mode =
- dev_priv->sdvo_lvds_vbt_mode;
- struct drm_display_mode *lfp_lvds_vbt_mode =
- dev_priv->lfp_lvds_vbt_mode;
- struct bdb_lvds_backlight *lvds_bl =
- dev_priv->lvds_bl;
-
- /*free sdvo panel mode*/
- if (sdvo_lvds_vbt_mode) {
- dev_priv->sdvo_lvds_vbt_mode = NULL;
- kfree(sdvo_lvds_vbt_mode);
- }
- if (lfp_lvds_vbt_mode) {
- dev_priv->lfp_lvds_vbt_mode = NULL;
- kfree(lfp_lvds_vbt_mode);
- }
-
- if (lvds_bl) {
- dev_priv->lvds_bl = NULL;
- kfree(lvds_bl);
- }
+ kfree(dev_priv->sdvo_lvds_vbt_mode);
+ kfree(dev_priv->lfp_lvds_vbt_mode);
+ kfree(dev_priv->lvds_bl);
}
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 70f1bf018183..0a738663eb5a 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -127,9 +127,93 @@ struct bdb_general_features {
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
- u8 rsvd11:6; /* finish byte */
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
} __attribute__((packed));
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+struct child_device_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+ u8 dvo_port; /* See Device_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __attribute__((packed));
+
+
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
@@ -144,13 +228,18 @@ struct bdb_general_definitions {
u8 boot_display[2];
u8 child_dev_size;
- /* device info */
- u8 tv_or_lvds_info[33];
- u8 dev1[33];
- u8 dev2[33];
- u8 dev3[33];
- u8 dev4[33];
- /* may be another device block here on some platforms */
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+ * On some platforms the number of devices is 6, but it can be as few
+ * as 4 if both TV and LVDS are missing.
+ * The device count depends on the size of the general definition
+ * block and is obtained with the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * sizeof(child_device_config);
+ */
+ struct child_device_config devices[0];
};
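As the comment above notes, the number of child_device_config entries is not fixed; a parser derives it from the block size stored just before the section payload. A minimal sketch of that calculation, reusing the get_blocksize() helper added earlier in this diff (the function name here is illustrative):

	/* Illustrative only: same formula parse_sdvo_device_mapping() uses */
	static int vbt_child_device_count(struct bdb_general_definitions *p_defs)
	{
		u16 block_size = get_blocksize(p_defs);	/* u16 stored 2 bytes before the block */

		return (block_size - sizeof(*p_defs)) /
			sizeof(struct child_device_config);
	}
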
struct bdb_lvds_options {
@@ -302,6 +391,45 @@ struct bdb_sdvo_lvds_options {
u8 panel_misc_bits_4;
} __attribute__((packed));
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+} __attribute__((packed));
extern bool psb_intel_init_bios(struct drm_device *dev);
extern void psb_intel_destroy_bios(struct drm_device *dev);
@@ -427,4 +555,21 @@ extern void psb_intel_destroy_bios(struct drm_device *dev);
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index af656787db0f..265ad0de44a6 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -163,142 +163,30 @@ struct backlight_device *mdfld_get_backlight_device(void)
*
* Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
*/
-static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct medfield_state *regs = &dev_priv->regs.mdfld;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
int i;
+ u32 *mipi_val;
/* register */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 dspstatus_reg = PIPEASTAT;
- u32 palette_reg = PALETTE_A;
-
- /* pointer to values */
- u32 *dpll_val = &regs->saveDPLL_A;
- u32 *fp_val = &regs->saveFPA0;
- u32 *pipeconf_val = &regs->savePIPEACONF;
- u32 *htot_val = &regs->saveHTOTAL_A;
- u32 *hblank_val = &regs->saveHBLANK_A;
- u32 *hsync_val = &regs->saveHSYNC_A;
- u32 *vtot_val = &regs->saveVTOTAL_A;
- u32 *vblank_val = &regs->saveVBLANK_A;
- u32 *vsync_val = &regs->saveVSYNC_A;
- u32 *pipesrc_val = &regs->savePIPEASRC;
- u32 *dspstride_val = &regs->saveDSPASTRIDE;
- u32 *dsplinoff_val = &regs->saveDSPALINOFF;
- u32 *dsptileoff_val = &regs->saveDSPATILEOFF;
- u32 *dspsize_val = &regs->saveDSPASIZE;
- u32 *dsppos_val = &regs->saveDSPAPOS;
- u32 *dspsurf_val = &regs->saveDSPASURF;
- u32 *mipi_val = &regs->saveMIPI;
- u32 *dspcntr_val = &regs->saveDSPACNTR;
- u32 *dspstatus_val = &regs->saveDSPASTATUS;
- u32 *palette_val = regs->save_palette_a;
-
- switch (pipe) {
+
+ switch (pipenum) {
case 0:
+ mipi_val = &regs->saveMIPI;
break;
case 1:
- /* regester */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = &regs->saveDPLL_B;
- fp_val = &regs->saveFPB0;
- pipeconf_val = &regs->savePIPEBCONF;
- htot_val = &regs->saveHTOTAL_B;
- hblank_val = &regs->saveHBLANK_B;
- hsync_val = &regs->saveHSYNC_B;
- vtot_val = &regs->saveVTOTAL_B;
- vblank_val = &regs->saveVBLANK_B;
- vsync_val = &regs->saveVSYNC_B;
- pipesrc_val = &regs->savePIPEBSRC;
- dspstride_val = &regs->saveDSPBSTRIDE;
- dsplinoff_val = &regs->saveDSPBLINOFF;
- dsptileoff_val = &regs->saveDSPBTILEOFF;
- dspsize_val = &regs->saveDSPBSIZE;
- dsppos_val = &regs->saveDSPBPOS;
- dspsurf_val = &regs->saveDSPBSURF;
- dspcntr_val = &regs->saveDSPBCNTR;
- dspstatus_val = &regs->saveDSPBSTATUS;
- palette_val = regs->save_palette_b;
+ mipi_val = &regs->saveMIPI;
break;
case 2:
/* register */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
/* pointer to values */
- pipeconf_val = &regs->savePIPECCONF;
- htot_val = &regs->saveHTOTAL_C;
- hblank_val = &regs->saveHBLANK_C;
- hsync_val = &regs->saveHSYNC_C;
- vtot_val = &regs->saveVTOTAL_C;
- vblank_val = &regs->saveVBLANK_C;
- vsync_val = &regs->saveVSYNC_C;
- pipesrc_val = &regs->savePIPECSRC;
- dspstride_val = &regs->saveDSPCSTRIDE;
- dsplinoff_val = &regs->saveDSPCLINOFF;
- dsptileoff_val = &regs->saveDSPCTILEOFF;
- dspsize_val = &regs->saveDSPCSIZE;
- dsppos_val = &regs->saveDSPCPOS;
- dspsurf_val = &regs->saveDSPCSURF;
mipi_val = &regs->saveMIPI_C;
- dspcntr_val = &regs->saveDSPCCNTR;
- dspstatus_val = &regs->saveDSPCSTATUS;
- palette_val = regs->save_palette_c;
break;
default:
DRM_ERROR("%s, invalid pipe number.\n", __func__);
@@ -306,30 +194,30 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
}
/* Pipe & plane A info */
- *dpll_val = PSB_RVDC32(dpll_reg);
- *fp_val = PSB_RVDC32(fp_reg);
- *pipeconf_val = PSB_RVDC32(pipeconf_reg);
- *htot_val = PSB_RVDC32(htot_reg);
- *hblank_val = PSB_RVDC32(hblank_reg);
- *hsync_val = PSB_RVDC32(hsync_reg);
- *vtot_val = PSB_RVDC32(vtot_reg);
- *vblank_val = PSB_RVDC32(vblank_reg);
- *vsync_val = PSB_RVDC32(vsync_reg);
- *pipesrc_val = PSB_RVDC32(pipesrc_reg);
- *dspstride_val = PSB_RVDC32(dspstride_reg);
- *dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
- *dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
- *dspsize_val = PSB_RVDC32(dspsize_reg);
- *dsppos_val = PSB_RVDC32(dsppos_reg);
- *dspsurf_val = PSB_RVDC32(dspsurf_reg);
- *dspcntr_val = PSB_RVDC32(dspcntr_reg);
- *dspstatus_val = PSB_RVDC32(dspstatus_reg);
+ pipe->dpll = PSB_RVDC32(map->dpll);
+ pipe->fp0 = PSB_RVDC32(map->fp0);
+ pipe->conf = PSB_RVDC32(map->conf);
+ pipe->htotal = PSB_RVDC32(map->htotal);
+ pipe->hblank = PSB_RVDC32(map->hblank);
+ pipe->hsync = PSB_RVDC32(map->hsync);
+ pipe->vtotal = PSB_RVDC32(map->vtotal);
+ pipe->vblank = PSB_RVDC32(map->vblank);
+ pipe->vsync = PSB_RVDC32(map->vsync);
+ pipe->src = PSB_RVDC32(map->src);
+ pipe->stride = PSB_RVDC32(map->stride);
+ pipe->linoff = PSB_RVDC32(map->linoff);
+ pipe->tileoff = PSB_RVDC32(map->tileoff);
+ pipe->size = PSB_RVDC32(map->size);
+ pipe->pos = PSB_RVDC32(map->pos);
+ pipe->surf = PSB_RVDC32(map->surf);
+ pipe->cntr = PSB_RVDC32(map->cntr);
+ pipe->status = PSB_RVDC32(map->status);
/*save palette (gamma) */
for (i = 0; i < 256; i++)
- palette_val[i] = PSB_RVDC32(palette_reg + (i << 2));
+ pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
- if (pipe == 1) {
+ if (pipenum == 1) {
regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
@@ -349,7 +237,7 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
*
* Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
*/
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
{
/* To get panel out of ULPS mode. */
u32 temp = 0;
@@ -357,142 +245,30 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
struct drm_psb_private *dev_priv = dev->dev_private;
struct mdfld_dsi_config *dsi_config = NULL;
struct medfield_state *regs = &dev_priv->regs.mdfld;
- u32 i = 0;
- u32 dpll = 0;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
+ u32 i;
+ u32 dpll;
u32 timeout = 0;
- /* regester */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
- u32 dspstatus_reg = PIPEASTAT;
+ /* register */
u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 palette_reg = PALETTE_A;
/* values */
- u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE;
- u32 fp_val = regs->saveFPA0;
- u32 pipeconf_val = regs->savePIPEACONF;
- u32 htot_val = regs->saveHTOTAL_A;
- u32 hblank_val = regs->saveHBLANK_A;
- u32 hsync_val = regs->saveHSYNC_A;
- u32 vtot_val = regs->saveVTOTAL_A;
- u32 vblank_val = regs->saveVBLANK_A;
- u32 vsync_val = regs->saveVSYNC_A;
- u32 pipesrc_val = regs->savePIPEASRC;
- u32 dspstride_val = regs->saveDSPASTRIDE;
- u32 dsplinoff_val = regs->saveDSPALINOFF;
- u32 dsptileoff_val = regs->saveDSPATILEOFF;
- u32 dspsize_val = regs->saveDSPASIZE;
- u32 dsppos_val = regs->saveDSPAPOS;
- u32 dspsurf_val = regs->saveDSPASURF;
- u32 dspstatus_val = regs->saveDSPASTATUS;
+ u32 dpll_val = pipe->dpll;
u32 mipi_val = regs->saveMIPI;
- u32 dspcntr_val = regs->saveDSPACNTR;
- u32 *palette_val = regs->save_palette_a;
- switch (pipe) {
+ switch (pipenum) {
case 0:
+ dpll_val &= ~DPLL_VCO_ENABLE;
dsi_config = dev_priv->dsi_configs[0];
break;
case 1:
- /* regester */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE;
- fp_val = regs->saveFPB0;
- pipeconf_val = regs->savePIPEBCONF;
- htot_val = regs->saveHTOTAL_B;
- hblank_val = regs->saveHBLANK_B;
- hsync_val = regs->saveHSYNC_B;
- vtot_val = regs->saveVTOTAL_B;
- vblank_val = regs->saveVBLANK_B;
- vsync_val = regs->saveVSYNC_B;
- pipesrc_val = regs->savePIPEBSRC;
- dspstride_val = regs->saveDSPBSTRIDE;
- dsplinoff_val = regs->saveDSPBLINOFF;
- dsptileoff_val = regs->saveDSPBTILEOFF;
- dspsize_val = regs->saveDSPBSIZE;
- dsppos_val = regs->saveDSPBPOS;
- dspsurf_val = regs->saveDSPBSURF;
- dspcntr_val = regs->saveDSPBCNTR;
- dspstatus_val = regs->saveDSPBSTATUS;
- palette_val = regs->save_palette_b;
+ dpll_val &= ~DPLL_VCO_ENABLE;
break;
case 2:
- /* regester */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
- /* values */
- pipeconf_val = regs->savePIPECCONF;
- htot_val = regs->saveHTOTAL_C;
- hblank_val = regs->saveHBLANK_C;
- hsync_val = regs->saveHSYNC_C;
- vtot_val = regs->saveVTOTAL_C;
- vblank_val = regs->saveVBLANK_C;
- vsync_val = regs->saveVSYNC_C;
- pipesrc_val = regs->savePIPECSRC;
- dspstride_val = regs->saveDSPCSTRIDE;
- dsplinoff_val = regs->saveDSPCLINOFF;
- dsptileoff_val = regs->saveDSPCTILEOFF;
- dspsize_val = regs->saveDSPCSIZE;
- dsppos_val = regs->saveDSPCPOS;
- dspsurf_val = regs->saveDSPCSURF;
mipi_val = regs->saveMIPI_C;
- dspcntr_val = regs->saveDSPCCNTR;
- dspstatus_val = regs->saveDSPCSTATUS;
- palette_val = regs->save_palette_c;
-
dsi_config = dev_priv->dsi_configs[1];
break;
default:
@@ -503,14 +279,14 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
- if (pipe == 1) {
- PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
- PSB_RVDC32(dpll_reg);
+ if (pipenum == 1) {
+ PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
+ PSB_RVDC32(map->dpll);
- PSB_WVDC32(fp_val, fp_reg);
+ PSB_WVDC32(pipe->fp0, map->fp0);
} else {
- dpll = PSB_RVDC32(dpll_reg);
+ dpll = PSB_RVDC32(map->dpll);
if (!(dpll & DPLL_VCO_ENABLE)) {
@@ -518,23 +294,23 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
before enable the VCO */
if (dpll & MDFLD_PWR_GATE_EN) {
dpll &= ~MDFLD_PWR_GATE_EN;
- PSB_WVDC32(dpll, dpll_reg);
+ PSB_WVDC32(dpll, map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
- PSB_WVDC32(fp_val, fp_reg);
- PSB_WVDC32(dpll_val, dpll_reg);
+ PSB_WVDC32(pipe->fp0, map->fp0);
+ PSB_WVDC32(dpll_val, map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
dpll_val |= DPLL_VCO_ENABLE;
- PSB_WVDC32(dpll_val, dpll_reg);
- PSB_RVDC32(dpll_reg);
+ PSB_WVDC32(dpll_val, map->dpll);
+ PSB_RVDC32(map->dpll);
/* wait for DSI PLL to lock */
while (timeout < 20000 &&
- !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
@@ -547,28 +323,28 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
}
}
/* Restore mode */
- PSB_WVDC32(htot_val, htot_reg);
- PSB_WVDC32(hblank_val, hblank_reg);
- PSB_WVDC32(hsync_val, hsync_reg);
- PSB_WVDC32(vtot_val, vtot_reg);
- PSB_WVDC32(vblank_val, vblank_reg);
- PSB_WVDC32(vsync_val, vsync_reg);
- PSB_WVDC32(pipesrc_val, pipesrc_reg);
- PSB_WVDC32(dspstatus_val, dspstatus_reg);
+ PSB_WVDC32(pipe->htotal, map->htotal);
+ PSB_WVDC32(pipe->hblank, map->hblank);
+ PSB_WVDC32(pipe->hsync, map->hsync);
+ PSB_WVDC32(pipe->vtotal, map->vtotal);
+ PSB_WVDC32(pipe->vblank, map->vblank);
+ PSB_WVDC32(pipe->vsync, map->vsync);
+ PSB_WVDC32(pipe->src, map->src);
+ PSB_WVDC32(pipe->status, map->status);
/*set up the plane*/
- PSB_WVDC32(dspstride_val, dspstride_reg);
- PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
- PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
- PSB_WVDC32(dspsize_val, dspsize_reg);
- PSB_WVDC32(dsppos_val, dsppos_reg);
- PSB_WVDC32(dspsurf_val, dspsurf_reg);
-
- if (pipe == 1) {
+ PSB_WVDC32(pipe->stride, map->stride);
+ PSB_WVDC32(pipe->linoff, map->linoff);
+ PSB_WVDC32(pipe->tileoff, map->tileoff);
+ PSB_WVDC32(pipe->size, map->size);
+ PSB_WVDC32(pipe->pos, map->pos);
+ PSB_WVDC32(pipe->surf, map->surf);
+
+ if (pipenum == 1) {
/* restore palette (gamma) */
/*DRM_UDELAY(50000); */
for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
@@ -578,7 +354,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*TODO: resume pipe*/
/*enable the plane*/
- PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg);
+ PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
return 0;
}
@@ -588,7 +364,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*setup MIPI adapter + MIPI IP registers*/
if (dsi_config)
- mdfld_dsi_controller_init(dsi_config, pipe);
+ mdfld_dsi_controller_init(dsi_config, pipenum);
if (in_atomic() || in_interrupt())
mdelay(20);
@@ -596,7 +372,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
msleep(20);
/*enable the plane*/
- PSB_WVDC32(dspcntr_val, dspcntr_reg);
+ PSB_WVDC32(pipe->cntr, map->cntr);
if (in_atomic() || in_interrupt())
mdelay(20);
@@ -625,12 +401,12 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
mdelay(1);
/*enable the pipe*/
- PSB_WVDC32(pipeconf_val, pipeconf_reg);
+ PSB_WVDC32(pipe->conf, map->conf);
/* restore palette (gamma) */
/*DRM_UDELAY(50000); */
for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
return 0;
}
@@ -667,14 +443,98 @@ static int mdfld_power_up(struct drm_device *dev)
return 0;
}
+/* Medfield */
+static const struct psb_offset mdfld_regmap[3] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = MDFLD_DPLL_DIV0,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = MDFLD_DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = MRST_DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+ {
+ .fp0 = MRST_FPA0, /* This is what the old code did ?? */
+ .cntr = DSPCCNTR,
+ .conf = PIPECCONF,
+ .src = PIPECSRC,
+ /* No DPLL_C */
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_C,
+ .hblank = HBLANK_C,
+ .hsync = HSYNC_C,
+ .vtotal = VTOTAL_C,
+ .vblank = VBLANK_C,
+ .vsync = VSYNC_C,
+ .stride = DSPCSTRIDE,
+ .size = DSPCSIZE, /* old pipe C code used DSPCSIZE; DSPBSIZE here looked like a typo */
+ .pos = DSPCPOS,
+ .surf = DSPCSURF,
+ .addr = MDFLD_DSPCBASE,
+ .status = PIPECSTAT,
+ .linoff = DSPCLINOFF,
+ .tileoff = DSPCTILEOFF,
+ .palette = PALETTE_C,
+ },
+};
+
+static int mdfld_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = mdfld_regmap;
+ return mid_chip_setup(dev);
+}
+
const struct psb_ops mdfld_chip_ops = {
.name = "mdfld",
.accel_2d = 0,
.pipes = 3,
.crtcs = 3,
+ .lvds_mask = (1 << 1),
+ .hdmi_mask = (1 << 1),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
- .chip_setup = mid_chip_setup,
+ .chip_setup = mdfld_chip_setup,
.crtc_helper = &mdfld_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
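The register-map table above replaces the long per-pipe switch blocks in the save and restore paths with a simple index into dev_priv->regmap[]. A minimal sketch of the resulting save pattern, assuming the psb_pipe/psb_offset layout introduced by this series (the helper name is illustrative only):

/* Illustrative only: save one pipe's state through the per-pipe map */
static void sketch_save_pipe(struct drm_psb_private *dev_priv, int pipenum)
{
	struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
	const struct psb_offset *map = &dev_priv->regmap[pipenum];
	int i;

	pipe->dpll = PSB_RVDC32(map->dpll);
	pipe->conf = PSB_RVDC32(map->conf);
	pipe->src  = PSB_RVDC32(map->src);
	pipe->cntr = PSB_RVDC32(map->cntr);
	pipe->surf = PSB_RVDC32(map->surf);
	/* ...the remaining fields are read one-to-one in the same way... */
	for (i = 0; i < 256; i++)
		pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
}

Restore is the mirror image with PSB_WVDC32(), which is why even the C pipe entry carries a dpll offset despite there being no DPLL_C: the lookup has to stay total for every pipe the chip declares.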
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d52358b744a0..b34ff097b979 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -869,7 +869,6 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
mdfld_set_pipe_timing(dsi_config, pipe);
REG_WRITE(DSPABASE, 0x00);
- REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4));
REG_WRITE(DSPASIZE,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index baa0e14165e0..489ffd2c66e5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -605,6 +605,8 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
struct mdfld_dsi_config *dsi_config =
mdfld_dsi_get_config(dsi_connector);
struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 mipi_val = 0;
if (!dsi_connector) {
@@ -632,21 +634,13 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
/*init regs*/
- if (pipe == 0) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPACNTR;
- pkg_sender->pipeconf_reg = PIPEACONF;
- pkg_sender->dsplinoff_reg = DSPALINOFF;
- pkg_sender->dspsurf_reg = DSPASURF;
- pkg_sender->pipestat_reg = PIPEASTAT;
- } else if (pipe == 2) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPCCNTR;
- pkg_sender->pipeconf_reg = PIPECCONF;
- pkg_sender->dsplinoff_reg = DSPCLINOFF;
- pkg_sender->dspsurf_reg = DSPCSURF;
- pkg_sender->pipestat_reg = PIPECSTAT;
- }
+ /* FIXME: should just copy the regmap ptr ? */
+ pkg_sender->dpll_reg = map->dpll;
+ pkg_sender->dspcntr_reg = map->cntr;
+ pkg_sender->pipeconf_reg = map->conf;
+ pkg_sender->dsplinoff_reg = map->linoff;
+ pkg_sender->dspsurf_reg = map->surf;
+ pkg_sender->pipestat_reg = map->status;
pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
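The FIXME above notes that the sender could simply hold a pointer to the per-pipe map rather than copying six offsets at init time. A hedged sketch of that alternative; the regs member is hypothetical and not part of this patch:

/* Hypothetical variant: keep the map pointer instead of copied offsets */
struct mdfld_dsi_pkg_sender_sketch {
	const struct psb_offset *regs;		/* not a real field here */
};

static void sketch_sender_init(struct mdfld_dsi_pkg_sender_sketch *s,
			       struct drm_psb_private *dev_priv, int pipe)
{
	s->regs = &dev_priv->regmap[pipe];
	/* later users would write REG_WRITE(s->regs->surf, ...) instead of
	   going through a cached dspsurf_reg value */
}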
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index a35a2921bdf7..3f3cd619c79f 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -50,17 +50,14 @@ struct mrst_clock_t {
void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int count, temp;
- u32 pipeconf_reg = PIPEACONF;
switch (pipe) {
case 0:
- break;
case 1:
- pipeconf_reg = PIPEBCONF;
- break;
case 2:
- pipeconf_reg = PIPECCONF;
break;
default:
DRM_ERROR("Illegal Pipe Number.\n");
@@ -73,7 +70,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
/* Wait for the pipe disable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_PIPE_STATE) == 0)
break;
}
@@ -81,17 +78,14 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int count, temp;
- u32 pipeconf_reg = PIPEACONF;
switch (pipe) {
case 0:
- break;
case 1:
- pipeconf_reg = PIPEBCONF;
- break;
case 2:
- pipeconf_reg = PIPECCONF;
break;
default:
DRM_ERROR("Illegal Pipe Number.\n");
@@ -104,7 +98,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
/* Wait for the pipe enable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_PIPE_STATE) == 1)
break;
}
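After the conversion the two wait helpers above differ only in the pipe-state value they poll, since every pipe now reads its PIPExCONF through map->conf. A minimal sketch of a combined poll loop, reusing the same COUNT_MAX bound (the helper itself is illustrative and not part of the patch):

/* Illustrative: poll the pipe-state bit until it reaches the wanted value */
static void sketch_wait_for_pipe_state(struct drm_device *dev, int pipe,
				       bool enabled)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	int count;

	for (count = 0; count < COUNT_MAX; count++) {
		u32 temp = REG_READ(map->conf);

		if (!!(temp & PIPEACONF_PIPE_STATE) == enabled)
			break;
	}
}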
@@ -189,15 +183,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dsplinoff = DSPALINOFF;
- int dspsurf = DSPASURF;
- int dspstride = DSPASTRIDE;
- int dspcntr_reg = DSPACNTR;
u32 dspcntr;
int ret;
@@ -215,23 +206,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
- switch (pipe) {
- case 0:
- dsplinoff = DSPALINOFF;
- break;
- case 1:
- dsplinoff = DSPBLINOFF;
- dspsurf = DSPBSURF;
- dspstride = DSPBSTRIDE;
- dspcntr_reg = DSPBCNTR;
- break;
- case 2:
- dsplinoff = DSPCLINOFF;
- dspsurf = DSPCSURF;
- dspstride = DSPCSTRIDE;
- dspcntr_reg = DSPCCNTR;
- break;
- default:
+ if (pipe > 2) {
DRM_ERROR("Illegal Pipe Number.\n");
return -EINVAL;
}
@@ -242,8 +217,8 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -261,14 +236,14 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
start, offset, x, y);
- REG_WRITE(dsplinoff, offset);
- REG_READ(dsplinoff);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->linoff, offset);
+ REG_READ(map->linoff);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
gma_power_end(dev);
@@ -281,78 +256,56 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
*/
void mdfld_disable_crtc(struct drm_device *dev, int pipe)
{
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
dev_dbg(dev->dev, "pipe = %d\n", pipe);
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = MDFLD_DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = DSPBSURF;
- pipeconf_reg = PIPEBCONF;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
-
if (pipe != 1)
mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* FIXME_JLIU7 MDFLD_PO revisit */
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
temp &= ~PIPEACONF_ENABLE;
temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
/* Wait for the pipe disable to take effect. */
mdfldWaitForPipeDisable(dev, pipe);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if (temp & DPLL_VCO_ENABLE) {
if ((pipe != 1 &&
!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
& PIPEACONF_ENABLE)) || pipe == 1) {
temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to turn off. */
/* FIXME_MDFLD PO may need more delay */
udelay(500);
if (!(temp & MDFLD_PWR_GATE_EN)) {
/* gating power of DPLL */
- REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+ REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(5000);
}
@@ -373,41 +326,15 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
- u32 pipestat_reg = PIPEASTAT;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 pipeconf = dev_priv->pipeconf[pipe];
u32 temp;
int timeout = 0;
dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
-/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
-/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = MRST_DSPBBASE;
- pipeconf_reg = PIPEBCONF;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- pipestat_reg = PIPECSTAT;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
+ /* Note: Old code uses pipe a stat for pipe b but that appears
+    to be a bug */
if (!gma_power_begin(dev, true))
return;
@@ -420,25 +347,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
/* When ungating power of DPLL, needs to wait 0.5us
before enable the VCO */
if (temp & MDFLD_PWR_GATE_EN) {
temp &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, temp);
+ REG_WRITE(map->dpll, temp);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/**
* wait for DSI PLL to lock
@@ -446,25 +373,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
* since both MIPI pipes share the same PLL.
*/
while ((pipe != 2) && (timeout < 20000) &&
- !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
}
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(pipeconf_reg, pipeconf);
+ REG_WRITE(map->conf, pipeconf);
/* Wait for the pipe enable to take effect. */
mdfldWaitForPipeEnable(dev, pipe);
@@ -473,39 +400,39 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
/*workaround for sighting 3741701 Random X blank display*/
/*perform w/a in video mode only on pipe A or C*/
if (pipe == 0 || pipe == 2) {
- REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+ REG_WRITE(map->status, REG_READ(map->status));
msleep(100);
- if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg))
+ if (PIPE_VBLANK_STATUS & REG_READ(map->status))
dev_dbg(dev->dev, "OK");
else {
dev_dbg(dev->dev, "STUCK!!!!");
/*shutdown controller*/
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg,
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
REG_WRITE(0xb048, 1);
msleep(100);
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
temp &= ~PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
+ REG_WRITE(map->conf, temp);
msleep(100); /*wait for pipe disable*/
REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
msleep(100);
REG_WRITE(0xb004, REG_READ(0xb004));
/* try to bring the controller back up again*/
REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg,
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
REG_WRITE(0xb048, 2);
msleep(100);
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
temp |= PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
+ REG_WRITE(map->conf, temp);
}
}
@@ -529,35 +456,35 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
temp &= ~PIPEACONF_ENABLE;
temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
/* Wait for the pipe disable to take effect. */
mdfldWaitForPipeDisable(dev, pipe);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if (temp & DPLL_VCO_ENABLE) {
if ((pipe != 1 && !((REG_READ(PIPEACONF)
| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
|| pipe == 1) {
temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to turn off. */
/* FIXME_MDFLD PO may need more delay */
udelay(500);
@@ -764,21 +691,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = MRST_FPA0;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int pipeconf_reg = PIPEACONF;
- int htot_reg = HTOTAL_A;
- int hblank_reg = HBLANK_A;
- int hsync_reg = HSYNC_A;
- int vtot_reg = VTOTAL_A;
- int vblank_reg = VBLANK_A;
- int vsync_reg = VSYNC_A;
- int dspsize_reg = DSPASIZE;
- int dsppos_reg = DSPAPOS;
- int pipesrc_reg = PIPEASRC;
- u32 *pipeconf = &dev_priv->pipeconf[pipe];
- u32 *dspcntr = &dev_priv->dspcntr[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
clk_tmp = 0;
@@ -806,45 +719,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
#endif
- switch (pipe) {
- case 0:
- break;
- case 1:
- fp_reg = FPB0;
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- pipesrc_reg = PIPEBSRC;
- fp_reg = MDFLD_DPLL_DIV0;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- pipesrc_reg = PIPECSRC;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return 0;
- }
-
ret = check_fb(crtc->fb);
if (ret)
return ret;
@@ -929,21 +803,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
* contained within the displayable area of the screen image
* (frame buffer).
*/
- REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+ REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
/* Set the CRTC with encoder mode. */
- REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+ REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
| (mode->crtc_vdisplay - 1));
} else {
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->crtc_vdisplay - 1) << 16) |
(mode->crtc_hdisplay - 1));
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
}
- REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
drm_connector_property_get_value(connector,
@@ -961,34 +835,34 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start -
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start -
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start -
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start -
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
@@ -1000,12 +874,12 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
/* setup pipeconf */
- *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+ dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(map->conf); */
/* Set up the display plane register */
- *dspcntr = REG_READ(dspcntr_reg);
- *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
- *dspcntr |= DISPLAY_PLANE_ENABLE;
+ dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
+ dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
+ dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
if (is_mipi2)
goto mrst_crtc_mode_set_exit;
@@ -1070,21 +944,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
clock.p1, m_conv);
}
- dpll = REG_READ(dpll_reg);
+ dpll = REG_READ(map->dpll);
if (dpll & DPLL_VCO_ENABLE) {
dpll &= ~DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
/* reset M1, N1 & P1 */
- REG_WRITE(fp_reg, 0);
+ REG_WRITE(map->fp0, 0);
dpll &= ~MDFLD_P1_MASK;
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
@@ -1093,7 +967,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
* enable the VCO */
if (dpll & MDFLD_PWR_GATE_EN) {
dpll &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
@@ -1134,18 +1008,18 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
fp = 0x000000c1;
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
dpll |= DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* wait for DSI PLL to lock */
while (timeout < 20000 &&
- !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
@@ -1155,11 +1029,11 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
- REG_WRITE(pipeconf_reg, *pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
+ REG_READ(map->conf);
/* Wait for the pipe enable to take effect. */
- REG_WRITE(dspcntr_reg, *dspcntr);
+ REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
psb_intel_wait_for_vblank(dev);
mrst_crtc_mode_set_exit:
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 5eee9ad80da4..b2a790bd9899 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,139 +118,214 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
dev_priv->platform_rev_id);
}
+struct vbt_header {
+ u32 signature;
+ u8 revision;
+} __packed;
+
+/* The same for r0 and r1 */
+struct vbt_r0 {
+ struct vbt_header vbt_header;
+ u8 size;
+ u8 checksum;
+} __packed;
+
+struct vbt_r10 {
+ struct vbt_header vbt_header;
+ u8 checksum;
+ u16 size;
+ u8 panel_count;
+ u8 primary_panel_idx;
+ u8 secondary_panel_idx;
+ u8 __reserved[5];
+} __packed;
+
+static int read_vbt_r0(u32 addr, struct vbt_r0 *vbt)
+{
+ void __iomem *vbt_virtual;
+
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (vbt_virtual == NULL)
+ return -1;
+
+ memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual);
+
+ return 0;
+}
+
+static int read_vbt_r10(u32 addr, struct vbt_r10 *vbt)
+{
+ void __iomem *vbt_virtual;
+
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (!vbt_virtual)
+ return -1;
+
+ memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual);
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r0(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r0 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r0 gct;
+ u8 bpi;
+
+ if (read_vbt_r0(addr, &vbt))
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+ if (!gct_virtual)
+ return -1;
+ memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+ iounmap(gct_virtual);
+
+ bpi = gct.PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt = gct.PD.PanelType;
+ dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+ dev_priv->gct_data.Panel_Port_Control =
+ gct.panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r1(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r0 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r1 gct;
+ u8 bpi;
+
+ if (read_vbt_r0(addr, &vbt))
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+ if (!gct_virtual)
+ return -1;
+ memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+ iounmap(gct_virtual);
+
+ bpi = gct.PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt = gct.PD.PanelType;
+ dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+ dev_priv->gct_data.Panel_Port_Control =
+ gct.panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+ return 0;
+}
+
+static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
+{
+ struct vbt_r10 vbt;
+ void __iomem *gct_virtual;
+ struct gct_r10 *gct;
+ struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+ struct gct_r10_timing_info *ti;
+ int ret = -1;
+
+ if (read_vbt_r10(addr, &vbt))
+ return -1;
+
+ gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
+ if (!gct)
+ return -1;
+
+ gct_virtual = ioremap(addr + sizeof(vbt),
+ sizeof(*gct) * vbt.panel_count);
+ if (!gct_virtual)
+ goto out;
+ memcpy_fromio(gct, gct_virtual, sizeof(*gct));
+ iounmap(gct_virtual);
+
+ dev_priv->gct_data.bpi = vbt.primary_panel_idx;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ gct[vbt.primary_panel_idx].Panel_MIPI_Display_Descriptor;
+
+ ti = &gct[vbt.primary_panel_idx].DTD;
+ dp_ti->pixel_clock = ti->pixel_clock;
+ dp_ti->hactive_hi = ti->hactive_hi;
+ dp_ti->hactive_lo = ti->hactive_lo;
+ dp_ti->hblank_hi = ti->hblank_hi;
+ dp_ti->hblank_lo = ti->hblank_lo;
+ dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
+ dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
+ dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
+ dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
+ dp_ti->vactive_hi = ti->vactive_hi;
+ dp_ti->vactive_lo = ti->vactive_lo;
+ dp_ti->vblank_hi = ti->vblank_hi;
+ dp_ti->vblank_lo = ti->vblank_lo;
+ dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
+ dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
+ dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
+ dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
+
+ ret = 0;
+out:
+ kfree(gct);
+ return ret;
+}
+
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
u32 addr;
- u16 new_size;
- u8 *vbt_virtual;
- u8 bpi;
- u8 number_desc = 0;
- struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
- struct gct_r10_timing_info ti;
- void *pGCT;
+ u8 __iomem *vbt_virtual;
+ struct vbt_header vbt_header;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+ int ret = -1;
- /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+ /* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
- /* check for platform config address == 0. */
- /* this means fw doesn't support vbt */
-
- if (addr == 0) {
- vbt->size = 0;
- return;
- }
+ if (!addr)
+ goto out;
/* get the virtual address of the vbt */
- vbt_virtual = ioremap(addr, sizeof(*vbt));
- if (vbt_virtual == NULL) {
- vbt->size = 0;
- return;
- }
+ vbt_virtual = ioremap(addr, sizeof(vbt_header));
+ if (!vbt_virtual)
+ goto out;
- memcpy(vbt, vbt_virtual, sizeof(*vbt));
- iounmap(vbt_virtual); /* Free virtual address space */
+ memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
+ iounmap(vbt_virtual);
- /* No matching signature don't process the data */
- if (memcmp(vbt->signature, "$GCT", 4)) {
- vbt->size = 0;
- return;
- }
+ if (memcmp(&vbt_header.signature, "$GCT", 4))
+ goto out;
+
+ dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);
- dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
-
- switch (vbt->revision) {
- case 0:
- vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
- vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->oaktrail_gct;
- bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
- dev_priv->gct_data.bpi = bpi;
- dev_priv->gct_data.pt =
- ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
- memcpy(&dev_priv->gct_data.DTD,
- &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
- sizeof(struct oaktrail_timing_info));
- dev_priv->gct_data.Panel_Port_Control =
- ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ switch (vbt_header.revision) {
+ case 0x00:
+ ret = mid_get_vbt_data_r0(dev_priv, addr);
break;
- case 1:
- vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
- vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->oaktrail_gct;
- bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
- dev_priv->gct_data.bpi = bpi;
- dev_priv->gct_data.pt =
- ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
- memcpy(&dev_priv->gct_data.DTD,
- &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
- sizeof(struct oaktrail_timing_info));
- dev_priv->gct_data.Panel_Port_Control =
- ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ case 0x01:
+ ret = mid_get_vbt_data_r1(dev_priv, addr);
break;
case 0x10:
- /*header definition changed from rev 01 (v2) to rev 10h. */
- /*so, some values have changed location*/
- new_size = vbt->checksum; /*checksum contains lo size byte*/
- /*LSB of oaktrail_gct contains hi size byte*/
- new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
-
- vbt->checksum = vbt->size; /*size contains the checksum*/
- if (new_size > 0xff)
- vbt->size = 0xff; /*restrict size to 255*/
- else
- vbt->size = new_size;
-
- /* number of descriptors defined in the GCT */
- number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
- bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
- vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
- GCT_R10_DISPLAY_DESC_SIZE * number_desc);
- pGCT = vbt->oaktrail_gct;
- pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
- dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
-
- /*copy the GCT display timings into a temp structure*/
- memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
-
- /*now copy the temp struct into the dev_priv->gct_data*/
- dp_ti->pixel_clock = ti.pixel_clock;
- dp_ti->hactive_hi = ti.hactive_hi;
- dp_ti->hactive_lo = ti.hactive_lo;
- dp_ti->hblank_hi = ti.hblank_hi;
- dp_ti->hblank_lo = ti.hblank_lo;
- dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
- dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
- dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
- dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
- dp_ti->vactive_hi = ti.vactive_hi;
- dp_ti->vactive_lo = ti.vactive_lo;
- dp_ti->vblank_hi = ti.vblank_hi;
- dp_ti->vblank_lo = ti.vblank_lo;
- dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
- dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
- dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
- dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
-
- /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- *((u8 *)pGCT + 0x0d);
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
- (*((u8 *)pGCT + 0x0e)) << 8;
+ ret = mid_get_vbt_data_r10(dev_priv, addr);
break;
default:
dev_err(dev->dev, "Unknown revision of GCT!\n");
- vbt->size = 0;
}
+
+out:
+ if (ret)
+ dev_err(dev->dev, "Unable to read GCT!\n");
+ else
+ dev_priv->has_gct = true;
}
int mid_chip_setup(struct drm_device *dev)
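The rewritten VBT parser above reads only the small common header, dispatches on the revision byte and records success in dev_priv->has_gct instead of overloading the old vbt->size field. Consumers test the flag directly, as the oaktrail_chip_setup() change later in this patch does; a one-line restatement of that idiom (use_panel_data() is a hypothetical helper, shown only for illustration):

	if (dev_priv->has_gct)
		use_panel_data(&dev_priv->gct_data);	/* hypothetical consumer of the parsed panel data */
	else
		psb_intel_init_bios(dev);		/* fall back to the video BIOS tables */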
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index 2da1f368f14e..f2f9f38a5362 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -19,14 +19,6 @@
/* MID device specific descriptors */
-struct oaktrail_vbt {
- s8 signature[4]; /*4 bytes,"$GCT" */
- u8 revision;
- u8 size;
- u8 checksum;
- void *oaktrail_gct;
-} __packed;
-
struct oaktrail_timing_info {
u16 pixel_clock;
u8 hactive_lo;
@@ -161,7 +153,7 @@ union oaktrail_panel_rx {
u16 panel_receiver;
} __packed;
-struct oaktrail_gct_v1 {
+struct gct_r0 {
union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -178,7 +170,7 @@ struct oaktrail_gct_v1 {
union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
} __packed;
-struct oaktrail_gct_v2 {
+struct gct_r1 {
union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -195,6 +187,16 @@ struct oaktrail_gct_v2 {
union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
} __packed;
+struct gct_r10 {
+ struct gct_r10_timing_info DTD;
+ u16 Panel_MIPI_Display_Descriptor;
+ u16 Panel_MIPI_Receiver_Descriptor;
+ u16 Panel_Backlight_Inverter_Descriptor;
+ u8 Panel_Initial_Brightness;
+ u32 MIPI_Ctlr_Init_ptr;
+ u32 MIPI_Panel_Init_ptr;
+} __packed;
+
struct oaktrail_gct_data {
u8 bpi; /* boot panel index, number of panel used during boot */
u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
@@ -213,9 +215,6 @@ struct oaktrail_gct_data {
#define MODE_SETTING_IN_DSR 0x4
#define MODE_SETTING_ENCODER_DONE 0x8
-#define GCT_R10_HEADER_SIZE 16
-#define GCT_R10_DISPLAY_DESC_SIZE 28
-
/*
* Moorestown HDMI interfaces
*/
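The GCT_R10_HEADER_SIZE and GCT_R10_DISPLAY_DESC_SIZE constants disappear because the new packed vbt_r10 and gct_r10 structures are expected to encode those 16- and 28-byte layouts directly. A hedged sketch of a compile-time guard for that assumption (not part of the patch; the sizes are taken from the removed macros):

/* Illustrative compile-time checks for the packed GCT r10 layouts */
static inline void sketch_gct_r10_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(struct vbt_r10) != 16);	/* old GCT_R10_HEADER_SIZE */
	BUILD_BUG_ON(sizeof(struct gct_r10) != 28);	/* old GCT_R10_DISPLAY_DESC_SIZE */
}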
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index a39b0d0d680f..f821c835ca90 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -162,12 +162,10 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
if (!gma_power_begin(dev, true))
@@ -181,32 +179,32 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
psb_intel_crtc_load_lut(crtc);
@@ -223,28 +221,28 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for the pipe disable to take effect. */
psb_intel_wait_for_vblank(dev);
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -292,17 +290,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
struct oaktrail_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -350,7 +338,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
@@ -369,34 +357,34 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg,
+ REG_WRITE(map->hblank,
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg,
+ REG_WRITE(map->hsync,
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg,
+ REG_WRITE(map->vblank,
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg,
+ REG_WRITE(map->vsync,
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
@@ -408,10 +396,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
}
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr |= DISPPLANE_GAMMA_ENABLE;
if (pipe == 0)
@@ -467,30 +455,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
mrstPrintPll("chosen", &clock);
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Check the DPLLA lock bit PIPEACONF[29] */
udelay(150);
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
psb_intel_wait_for_vblank(dev);
oaktrail_crtc_mode_set_exit:
@@ -509,15 +497,13 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -533,9 +519,9 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -557,12 +543,12 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
ret = -EINVAL;
goto pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
pipe_set_base_exit:
gma_power_end(dev);
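Both set_base paths above compute the byte offset of the (x, y) origin in the framebuffer the same way before writing it through map->linoff or map->base. Restating that arithmetic with example numbers (values are illustrative only):

	/* offset of pixel (x, y) from the start of the framebuffer, in bytes */
	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
	/* e.g. x = 8, y = 4, pitch = 5120 bytes, 32 bpp:
	   offset = 4 * 5120 + 8 * 4 = 20512 */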
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 41d1924ea31e..0f9b7db80f6b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -187,6 +187,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
int i;
u32 pp_stat;
@@ -201,24 +202,24 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Pipe & plane A info */
- regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF);
- regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC);
- regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0);
- regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1);
- regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
- regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
- regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A);
- regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A);
- regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
- regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A);
- regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+ p->conf = PSB_RVDC32(PIPEACONF);
+ p->src = PSB_RVDC32(PIPEASRC);
+ p->fp0 = PSB_RVDC32(MRST_FPA0);
+ p->fp1 = PSB_RVDC32(MRST_FPA1);
+ p->dpll = PSB_RVDC32(MRST_DPLL_A);
+ p->htotal = PSB_RVDC32(HTOTAL_A);
+ p->hblank = PSB_RVDC32(HBLANK_A);
+ p->hsync = PSB_RVDC32(HSYNC_A);
+ p->vtotal = PSB_RVDC32(VTOTAL_A);
+ p->vblank = PSB_RVDC32(VBLANK_A);
+ p->vsync = PSB_RVDC32(VSYNC_A);
regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
- regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR);
- regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
- regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE);
- regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF);
- regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
- regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+ p->cntr = PSB_RVDC32(DSPACNTR);
+ p->stride = PSB_RVDC32(DSPASTRIDE);
+ p->addr = PSB_RVDC32(DSPABASE);
+ p->surf = PSB_RVDC32(DSPASURF);
+ p->linoff = PSB_RVDC32(DSPALINOFF);
+ p->tileoff = PSB_RVDC32(DSPATILEOFF);
/* Save cursor regs */
regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
@@ -227,7 +228,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
/* Save palette (gamma) */
for (i = 0; i < 256; i++)
- regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+ p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_save(dev);
@@ -300,6 +301,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
u32 pp_stat;
int i;
@@ -317,21 +319,21 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
PSB_WVDC32(0x80000000, VGACNTRL);
/* set the plls */
- PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0);
- PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1);
+ PSB_WVDC32(p->fp0, MRST_FPA0);
+ PSB_WVDC32(p->fp1, MRST_FPA1);
/* Actually enable it */
- PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A);
+ PSB_WVDC32(p->dpll, MRST_DPLL_A);
DRM_UDELAY(150);
/* Restore mode */
- PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A);
- PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A);
- PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A);
- PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A);
- PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A);
- PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A);
- PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC);
+ PSB_WVDC32(p->htotal, HTOTAL_A);
+ PSB_WVDC32(p->hblank, HBLANK_A);
+ PSB_WVDC32(p->hsync, HSYNC_A);
+ PSB_WVDC32(p->vtotal, VTOTAL_A);
+ PSB_WVDC32(p->vblank, VBLANK_A);
+ PSB_WVDC32(p->vsync, VSYNC_A);
+ PSB_WVDC32(p->src, PIPEASRC);
PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
/* Restore performance mode*/
@@ -339,16 +341,16 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Enable the pipe*/
if (dev_priv->iLVDS_enable)
- PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF);
+ PSB_WVDC32(p->conf, PIPEACONF);
/* Set up the plane*/
- PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF);
- PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE);
- PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF);
+ PSB_WVDC32(p->linoff, DSPALINOFF);
+ PSB_WVDC32(p->stride, DSPASTRIDE);
+ PSB_WVDC32(p->tileoff, DSPATILEOFF);
/* Enable the plane */
- PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR);
- PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF);
+ PSB_WVDC32(p->cntr, DSPACNTR);
+ PSB_WVDC32(p->surf, DSPASURF);
/* Enable Cursor A */
PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
@@ -357,7 +359,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Restore palette (gamma) */
for (i = 0; i < 256; i++)
- PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2));
+ PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_restore(dev);
@@ -454,31 +456,84 @@ static int oaktrail_power_up(struct drm_device *dev)
return 0;
}
+/* Oaktrail */
+static const struct psb_offset oaktrail_regmap[2] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+};
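[editor note] The regmap table above is what lets the per-pipe code in this series drop the old pattern of pipe ternaries ((pipe == 0) ? HTOTAL_A : HTOTAL_B) and the duplicated saveXXX_A/saveXXX_B fields: each chip installs its own table and everything else indexes dev_priv->regmap[pipe]. A minimal sketch of the access pattern, using the REG_READ accessor and psb_offset fields from this patch (the helper name is made up for illustration):

static u32 example_read_htotal(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	const struct psb_offset *map = &dev_priv->regmap[pipe];

	/* One table lookup replaces the old (pipe == 0) ? HTOTAL_A : HTOTAL_B test */
	return REG_READ(map->htotal);
}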
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
int ret;
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+
+ dev_priv->regmap = oaktrail_regmap;
+
ret = mid_chip_setup(dev);
if (ret < 0)
return ret;
- if (vbt->size == 0) {
+ if (!dev_priv->has_gct) {
/* Now pull the BIOS data */
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
}
+ oaktrail_hdmi_setup(dev);
return 0;
}
static void oaktrail_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
oaktrail_hdmi_teardown(dev);
- if (vbt->size == 0)
+ if (!dev_priv->has_gct)
psb_intel_destroy_bios(dev);
}
@@ -487,6 +542,9 @@ const struct psb_ops oaktrail_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0),
+ .lvds_mask = (1 << 0),
+ .cursor_needs_phys = 0,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = oaktrail_chip_setup,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f8b367b45f66..c10899c953b9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -179,7 +179,6 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
@@ -188,11 +187,6 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -440,6 +434,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
int i;
/* dpll */
@@ -450,14 +445,14 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
/* pipe B */
- regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
- regs->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
- regs->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
- regs->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
- regs->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
- regs->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
- regs->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
- regs->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
+ pipeb->conf = PSB_RVDC32(PIPEBCONF);
+ pipeb->src = PSB_RVDC32(PIPEBSRC);
+ pipeb->htotal = PSB_RVDC32(HTOTAL_B);
+ pipeb->hblank = PSB_RVDC32(HBLANK_B);
+ pipeb->hsync = PSB_RVDC32(HSYNC_B);
+ pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
+ pipeb->vblank = PSB_RVDC32(VBLANK_B);
+ pipeb->vsync = PSB_RVDC32(VSYNC_B);
hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
@@ -469,12 +464,12 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
/* plane */
- regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
- regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
- regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
- regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
- regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
- regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+ pipeb->cntr = PSB_RVDC32(DSPBCNTR);
+ pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
+ pipeb->addr = PSB_RVDC32(DSPBBASE);
+ pipeb->surf = PSB_RVDC32(DSPBSURF);
+ pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
+ pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);
/* cursor B */
regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
@@ -483,7 +478,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
/* save palette */
for (i = 0; i < 256; i++)
- regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+ pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
}
/* restore HDMI register state */
@@ -492,6 +487,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
int i;
/* dpll */
@@ -503,13 +499,13 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
DRM_UDELAY(150);
/* pipe */
- PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC);
- PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B);
- PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B);
- PSB_WVDC32(regs->saveHSYNC_B, HSYNC_B);
- PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B);
- PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B);
- PSB_WVDC32(regs->saveVSYNC_B, VSYNC_B);
+ PSB_WVDC32(pipeb->src, PIPEBSRC);
+ PSB_WVDC32(pipeb->htotal, HTOTAL_B);
+ PSB_WVDC32(pipeb->hblank, HBLANK_B);
+ PSB_WVDC32(pipeb->hsync, HSYNC_B);
+ PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
+ PSB_WVDC32(pipeb->vblank, VBLANK_B);
+ PSB_WVDC32(pipeb->vsync, VSYNC_B);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
@@ -519,15 +515,15 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
- PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF);
+ PSB_WVDC32(pipeb->conf, PIPEBCONF);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
/* plane */
- PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF);
- PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE);
- PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF);
- PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR);
- PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF);
+ PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
+ PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
+ PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
+ PSB_WVDC32(pipeb->cntr, DSPBCNTR);
+ PSB_WVDC32(pipeb->surf, DSPBSURF);
/* cursor B */
PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
@@ -536,5 +532,5 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
/* restore palette */
for (i = 0; i < 256; i++)
- PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2));
+ PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 5e84fbde749b..88627e3ba1e3 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -250,7 +250,7 @@ static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
*/
static void oaktrail_hdmi_i2c_gpio_fix(void)
{
- void *base;
+ void __iomem *base;
unsigned int gpio_base = 0xff12c000;
int gpio_len = 0x1000;
u32 temp;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 654f32b22b21..558c77fb55ec 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -257,7 +257,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
mode_dev->panel_fixed_mode = NULL;
/* Use the firmware provided data on Moorestown */
- if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
+ if (dev_priv->has_gct) {
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
return;
@@ -371,7 +371,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
BRIGHTNESS_MAX_LEVEL);
mode_dev->panel_wants_dither = false;
- if (dev_priv->vbt_data.size != 0x00)
+ if (dev_priv->has_gct)
mode_dev->panel_wants_dither = (dev_priv->gct_data.
Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
if (dev_priv->lvds_dither)
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
new file mode 100644
index 000000000000..c430bd424681
--- /dev/null
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET 0x200
+#define OPREGION_ASLE_OFFSET 0x300
+#define OPREGION_VBT_OFFSET 0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI (1<<0)
+#define MBOX_SWSCI (1<<1)
+#define MBOX_ASLE (1<<2)
+
+struct opregion_header {
+ u8 signature[16];
+ u32 size;
+ u32 opregion_ver;
+ u8 bios_ver[32];
+ u8 vbios_ver[16];
+ u8 driver_ver[16];
+ u32 mboxes;
+ u8 reserved[164];
+} __packed;
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+ u32 drdy; /* driver readiness */
+ u32 csts; /* notification status */
+ u32 cevt; /* current event */
+ u8 rsvd1[20];
+ u32 didl[8]; /* supported display devices ID list */
+ u32 cpdl[8]; /* currently presented display list */
+ u32 cadl[8]; /* currently active display list */
+ u32 nadl[8]; /* next active devices list */
+ u32 aslp; /* ASL sleep time-out */
+ u32 tidx; /* toggle table index */
+ u32 chpd; /* current hotplug enable indicator */
+ u32 clid; /* current lid state*/
+ u32 cdck; /* current docking state */
+ u32 sxsw; /* Sx state resume */
+ u32 evts; /* ASL supported events */
+ u32 cnot; /* current OS notification */
+ u32 nrdy; /* driver status */
+ u8 rsvd2[60];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+ /*FIXME: add it later*/
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+ u32 ardy; /* driver readiness */
+ u32 aslc; /* ASLE interrupt command */
+ u32 tche; /* technology enabled indicator */
+ u32 alsi; /* current ALS illuminance reading */
+ u32 bclp; /* backlight brightness to set */
+ u32 pfit; /* panel fitting state */
+ u32 cblv; /* current brightness level */
+ u16 bclm[20]; /* backlight level duty cycle mapping table */
+ u32 cpfm; /* current panel fitting mode */
+ u32 epfm; /* enabled panel fitting modes */
+ u8 plut[74]; /* panel LUT and identifier */
+ u32 pfmb; /* PWM freq and min brightness */
+ u8 rsvd[102];
+} __packed;
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM (1 << 0)
+#define ASLE_SET_BACKLIGHT (1 << 1)
+#define ASLE_SET_PFIT (1 << 2)
+#define ASLE_SET_PWM_FREQ (1 << 3)
+#define ASLE_REQ_MSK 0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID (1<<31)
+
+static struct psb_intel_opregion *system_opregion;
+
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct backlight_device *bd = dev_priv->backlight_device;
+
+ DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ if (bd == NULL)
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
+ int max = bd->props.max_brightness;
+ bd->props.brightness = bclp * max / 255;
+ backlight_update_status(bd);
+ }
+
+ asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
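[editor note] A quick worked example of the scaling above, assuming a backlight device with max_brightness = 100: a firmware request of bclp = 128 gives props.brightness = 128 * 100 / 255 = 50, and cblv is reported back as (128 * 0x64) / 0xff = 50 with ASLE_CBLV_VALID (bit 31) set. The same arithmetic as a standalone sketch, illustrative only and not part of the patch:

/* Illustrative: the cblv value reported back to firmware for a given bclp */
static u32 example_asle_cblv(u32 bclp)
{
	/* bclp is assumed already masked to 0..255, as in asle_set_backlight() */
	return (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;	/* 128 -> 50, plus bit 31 */
}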
+
+void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+ if (!asle_req) {
+ DRM_DEBUG_DRIVER("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+ asle->aslc = asle_stat;
+}
+
+#define ASLE_ALS_EN (1<<0)
+#define ASLE_BLC_EN (1<<1)
+#define ASLE_PFIT_EN (1<<2)
+#define ASLE_PFMB_EN (1<<3)
+
+void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+
+	if (asle && system_opregion) {
+		/* Don't do this on Medfield or other non-PC-like devices; they
+		   use the bit for something different altogether */
+ psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+ psb_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+
+ asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN
+ | ASLE_PFMB_EN;
+ asle->ardy = 1;
+ }
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID (1<<1)
+#define ACPI_EV_DOCK (1<<2)
+
+
+static int psb_intel_opregion_video_event(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ /* The only video events relevant to opregion are 0x80. These indicate
+ either a docking event, lid switch or display switch request. In
+ Linux, these are handled by the dock, button and video drivers.
+ We might want to fix the video driver to be opregion-aware in
+ future, but right now we just indicate to the firmware that the
+ request has been handled */
+
+ struct opregion_acpi *acpi;
+
+ if (!system_opregion)
+ return NOTIFY_DONE;
+
+ acpi = system_opregion->acpi;
+ acpi->csts = 0;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block psb_intel_opregion_notifier = {
+ .notifier_call = psb_intel_opregion_video_event,
+};
+
+void psb_intel_opregion_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video
+ * module. We don't actually need to do anything with them. */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+
+ system_opregion = opregion;
+ register_acpi_notifier(&psb_intel_opregion_notifier);
+ }
+}
+
+void psb_intel_opregion_fini(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi->drdy = 0;
+
+ system_opregion = NULL;
+ unregister_acpi_notifier(&psb_intel_opregion_notifier);
+ }
+
+ /* just clear all opregion memory pointers now */
+ iounmap(opregion->header);
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+}
+
+int psb_intel_opregion_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_opregion *opregion = &dev_priv->opregion;
+ u32 opregion_phy, mboxes;
+ void __iomem *base;
+ int err = 0;
+
+ pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+ if (opregion_phy == 0) {
+ DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
+ return -ENOTSUPP;
+ }
+ DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
+ base = acpi_os_ioremap(opregion_phy, 8*1024);
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ opregion->header = base;
+ opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+ opregion->lid_state = base + ACPI_CLID;
+
+ mboxes = opregion->header->mboxes;
+ if (mboxes & MBOX_ACPI) {
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ }
+
+ if (mboxes & MBOX_ASLE) {
+ DRM_DEBUG_DRIVER("ASLE supported\n");
+ opregion->asle = base + OPREGION_ASLE_OFFSET;
+ }
+
+ return 0;
+
+err_out:
+ iounmap(base);
+ return err;
+}
+
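[editor note] For context, psb_intel_opregion_setup() above maps the firmware-resident OpRegion whose physical address is published in PCI config space at 0xFC (ASLS), checks the "IntelGraphicsMem" signature, and then derives the mailbox pointers from fixed offsets. A rough sketch of how the driver load/unload paths in this series are expected to use these helpers (illustrative ordering only; the wrapper name is hypothetical):

static int example_opregion_lifecycle(struct drm_device *dev)
{
	int ret;

	ret = psb_intel_opregion_setup(dev);	/* map ASLS, locate ACPI/ASLE mailboxes */
	if (ret)
		return ret;			/* -ENOTSUPP when the firmware has no OpRegion */

	psb_intel_opregion_init(dev);		/* set drdy, register the ACPI notifier */
	psb_intel_opregion_enable_asle(dev);	/* unmask BLC pipe events, set tche/ardy */

	/* ... ASLE interrupts are serviced by psb_intel_opregion_asle_intr() ... */

	psb_intel_opregion_fini(dev);		/* clear drdy, unregister, iounmap */
	return 0;
}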
diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/opregion.h
index d946bc1b17bf..4a90f8b0e16c 100644
--- a/drivers/gpu/drm/gma500/intel_opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Intel Corporation
+ * Copyright 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,62 +20,35 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
- * FIXME: resolve with the i915 version
*/
-#include "psb_drv.h"
+#if defined(CONFIG_ACPI)
+extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
+extern void psb_intel_opregion_init(struct drm_device *dev);
+extern void psb_intel_opregion_fini(struct drm_device *dev);
+extern int psb_intel_opregion_setup(struct drm_device *dev);
+extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
-struct opregion_header {
- u8 signature[16];
- u32 size;
- u32 opregion_ver;
- u8 bios_ver[32];
- u8 vbios_ver[16];
- u8 driver_ver[16];
- u32 mboxes;
- u8 reserved[164];
-} __packed;
+#else
-struct opregion_apci {
- /*FIXME: add it later*/
-} __packed;
-
-struct opregion_swsci {
- /*FIXME: add it later*/
-} __packed;
-
-struct opregion_acpi {
- /*FIXME: add it later*/
-} __packed;
-
-int gma_intel_opregion_init(struct drm_device *dev)
+static inline void psb_intel_opregion_asle_intr(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 opregion_phy;
- void *base;
- u32 *lid_state;
-
- dev_priv->lid_state = NULL;
-
- pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
- if (opregion_phy == 0)
- return -ENOTSUPP;
+}
- base = ioremap(opregion_phy, 8*1024);
- if (!base)
- return -ENOMEM;
+static inline void psb_intel_opregion_init(struct drm_device *dev)
+{
+}
- lid_state = base + 0x01ac;
+static inline void psb_intel_opregion_fini(struct drm_device *dev)
+{
+}
- dev_priv->lid_state = lid_state;
- dev_priv->lid_last_state = readl(lid_state);
+static inline int psb_intel_opregion_setup(struct drm_device *dev)
+{
return 0;
}
-int gma_intel_opregion_exit(struct drm_device *dev)
+static inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
- if (dev_priv->lid_state)
- iounmap(dev_priv->lid_state);
- return 0;
}
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 95d163e4f1f4..5971bc82b765 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -144,6 +144,10 @@ static int psb_backlight_init(struct drm_device *dev)
psb_backlight_device->props.max_brightness = 100;
backlight_update_status(psb_backlight_device);
dev_priv->backlight_device = psb_backlight_device;
+
+ /* This must occur after the backlight is properly initialised */
+ psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -197,7 +201,8 @@ static int psb_save_display_registers(struct drm_device *dev)
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- connector->funcs->save(connector);
+ if (connector->funcs->save)
+ connector->funcs->save(connector);
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -235,7 +240,8 @@ static int psb_restore_display_registers(struct drm_device *dev)
crtc->funcs->restore(crtc);
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- connector->funcs->restore(connector);
+ if (connector->funcs->restore)
+ connector->funcs->restore(connector);
mutex_unlock(&dev->mode_config.mutex);
return 0;
@@ -289,17 +295,73 @@ static void psb_get_core_freq(struct drm_device *dev)
}
}
+/* Poulsbo */
+static const struct psb_offset psb_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
static int psb_chip_setup(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->regmap = psb_regmap;
psb_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
- gma_intel_opregion_init(dev);
+ psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
return 0;
}
static void psb_chip_teardown(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
@@ -308,6 +370,9 @@ const struct psb_ops psb_chip_ops = {
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
+ .hdmi_mask = (1 << 0),
+ .lvds_mask = (1 << 1),
+ .cursor_needs_phys = 1,
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
.chip_teardown = psb_chip_teardown,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index c34adf9d910a..a8858a907f47 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -79,6 +79,14 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
#endif
{ 0, }
};
@@ -144,10 +152,6 @@ static void psb_lastclose(struct drm_device *dev)
return;
}
-static void psb_do_takedown(struct drm_device *dev)
-{
-}
-
static int psb_do_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -172,24 +176,6 @@ static int psb_do_init(struct drm_device *dev)
dev_priv->gatt_free_offset = pg->mmu_gatt_start +
(stolen_gtt << PAGE_SHIFT) * 1024;
- if (1 || drm_debug) {
- uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
- uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
- DRM_INFO("SGX core id = 0x%08x\n", core_id);
- DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
- _PSB_CC_REVISION_MAJOR_SHIFT,
- (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
- _PSB_CC_REVISION_MINOR_SHIFT);
- DRM_INFO
- ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
- _PSB_CC_REVISION_MAINTENANCE_SHIFT,
- (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
- _PSB_CC_REVISION_DESIGNER_SHIFT);
- }
-
-
spin_lock_init(&dev_priv->irqmask_lock);
spin_lock_init(&dev_priv->lock_2d);
@@ -204,7 +190,6 @@ static int psb_do_init(struct drm_device *dev)
PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
return 0;
out_err:
- psb_do_takedown(dev);
return ret;
}
@@ -214,18 +199,16 @@ static int psb_driver_unload(struct drm_device *dev)
/* Kill vblank etc here */
- gma_backlight_exit(dev);
-
- psb_modeset_cleanup(dev);
if (dev_priv) {
- psb_lid_timer_takedown(dev_priv);
- gma_intel_opregion_exit(dev);
+ if (dev_priv->backlight_device)
+ gma_backlight_exit(dev);
+ psb_modeset_cleanup(dev);
if (dev_priv->ops->chip_teardown)
dev_priv->ops->chip_teardown(dev);
- psb_do_takedown(dev);
+ psb_intel_opregion_fini(dev);
if (dev_priv->pf_pd) {
psb_mmu_free_pagedir(dev_priv->pf_pd);
@@ -246,6 +229,7 @@ static int psb_driver_unload(struct drm_device *dev)
}
psb_gtt_takedown(dev);
if (dev_priv->scratch_page) {
+ set_pages_wb(dev_priv->scratch_page, 1);
__free_page(dev_priv->scratch_page);
dev_priv->scratch_page = NULL;
}
@@ -258,15 +242,13 @@ static int psb_driver_unload(struct drm_device *dev)
dev_priv->sgx_reg = NULL;
}
+ /* Destroy VBT data */
+ psb_intel_destroy_bios(dev);
+
kfree(dev_priv);
dev->dev_private = NULL;
-
- /*destroy VBT data*/
- psb_intel_destroy_bios(dev);
}
-
gma_power_uninit(dev);
-
return 0;
}
@@ -290,11 +272,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
pci_set_master(dev->pdev);
- if (!IS_PSB(dev)) {
- if (pci_enable_msi(dev->pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
- }
-
dev_priv->num_pipe = dev_priv->ops->pipes;
resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
@@ -309,6 +286,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->sgx_reg)
goto out_err;
+ psb_intel_opregion_setup(dev);
+
ret = dev_priv->ops->chip_setup(dev);
if (ret)
goto out_err;
@@ -348,10 +327,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
-/* igd_opregion_init(&dev_priv->opregion_dev); */
acpi_video_register();
- if (dev_priv->lid_state)
- psb_lid_timer_init(dev_priv);
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
@@ -370,8 +346,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_install(dev);
+
+ drm_irq_install(dev);
dev->vblank_disable_allowed = 1;
@@ -398,6 +374,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
return ret;
+ psb_intel_opregion_enable_asle(dev);
#if 0
/*enable runtime pm at last*/
pm_runtime_enable(&dev->pdev->dev);
@@ -619,7 +596,7 @@ static const struct dev_pm_ops psb_pm_ops = {
.runtime_idle = psb_runtime_idle,
};
-static struct vm_operations_struct psb_gem_vm_ops = {
+static const struct vm_operations_struct psb_gem_vm_ops = {
.fault = psb_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 40ce2c9bc2e4..1bd115ecefe1 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -30,6 +30,7 @@
#include "psb_intel_drv.h"
#include "gtt.h"
#include "power.h"
+#include "opregion.h"
#include "oaktrail.h"
/* Append new drm mode definition here, align with libdrm definition */
@@ -120,6 +121,7 @@ enum {
#define PSB_HWSTAM 0x2098
#define PSB_INSTPM 0x20C0
#define PSB_INT_IDENTITY_R 0x20A4
+#define _PSB_IRQ_ASLE (1<<0)
#define _MDFLD_PIPEC_EVENT_FLAG (1<<2)
#define _MDFLD_PIPEC_VBLANK_FLAG (1<<3)
#define _PSB_DPST_PIPEB_FLAG (1<<4)
@@ -130,6 +132,7 @@ enum {
#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
#define _MDFLD_MIPIA_FLAG (1<<16)
#define _MDFLD_MIPIC_FLAG (1<<17)
+#define _PSB_IRQ_DISP_HOTSYNC (1<<17)
#define _PSB_IRQ_SGX_FLAG (1<<18)
#define _PSB_IRQ_MSVDX_FLAG (1<<19)
#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
@@ -257,7 +260,8 @@ struct psb_intel_opregion {
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
- int enabled;
+ void *vbt;
+ u32 __iomem *lid_state;
};
struct sdvo_device_mapping {
@@ -277,50 +281,72 @@ struct intel_gmbus {
};
/*
+ * Register offset maps
+ */
+
+struct psb_offset {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 surf;
+ u32 addr;
+ u32 base;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette;
+};
+
+/*
* Register save state. This is used to hold the context when the
* device is powered off. In the case of Oaktrail this can (but does not
 * yet) include screen blank. Operations occurring during the save
* update the register cache instead.
*/
+
+/*
+ * Common status for pipes.
+ */
+struct psb_pipe {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 base;
+ u32 surf;
+ u32 addr;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette[256];
+};
+
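[editor note] Because struct psb_offset (where a register lives) and struct psb_pipe (its cached value) carry one field per register, the save/restore paths can treat all pipes uniformly through dev_priv->regmap[i] and dev_priv->regs.pipe[i] instead of hand-written A/B/C variants. A minimal sketch of that pairing, with a hypothetical helper name; PSB_RVDC32 and the field names are the ones used in this patch:

/* Illustrative: cache one pipe's timing registers into the save area */
static void example_save_pipe_timings(struct drm_psb_private *dev_priv, int i)
{
	const struct psb_offset *map = &dev_priv->regmap[i];
	struct psb_pipe *p = &dev_priv->regs.pipe[i];

	p->htotal = PSB_RVDC32(map->htotal);
	p->hblank = PSB_RVDC32(map->hblank);
	p->hsync = PSB_RVDC32(map->hsync);
	p->vtotal = PSB_RVDC32(map->vtotal);
	p->vblank = PSB_RVDC32(map->vblank);
	p->vsync = PSB_RVDC32(map->vsync);
}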
struct psb_state {
- uint32_t saveDSPACNTR;
- uint32_t saveDSPBCNTR;
- uint32_t savePIPEACONF;
- uint32_t savePIPEBCONF;
- uint32_t savePIPEASRC;
- uint32_t savePIPEBSRC;
- uint32_t saveFPA0;
- uint32_t saveFPA1;
- uint32_t saveDPLL_A;
- uint32_t saveDPLL_A_MD;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPABASE;
- uint32_t saveDSPASURF;
- uint32_t saveDSPASTATUS;
- uint32_t saveFPB0;
- uint32_t saveFPB1;
- uint32_t saveDPLL_B;
- uint32_t saveDPLL_B_MD;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBBASE;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBSTATUS;
uint32_t saveVCLK_DIVISOR_VGA0;
uint32_t saveVCLK_DIVISOR_VGA1;
uint32_t saveVCLK_POST_DIV;
@@ -335,14 +361,8 @@ struct psb_state {
uint32_t savePP_CONTROL;
uint32_t savePP_CYCLE;
uint32_t savePFIT_CONTROL;
- uint32_t savePaletteA[256];
- uint32_t savePaletteB[256];
uint32_t saveCLOCKGATING;
uint32_t saveDSPARB;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPAADDR;
- uint32_t saveDSPBADDR;
uint32_t savePFIT_AUTO_RATIOS;
uint32_t savePFIT_PGM_RATIOS;
uint32_t savePP_ON_DELAYS;
@@ -350,8 +370,6 @@ struct psb_state {
uint32_t savePP_DIVISOR;
uint32_t saveBCLRPAT_A;
uint32_t saveBCLRPAT_B;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPBLINOFF;
uint32_t savePERF_MODE;
uint32_t saveDSPFW1;
uint32_t saveDSPFW2;
@@ -366,8 +384,6 @@ struct psb_state {
uint32_t saveDSPBCURSOR_BASE;
uint32_t saveDSPACURSOR_POS;
uint32_t saveDSPBCURSOR_POS;
- uint32_t save_palette_a[256];
- uint32_t save_palette_b[256];
uint32_t saveOV_OVADD;
uint32_t saveOV_OGAMC0;
uint32_t saveOV_OGAMC1;
@@ -390,64 +406,7 @@ struct psb_state {
};
struct medfield_state {
- uint32_t saveDPLL_A;
- uint32_t saveFPA0;
- uint32_t savePIPEACONF;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t savePIPEASRC;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPASURF;
- uint32_t saveDSPACNTR;
- uint32_t saveDSPASTATUS;
- uint32_t save_palette_a[256];
uint32_t saveMIPI;
-
- uint32_t saveDPLL_B;
- uint32_t saveFPB0;
- uint32_t savePIPEBCONF;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t savePIPEBSRC;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBLINOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBCNTR;
- uint32_t saveDSPBSTATUS;
- uint32_t save_palette_b[256];
-
- uint32_t savePIPECCONF;
- uint32_t saveHTOTAL_C;
- uint32_t saveHBLANK_C;
- uint32_t saveHSYNC_C;
- uint32_t saveVTOTAL_C;
- uint32_t saveVBLANK_C;
- uint32_t saveVSYNC_C;
- uint32_t savePIPECSRC;
- uint32_t saveDSPCSTRIDE;
- uint32_t saveDSPCLINOFF;
- uint32_t saveDSPCTILEOFF;
- uint32_t saveDSPCSIZE;
- uint32_t saveDSPCPOS;
- uint32_t saveDSPCSURF;
- uint32_t saveDSPCCNTR;
- uint32_t saveDSPCSTATUS;
- uint32_t save_palette_c[256];
uint32_t saveMIPI_C;
uint32_t savePFIT_CONTROL;
@@ -476,6 +435,7 @@ struct cdv_state {
};
struct psb_save_area {
+ struct psb_pipe pipe[3];
uint32_t saveBSM;
uint32_t saveVBT;
union {
@@ -494,15 +454,19 @@ struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
const struct psb_ops *ops;
+ const struct psb_offset *regmap;
+
+ struct child_device_config *child_dev;
+ int child_dev_num;
struct psb_gtt gtt;
/* GTT Memory manager */
struct psb_gtt_mm *gtt_mm;
struct page *scratch_page;
- u32 *gtt_map;
+ u32 __iomem *gtt_map;
uint32_t stolen_base;
- void *vram_addr;
+ u8 __iomem *vram_addr;
unsigned long vram_stolen_size;
int gtt_initialized;
u16 gmch_ctrl; /* Saved GTT setup */
@@ -518,8 +482,8 @@ struct drm_psb_private {
* Register base
*/
- uint8_t *sgx_reg;
- uint8_t *vdc_reg;
+ uint8_t __iomem *sgx_reg;
+ uint8_t __iomem *vdc_reg;
uint32_t gatt_free_offset;
/*
@@ -543,6 +507,7 @@ struct drm_psb_private {
* Modesetting
*/
struct psb_intel_mode_device mode_dev;
+ bool modeset; /* true if we have done the mode_device setup */
struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
@@ -605,7 +570,7 @@ struct drm_psb_private {
int rpm_enabled;
/* MID specific */
- struct oaktrail_vbt vbt_data;
+ bool has_gct;
struct oaktrail_gct_data gct_data;
/* Oaktrail HDMI state */
@@ -621,6 +586,11 @@ struct drm_psb_private {
uint32_t msi_addr;
uint32_t msi_data;
+ /*
+ * Hotplug handling
+ */
+
+ struct work_struct hotplug_work;
/*
* LID-Switch
@@ -628,7 +598,6 @@ struct drm_psb_private {
spinlock_t lid_lock;
struct timer_list lid_timer;
struct psb_intel_opregion opregion;
- u32 *lid_state;
u32 lid_last_state;
/*
@@ -669,6 +638,8 @@ struct drm_psb_private {
u32 dspcntr[3];
int mdfld_panel_id;
+
+ bool dplla_96mhz; /* DPLL data from the VBT */
};
@@ -682,6 +653,9 @@ struct psb_ops {
int pipes; /* Number of output pipes */
int crtcs; /* Number of CRTCs */
int sgx_offset; /* Base offset of SGX device */
+ int hdmi_mask; /* Mask of HDMI CRTCs */
+ int lvds_mask; /* Mask of LVDS CRTCs */
+	int cursor_needs_phys; /* If the cursor base reg needs a physical address */
/* Sub functions */
struct drm_crtc_helper_funcs const *crtc_helper;
@@ -690,9 +664,13 @@ struct psb_ops {
/* Setup hooks */
int (*chip_setup)(struct drm_device *dev);
void (*chip_teardown)(struct drm_device *dev);
+	/* Optional helper called after modeset */
+ void (*errata)(struct drm_device *dev);
/* Display management hooks */
int (*output_init)(struct drm_device *dev);
+ int (*hotplug)(struct drm_device *dev);
+ void (*hotplug_enable)(struct drm_device *dev, bool on);
/* Power management hooks */
void (*init_pm)(struct drm_device *dev);
int (*save_regs)(struct drm_device *dev);
@@ -789,12 +767,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
/*
- * intel_opregion.c
- */
-extern int gma_intel_opregion_init(struct drm_device *dev);
-extern int gma_intel_opregion_exit(struct drm_device *dev);
-
-/*
* framebuffer.c
*/
extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 2616558457c8..36c3c99612f6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -337,15 +337,12 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -367,9 +364,9 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -392,18 +389,10 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
psb_gtt_unpin(psbfb->gtt);
goto psb_intel_pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
-
+ REG_WRITE(map->cntr, dspcntr);
- if (0 /* FIXMEAC - check what PSB needs */) {
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
- } else {
- REG_WRITE(dspbase, start + offset);
- REG_READ(dspbase);
- }
+ REG_WRITE(map->base, start + offset);
+ REG_READ(map->base);
psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
@@ -424,14 +413,10 @@ psb_intel_pipe_set_base_exit:
static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
- /* struct drm_i915_private *dev_priv = dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
/* XXX: When our outputs are all unaware of DPMS modes other than off
@@ -442,34 +427,34 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
psb_intel_crtc_load_lut(crtc);
@@ -487,29 +472,29 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for vblank for the disable to take effect. */
psb_intel_wait_for_vblank(dev);
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -589,22 +574,11 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct psb_intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -690,7 +664,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -712,9 +686,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(mode);
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
udelay(150);
}
@@ -747,45 +721,45 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_READ(LVDS);
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
- REG_READ(dpll_reg);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
@@ -799,10 +773,10 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+ int palreg = map->palette;
int i;
/* The clocks have to be on to load the palette. */
@@ -811,12 +785,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
switch (psb_intel_crtc->pipe) {
case 0:
- break;
case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
break;
default:
dev_err(dev->dev, "Illegal Pipe Number.\n");
@@ -836,7 +805,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
- dev_priv->regs.psb.save_palette_a[i] =
+ dev_priv->regs.pipe[0].palette[i] =
((psb_intel_crtc->lut_r[i] +
psb_intel_crtc->lut_adj[i]) << 16) |
((psb_intel_crtc->lut_g[i] +
@@ -854,11 +823,10 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
static void psb_intel_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -867,27 +835,27 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
return;
}
- crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
- crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
- crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
- crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
- crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
- crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
- crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
- crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
- crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
- crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
- crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
- crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
- crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
/*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
- crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
- crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+ crtc_state->saveDSPBASE = REG_READ(map->base);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
@@ -898,12 +866,10 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
static void psb_intel_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private * dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -913,45 +879,45 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc)
}
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ REG_WRITE(map->dpll,
crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_READ(map->dpll);
udelay(150);
}
- REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
- REG_READ(pipeA ? FPA0 : FPB0);
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
- REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
- REG_READ(pipeA ? FPA1 : FPB1);
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
- REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
udelay(150);
- REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
- REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
- REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
- REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
- REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
- REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
- REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
- REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
- REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
- REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
- REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
psb_intel_wait_for_vblank(dev);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
@@ -962,6 +928,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
@@ -969,8 +936,10 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t temp;
size_t addr = 0;
struct gtt_range *gt;
+ struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
struct drm_gem_object *obj;
- int ret;
+ void *tmp_dst, *tmp_src;
+ int ret, i, cursor_pages;
 	/* If we want to turn off the cursor, ignore width and height */
if (!handle) {
@@ -1019,10 +988,32 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
return ret;
}
+ if (dev_priv->ops->cursor_needs_phys) {
+ if (cursor_gt == NULL) {
+ dev_err(dev->dev, "No hardware cursor mem available");
+ return -ENOMEM;
+ }
- addr = gt->offset; /* Or resource.start ??? */
+ /* Prevent overflow */
+ if (gt->npage > 4)
+ cursor_pages = 4;
+ else
+ cursor_pages = gt->npage;
+
+ /* Copy the cursor to cursor mem */
+ tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+ for (i = 0; i < cursor_pages; i++) {
+ tmp_src = kmap(gt->pages[i]);
+ memcpy(tmp_dst, tmp_src, PAGE_SIZE);
+ kunmap(gt->pages[i]);
+ tmp_dst += PAGE_SIZE;
+ }
- psb_intel_crtc->cursor_addr = addr;
+ addr = psb_intel_crtc->cursor_addr;
+ } else {
+ addr = gt->offset; /* Or resource.start ??? */
+ psb_intel_crtc->cursor_addr = addr;
+ }
temp = 0;
/* set the pipe for the cursor */
@@ -1115,34 +1106,30 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct psb_intel_clock_t clock;
bool is_lvds;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
- dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = REG_READ(map->fp0);
else
- fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
- dpll = (pipe == 0) ?
- dev_priv->regs.psb.saveDPLL_A :
- dev_priv->regs.psb.saveDPLL_B;
+ dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA0 :
- dev_priv->regs.psb.saveFPB0;
+ fp = p->fp0;
else
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA1 :
- dev_priv->regs.psb.saveFPB1;
+ fp = p->fp1;
is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
LVDS_PORT_EN);
@@ -1202,26 +1189,20 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
int vtot;
int vsync;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
if (gma_power_begin(dev, false)) {
- htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
- htot = (pipe == 0) ?
- dev_priv->regs.psb.saveHTOTAL_A :
- dev_priv->regs.psb.saveHTOTAL_B;
- hsync = (pipe == 0) ?
- dev_priv->regs.psb.saveHSYNC_A :
- dev_priv->regs.psb.saveHSYNC_B;
- vtot = (pipe == 0) ?
- dev_priv->regs.psb.saveVTOTAL_A :
- dev_priv->regs.psb.saveVTOTAL_B;
- vsync = (pipe == 0) ?
- dev_priv->regs.psb.saveVSYNC_A :
- dev_priv->regs.psb.saveVSYNC_B;
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -1257,6 +1238,9 @@ void psb_intel_crtc_destroy(struct drm_crtc *crtc)
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
psb_intel_crtc->cursor_obj = NULL;
}
+
+ if (psb_intel_crtc->cursor_gt != NULL)
+ psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
kfree(psb_intel_crtc->crtc_state);
drm_crtc_cleanup(crtc);
kfree(psb_intel_crtc);
@@ -1285,13 +1269,33 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
* Set the default value of cursor control and base register
* to zero. This is a workaround for h/w defect on Oaktrail
*/
-static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+static void psb_intel_cursor_init(struct drm_device *dev,
+ struct psb_intel_crtc *psb_intel_crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+ struct gtt_range *cursor_gt;
+
+ if (dev_priv->ops->cursor_needs_phys) {
+ /* Allocate 4 pages of stolen mem for a hardware cursor. That
+ * is enough for the 64 x 64 ARGB cursors we support.
+ */
+ cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
+ if (!cursor_gt) {
+ psb_intel_crtc->cursor_gt = NULL;
+ goto out;
+ }
+ psb_intel_crtc->cursor_gt = cursor_gt;
+ psb_intel_crtc->cursor_addr = dev_priv->stolen_base +
+ cursor_gt->offset;
+ } else {
+ psb_intel_crtc->cursor_gt = NULL;
+ }
- REG_WRITE(control[pipe], 0);
- REG_WRITE(base[pipe], 0);
+out:
+ REG_WRITE(control[psb_intel_crtc->pipe], 0);
+ REG_WRITE(base[psb_intel_crtc->pipe], 0);
}
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
@@ -1357,7 +1361,7 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
psb_intel_crtc->mode_set.connectors =
(struct drm_connector **) (psb_intel_crtc + 1);
psb_intel_crtc->mode_set.num_connectors = 0;
- psb_intel_cursor_init(dev, pipe);
+ psb_intel_cursor_init(dev, psb_intel_crtc);
}
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index f40535e56689..2515f83248cb 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -106,11 +106,6 @@ struct psb_intel_mode_device {
size_t(*bo_offset) (struct drm_device *dev, void *bo);
/*
- * Cursor (Can go ?)
- */
- int cursor_needs_physical;
-
- /*
* LVDS info
*/
int backlight_duty_cycle; /* restore backlight to this value */
@@ -176,6 +171,7 @@ struct psb_intel_crtc {
int pipe;
int plane;
uint32_t cursor_addr;
+ struct gtt_range *cursor_gt;
u8 lut_r[256], lut_g[256], lut_b[256];
u8 lut_adj[256];
struct psb_intel_framebuffer *fbdev_fb;
@@ -193,6 +189,9 @@ struct psb_intel_crtc {
/*crtc mode setting flags*/
u32 mode_flags;
+ bool active;
+ bool crtc_enable;
+
/* Saved Crtc HW states */
struct psb_intel_crtc_state *crtc_state;
};
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index e89d3a2e8fdc..8e8c8efb0a89 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -91,6 +91,9 @@
#define BLC_PWM_CTL 0x61254
#define BLC_PWM_CTL2 0x61250
+#define PWM_ENABLE (1 << 31)
+#define PWM_LEGACY_MODE (1 << 30)
+#define PWM_PIPE_B (1 << 29)
#define BLC_PWM_CTL_C 0x62254
#define BLC_PWM_CTL2_C 0x62250
#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
@@ -216,7 +219,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_FPA0h1_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_LOCK (1 << 15) /* CDV */
/*
@@ -343,6 +346,9 @@
#define FP_M2_DIV_SHIFT 0
#define PORT_HOTPLUG_EN 0x61110
+#define HDMIB_HOTPLUG_INT_EN (1 << 29)
+#define HDMIC_HOTPLUG_INT_EN (1 << 28)
+#define HDMID_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -501,10 +507,12 @@
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
#define PIPE_TE_ENABLE (1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22)
#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
#define PIPE_VSYNC_ENABL (1UL << 25)
#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
+#define PIPE_FIFO_UNDERRUN (1UL << 31)
#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
PIPE_HDMI_AUDIO_BUFFER_DONE)
#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
@@ -569,12 +577,27 @@ struct dpst_guardband {
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
+#define FW_BLC_SELF 0x20e0
+#define FW_BLC_SELF_EN (1<<15)
+
#define DSPARB 0x70030
#define DSPFW1 0x70034
+#define DSP_FIFO_SR_WM_MASK 0xFF800000
+#define DSP_FIFO_SR_WM_SHIFT 23
+#define CURSOR_B_FIFO_WM_MASK 0x003F0000
+#define CURSOR_B_FIFO_WM_SHIFT 16
#define DSPFW2 0x70038
+#define CURSOR_A_FIFO_WM_MASK 0x3F00
+#define CURSOR_A_FIFO_WM_SHIFT 8
+#define DSP_PLANE_C_FIFO_WM_MASK 0x7F
+#define DSP_PLANE_C_FIFO_WM_SHIFT 0
#define DSPFW3 0x7003c
#define DSPFW4 0x70050
#define DSPFW5 0x70054
+#define DSP_PLANE_B_FIFO_WM1_SHIFT 24
+#define DSP_PLANE_A_FIFO_WM1_SHIFT 16
+#define CURSOR_B_FIFO_WM1_SHIFT 8
+#define CURSOR_FIFO_SR_WM1_SHIFT 0
#define DSPFW6 0x70058
#define DSPCHICKENBIT 0x70400
#define DSPACNTR 0x70180
@@ -1290,6 +1313,15 @@ No status bits are changed.
#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
#define SB_N_CB_TUNE_SHIFT 24
+/* the bit 14:13 is used to select between the different reference clock for Pipe A/B */
+#define SB_REF_DPLLA 0x8010
+#define SB_REF_DPLLB 0x8030
+#define REF_CLK_MASK (0x3 << 13)
+#define REF_CLK_CORE (0 << 13)
+#define REF_CLK_DPLL (1 << 13)
+#define REF_CLK_DPLLA (2 << 13)
+/* For the DPLL B, it will use the reference clk from DPLL A when using (2 << 13) */
+
#define _SB_REF_A 0x8018
#define _SB_REF_B 0x8038
#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
@@ -1313,6 +1345,7 @@ No status bits are changed.
#define LANE_PLL_MASK (0x7 << 20)
#define LANE_PLL_ENABLE (0x3 << 20)
+#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21))
#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 36330cabcea2..d39b15be7649 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1141,7 +1141,6 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1161,11 +1160,6 @@ static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- /* We assume worst case scenario of 32 bpp here, since we don't know */
- if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
- dev_priv->vram_stolen_size)
- return MODE_MEM;
-
return MODE_OK;
}
@@ -2044,8 +2038,7 @@ psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
struct drm_device *dev = connector->base.base.dev;
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
- intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_attach_broadcast_rgb_property(&connector->base.base);
*/
}
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 1869586457b1..8652cdf3f03f 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -190,6 +190,9 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
*/
static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
+ if (vdc_stat & _PSB_IRQ_ASLE)
+ psb_intel_opregion_asle_intr(dev);
+
if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
mid_pipe_event_handler(dev, 0);
@@ -199,11 +202,9 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
{
- struct drm_device *dev = (struct drm_device *) arg;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
-
- uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
+ struct drm_device *dev = arg;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
int handled = 0;
spin_lock(&dev_priv->irqmask_lock);
@@ -220,6 +221,8 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
if (vdc_stat & _PSB_IRQ_SGX_FLAG)
sgx_int = 1;
+ if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
+ hotplug_int = 1;
vdc_stat &= dev_priv->vdc_irq_mask;
spin_unlock(&dev_priv->irqmask_lock);
@@ -241,6 +244,13 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
handled = 1;
}
+ /* Note: this bit has other meanings on some devices, so we will
+ need to address that later if it ever matters */
+ if (hotplug_int && dev_priv->ops->hotplug) {
+ handled = dev_priv->ops->hotplug(dev);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+ }
+
PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
DRM_READMEMORYBARRIER();
@@ -273,6 +283,11 @@ void psb_irq_preinstall(struct drm_device *dev)
dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
*/
+ /* Revisit this area - want per device masks ? */
+ if (dev_priv->ops->hotplug)
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
+ dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE;
+
/* This register is safe even if display island is off */
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
@@ -305,18 +320,23 @@ int psb_irq_postinstall(struct drm_device *dev)
else
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+ if (dev_priv->ops->hotplug_enable)
+ dev_priv->ops->hotplug_enable(dev, true);
+
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
return 0;
}
void psb_irq_uninstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ if (dev_priv->ops->hotplug_enable)
+ dev_priv->ops->hotplug_enable(dev, false);
+
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
if (dev->vblank_enabled[0])
@@ -406,7 +426,7 @@ void psb_irq_turn_off_dpst(struct drm_device *dev)
psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
- PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE),
+ PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
PWM_CONTROL_LOGIC);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index b867aabe6bf3..1d2ebb5e530f 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -29,7 +29,7 @@ static void psb_lid_timer_func(unsigned long data)
struct drm_device *dev = (struct drm_device *)dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
- u32 *lid_state = dev_priv->lid_state;
+ u32 __iomem *lid_state = dev_priv->opregion.lid_state;
u32 pp_status;
if (readl(lid_state) == dev_priv->lid_last_state)
@@ -40,10 +40,16 @@ static void psb_lid_timer_func(unsigned long data)
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
do {
pp_status = REG_READ(PP_STATUS);
- } while ((pp_status & PP_ON) == 0);
+ } while ((pp_status & PP_ON) == 0 &&
+ (pp_status & PP_SEQUENCE_MASK) != 0);
- /*FIXME: should be backlight level before*/
- psb_intel_lvds_set_brightness(dev, 100);
+ if (REG_READ(PP_STATUS) & PP_ON) {
+ /*FIXME: should be backlight level before*/
+ psb_intel_lvds_set_brightness(dev, 100);
+ } else {
+ DRM_DEBUG("LVDS panel never powered up");
+ return;
+ }
} else {
psb_intel_lvds_set_brightness(dev, 0);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index f920fb5e42b6..fa9439159ebd 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
return -EINVAL;
/* This is all entirely broken */
- down_write(&current->mm->mmap_sem);
old_fops = file_priv->filp->f_op;
file_priv->filp->f_op = &i810_buffer_fops;
dev_priv->mmap_buffer = buf;
- buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+ buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
PROT_READ | PROT_WRITE,
MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
}
- up_write(&current->mm->mmap_sem);
return retcode;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ce7fc77678b4..2e9268da58d8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,17 +11,21 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
+ i915_gem_stolen.o \
i915_gem_tiling.o \
+ i915_sysfs.o \
i915_trace_points.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
intel_bios.o \
+ intel_ddi.o \
intel_dp.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
intel_panel.o \
+ intel_pm.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
@@ -34,7 +38,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
- dvo_sil164.o
+ dvo_sil164.o \
+ i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6162a1681f0..5363e9c66c27 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -47,7 +47,6 @@ enum {
FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
- DEFERRED_FREE_LIST,
};
static const char *yesno(int v)
@@ -178,18 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
- case PINNED_LIST:
- seq_printf(m, "Pinned:\n");
- head = &dev_priv->mm.pinned_list;
- break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
- case DEFERRED_FREE_LIST:
- seq_printf(m, "Deferred free:\n");
- head = &dev_priv->mm.deferred_free_list;
- break;
default:
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
@@ -252,21 +243,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.pinned_list, mm_list);
- seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.deferred_free_list, mm_list);
- seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
- count, mappable_count, size, mappable_size);
-
- size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (obj->fault_mappable) {
size += obj->gtt_space->size;
@@ -294,6 +275,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
+ uintptr_t list = (uintptr_t) node->info_ent->data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
@@ -305,6 +287,9 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (list == PINNED_LIST && obj->pin_count == 0)
+ continue;
+
seq_printf(m, " ");
describe_obj(m, obj);
seq_printf(m, "\n");
@@ -321,7 +306,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return 0;
}
-
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -430,10 +414,6 @@ static void i915_ring_seqno_info(struct seq_file *m,
if (ring->get_seqno) {
seq_printf(m, "Current sequence (%s): %d\n",
ring->name, ring->get_seqno(ring));
- seq_printf(m, "Waiter sequence (%s): %d\n",
- ring->name, ring->waiting_seqno);
- seq_printf(m, "IRQ sequence (%s): %d\n",
- ring->name, ring->irq_seqno);
}
}
@@ -468,7 +448,45 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
if (ret)
return ret;
- if (!HAS_PCH_SPLIT(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "Display IER:\t%08x\n",
+ I915_READ(VLV_IER));
+ seq_printf(m, "Display IIR:\t%08x\n",
+ I915_READ(VLV_IIR));
+ seq_printf(m, "Display IIR_RW:\t%08x\n",
+ I915_READ(VLV_IIR_RW));
+ seq_printf(m, "Display IMR:\t%08x\n",
+ I915_READ(VLV_IMR));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat:\t%08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
+
+ seq_printf(m, "Master IER:\t%08x\n",
+ I915_READ(VLV_MASTER_IER));
+
+ seq_printf(m, "Render IER:\t%08x\n",
+ I915_READ(GTIER));
+ seq_printf(m, "Render IIR:\t%08x\n",
+ I915_READ(GTIIR));
+ seq_printf(m, "Render IMR:\t%08x\n",
+ I915_READ(GTIMR));
+
+ seq_printf(m, "PM IER:\t\t%08x\n",
+ I915_READ(GEN6_PMIER));
+ seq_printf(m, "PM IIR:\t\t%08x\n",
+ I915_READ(GEN6_PMIIR));
+ seq_printf(m, "PM IMR:\t\t%08x\n",
+ I915_READ(GEN6_PMIMR));
+
+ seq_printf(m, "Port hotplug:\t%08x\n",
+ I915_READ(PORT_HOTPLUG_EN));
+ seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+ I915_READ(VLV_DPFLIPSTAT));
+ seq_printf(m, "DPINVGTT:\t%08x\n",
+ I915_READ(DPINVGTT));
+
+ } else if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -564,69 +582,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_ringbuffer_data(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- if (!ring->obj) {
- seq_printf(m, "No ringbuffer setup\n");
- } else {
- const u8 __iomem *virt = ring->virtual_start;
- uint32_t off;
-
- for (off = 0; off < ring->size; off += 4) {
- uint32_t *ptr = (uint32_t *)(virt + off);
- seq_printf(m, "%08x : %08x\n", off, *ptr);
- }
- }
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_ringbuffer_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- int ret;
-
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- if (ring->size == 0)
- return 0;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- seq_printf(m, "Ring %s:\n", ring->name);
- seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
- seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
- seq_printf(m, " Size : %08x\n", ring->size);
- seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
- seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
- seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
- }
- seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
- seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static const char *ring_str(int ring)
{
switch (ring) {
@@ -704,6 +659,7 @@ static void i915_ring_error_state(struct seq_file *m,
struct drm_i915_error_state *error,
unsigned ring)
{
+ BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
seq_printf(m, "%s command stream:\n", ring_str(ring));
seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
@@ -718,8 +674,8 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 4)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
- seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
seq_printf(m, " SYNC_0: 0x%08x\n",
error->semaphore_mboxes[ring][0]);
@@ -727,31 +683,35 @@ static void i915_ring_error_state(struct seq_file *m,
error->semaphore_mboxes[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
+struct i915_error_state_file_priv {
+ struct drm_device *dev;
+ struct drm_i915_error_state *error;
+};
+
static int i915_error_state(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct i915_error_state_file_priv *error_priv = m->private;
+ struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
+ struct drm_i915_error_state *error = error_priv->error;
+ struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
- spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (!dev_priv->first_error) {
+ if (!error) {
seq_printf(m, "no error state collected\n");
- goto out;
+ return 0;
}
- error = dev_priv->first_error;
-
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
for (i = 0; i < dev_priv->num_fence_regs; i++)
@@ -762,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- i915_ring_error_state(m, dev, error, RCS);
- if (HAS_BLT(dev))
- i915_ring_error_state(m, dev, error, BCS);
- if (HAS_BSD(dev))
- i915_ring_error_state(m, dev, error, VCS);
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_error_state(m, dev, error, i);
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -828,12 +785,71 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->display)
intel_display_print_error_state(m, dev, error->display);
-out:
+ return 0;
+}
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct i915_error_state_file_priv *error_priv = m->private;
+ struct drm_device *dev = error_priv->dev;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ mutex_lock(&dev->struct_mutex);
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_error_state_file_priv *error_priv;
+ unsigned long flags;
+
+ error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+ if (!error_priv)
+ return -ENOMEM;
+
+ error_priv->dev = dev;
+
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ error_priv->error = dev_priv->first_error;
+ if (error_priv->error)
+ kref_get(&error_priv->error->ref);
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
- return 0;
+ return single_open(file, i915_error_state, error_priv);
+}
+
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct i915_error_state_file_priv *error_priv = m->private;
+
+ if (error_priv->error)
+ kref_put(&error_priv->error->ref, i915_error_state_free);
+ kfree(error_priv);
+
+ return single_release(inode, file);
}
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = seq_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = i915_error_state_release,
+};
+
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1132,6 +1148,17 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+
+ /* Not exactly sure what this is */
+ seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6_LOCKED));
+ seq_printf(m, "RC6 residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6));
+ seq_printf(m, "RC6+ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6p));
+ seq_printf(m, "RC6++ residency since boot: %u\n",
+ I915_READ(GEN6_GT_GFX_RC6pp));
+
return 0;
}
@@ -1306,17 +1333,25 @@ static int i915_opregion(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
int ret;
+ if (data == NULL)
+ return -ENOMEM;
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out;
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
+ if (opregion->header) {
+ memcpy_fromio(data, opregion->header, OPREGION_SIZE);
+ seq_write(m, data, OPREGION_SIZE);
+ }
mutex_unlock(&dev->struct_mutex);
+out:
+ kfree(data);
return 0;
}
@@ -1505,6 +1540,53 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_dpio_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+
+ if (!IS_VALLEYVIEW(dev)) {
+ seq_printf(m, "unsupported\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
+
+ seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_DIV_A));
+ seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_DIV_B));
+
+ seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+ seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+
+ seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+ seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+
+ seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+ seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
+ intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+
+ seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
+ intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+
static ssize_t
i915_wedged_read(struct file *filp,
char __user *ubuf,
@@ -1562,6 +1644,65 @@ static const struct file_operations i915_wedged_fops = {
};
static ssize_t
+i915_ring_stop_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[20];
+ int len;
+
+ len = snprintf(buf, sizeof(buf),
+ "0x%08x\n", dev_priv->stop_rings);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_ring_stop_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 0;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
+
+ mutex_lock(&dev->struct_mutex);
+ dev_priv->stop_rings = val;
+ mutex_unlock(&dev->struct_mutex);
+
+ return cnt;
+}
+
+static const struct file_operations i915_ring_stop_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_ring_stop_read,
+ .write = i915_ring_stop_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
size_t max,
@@ -1738,7 +1879,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
return 0;
}
-int i915_forcewake_release(struct inode *inode, struct file *file)
+static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1803,11 +1944,10 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
+ {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
- {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
- {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -1816,13 +1956,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
- {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
- {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
- {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
- {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
- {"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
{"i915_delayfreq_table", i915_delayfreq_table, 0},
@@ -1839,6 +1972,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
+ {"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -1867,6 +2001,17 @@ int i915_debugfs_init(struct drm_minor *minor)
&i915_cache_sharing_fops);
if (ret)
return ret;
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_ring_stop",
+ &i915_ring_stop_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_error_state",
+ &i915_error_state_fops);
+ if (ret)
+ return ret;
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -1885,6 +2030,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
+ 1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ba60f3c8f911..36822b924eb1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
@@ -34,15 +36,62 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
-#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <acpi/video.h>
+#include <asm/pat.h>
+
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
+/**
+ * Lock test for when it's just for synchronization of ring access.
+ *
+ * In that case, we don't need to do it when GEM is initialized as nobody else
+ * has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
+} while (0)
+
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
+{
+ if (I915_NEED_GFX_HWS(dev_priv->dev))
+ return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
+ else
+ return intel_read_status_page(LP_RING(dev_priv), reg);
+}
+
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_BREADCRUMB_INDEX 0x21
+
+void i915_update_dri1_breadcrumb(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv;
+
+ if (dev->primary->master) {
+ master_priv = dev->primary->master->driver_priv;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
+ }
+}
static void i915_write_hws_pga(struct drm_device *dev)
{
@@ -97,7 +146,7 @@ static void i915_free_hws(struct drm_device *dev)
if (ring->status_page.gfx_addr) {
ring->status_page.gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
}
/* Need to rewrite hardware status page */
@@ -195,7 +244,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
/* Allow hardware batchbuffers unless told otherwise.
*/
- dev_priv->allow_batchbuffer = 1;
+ dev_priv->dri1.allow_batchbuffer = 1;
return 0;
}
@@ -207,7 +256,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("%s\n", __func__);
- if (ring->map.handle == NULL) {
+ if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
@@ -236,6 +285,9 @@ static int i915_dma_init(struct drm_device *dev, void *data,
drm_i915_init_t *init = data;
int retcode = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
switch (init->func) {
case I915_INIT_DMA:
retcode = i915_initialize(dev, init);
@@ -578,6 +630,9 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
@@ -598,7 +653,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
int ret;
struct drm_clip_rect *cliprects = NULL;
- if (!dev_priv->allow_batchbuffer) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
@@ -655,6 +713,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects < 0)
@@ -706,11 +767,166 @@ fail_batch_free:
return ret;
}
+static int i915_emit_irq(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+ i915_kernel_lost_context(dev);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_priv->counter++;
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->counter = 1;
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
+
+ return dev_priv->counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ int ret = 0;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ READ_BREADCRUMB(dev_priv));
+
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ return 0;
+ }
+
+ if (master_priv->sarea_priv)
+ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+ ret = -EBUSY;
+
+ if (ret == -EBUSY) {
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ }
+
+ return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+static int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_emit_t *emit = data;
+ int result;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ mutex_lock(&dev->struct_mutex);
+ result = i915_emit_irq(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+ DRM_ERROR("copy_to_user\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+static int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_irq_wait_t *irqwait = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_vblank_pipe_t *pipe = data;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+ return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+static int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* The delayed swap mechanism was fundamentally racy, and has been
+ * removed. The model was that the client requested a delayed flip/swap
+ * from the kernel, then waited for vblank before continuing to perform
+ * rendering. The problem was that the kernel might wake the client
+ * up before it dispatched the vblank swap (since the lock has to be
+ * held while touching the ringbuffer), in which case the client would
+ * clear and start the next frame before the swap occurred, and
+ * flicker would occur in addition to likely missing the vblank.
+ *
+ * In the absence of this ioctl, userland falls back to a correct path
+ * of waiting for a vblank, then dispatching the swap on its own.
+ * Context switching to userland and back is plenty fast enough for
+ * meeting the requirements of vblank swapping.
+ */
+ return -EINVAL;
+}
+
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
DRM_DEBUG_DRIVER("%s\n", __func__);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -739,7 +955,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
- value = dev_priv->allow_batchbuffer ? 1 : 0;
+ value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
break;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
@@ -748,7 +964,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
@@ -761,13 +977,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
- value = dev_priv->has_gem;
+ value = 1;
break;
case I915_PARAM_HAS_BSD:
- value = HAS_BSD(dev);
+ value = intel_ring_initialized(&dev_priv->ring[VCS]);
break;
case I915_PARAM_HAS_BLT:
- value = HAS_BLT(dev);
+ value = intel_ring_initialized(&dev_priv->ring[BCS]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
@@ -787,6 +1003,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev);
break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -816,10 +1035,9 @@ static int i915_setparam(struct drm_device *dev, void *data,
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- dev_priv->tex_lru_log_granularity = param->value;
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->allow_batchbuffer = param->value;
+ dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
break;
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
@@ -844,6 +1062,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -861,23 +1082,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->hws_map.offset = dev->agp->base + hws->addr;
- dev_priv->hws_map.size = 4*1024;
- dev_priv->hws_map.type = 0;
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
-
- drm_core_ioremap_wc(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
+ dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
+ 4096);
+ if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr =
- (void __force __iomem *)dev_priv->hws_map.handle;
- memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+ memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -1013,133 +1228,6 @@ intel_teardown_mchbar(struct drm_device *dev)
release_resource(&dev_priv->mch_res);
}
-#define PTE_ADDRESS_MASK 0xfffff000
-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
-#define PTE_MAPPING_TYPE_MASK (3 << 1)
-#define PTE_VALID (1 << 0)
-
-/**
- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
- * a physical one
- * @dev: drm device
- * @offset: address to translate
- *
- * Some chip functions require allocations from stolen space and need the
- * physical address of the memory in question.
- */
-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev_priv->bridge_dev;
- u32 base;
-
-#if 0
- /* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so compute the base by subtracting the stolen memory
- * from the Top of Low Usable DRAM which is where the BIOS places
- * the graphics stolen memory.
- */
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* top 32bits are reserved = 0 */
- pci_read_config_dword(pdev, 0xA4, &base);
- } else {
- /* XXX presume 8xx is the same as i915 */
- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
- }
-#else
- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- u16 val;
- pci_read_config_word(pdev, 0xb0, &val);
- base = val >> 4 << 20;
- } else {
- u8 val;
- pci_read_config_byte(pdev, 0x9c, &val);
- base = val >> 3 << 27;
- }
- base -= dev_priv->mm.gtt->stolen_size;
-#endif
-
- return base + offset;
-}
-
-static void i915_warn_stolen(struct drm_device *dev)
-{
- DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
- DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
-}
-
-static void i915_setup_compression(struct drm_device *dev, int size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
- unsigned long cfb_base;
- unsigned long ll_base = 0;
-
- /* Just in case the BIOS is doing something questionable. */
- intel_disable_fbc(dev);
-
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
- if (compressed_fb)
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb)
- goto err;
-
- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
- if (!cfb_base)
- goto err_fb;
-
- if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
- 4096, 4096, 0);
- if (compressed_llb)
- compressed_llb = drm_mm_get_block(compressed_llb,
- 4096, 4096);
- if (!compressed_llb)
- goto err_fb;
-
- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
- if (!ll_base)
- goto err_llb;
- }
-
- dev_priv->cfb_size = size;
-
- dev_priv->compressed_fb = compressed_fb;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
- else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
- } else {
- I915_WRITE(FBC_CFB_BASE, cfb_base);
- I915_WRITE(FBC_LL_BASE, ll_base);
- dev_priv->compressed_llb = compressed_llb;
- }
-
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
- cfb_base, ll_base, size >> 20);
- return;
-
-err_llb:
- drm_mm_put_block(compressed_llb);
-err_fb:
- drm_mm_put_block(compressed_fb);
-err:
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
-}
-
-static void i915_cleanup_compression(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- drm_mm_put_block(dev_priv->compressed_fb);
- if (dev_priv->compressed_llb)
- drm_mm_put_block(dev_priv->compressed_llb);
-}
-
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
@@ -1158,14 +1246,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
- printk(KERN_INFO "i915: switched on\n");
+ pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(dev->pdev, PCI_D0);
i915_resume(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
- printk(KERN_ERR "i915: switched off\n");
+ pr_err("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -1183,88 +1271,11 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
- if (i915_enable_ppgtt >= 0)
- return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
- /* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
- return false;
-#endif
-
- return true;
-}
-
-static int i915_load_gem_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long prealloc_size, gtt_size, mappable_size;
- int ret;
-
- prealloc_size = dev_priv->mm.gtt->stolen_size;
- gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
- mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
- /* Basic memrange allocator for stolen space */
- drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
-
- mutex_lock(&dev->struct_mutex);
- if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
- /* PPGTT pdes are stolen from global gtt ptes, so shrink the
- * aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
- /* For paranoia keep the guard page in between. */
- gtt_size -= PAGE_SIZE;
-
- i915_gem_do_init(dev, 0, mappable_size, gtt_size);
-
- ret = i915_gem_init_aliasing_ppgtt(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- } else {
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch
- * page. There are a number of places where the hardware
- * apparently prefetches past the end of the object, and we've
- * seen multiple hangs with the GPU head pointer stuck in a
- * batchbuffer bound at the last page of the aperture. One page
- * should be enough to keep any prefetching inside of the
- * aperture.
- */
- i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
- }
-
- ret = i915_gem_init_hw(dev);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- i915_gem_cleanup_aliasing_ppgtt(dev);
- return ret;
- }
-
- /* Try to set up FBC with a reasonable compressed buffer size */
- if (I915_HAS_FBC(dev) && i915_powersave) {
- int cfb_size;
-
- /* Leave 1M for line length buffer & misc. */
-
- /* Try to get a 32M buffer... */
- if (prealloc_size > (36*1024*1024))
- cfb_size = 32*1024*1024;
- else /* fall back to 7/8 of the stolen space */
- cfb_size = prealloc_size * 7 / 8;
- i915_setup_compression(dev, cfb_size);
- }
-
- /* Allow hardware batchbuffers unless told otherwise. */
- dev_priv->allow_batchbuffer = 1;
- return 0;
-}
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
static int i915_load_modeset_init(struct drm_device *dev)
{
@@ -1288,22 +1299,22 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(dev->pdev,
- i915_switcheroo_set_state,
- NULL,
- i915_switcheroo_can_switch);
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
if (ret)
goto cleanup_vga_client;
- /* IIR "flip pending" bit means done if this bit is set */
- if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
- dev_priv->flip_pending_is_done = true;
+ /* Initialise stolen first so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ ret = i915_gem_init_stolen(dev);
+ if (ret)
+ goto cleanup_vga_switcheroo;
intel_modeset_init(dev);
- ret = i915_load_gem_init(dev);
+ ret = i915_gem_init(dev);
if (ret)
- goto cleanup_vga_switcheroo;
+ goto cleanup_gem_stolen;
intel_modeset_gem_init(dev);
@@ -1333,6 +1344,8 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_gem_stolen:
+ i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
@@ -1365,573 +1378,48 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
-static void i915_pineview_get_mem_freq(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 tmp;
-
- tmp = I915_READ(CLKCFG);
-
- switch (tmp & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_533:
- dev_priv->fsb_freq = 533; /* 133*4 */
- break;
- case CLKCFG_FSB_800:
- dev_priv->fsb_freq = 800; /* 200*4 */
- break;
- case CLKCFG_FSB_667:
- dev_priv->fsb_freq = 667; /* 167*4 */
- break;
- case CLKCFG_FSB_400:
- dev_priv->fsb_freq = 400; /* 100*4 */
- break;
- }
-
- switch (tmp & CLKCFG_MEM_MASK) {
- case CLKCFG_MEM_533:
- dev_priv->mem_freq = 533;
- break;
- case CLKCFG_MEM_667:
- dev_priv->mem_freq = 667;
- break;
- case CLKCFG_MEM_800:
- dev_priv->mem_freq = 800;
- break;
- }
-
- /* detect pineview DDR3 setting */
- tmp = I915_READ(CSHRDDR3CTL);
- dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
-}
-
-static void i915_ironlake_get_mem_freq(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u16 ddrpll, csipll;
-
- ddrpll = I915_READ16(DDRMPLL1);
- csipll = I915_READ16(CSIPLL0);
-
- switch (ddrpll & 0xff) {
- case 0xc:
- dev_priv->mem_freq = 800;
- break;
- case 0x10:
- dev_priv->mem_freq = 1066;
- break;
- case 0x14:
- dev_priv->mem_freq = 1333;
- break;
- case 0x18:
- dev_priv->mem_freq = 1600;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
- ddrpll & 0xff);
- dev_priv->mem_freq = 0;
- break;
- }
-
- dev_priv->r_t = dev_priv->mem_freq;
-
- switch (csipll & 0x3ff) {
- case 0x00c:
- dev_priv->fsb_freq = 3200;
- break;
- case 0x00e:
- dev_priv->fsb_freq = 3733;
- break;
- case 0x010:
- dev_priv->fsb_freq = 4266;
- break;
- case 0x012:
- dev_priv->fsb_freq = 4800;
- break;
- case 0x014:
- dev_priv->fsb_freq = 5333;
- break;
- case 0x016:
- dev_priv->fsb_freq = 5866;
- break;
- case 0x018:
- dev_priv->fsb_freq = 6400;
- break;
- default:
- DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
- csipll & 0x3ff);
- dev_priv->fsb_freq = 0;
- break;
- }
-
- if (dev_priv->fsb_freq == 3200) {
- dev_priv->c_m = 0;
- } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->c_m = 1;
- } else {
- dev_priv->c_m = 2;
- }
-}
-
-static const struct cparams {
- u16 i;
- u16 t;
- u16 m;
- u16 c;
-} cparams[] = {
- { 1, 1333, 301, 28664 },
- { 1, 1066, 294, 24460 },
- { 1, 800, 294, 25192 },
- { 0, 1333, 276, 27605 },
- { 0, 1066, 276, 27605 },
- { 0, 800, 231, 23784 },
-};
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+static void
+i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
+ unsigned long size)
{
- u64 total_count, diff, ret;
- u32 count1, count2, count3, m = 0, c = 0;
- unsigned long now = jiffies_to_msecs(jiffies), diff1;
- int i;
+ dev_priv->mm.gtt_mtrr = -1;
- diff1 = now - dev_priv->last_time1;
+#if defined(CONFIG_X86_PAT)
+ if (cpu_has_pat)
+ return;
+#endif
- /* Prevent division-by-zero if we are asking too fast.
- * Also, we don't get interesting results if we are polling
- * faster than once in 10ms, so just return the saved value
- * in such cases.
+ /* Set up a WC MTRR for non-PAT systems. This is more common than
+ * one would think, because the kernel disables PAT on first
+ * generation Core chips because WC PAT gets overridden by a UC
+ * MTRR if present. Even if a UC MTRR isn't present.
*/
- if (diff1 <= 10)
- return dev_priv->chipset_power;
-
- count1 = I915_READ(DMIEC);
- count2 = I915_READ(DDREC);
- count3 = I915_READ(CSIEC);
-
- total_count = count1 + count2 + count3;
-
- /* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->last_count1) {
- diff = ~0UL - dev_priv->last_count1;
- diff += total_count;
- } else {
- diff = total_count - dev_priv->last_count1;
- }
-
- for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->c_m &&
- cparams[i].t == dev_priv->r_t) {
- m = cparams[i].m;
- c = cparams[i].c;
- break;
- }
+ dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
+ if (dev_priv->mm.gtt_mtrr < 0) {
+ DRM_INFO("MTRR allocation failed. Graphics "
+ "performance may suffer.\n");
}
-
- diff = div_u64(diff, diff1);
- ret = ((m * diff) + c);
- ret = div_u64(ret, 10);
-
- dev_priv->last_count1 = total_count;
- dev_priv->last_time1 = now;
-
- dev_priv->chipset_power = ret;
-
- return ret;
}
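
i915_mtrr_setup() only handles allocation; unload has to undo the MTRR when one was added. A hedged sketch of the teardown counterpart (not shown in this hunk; mtrr_del() is the standard x86 interface, and base/size must match the mtrr_add() call above):

	/* Sketch: release the WC MTRR taken in i915_mtrr_setup() */
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, base, size);
		dev_priv->mm.gtt_mtrr = -1;
	}
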
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
- unsigned long m, x, b;
- u32 tsfs;
-
- tsfs = I915_READ(TSFS);
-
- m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = I915_READ8(TR1);
-
- b = tsfs & TSFS_INTR_MASK;
-
- return ((m * x) / 127) - b;
-}
-
-static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
- static const struct v_table {
- u16 vd; /* in .1 mil */
- u16 vm; /* in .1 mil */
- } v_table[] = {
- { 0, 0, },
- { 375, 0, },
- { 500, 0, },
- { 625, 0, },
- { 750, 0, },
- { 875, 0, },
- { 1000, 0, },
- { 1125, 0, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4125, 3000, },
- { 4250, 3125, },
- { 4375, 3250, },
- { 4500, 3375, },
- { 4625, 3500, },
- { 4750, 3625, },
- { 4875, 3750, },
- { 5000, 3875, },
- { 5125, 4000, },
- { 5250, 4125, },
- { 5375, 4250, },
- { 5500, 4375, },
- { 5625, 4500, },
- { 5750, 4625, },
- { 5875, 4750, },
- { 6000, 4875, },
- { 6125, 5000, },
- { 6250, 5125, },
- { 6375, 5250, },
- { 6500, 5375, },
- { 6625, 5500, },
- { 6750, 5625, },
- { 6875, 5750, },
- { 7000, 5875, },
- { 7125, 6000, },
- { 7250, 6125, },
- { 7375, 6250, },
- { 7500, 6375, },
- { 7625, 6500, },
- { 7750, 6625, },
- { 7875, 6750, },
- { 8000, 6875, },
- { 8125, 7000, },
- { 8250, 7125, },
- { 8375, 7250, },
- { 8500, 7375, },
- { 8625, 7500, },
- { 8750, 7625, },
- { 8875, 7750, },
- { 9000, 7875, },
- { 9125, 8000, },
- { 9250, 8125, },
- { 9375, 8250, },
- { 9500, 8375, },
- { 9625, 8500, },
- { 9750, 8625, },
- { 9875, 8750, },
- { 10000, 8875, },
- { 10125, 9000, },
- { 10250, 9125, },
- { 10375, 9250, },
- { 10500, 9375, },
- { 10625, 9500, },
- { 10750, 9625, },
- { 10875, 9750, },
- { 11000, 9875, },
- { 11125, 10000, },
- { 11250, 10125, },
- { 11375, 10250, },
- { 11500, 10375, },
- { 11625, 10500, },
- { 11750, 10625, },
- { 11875, 10750, },
- { 12000, 10875, },
- { 12125, 11000, },
- { 12250, 11125, },
- { 12375, 11250, },
- { 12500, 11375, },
- { 12625, 11500, },
- { 12750, 11625, },
- { 12875, 11750, },
- { 13000, 11875, },
- { 13125, 12000, },
- { 13250, 12125, },
- { 13375, 12250, },
- { 13500, 12375, },
- { 13625, 12500, },
- { 13750, 12625, },
- { 13875, 12750, },
- { 14000, 12875, },
- { 14125, 13000, },
- { 14250, 13125, },
- { 14375, 13250, },
- { 14500, 13375, },
- { 14625, 13500, },
- { 14750, 13625, },
- { 14875, 13750, },
- { 15000, 13875, },
- { 15125, 14000, },
- { 15250, 14125, },
- { 15375, 14250, },
- { 15500, 14375, },
- { 15625, 14500, },
- { 15750, 14625, },
- { 15875, 14750, },
- { 16000, 14875, },
- { 16125, 15000, },
- };
- if (dev_priv->info->is_mobile)
- return v_table[pxvid].vm;
- else
- return v_table[pxvid].vd;
-}
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- struct timespec now, diff1;
- u64 diff;
- unsigned long diffms;
- u32 count;
-
- if (dev_priv->info->gen != 5)
- return;
+ struct apertures_struct *ap;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
+ bool primary;
- getrawmonotonic(&now);
- diff1 = timespec_sub(now, dev_priv->last_time2);
-
- /* Don't divide by 0 */
- diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
- if (!diffms)
+ ap = alloc_apertures(1);
+ if (!ap)
return;
- count = I915_READ(GFXEC);
-
- if (count < dev_priv->last_count2) {
- diff = ~0UL - dev_priv->last_count2;
- diff += count;
- } else {
- diff = count - dev_priv->last_count2;
- }
-
- dev_priv->last_count2 = count;
- dev_priv->last_time2 = now;
-
- /* More magic constants... */
- diff = diff * 1181;
- diff = div_u64(diff, diffms * 10);
- dev_priv->gfx_power = diff;
-}
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- unsigned long t, corr, state1, corr2, state2;
- u32 pxvid, ext_v;
-
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
- pxvid = (pxvid >> 24) & 0x7f;
- ext_v = pvid_to_extvid(dev_priv, pxvid);
-
- state1 = ext_v;
-
- t = i915_mch_val(dev_priv);
-
- /* Revel in the empirically derived constants */
-
- /* Correction factor in 1/100000 units */
- if (t > 80)
- corr = ((t * 2349) + 135940);
- else if (t >= 50)
- corr = ((t * 964) + 29317);
- else /* < 50 */
- corr = ((t * 301) + 1004);
-
- corr = corr * ((150142 * state1) / 10000 - 78642);
- corr /= 100000;
- corr2 = (corr * dev_priv->corr);
-
- state2 = (corr2 * state1) / 10000;
- state2 /= 100; /* convert to mW */
-
- i915_update_gfx_val(dev_priv);
-
- return dev_priv->gfx_power + state2;
-}
-
-/* Global for IPS driver to get at the current i915 device */
-static struct drm_i915_private *i915_mch_dev;
-/*
- * Lock protecting IPS related data structures
- * - i915_mch_dev
- * - dev_priv->max_delay
- * - dev_priv->min_delay
- * - dev_priv->fmax
- * - dev_priv->gpu_busy
- */
-static DEFINE_SPINLOCK(mchdev_lock);
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
- struct drm_i915_private *dev_priv;
- unsigned long chipset_val, graphics_val, ret = 0;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- chipset_val = i915_chipset_val(dev_priv);
- graphics_val = i915_gfx_val(dev_priv);
+ ap->ranges[0].base = dev_priv->dev->agp->base;
+ ap->ranges[0].size =
+ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ primary =
+ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
- ret = chipset_val + graphics_val;
+ remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->max_delay > dev_priv->fmax)
- dev_priv->max_delay--;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->max_delay < dev_priv->min_delay)
- dev_priv->max_delay++;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
- * i915_gpu_busy - indicate GPU business to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = false;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- ret = dev_priv->busy;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- dev_priv->max_delay = dev_priv->fstart;
-
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
- ret = false;
-
-out_unlock:
- spin_unlock(&mchdev_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * Tells the intel_ips driver that the i915 driver is now loaded, if
- * IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
- void (*link)(void);
-
- link = symbol_get(ips_link_to_i915_driver);
- if (link) {
- link();
- symbol_put(ips_link_to_i915_driver);
- }
+ kfree(ap);
}
/**
@@ -1948,8 +1436,16 @@ ips_ping_for_i915_load(void)
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
+ struct intel_device_info *info;
int ret = 0, mmio_bar;
- uint32_t agp_size;
+ uint32_t aperture_size;
+
+ info = (struct intel_device_info *) flags;
+
+ /* Refuse to load on gen6+ without kms enabled. */
+ if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
/* i915 has 4 more counters */
dev->counters += 4;
@@ -1964,13 +1460,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
- dev_priv->info = (struct intel_device_info *) flags;
+ dev_priv->info = info;
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
}
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto put_bridge;
+ }
+
+ i915_kick_out_firmware_fb(dev_priv);
+
pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */
@@ -1996,34 +1501,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto out_rmmap;
- }
-
- agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base, agp_size);
+ io_mapping_create_wc(dev->agp->base, aperture_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
}
- /* Set up a WC MTRR for non-PAT systems. This is more common than
- * one would think, because the kernel disables PAT on first
- * generation Core chips because WC PAT gets overridden by a UC
- * MTRR if present. Even if a UC MTRR isn't present.
- */
- dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- agp_size,
- MTRR_TYPE_WRCOMB, 1);
- if (dev_priv->mm.gtt_mtrr < 0) {
- DRM_INFO("MTRR allocation failed. Graphics "
- "performance may suffer.\n");
- }
+ i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -2047,9 +1534,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
- /* enable GEM by default */
- dev_priv->has_gem = 1;
-
intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -2069,11 +1553,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- if (IS_PINEVIEW(dev))
- i915_pineview_get_mem_freq(dev);
- else if (IS_GEN5(dev))
- i915_ironlake_get_mem_freq(dev);
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -2093,7 +1572,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
@@ -2117,6 +1596,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
}
+ i915_setup_sysfs(dev);
+
/* Must be done after probing outputs */
intel_opregion_init(dev);
acpi_video_register();
@@ -2124,14 +1605,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
- if (IS_GEN5(dev)) {
- spin_lock(&mchdev_lock);
- i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
- spin_unlock(&mchdev_lock);
-
- ips_ping_for_i915_load();
- }
+ if (IS_GEN5(dev))
+ intel_gpu_ips_init(dev_priv);
return 0;
@@ -2166,17 +1641,18 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- spin_lock(&mchdev_lock);
- i915_mch_dev = NULL;
- spin_unlock(&mchdev_lock);
+ intel_gpu_ips_teardown();
+
+ i915_teardown_sysfs(dev);
if (dev_priv->mm.inactive_shrinker.shrink)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
+ i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
/* Cancel the retire work handler, which should be idle now. */
@@ -2228,8 +1704,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
- if (I915_HAS_FBC(dev) && i915_powersave)
- i915_cleanup_compression(dev);
+ i915_gem_cleanup_stolen(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -2277,7 +1752,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*
- * Additionally, in the non-mode setting case, we'll tear down the AGP
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
@@ -2322,7 +1797,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -2355,16 +1830,10 @@ struct drm_ioctl_desc i915_ioctls[] = {
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always retured to indictate every i9x5 is AGP.
+/*
+ * This is really ugly: Because old userspace abused the linux agp interface to
+ * manage the gtt, we need to claim that all intel devices are agp,
+ * because otherwise the drm core refuses to initialize the agp support code.
*/
int i915_driver_device_is_agp(struct drm_device * dev)
{
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ae8a64f9f845..9fe9ebe52a7a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -84,6 +84,12 @@ MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
+int i915_lvds_channel_mode __read_mostly;
+module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
@@ -93,8 +99,8 @@ MODULE_PARM_DESC(lvds_use_ssc,
int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
- "Override selection of SDVO panel mode in the VBT "
- "(default: auto)");
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
@@ -209,6 +215,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -216,6 +223,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
+ .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -224,6 +232,8 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -233,6 +243,8 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -241,6 +253,8 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -250,6 +264,46 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
+ .has_pch_split = 1,
+ .has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_valleyview_m_info = {
+ .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+ .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 0,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .is_valleyview = 1,
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+ .is_haswell = 1, .gen = 7,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_pch_split = 1,
+ .has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ .is_haswell = 1, .gen = 7, .is_mobile = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
+ .has_llc = 1,
+ .has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -297,6 +351,13 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
+ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
+ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
{0, 0, 0}
};
@@ -308,6 +369,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
void intel_detect_pch(struct drm_device *dev)
{
@@ -328,20 +390,45 @@ void intel_detect_pch(struct drm_device *dev)
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
+ dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
}
+ BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
pci_dev_put(pch);
}
}
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return 1;
+}
+
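
How a caller is expected to consult the new helper when choosing between hardware semaphores and a CPU-side wait; this caller is purely illustrative (sync_between_rings() is a placeholder, only i915_semaphore_is_enabled() and i915_wait_request() come from this patch):

	/* Illustrative gate, not driver code */
	if (i915_semaphore_is_enabled(dev))
		ret = sync_between_rings(from, to, seqno);	/* hw semaphore */
	else
		ret = i915_wait_request(from, seqno);		/* CPU-side wait */
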
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -366,7 +453,7 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(FORCEWAKE_MT);
count = 0;
@@ -408,7 +495,7 @@ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@@ -446,6 +533,31 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
+void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+
+ /* Already awake? */
+ if ((I915_READ(0x130094) & 0xa1) == 0xa1)
+ return;
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
+ POSTING_READ(FORCEWAKE_VLV);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
+ udelay(10);
+}
+
+void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
+ /* FIXME: confirm VLV behavior with Punit folks */
+ POSTING_READ(FORCEWAKE_VLV);
+}
+
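
The Valleyview variants keep the same get/put discipline as the gen6 helpers; a minimal hedged usage sketch (reg and val are placeholders to show the bracketing, not taken from this patch):

	vlv_force_wake_get(dev_priv);
	val = I915_READ(reg);		/* reg: any GT register; placeholder */
	vlv_force_wake_put(dev_priv);
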
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -525,15 +637,16 @@ static int i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (HAS_PCH_SPLIT(dev))
+ ironlake_init_pch_refclk(dev);
+
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
- if (HAS_PCH_SPLIT(dev))
- ironlake_init_pch_refclk(dev);
-
+ intel_modeset_init_hw(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
@@ -541,9 +654,6 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
mutex_unlock(&dev->mode_config.mutex);
-
- if (IS_IRONLAKE_M(dev))
- ironlake_enable_rc6(dev);
}
intel_opregion_init(dev);
@@ -576,7 +686,7 @@ int i915_resume(struct drm_device *dev)
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+static int i8xx_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -610,11 +720,12 @@ static int i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return gdrst & 0x1;
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int i965_do_reset(struct drm_device *dev, u8 flags)
+static int i965_do_reset(struct drm_device *dev)
{
+ int ret;
u8 gdrst;
/*
@@ -623,20 +734,43 @@ static int i965_do_reset(struct drm_device *dev, u8 flags)
* triggers the reset; when done, the hardware will clear it.
*/
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_RENDER |
+ GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ gdrst | GRDOM_MEDIA |
+ GRDOM_RESET_ENABLE);
return wait_for(i965_reset_complete(dev), 500);
}
-static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+static int ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+ u32 gdrst;
+ int ret;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
-static int gen6_do_reset(struct drm_device *dev, u8 flags)
+static int gen6_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -671,10 +805,44 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
return ret;
}
+static int intel_gpu_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = -ENODEV;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ ret = gen6_do_reset(dev);
+ break;
+ case 5:
+ ret = ironlake_do_reset(dev);
+ break;
+ case 4:
+ ret = i965_do_reset(dev);
+ break;
+ case 2:
+ ret = i8xx_do_reset(dev);
+ break;
+ }
+
+ /* Also reset the gpu hangman. */
+ if (dev_priv->stop_rings) {
+ DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
+ dev_priv->stop_rings = 0;
+ if (ret == -ENODEV) {
+ DRM_ERROR("Reset not implemented, but ignoring "
+ "error for simulated gpu hangs\n");
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
- * @flags: reset domains
*
* Reset the chip. Useful if a hang is detected. Returns zero on successful
* reset or otherwise an error code.
@@ -687,14 +855,9 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- /*
- * We really should only reset the display subsystem if we actually
- * need to
- */
- bool need_display = true;
int ret;
if (!i915_try_reset)
@@ -703,26 +866,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (!mutex_trylock(&dev->struct_mutex))
return -EBUSY;
+ dev_priv->stop_rings = 0;
+
i915_gem_reset(dev);
ret = -ENODEV;
- if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+ if (get_seconds() - dev_priv->last_gpu_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
- } else switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- ret = gen6_do_reset(dev, flags);
- break;
- case 5:
- ret = ironlake_do_reset(dev, flags);
- break;
- case 4:
- ret = i965_do_reset(dev, flags);
- break;
- case 2:
- ret = i8xx_do_reset(dev, flags);
- break;
- }
+ else
+ ret = intel_gpu_reset(dev);
+
dev_priv->last_gpu_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
@@ -746,36 +899,27 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
+ struct intel_ring_buffer *ring;
+ int i;
+
dev_priv->mm.suspended = 0;
i915_gem_init_swizzling(dev);
- dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
- if (HAS_BSD(dev))
- dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
- if (HAS_BLT(dev))
- dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+ for_each_ring(ring, dev_priv, i)
+ ring->init(ring);
i915_gem_init_ppgtt(dev);
mutex_unlock(&dev->struct_mutex);
- drm_irq_uninstall(dev);
- drm_mode_config_reset(dev);
- drm_irq_install(dev);
- mutex_lock(&dev->struct_mutex);
- }
- mutex_unlock(&dev->struct_mutex);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_modeset_init_hw(dev);
- /*
- * Perform a full modeset as on later generations, e.g. Ironlake, we may
- * need to retrain the display link and cannot just restore the register
- * values.
- */
- if (need_display) {
- mutex_lock(&dev->mode_config.mutex);
- drm_helper_resume_force_mode(dev);
- mutex_unlock(&dev->mode_config.mutex);
+ drm_irq_uninstall(dev);
+ drm_irq_install(dev);
+ } else {
+ mutex_unlock(&dev->struct_mutex);
}
return 0;
@@ -874,7 +1018,7 @@ static const struct dev_pm_ops i915_pm_ops = {
.restore = i915_pm_resume,
};
-static struct vm_operations_struct i915_gem_vm_ops = {
+static const struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -901,7 +1045,7 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
@@ -924,6 +1068,12 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = i915_gem_prime_export,
+ .gem_prime_import = i915_gem_prime_import,
+
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
@@ -993,6 +1143,12 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
+
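
NEEDS_FORCE_WAKE() gates the forcewake dance in the MMIO accessors; a simplified sketch of the read side (the real __i915_read macro additionally takes gt_lock and does FIFO accounting, omitted here):

	u32 val;

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		gen6_gt_force_wake_get(dev_priv);
		val = readl(dev_priv->regs + reg);
		gen6_gt_force_wake_put(dev_priv);
	} else {
		val = readl(dev_priv->regs + reg);
	}
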
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5fabc6c31fec..b0b676abde0d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -38,6 +38,8 @@
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
/* General customization:
*/
@@ -63,10 +65,30 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
+enum port {
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+struct intel_pch_pll {
+ int refcount; /* count of number of CRTCs sharing this PLL */
+ int active; /* count of number of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ int pll_reg;
+ int fp0_reg;
+ int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
/* Interface history:
*
* 1.1: Original.
@@ -111,11 +133,11 @@ struct opregion_asle;
struct drm_i915_private;
struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- struct opregion_asle *asle;
- void *vbt;
+ struct opregion_header __iomem *header;
+ struct opregion_acpi __iomem *acpi;
+ struct opregion_swsci __iomem *swsci;
+ struct opregion_asle __iomem *asle;
+ void __iomem *vbt;
u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
@@ -135,7 +157,6 @@ struct drm_i915_master_private {
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
- uint32_t setup_seqno;
int pin_count;
};
@@ -151,8 +172,11 @@ struct sdvo_device_mapping {
struct intel_display_error_state;
struct drm_i915_error_state {
+ struct kref ref;
u32 eir;
u32 pgtbl_er;
+ u32 ier;
+ bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
@@ -218,11 +242,15 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
+ void (*sanitize_pm)(struct drm_device *dev);
+ void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
+ void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
@@ -255,6 +283,10 @@ struct intel_device_info {
u8 is_broadwater:1;
u8 is_crestline:1;
u8 is_ivybridge:1;
+ u8 is_valleyview:1;
+ u8 has_pch_split:1;
+ u8 has_force_wake:1;
+ u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
u8 has_hotplug:1;
@@ -291,10 +323,12 @@ enum no_fbc_reason {
enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
+ PCH_LPT, /* Lynxpoint PCH */
};
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
struct intel_fbdev;
struct intel_fbc_work;
@@ -302,7 +336,6 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
bool force_bit;
- bool has_gpio;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
@@ -314,7 +347,6 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
- int has_gem;
int relative_constants_mode;
void __iomem *regs;
@@ -326,19 +358,23 @@ typedef struct drm_i915_private {
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
- struct intel_gmbus *gmbus;
+ struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
+ /**
+ * Base address of the gmbus and gpio block.
+ */
+ uint32_t gpio_mmio_base;
+
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
- drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
@@ -354,6 +390,10 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+
+ /* DPIO indirect register protection */
+ spinlock_t dpio_lock;
+
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
@@ -363,22 +403,20 @@ typedef struct drm_i915_private {
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
- int tex_lru_log_granularity;
- int allow_batchbuffer;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
- int vblank_pipe;
int num_pipe;
+ int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
- uint32_t last_acthd;
- uint32_t last_acthd_bsd;
- uint32_t last_acthd_blt;
+ uint32_t last_acthd[I915_NUM_RINGS];
uint32_t last_instdone;
uint32_t last_instdone1;
+ unsigned int stop_rings;
+
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
@@ -405,6 +443,8 @@ typedef struct drm_i915_private {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@@ -428,6 +468,7 @@ typedef struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
+ /* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
@@ -652,24 +693,10 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
- /**
- * LRU list of objects which are not in the ringbuffer but
- * are still pinned in the GTT.
- */
- struct list_head pinned_list;
-
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
/**
- * List of objects currently pending being freed.
- *
- * These objects are no longer in use, but due to a signal
- * we were prevented from freeing them at the appointed time.
- */
- struct list_head deferred_free_list;
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -717,6 +744,16 @@ typedef struct drm_i915_private {
size_t object_memory;
u32 object_count;
} mm;
+
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct {
+ unsigned allow_batchbuffer : 1;
+ u32 __iomem *gfx_hws_cpu_addr;
+ } dri1;
+
+ /* Kernel Modesetting */
+
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
@@ -726,7 +763,8 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
- bool flip_pending_is_done;
+
+ struct intel_pch_pll pch_plls[I915_NUM_PLLS];
/* Reclocking support */
bool render_reclock_avail;
@@ -781,6 +819,11 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property;
} drm_i915_private_t;
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+ if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+
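
Usage sketch of the new iterator; this mirrors the ring re-init loop the patch adds to i915_reset() above:

	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->init(ring);
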
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */
@@ -844,7 +887,14 @@ struct drm_i915_gem_object {
* Current tiling mode for the object.
*/
unsigned int tiling_mode:2;
- unsigned int tiling_changed:1;
+ /**
+ * Whether the tiling parameters for the currently associated fence
+ * register have changed. Note that for the purposes of tracking
+ * tiling changes we also treat the unfenced register, the register
+ * slot that the object occupies whilst it executes a fenced
+ * command (such as BLT on gen2/3), as a "fence".
+ */
+ unsigned int fence_dirty:1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -881,6 +931,7 @@ struct drm_i915_gem_object {
unsigned int cache_level:2;
unsigned int has_aliasing_ppgtt_mapping:1;
+ unsigned int has_global_gtt_mapping:1;
struct page **pages;
@@ -890,6 +941,11 @@ struct drm_i915_gem_object {
struct scatterlist *sg_list;
int num_sg;
+ /* prime dma-buf support */
+ struct sg_table *sg_table;
+ void *dma_buf_vmapping;
+ int vmapping_count;
+
/**
* Used for performing relocations during execbuffer insertion.
*/
@@ -904,13 +960,12 @@ struct drm_i915_gem_object {
*/
uint32_t gtt_offset;
- /** Breadcrumb of last rendering to the buffer. */
- uint32_t last_rendering_seqno;
struct intel_ring_buffer *ring;
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
- struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -918,13 +973,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
-
- /**
- * If present, while GEM_DOMAIN_CPU is in the read domain this array
- * flags which individual pages are valid.
- */
- uint8_t *page_cpu_valid;
-
/** User space pin count and filp owning the pin */
uint32_t user_pin_count;
struct drm_file *pin_filp;
@@ -1001,6 +1049,8 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
/*
@@ -1044,13 +1094,16 @@ struct drm_i915_file_private {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
#include "i915_trace.h"
/**
@@ -1081,6 +1134,7 @@ extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
@@ -1094,6 +1148,7 @@ extern int i915_master_create(struct drm_device *dev, struct drm_master *master)
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
@@ -1104,12 +1159,14 @@ extern void i915_driver_preclose(struct drm_device *dev,
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
-extern int i915_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -1119,19 +1176,10 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
-extern int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern void intel_irq_init(struct drm_device *dev);
-extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+void i915_error_state_free(struct kref *error_ref);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1205,8 +1253,12 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ gfp_t gfpmask);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
@@ -1229,17 +1281,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-static inline void
+static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count++;
- }
+ return true;
+ } else
+ return false;
}
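
With the bool return a caller can remember whether a pin reference was actually taken and only drop it in that case; a hedged caller sketch (i915_gem_object_unpin_fence() is assumed to be the companion inline that follows):

	bool fence_pinned = i915_gem_object_pin_fence(obj);

	/* ... emit the fenced command ... */

	if (fence_pinned)
		i915_gem_object_unpin_fence(obj);
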
static inline void
@@ -1260,27 +1313,25 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-void i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire);
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
@@ -1301,6 +1352,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags);
+
+
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
@@ -1311,18 +1369,24 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable);
-int __must_check i915_gem_evict_everything(struct drm_device *dev,
- bool purgeable_only);
-int __must_check i915_gem_evict_inactive(struct drm_device *dev,
- bool purgeable_only);
+int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -1354,9 +1418,20 @@ extern int i915_restore_state(struct drm_device *dev);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev_priv);
+void i915_teardown_sysfs(struct drm_device *dev_priv);
+
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
+extern inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+ return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+ struct drm_i915_private *dev_priv, unsigned port);
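
With the gmbus controllers now held in a fixed-size array, callers are expected to validate the port before asking for an adapter; a short hedged sketch (port is a caller-supplied value, adapter usage not shown):

	struct i2c_adapter *adapter = NULL;

	if (intel_gmbus_is_port_valid(port))
		adapter = intel_gmbus_get_adapter(dev_priv, port);
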
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -1391,6 +1466,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1403,12 +1479,17 @@ extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
+extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1420,28 +1501,6 @@ extern void intel_display_print_error_state(struct seq_file *m,
struct intel_display_error_state *error);
#endif
-#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
-
-#define BEGIN_LP_RING(n) \
- intel_ring_begin(LP_RING(dev_priv), (n))
-
-#define OUT_RING(x) \
- intel_ring_emit(LP_RING(dev_priv), x)
-
-#define ADVANCE_LP_RING() \
- intel_ring_advance(LP_RING(dev_priv))
-
-/**
- * Lock test for when it's just for synchronization of ring access.
- *
- * In that case, we don't need to do it when GEM is initialized as nobody else
- * has access to the ring.
- */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
- if (LP_RING(dev->dev_private)->obj == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file); \
-} while (0)
-
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
* returned.
@@ -1450,12 +1509,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0d1e4b7b4b99..288d7b8f49ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,31 +35,41 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
- bool write);
-static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable);
-static void i915_gem_clear_fence_reg(struct drm_device *dev,
- struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj);
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable);
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ /* As we do not have an associated fence register, we will force
+ * a tiling change if we ever need to acquire one.
+ */
+ obj->fence_dirty = false;
+ obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
@@ -122,26 +132,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active && obj->pin_count == 0;
-}
-
-void i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long mappable_end,
- unsigned long end)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
-
- dev_priv->mm.gtt_start = start;
- dev_priv->mm.gtt_mappable_end = mappable_end;
- dev_priv->mm.gtt_end = end;
- dev_priv->mm.gtt_total = end - start;
- dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-
- /* Take over this portion of the GTT */
- intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+ return !obj->active;
}
int
@@ -150,12 +141,20 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_init *args = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
if (args->gtt_start >= args->gtt_end ||
(args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
return -EINVAL;
+ /* GEM with user mode setting was never supported on ilk and later. */
+ if (INTEL_INFO(dev)->gen >= 5)
+ return -ENODEV;
+
mutex_lock(&dev->struct_mutex);
- i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
+ i915_gem_init_global_gtt(dev, args->gtt_start,
+ args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -170,13 +169,11 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
size_t pinned;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- pinned += obj->gtt_space->size;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ if (obj->pin_count)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->mm.gtt_total;
@@ -247,6 +244,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
+
return i915_gem_create(file, dev,
args->size, &args->handle);
}
@@ -259,66 +257,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
obj->tiling_mode != I915_TILING_NONE;
}
-/**
- * This is the fast shmem pread path, which attempts to copy_from_user directly
- * from the backing pages of the object to the user's address space. On a
- * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
- */
-static int
-i915_gem_shmem_pread_fast(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
-{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int page_offset, page_length;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
-
- offset = args->offset;
-
- while (remain > 0) {
- struct page *page;
- char *vaddr;
- int ret;
-
- /* Operation in this page
- *
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- vaddr = kmap_atomic(page);
- ret = __copy_to_user_inatomic(user_data,
- vaddr + page_offset,
- page_length);
- kunmap_atomic(vaddr);
-
- mark_page_accessed(page);
- page_cache_release(page);
- if (ret)
- return -EFAULT;
-
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
-
- return 0;
-}
-
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -346,8 +284,8 @@ __copy_to_user_swizzled(char __user *cpu_vaddr,
}
static inline int
-__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
- const char *cpu_vaddr,
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+ const char __user *cpu_vaddr,
int length)
{
int ret, cpu_offset = 0;
@@ -371,37 +309,121 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
return 0;
}
-/**
- * This is the fallback shmem pread path, which allocates temporary storage
- * in kernel space to copy_to_user into outside of the struct_mutex, so we
- * can copy out of the object's backing pages while holding the struct mutex
- * and not take page faults.
- */
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pread *args,
- struct drm_file *file)
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ char *vaddr;
+ int ret;
+
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
+
+ vaddr = kmap_atomic(page);
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ return ret;
+}
+
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+ bool swizzled)
+{
+ if (unlikely(swizzled)) {
+ unsigned long start = (unsigned long) addr;
+ unsigned long end = (unsigned long) addr + length;
+
+ /* For swizzling simply ensure that we always flush both
+ * channels. Lame, but simple and it works. Swizzled
+ * pwrite/pread is far from a hotpath - current userspace
+ * doesn't use it at all. */
+ start = round_down(start, 128);
+ end = round_up(end, 128);
+
+ drm_clflush_virt_range((void *)start, end - start);
+ } else {
+ drm_clflush_virt_range(addr, length);
+ }
+}
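A worked instance of the 128-byte widening above, with made-up numbers:

/* Example: addr = 0x1050, length = 0x30
 *   start = round_down(0x1050, 128) = 0x1000
 *   end   = round_up(0x1050 + 0x30, 128) = 0x1080
 * so the whole 0x1000..0x1080 span (two 64-byte cachelines) is flushed,
 * which is what guarantees both channels are covered for swizzled pages.
 */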
+
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling, bool needs_clflush)
+{
+ char *vaddr;
+ int ret;
+
+ vaddr = kmap(page);
+ if (needs_clflush)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+
+ if (page_do_bit17_swizzling)
+ ret = __copy_to_user_swizzled(user_data,
+ vaddr, shmem_page_offset,
+ page_length);
+ else
+ ret = __copy_to_user(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap(page);
+
+ return ret;
+}
+
+static int
+i915_gem_shmem_pread(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pread *args,
+ struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
- int shmem_page_offset, page_length, ret;
+ int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int prefaulted = 0;
+ int needs_clflush = 0;
+ int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- offset = args->offset;
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+ /* If we're not in the cpu read domain, set ourselves into the gtt
+ * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush = 1;
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ return ret;
+ }
- mutex_unlock(&dev->struct_mutex);
+ offset = args->offset;
while (remain > 0) {
struct page *page;
- char *vaddr;
/* Operation in this page
*
@@ -413,28 +435,51 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
+ if (obj->pages) {
+ page = obj->pages[offset >> PAGE_SHIFT];
+ release_page = 0;
+ } else {
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+ release_page = 1;
}
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data,
- vaddr, shmem_page_offset,
- page_length);
- else
- ret = __copy_to_user(user_data,
- vaddr + shmem_page_offset,
- page_length);
- kunmap(page);
+ ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+ if (ret == 0)
+ goto next_page;
- mark_page_accessed(page);
+ hit_slowpath = 1;
+ page_cache_get(page);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!prefaulted) {
+ ret = fault_in_multipages_writeable(user_data, remain);
+ /* Userspace is tricking us, but we've already clobbered
+ * its pages with the prefault and promised to write the
+ * data up to the first fault. Hence ignore any errors
+ * and just continue. */
+ (void)ret;
+ prefaulted = 1;
+ }
+
+ ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ needs_clflush);
+
+ mutex_lock(&dev->struct_mutex);
page_cache_release(page);
+next_page:
+ mark_page_accessed(page);
+ if (release_page)
+ page_cache_release(page);
if (ret) {
ret = -EFAULT;
@@ -447,10 +492,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ }
return ret;
}
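The fast/slow split in the loop above exists because the driver cannot take a page fault while struct_mutex is held: its own fault handler (i915_gem_fault) takes the same mutex, and the removed GTT pwrite slow path made the same point about pinning user pages up front. A condensed, illustrative view of the pattern (the helper names are the ones introduced by this patch; error handling trimmed):

ret = shmem_pread_fast(page, offset, len, user_data, swizzled, clflush);
if (ret) {				/* atomic copy hit an unfaulted user page */
	mutex_unlock(&dev->struct_mutex);	/* drop the lock before faulting */
	fault_in_multipages_writeable(user_data, remain);
	ret = shmem_pread_slow(page, offset, len, user_data, swizzled, clflush);
	mutex_lock(&dev->struct_mutex);
}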
@@ -476,11 +522,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
- if (ret)
- return -EFAULT;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -498,19 +539,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
- trace_i915_gem_object_pread(obj, args->offset, args->size);
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj,
- args->offset,
- args->size);
- if (ret)
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
goto out;
+ }
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
+ trace_i915_gem_object_pread(obj, args->offset, args->size);
+
+ ret = i915_gem_shmem_pread(dev, obj, args, file);
out:
drm_gem_object_unreference(&obj->base);
@@ -529,40 +568,19 @@ fast_user_write(struct io_mapping *mapping,
char __user *user_data,
int length)
{
- char *vaddr_atomic;
+ void __iomem *vaddr_atomic;
+ void *vaddr;
unsigned long unwritten;
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
- unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+ /* We can use the cpu mem copy function because this is X86. */
+ vaddr = (void __force*)vaddr_atomic + page_offset;
+ unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
-/* Here's the write path which can sleep for
- * page faults
- */
-
-static inline void
-slow_kernel_write(struct io_mapping *mapping,
- loff_t gtt_base, int gtt_offset,
- struct page *user_page, int user_offset,
- int length)
-{
- char __iomem *dst_vaddr;
- char *src_vaddr;
-
- dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
- src_vaddr = kmap(user_page);
-
- memcpy_toio(dst_vaddr + gtt_offset,
- src_vaddr + user_offset,
- length);
-
- kunmap(user_page);
- io_mapping_unmap(dst_vaddr);
-}
-
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
@@ -577,7 +595,19 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
- int page_offset, page_length;
+ int page_offset, page_length, ret;
+
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -602,214 +632,133 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* retry in the slow path.
*/
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
- page_offset, user_data, page_length))
- return -EFAULT;
+ page_offset, user_data, page_length)) {
+ ret = -EFAULT;
+ goto out_unpin;
+ }
remain -= page_length;
user_data += page_length;
offset += page_length;
}
- return 0;
+out_unpin:
+ i915_gem_object_unpin(obj);
+out:
+ return ret;
}
-/**
- * This is the fallback GTT pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This code resulted in x11perf -rgb10text consuming about 10% more CPU
- * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
- */
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set. */
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- ssize_t remain;
- loff_t gtt_page_base, offset;
- loff_t first_data_page, last_data_page, num_pages;
- loff_t pinned_pages, i;
- struct page **user_pages;
- struct mm_struct *mm = current->mm;
- int gtt_page_offset, data_page_offset, data_page_index, page_length;
+ char *vaddr;
int ret;
- uint64_t data_ptr = args->data_ptr;
-
- remain = args->size;
-
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, and all of the pwrite implementations
- * want to hold it while dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
-
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out_unpin_pages;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto out_unpin_pages;
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin_pages;
-
- offset = obj->gtt_offset + args->offset;
-
- while (remain > 0) {
- /* Operation in this page
- *
- * gtt_page_base = page offset within aperture
- * gtt_page_offset = offset within page in aperture
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
- * page_length = bytes to copy for this page
- */
- gtt_page_base = offset & PAGE_MASK;
- gtt_page_offset = offset_in_page(offset);
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = offset_in_page(data_ptr);
-
- page_length = remain;
- if ((gtt_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - gtt_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
-
- slow_kernel_write(dev_priv->mm.gtt_mapping,
- gtt_page_base, gtt_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
-
- remain -= page_length;
- offset += page_length;
- data_ptr += page_length;
- }
+ if (unlikely(page_do_bit17_swizzling))
+ return -EINVAL;
-out_unpin_pages:
- for (i = 0; i < pinned_pages; i++)
- page_cache_release(user_pages[i]);
- drm_free_large(user_pages);
+ vaddr = kmap_atomic(page);
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ drm_clflush_virt_range(vaddr + shmem_page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
return ret;
}
-/**
- * This is the fast shmem pwrite path, which attempts to directly
- * copy_from_user into the kmapped pages backing the object.
- */
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+ char __user *user_data,
+ bool page_do_bit17_swizzling,
+ bool needs_clflush_before,
+ bool needs_clflush_after)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- int page_offset, page_length;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
-
- offset = args->offset;
- obj->dirty = 1;
-
- while (remain > 0) {
- struct page *page;
- char *vaddr;
- int ret;
-
- /* Operation in this page
- *
- * page_offset = offset within page
- * page_length = bytes to copy for this page
- */
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ char *vaddr;
+ int ret;
- vaddr = kmap_atomic(page);
- ret = __copy_from_user_inatomic(vaddr + page_offset,
+ vaddr = kmap(page);
+ if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ if (page_do_bit17_swizzling)
+ ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
user_data,
page_length);
- kunmap_atomic(vaddr);
-
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
-
- /* If we get a fault while copying data, then (presumably) our
- * source page isn't available. Return the error and we'll
- * retry in the slow path.
- */
- if (ret)
- return -EFAULT;
-
- remain -= page_length;
- user_data += page_length;
- offset += page_length;
- }
+ else
+ ret = __copy_from_user(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ if (needs_clflush_after)
+ shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+ page_length,
+ page_do_bit17_swizzling);
+ kunmap(page);
- return 0;
+ return ret;
}
-/**
- * This is the fallback shmem pwrite path, which uses get_user_pages to pin
- * the memory and maps it using kmap_atomic for copying.
- *
- * This avoids taking mmap_sem for faulting on the user's address while the
- * struct_mutex is held.
- */
static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file)
+i915_gem_shmem_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
- int shmem_page_offset, page_length, ret;
+ int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ int hit_slowpath = 0;
+ int needs_clflush_after = 0;
+ int needs_clflush_before = 0;
+ int release_page;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ /* If we're not in the cpu write domain, set ourselves into the gtt
+ * write domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will use the data
+ * right away and we therefore have to clflush anyway. */
+ if (obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_after = 1;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+ }
+ /* The same trick applies for invalidating partially written cachelines
+ * before writing. */
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
+ && obj->cache_level == I915_CACHE_NONE)
+ needs_clflush_before = 1;
+
offset = args->offset;
obj->dirty = 1;
- mutex_unlock(&dev->struct_mutex);
-
while (remain > 0) {
struct page *page;
- char *vaddr;
+ int partial_cacheline_write;
/* Operation in this page
*
@@ -822,29 +771,51 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
+ /* If we don't overwrite a cacheline completely we need to be
+ * careful to have up-to-date data by first clflushing. Don't
+ * overcomplicate things and flush the entire range. */
+ partial_cacheline_write = needs_clflush_before &&
+ ((shmem_page_offset | page_length)
+ & (boot_cpu_data.x86_clflush_size - 1));
+
+ if (obj->pages) {
+ page = obj->pages[offset >> PAGE_SHIFT];
+ release_page = 0;
+ } else {
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+ release_page = 1;
}
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
- vaddr = kmap(page);
- if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
- user_data,
- page_length);
- else
- ret = __copy_from_user(vaddr + shmem_page_offset,
- user_data,
- page_length);
- kunmap(page);
+ ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+ if (ret == 0)
+ goto next_page;
+
+ hit_slowpath = 1;
+ page_cache_get(page);
+ mutex_unlock(&dev->struct_mutex);
+ ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+ user_data, page_do_bit17_swizzling,
+ partial_cacheline_write,
+ needs_clflush_after);
+
+ mutex_lock(&dev->struct_mutex);
+ page_cache_release(page);
+next_page:
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ if (release_page)
+ page_cache_release(page);
if (ret) {
ret = -EFAULT;
@@ -857,17 +828,21 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
}
out:
- mutex_lock(&dev->struct_mutex);
- /* Fixup: Kill any reinstated backing storage pages */
- if (obj->madv == __I915_MADV_PURGED)
- i915_gem_object_truncate(obj);
- /* and flush dirty cachelines in case the object isn't in the cpu write
- * domain anymore. */
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ if (hit_slowpath) {
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ /* and flush dirty cachelines in case the object isn't in the cpu write
+ * domain anymore. */
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ intel_gtt_chipset_flush();
+ }
}
+ if (needs_clflush_after)
+ intel_gtt_chipset_flush();
+
return ret;
}
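A worked instance of the partial_cacheline_write test computed in the loop above, assuming boot_cpu_data.x86_clflush_size == 64; the helper below is a hypothetical restatement, not part of the patch:

/* (0x40 | 0x80) & 63 == 0    -> the write covers whole cachelines, no pre-flush
 * (0x50 | 0x20) & 63 == 0x30 -> the write is partial, pre-flush the destination
 */
static inline bool write_is_cacheline_partial(int offset, int length, int clflush_size)
{
	return ((offset | length) & (clflush_size - 1)) != 0;
}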
@@ -892,8 +867,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
+ ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
if (ret)
return -EFAULT;
@@ -914,8 +889,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ /* prime objects have no backing filp to GEM pread/pwrite
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ ret = -EINVAL;
+ goto out;
+ }
+
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+ ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -928,42 +912,18 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
if (obj->gtt_space &&
+ obj->cache_level == I915_CACHE_NONE &&
+ obj->tiling_mode == I915_TILING_NONE &&
+ obj->map_and_fenceable &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_object_pin(obj, 0, true);
- if (ret)
- goto out;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- goto out_unpin;
-
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin;
-
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
-
-out_unpin:
- i915_gem_object_unpin(obj);
-
- if (ret != -EFAULT)
- goto out;
- /* Fall through to the shmfs paths because the gtt paths might
- * fail with non-page-backed user pointers (e.g. gtt mappings
- * when moving data between textures). */
+ /* Note that the gtt paths might fail with non-page-backed user
+ * pointers (e.g. gtt mappings when moving data between
+ * textures). Fall back to the shmem path in that case. */
}
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
-
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
if (ret == -EFAULT)
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file);
out:
drm_gem_object_unreference(&obj->base);
@@ -986,9 +946,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
uint32_t write_domain = args->write_domain;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
/* Only handle setting domains to types used by the CPU. */
if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
@@ -1042,9 +999,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret = 0;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -1080,13 +1034,18 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
unsigned long addr;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
+ /* prime objects have no backing filp to GEM mmap
+ * pages from.
+ */
+ if (!obj->filp) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINVAL;
+ }
+
addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
@@ -1151,10 +1110,10 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
- if (obj->tiling_mode == I915_TILING_NONE)
- ret = i915_gem_object_put_fence(obj);
- else
- ret = i915_gem_object_get_fence(obj, NULL);
+ if (!obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ ret = i915_gem_object_get_fence(obj);
if (ret)
goto unlock;
@@ -1308,9 +1267,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
struct drm_i915_gem_object *obj;
int ret;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -1368,14 +1324,10 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
-
-static int
+int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
{
@@ -1384,6 +1336,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
struct inode *inode;
struct page *page;
+ if (obj->pages || obj->sg_table)
+ return 0;
+
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*/
@@ -1425,6 +1380,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int i;
+ if (!obj->pages)
+ return;
+
BUG_ON(obj->madv == __I915_MADV_PURGED);
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -1473,7 +1431,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
- obj->last_fenced_ring = ring;
/* Bump MRU to take account of the delayed flush */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
@@ -1512,15 +1469,11 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (obj->pin_count != 0)
- list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
- else
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
BUG_ON(!list_empty(&obj->gpu_write_list));
BUG_ON(!obj->active);
obj->ring = NULL;
- obj->last_fenced_ring = NULL;
i915_gem_object_move_off_active(obj);
obj->fenced_gpu_access = false;
@@ -1546,6 +1499,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
inode = obj->base.filp->f_path.dentry->d_inode;
shmem_truncate_range(inode, 0, (loff_t)-1);
+ if (obj->base.map_list.map)
+ drm_gem_free_mmap_offset(&obj->base);
+
obj->madv = __I915_MADV_PURGED;
}
@@ -1711,30 +1667,29 @@ static void i915_gem_reset_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- struct drm_i915_gem_object *obj = reg->obj;
- if (!obj)
- continue;
+ i915_gem_write_fence(dev, i, NULL);
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
+ if (reg->obj)
+ i915_gem_object_fence_lost(reg->obj);
- reg->obj->fence_reg = I915_FENCE_REG_NONE;
- reg->obj->fenced_gpu_access = false;
- reg->obj->last_fenced_seqno = 0;
- reg->obj->last_fenced_ring = NULL;
- i915_gem_clear_fence_reg(dev, reg);
+ reg->pin_count = 0;
+ reg->obj = NULL;
+ INIT_LIST_HEAD(&reg->lru_list);
}
+
+ INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring;
int i;
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_lists(dev_priv, ring);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
@@ -1839,24 +1794,11 @@ void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
- if (!list_empty(&dev_priv->mm.deferred_free_list)) {
- struct drm_i915_gem_object *obj, *next;
-
- /* We must be careful that during unbind() we do not
- * accidentally infinitely recurse into retire requests.
- * Currently:
- * retire -> free -> unbind -> wait -> retire_ring
- */
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.deferred_free_list,
- mm_list)
- i915_gem_free_object_tail(obj);
- }
-
- for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_retire_requests_ring(ring);
}
static void
@@ -1864,6 +1806,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
+ struct intel_ring_buffer *ring;
bool idle;
int i;
@@ -1883,9 +1826,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
* objects indefinitely.
*/
idle = true;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
if (!list_empty(&ring->gpu_write_list)) {
struct drm_i915_gem_request *request;
int ret;
@@ -1907,20 +1848,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno,
- bool do_retire)
+static int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- u32 ier;
- int ret = 0;
-
- BUG_ON(seqno == 0);
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
@@ -1935,6 +1866,20 @@ i915_wait_request(struct intel_ring_buffer *ring,
return recovery_complete ? -EIO : -EAGAIN;
}
+ return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+ int ret = 0;
+
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
@@ -1948,54 +1893,67 @@ i915_wait_request(struct intel_ring_buffer *ring,
return ret;
}
- seqno = request->seqno;
+ BUG_ON(seqno != request->seqno);
}
- if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- if (HAS_PCH_SPLIT(ring->dev))
- ier = I915_READ(DEIER) | I915_READ(GTIER);
- else
- ier = I915_READ(IER);
- if (!ier) {
- DRM_ERROR("something (likely vbetool) disabled "
- "interrupts, re-enabling\n");
- ring->dev->driver->irq_preinstall(ring->dev);
- ring->dev->driver->irq_postinstall(ring->dev);
- }
+ return ret;
+}
- trace_i915_gem_request_wait_begin(ring, seqno);
-
- ring->waiting_seqno = seqno;
- if (ring->irq_get(ring)) {
- if (dev_priv->mm.interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
-
- ring->irq_put(ring);
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
- ret = -EBUSY;
- ring->waiting_seqno = 0;
-
- trace_i915_gem_request_wait_end(ring, seqno);
- }
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+ bool interruptible)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int ret = 0;
+
+ if (i915_seqno_passed(ring->get_seqno(ring), seqno))
+ return 0;
+
+ trace_i915_gem_request_wait_begin(ring, seqno);
+ if (WARN_ON(!ring->irq_get(ring)))
+ return -ENODEV;
+
+#define EXIT_COND \
+ (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
+ atomic_read(&dev_priv->mm.wedged))
+
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ EXIT_COND);
+ else
+ wait_event(ring->irq_queue, EXIT_COND);
+
+ ring->irq_put(ring);
+ trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+ return ret;
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ int ret = 0;
+
+ BUG_ON(seqno == 0);
+
+ ret = i915_gem_check_wedge(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0 && do_retire)
- i915_gem_retire_requests_ring(ring);
-
return ret;
}
@@ -2017,15 +1975,58 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
- true);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
+ i915_gem_retire_requests_ring(obj->ring);
}
return 0;
}
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+ return i915_gem_object_wait_rendering(obj);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
+ ret = i915_gem_check_olr(obj->ring, seqno);
+ if (ret)
+ return ret;
+
+ ret = to->sync_to(to, from, seqno);
+ if (!ret)
+ from->sync_seqno[idx] = seqno;
+
+ return ret;
+}
+
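A hypothetical caller sketch for the new helper above; prepare_obj_for_ring() and the surrounding flow are assumptions, not part of this patch:

/* Make obj coherent with target_ring before emitting commands that read it.
 * With semaphores enabled this turns into a GPU-side wait via sync_to();
 * otherwise it degrades to a CPU wait on the last rendering seqno.
 */
static int prepare_obj_for_ring(struct drm_i915_gem_object *obj,
				struct intel_ring_buffer *target_ring)
{
	int ret;

	ret = i915_gem_object_sync(obj, target_ring);
	if (ret)
		return ret;

	/* ... emit commands on target_ring that reference obj ... */
	return 0;
}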
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
@@ -2062,13 +2063,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (obj->gtt_space == NULL)
return 0;
- if (obj->pin_count != 0) {
- DRM_ERROR("Attempting to unbind pinned buffer\n");
- return -EINVAL;
- }
+ if (obj->pin_count)
+ return -EBUSY;
ret = i915_gem_object_finish_gpu(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
@@ -2095,16 +2094,18 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
- if (ret == -ERESTARTSYS)
+ if (ret)
return ret;
trace_i915_gem_object_unbind(obj);
- i915_gem_gtt_unbind_object(obj);
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_unbind_object(obj);
if (obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
obj->has_aliasing_ppgtt_mapping = 0;
}
+ i915_gem_gtt_finish_object(obj);
i915_gem_object_put_pages_gtt(obj);
@@ -2145,7 +2146,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@@ -2159,208 +2160,201 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
- do_retire);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret, i;
/* Flush everything onto the inactive list. */
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+ for_each_ring(ring, dev_priv, i) {
+ ret = i915_ring_idle(ring);
if (ret)
return ret;
+
+ /* Is the device fubar? */
+ if (WARN_ON(!list_empty(&ring->gpu_write_list)))
+ return -EBUSY;
}
return 0;
}
-static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj->stride / 128) - 1) <<
- SANDYBRIDGE_FENCE_PITCH_SHIFT;
-
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 6);
- if (ret)
- return ret;
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
- intel_ring_emit(pipelined, (u32)val);
- intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
- intel_ring_emit(pipelined, (u32)(val >> 32));
- intel_ring_advance(pipelined);
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
} else
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+ val = 0;
- return 0;
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}
-static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
- 0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
- val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I965_FENCE_TILING_Y_SHIFT;
- val |= I965_FENCE_REG_VALID;
-
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 6);
- if (ret)
- return ret;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
- intel_ring_emit(pipelined, (u32)val);
- intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
- intel_ring_emit(pipelined, (u32)(val >> 32));
- intel_ring_advance(pipelined);
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
} else
- I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+ val = 0;
- return 0;
+ I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
+ POSTING_READ(FENCE_REG_965_0 + reg * 8);
}
-static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- u32 fence_reg, val, pitch_val;
- int tile_width;
-
- if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
- (size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- obj->gtt_offset, obj->map_and_fenceable, size))
- return -EINVAL;
+ u32 val;
- if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_width = 128;
- else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = obj->stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
-
- val = obj->gtt_offset;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
-
- fence_reg = obj->fence_reg;
- if (fence_reg < 8)
- fence_reg = FENCE_REG_830_0 + fence_reg * 4;
- else
- fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ int pitch_val;
+ int tile_width;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 4);
- if (ret)
- return ret;
+ WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size);
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(pipelined, fence_reg);
- intel_ring_emit(pipelined, val);
- intel_ring_advance(pipelined);
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ /* Note: pitch better be a power of two tile widths */
+ pitch_val = obj->stride / tile_width;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
} else
- I915_WRITE(fence_reg, val);
+ val = 0;
- return 0;
+ if (reg < 8)
+ reg = FENCE_REG_830_0 + reg * 4;
+ else
+ reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
-static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u32 size = obj->gtt_space->size;
- int regnum = obj->fence_reg;
uint32_t val;
- uint32_t pitch_val;
- if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
- (size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
- obj->gtt_offset, size))
- return -EINVAL;
-
- pitch_val = obj->stride / 128;
- pitch_val = ffs(pitch_val) - 1;
-
- val = obj->gtt_offset;
- if (obj->tiling_mode == I915_TILING_Y)
- val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I830_FENCE_SIZE_BITS(size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
- val |= I830_FENCE_REG_VALID;
+ if (obj) {
+ u32 size = obj->gtt_space->size;
+ uint32_t pitch_val;
+
+ WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size);
+
+ pitch_val = obj->stride / 128;
+ pitch_val = ffs(pitch_val) - 1;
+
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
+ val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(size);
+ val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_REG_VALID;
+ } else
+ val = 0;
- if (pipelined) {
- int ret = intel_ring_begin(pipelined, 4);
- if (ret)
- return ret;
+ I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+ POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
- intel_ring_emit(pipelined, MI_NOOP);
- intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
- intel_ring_emit(pipelined, val);
- intel_ring_advance(pipelined);
- } else
- I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+ struct drm_i915_gem_object *obj)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+ case 5:
+ case 4: i965_write_fence_reg(dev, reg, obj); break;
+ case 3: i915_write_fence_reg(dev, reg, obj); break;
+ case 2: i830_write_fence_reg(dev, reg, obj); break;
+ default: break;
+ }
+}
- return 0;
+static inline int fence_number(struct drm_i915_private *dev_priv,
+ struct drm_i915_fence_reg *fence)
+{
+ return fence - dev_priv->fence_regs;
}
-static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+ struct drm_i915_fence_reg *fence,
+ bool enable)
{
- return i915_seqno_passed(ring->get_seqno(ring), seqno);
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ int reg = fence_number(dev_priv, fence);
+
+ i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+ if (enable) {
+ obj->fence_reg = reg;
+ fence->obj = obj;
+ list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+ } else {
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ fence->obj = NULL;
+ list_del_init(&fence->lru_list);
+ }
}
static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->last_fenced_ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2369,18 +2363,12 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
obj->fenced_gpu_access = false;
}
- if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- obj->last_fenced_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
- obj->last_fenced_seqno,
- true);
- if (ret)
- return ret;
- }
+ if (obj->last_fenced_seqno) {
+ ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+ if (ret)
+ return ret;
obj->last_fenced_seqno = 0;
- obj->last_fenced_ring = NULL;
}
/* Ensure that all CPU reads are completed before installing a fence
@@ -2395,34 +2383,29 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->tiling_mode)
- i915_gem_release_mmap(obj);
-
- ret = i915_gem_object_flush_fence(obj, NULL);
+ ret = i915_gem_object_flush_fence(obj);
if (ret)
return ret;
- if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
- WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
- i915_gem_clear_fence_reg(obj->base.dev,
- &dev_priv->fence_regs[obj->fence_reg]);
+ if (obj->fence_reg == I915_FENCE_REG_NONE)
+ return 0;
- obj->fence_reg = I915_FENCE_REG_NONE;
- }
+ i915_gem_object_update_fence(obj,
+ &dev_priv->fence_regs[obj->fence_reg],
+ false);
+ i915_gem_object_fence_lost(obj);
return 0;
}
static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev,
- struct intel_ring_buffer *pipelined)
+i915_find_fence_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_fence_reg *reg, *first, *avail;
+ struct drm_i915_fence_reg *reg, *avail;
int i;
/* First try to find a free reg */
@@ -2440,204 +2423,77 @@ i915_find_fence_reg(struct drm_device *dev,
return NULL;
/* None available, try to steal one or wait for a user to finish */
- avail = first = NULL;
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
if (reg->pin_count)
continue;
- if (first == NULL)
- first = reg;
-
- if (!pipelined ||
- !reg->obj->last_fenced_ring ||
- reg->obj->last_fenced_ring == pipelined) {
- avail = reg;
- break;
- }
+ return reg;
}
- if (avail == NULL)
- avail = first;
-
- return avail;
+ return NULL;
}
/**
- * i915_gem_object_get_fence - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up fencing for an object
* @obj: object to map through a fence reg
- * @pipelined: ring on which to queue the change, or NULL for CPU access
- * @interruptible: must we wait uninterruptibly for the register to retire?
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
- *
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
*/
int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
- /* XXX disable pipelining. There are bugs. Shocking. */
- pipelined = NULL;
+ /* Have we updated the tiling parameters upon the object and so
+ * will need to serialise the write to the associated fence register?
+ */
+ if (obj->fence_dirty) {
+ ret = i915_gem_object_flush_fence(obj);
+ if (ret)
+ return ret;
+ }
/* Just update our place in the LRU if our fence is getting reused. */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj->fence_reg];
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
-
- if (obj->tiling_changed) {
- ret = i915_gem_object_flush_fence(obj, pipelined);
- if (ret)
- return ret;
-
- if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
- pipelined = NULL;
-
- if (pipelined) {
- reg->setup_seqno =
- i915_gem_next_request_seqno(pipelined);
- obj->last_fenced_seqno = reg->setup_seqno;
- obj->last_fenced_ring = pipelined;
- }
-
- goto update;
+ if (!obj->fence_dirty) {
+ list_move_tail(&reg->lru_list,
+ &dev_priv->mm.fence_list);
+ return 0;
}
+ } else if (enable) {
+ reg = i915_find_fence_reg(dev);
+ if (reg == NULL)
+ return -EDEADLK;
- if (!pipelined) {
- if (reg->setup_seqno) {
- if (!ring_passed_seqno(obj->last_fenced_ring,
- reg->setup_seqno)) {
- ret = i915_wait_request(obj->last_fenced_ring,
- reg->setup_seqno,
- true);
- if (ret)
- return ret;
- }
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
- reg->setup_seqno = 0;
- }
- } else if (obj->last_fenced_ring &&
- obj->last_fenced_ring != pipelined) {
- ret = i915_gem_object_flush_fence(obj, pipelined);
+ ret = i915_gem_object_flush_fence(old);
if (ret)
return ret;
- }
-
- return 0;
- }
-
- reg = i915_find_fence_reg(dev, pipelined);
- if (reg == NULL)
- return -EDEADLK;
-
- ret = i915_gem_object_flush_fence(obj, pipelined);
- if (ret)
- return ret;
-
- if (reg->obj) {
- struct drm_i915_gem_object *old = reg->obj;
-
- drm_gem_object_reference(&old->base);
-
- if (old->tiling_mode)
- i915_gem_release_mmap(old);
- ret = i915_gem_object_flush_fence(old, pipelined);
- if (ret) {
- drm_gem_object_unreference(&old->base);
- return ret;
+ i915_gem_object_fence_lost(old);
}
+ } else
+ return 0;
- if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
- pipelined = NULL;
-
- old->fence_reg = I915_FENCE_REG_NONE;
- old->last_fenced_ring = pipelined;
- old->last_fenced_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
-
- drm_gem_object_unreference(&old->base);
- } else if (obj->last_fenced_seqno == 0)
- pipelined = NULL;
-
- reg->obj = obj;
- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
- obj->fence_reg = reg - dev_priv->fence_regs;
- obj->last_fenced_ring = pipelined;
-
- reg->setup_seqno =
- pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
- obj->last_fenced_seqno = reg->setup_seqno;
-
-update:
- obj->tiling_changed = false;
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- ret = sandybridge_write_fence_reg(obj, pipelined);
- break;
- case 5:
- case 4:
- ret = i965_write_fence_reg(obj, pipelined);
- break;
- case 3:
- ret = i915_write_fence_reg(obj, pipelined);
- break;
- case 2:
- ret = i830_write_fence_reg(obj, pipelined);
- break;
- }
-
- return ret;
-}
-
-/**
- * i915_gem_clear_fence_reg - clear out fence register info
- * @obj: object to clear
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj.
- */
-static void
-i915_gem_clear_fence_reg(struct drm_device *dev,
- struct drm_i915_fence_reg *reg)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t fence_reg = reg - dev_priv->fence_regs;
-
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
- break;
- case 5:
- case 4:
- I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
- break;
- case 3:
- if (fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
- else
- case 2:
- fence_reg = FENCE_REG_830_0 + fence_reg * 4;
-
- I915_WRITE(fence_reg, 0);
- break;
- }
+ i915_gem_object_update_fence(obj, reg, enable);
+ obj->fence_dirty = false;
- list_del_init(&reg->lru_list);
- reg->obj = NULL;
- reg->setup_seqno = 0;
- reg->pin_count = 0;
+ return 0;
}
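
The fence allocator above first reuses the object's current register, otherwise looks for a free slot and falls back to stealing the least-recently-used one. A minimal standalone sketch of that find-or-steal policy, using made-up names (slot, owner, lru) rather than the driver's real structures:

	/* Illustrative sketch only: a standalone model of the "find a free
	 * slot, else steal the least-recently-used one" policy described
	 * above.  The names here are hypothetical, not the driver's. */
	#include <stdio.h>

	#define NUM_SLOTS 4

	struct slot {
		int owner;	/* -1 when the slot is free */
		unsigned lru;	/* smaller value == older use */
	};

	static struct slot slots[NUM_SLOTS];
	static unsigned tick;

	static int get_slot(int owner)
	{
		int i, victim = 0;

		for (i = 0; i < NUM_SLOTS; i++) {
			if (slots[i].owner == -1) {	/* free slot: take it */
				victim = i;
				goto claim;
			}
			if (slots[i].lru < slots[victim].lru)
				victim = i;		/* remember the oldest */
		}
		/* no free slot: steal the least recently used one */
	claim:
		slots[victim].owner = owner;
		slots[victim].lru = ++tick;
		return victim;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < NUM_SLOTS; i++)
			slots[i].owner = -1;

		for (i = 0; i < 6; i++)
			printf("object %d -> slot %d\n", i, get_slot(i));
		return 0;
	}
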
/**
@@ -2749,7 +2605,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return ret;
}
- ret = i915_gem_gtt_bind_object(obj);
+ ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
@@ -2761,6 +2617,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
goto search_free;
}
+ if (!dev_priv->mm.aliasing_ppgtt)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
@@ -2878,6 +2737,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2918,6 +2778,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_read_domains,
old_write_domain);
+ /* And bump the LRU for this access */
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
return 0;
}
@@ -2953,7 +2817,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret;
}
- i915_gem_gtt_rebind_object(obj, cache_level);
+ if (obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(obj, cache_level);
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
@@ -2990,11 +2855,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
* any flushes to be pipelined (for pageflips).
- *
- * For the display plane, we want to be in the GTT but out of any write
- * domains. So in many ways this looks like set_to_gtt_domain() apart from the
- * ability to pipeline the waits, pinning and any additional subtleties
- * that may differentiate the display plane from ordinary buffers.
*/
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -3009,8 +2869,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return ret;
if (pipelined != obj->ring) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret == -ERESTARTSYS)
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
return ret;
}
@@ -3082,7 +2942,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
-static int
+int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
@@ -3095,17 +2955,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
+ if (write || obj->pending_gpu_write) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
i915_gem_object_flush_gtt_write_domain(obj);
- /* If we have a partially-valid cache of the object in the CPU,
- * finish invalidating it and free the per-page flags.
- */
- i915_gem_object_set_to_full_cpu_read_domain(obj);
-
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3136,113 +2993,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
-/**
- * Moves the object from a partially CPU read to a full one.
- *
- * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
- * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
- */
-static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
-{
- if (!obj->page_cpu_valid)
- return;
-
- /* If we're partially in the CPU read domain, finish moving it in.
- */
- if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
- int i;
-
- for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
- if (obj->page_cpu_valid[i])
- continue;
- drm_clflush_pages(obj->pages + i, 1);
- }
- }
-
- /* Free the page_cpu_valid mappings which are now stale, whether
- * or not we've got I915_GEM_DOMAIN_CPU.
- */
- kfree(obj->page_cpu_valid);
- obj->page_cpu_valid = NULL;
-}
-
-/**
- * Set the CPU read domain on a range of the object.
- *
- * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
- * not entirely valid. The page_cpu_valid member of the object flags which
- * pages have been flushed, and will be respected by
- * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
- * of the whole object.
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset, uint64_t size)
-{
- uint32_t old_read_domains;
- int i, ret;
-
- if (offset == 0 && size == obj->base.size)
- return i915_gem_object_set_to_cpu_domain(obj, 0);
-
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
-
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
-
- i915_gem_object_flush_gtt_write_domain(obj);
-
- /* If we're already fully in the CPU read domain, we're done. */
- if (obj->page_cpu_valid == NULL &&
- (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
- return 0;
-
- /* Otherwise, create/clear the per-page CPU read domain flag if we're
- * newly adding I915_GEM_DOMAIN_CPU
- */
- if (obj->page_cpu_valid == NULL) {
- obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj->page_cpu_valid == NULL)
- return -ENOMEM;
- } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
-
- /* Flush the cache on any pages that are still invalid from the CPU's
- * perspective.
- */
- for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
- i++) {
- if (obj->page_cpu_valid[i])
- continue;
-
- drm_clflush_pages(obj->pages + i, 1);
-
- obj->page_cpu_valid[i] = 1;
- }
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
-
- old_read_domains = obj->base.read_domains;
- obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->base.write_domain);
-
- return 0;
-}
-
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3280,28 +3030,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- /* And wait for the seqno passing without holding any locks and
- * causing extra latency for others. This is safe as the irq
- * generation is designed to be run atomically and so is
- * lockless.
- */
- if (ring->irq_get(ring)) {
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->irq_put(ring);
-
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000)) {
- ret = -EBUSY;
- }
- }
-
+ ret = __wait_seqno(ring, seqno, true);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
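
The throttle path above now delegates to __wait_seqno; the idea is to pick the newest request emitted more than 20 ms ago and wait for it to complete. A standalone model of that selection, assuming a simple array of (seqno, timestamp) pairs rather than the real request list:

	/* Illustrative sketch only: choose the seqno to throttle on, i.e.
	 * the most recent request older than a 20 ms window.  All names
	 * and values here are made up for the example. */
	#include <stdio.h>

	struct request {
		unsigned seqno;
		long emitted_ms;
	};

	static unsigned throttle_seqno(const struct request *reqs, int n, long now_ms)
	{
		unsigned seqno = 0;
		int i;

		for (i = 0; i < n; i++) {
			if (now_ms - reqs[i].emitted_ms <= 20)
				break;		/* everything newer is too recent */
			seqno = reqs[i].seqno;
		}
		return seqno;
	}

	int main(void)
	{
		struct request reqs[] = {
			{ 1, 0 }, { 2, 15 }, { 3, 40 }, { 4, 55 },
		};

		printf("wait for seqno %u\n",
		       throttle_seqno(reqs, 4, 60));	/* prints 2 */
		return 0;
	}
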
@@ -3313,12 +3042,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
- WARN_ON(i915_verify_lists(dev));
if (obj->gtt_space != NULL) {
if ((alignment && obj->gtt_offset & (alignment - 1)) ||
@@ -3343,34 +3069,23 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return ret;
}
- if (obj->pin_count++ == 0) {
- if (!obj->active)
- list_move_tail(&obj->mm_list,
- &dev_priv->mm.pinned_list);
- }
+ if (!obj->has_global_gtt_mapping && map_and_fenceable)
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+ obj->pin_count++;
obj->pin_mappable |= map_and_fenceable;
- WARN_ON(i915_verify_lists(dev));
return 0;
}
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- WARN_ON(i915_verify_lists(dev));
BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL);
- if (--obj->pin_count == 0) {
- if (!obj->active)
- list_move_tail(&obj->mm_list,
- &dev_priv->mm.inactive_list);
+ if (--obj->pin_count == 0)
obj->pin_mappable = false;
- }
- WARN_ON(i915_verify_lists(dev));
}
int
@@ -3494,20 +3209,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
- } else if (obj->ring->outstanding_lazy_request ==
- obj->last_rendering_seqno) {
- struct drm_i915_gem_request *request;
-
- /* This ring is not being cleared by active usage,
- * so emit a request to do so.
- */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request) {
- ret = i915_add_request(obj->ring, NULL, request);
- if (ret)
- kfree(request);
- } else
- ret = -ENOMEM;
+ } else {
+ ret = i915_gem_check_olr(obj->ring,
+ obj->last_rendering_seqno);
}
/* Update the active list for the hardware's current position.
@@ -3587,6 +3291,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
+ u32 mask;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3597,8 +3302,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ }
+
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ mapping_set_gfp_mask(mapping, mask);
i915_gem_info_add_obj(dev_priv, size);
@@ -3643,46 +3355,42 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- ret = i915_gem_object_unbind(obj);
- if (ret == -ERESTARTSYS) {
- list_move(&obj->mm_list,
- &dev_priv->mm.deferred_free_list);
- return;
- }
trace_i915_gem_object_destroy(obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg_table);
+
+ if (obj->phys_obj)
+ i915_gem_detach_phys_object(dev, obj);
+
+ obj->pin_count = 0;
+ if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ WARN_ON(i915_gem_object_unbind(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
if (obj->base.map_list.map)
drm_gem_free_mmap_offset(&obj->base);
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
- kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- struct drm_device *dev = obj->base.dev;
-
- while (obj->pin_count > 0)
- i915_gem_object_unpin(obj);
-
- if (obj->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
-
- i915_gem_free_object_tail(obj);
-}
-
int
i915_gem_idle(struct drm_device *dev)
{
@@ -3696,20 +3404,16 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
- if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev, false);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_gem_evict_everything(dev, false);
i915_gem_reset_fences(dev);
@@ -3747,9 +3451,9 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else
- I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -3787,21 +3491,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
- uint32_t ecochk = I915_READ(GAM_ECOCHK);
+ uint32_t ecochk, gab_ctl, ecobits;
+
+ ecobits = I915_READ(GAC_ECO_BITS);
+ I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+ gab_ctl = I915_READ(GAB_CTL);
+ I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+ ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
- ring = &dev_priv->ring[i];
-
+ for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
- GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
@@ -3845,14 +3555,80 @@ cleanup_render_ring:
return ret;
}
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+ if (i915_enable_ppgtt >= 0)
+ return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+ /* Disable ppgtt on SNB if VT-d is on. */
+ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ return false;
+#endif
+
+ return true;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long gtt_size, mappable_size;
+ int ret;
+
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ mutex_lock(&dev->struct_mutex);
+ if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+ i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ } else {
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch
+ * page. There are a number of places where the hardware
+ * apparently prefetches past the end of the object, and we've
+ * seen multiple hangs with the GPU head pointer stuck in a
+ * batchbuffer bound at the last page of the aperture. One page
+ * should be enough to keep any prefetching inside of the
+ * aperture.
+ */
+ i915_gem_init_global_gtt(dev, 0, mappable_size,
+ gtt_size);
+ }
+
+ ret = i915_gem_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
+ return ret;
+ }
+
+ /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->dri1.allow_batchbuffer = 1;
+ return 0;
+}
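
i915_gem_init above carves the PPGTT page-directory entries out of the global GTT PTEs before handing the rest to GEM. A small arithmetic sketch with assumed example values (512 PD entries, a 256 MiB GTT); these are not figures read from real hardware:

	/* Illustrative sketch only: the aperture-size arithmetic used when
	 * aliasing PPGTT is enabled, with example sizes. */
	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PD_ENTRIES	512UL	/* assumed example entry count */

	int main(void)
	{
		unsigned long gtt_total = 256UL << 20;	/* assume a 256 MiB GTT */
		unsigned long ppgtt_pd  = PD_ENTRIES * PAGE_SIZE;

		printf("PD entries reserve %lu KiB at the top of the GTT\n",
		       ppgtt_pd >> 10);
		printf("aperture managed by GEM shrinks to %lu MiB\n",
		       (gtt_total - ppgtt_pd) >> 20);
		return 0;
	}
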
+
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int i;
- for (i = 0; i < I915_NUM_RINGS; i++)
- intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+ for_each_ring(ring, dev_priv, i)
+ intel_cleanup_ring_buffer(ring);
}
int
@@ -3860,7 +3636,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
@@ -3882,10 +3658,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- for (i = 0; i < I915_NUM_RINGS; i++) {
- BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
- BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
- }
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -3944,9 +3716,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
@@ -3958,12 +3728,8 @@ i915_gem_load(struct drm_device *dev)
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
- u32 tmp = I915_READ(MI_ARB_STATE);
- if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
- /* arb state is a masked write, so set bit + bit in mask */
- tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
- I915_WRITE(MI_ARB_STATE, tmp);
- }
+ I915_WRITE(MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
@@ -3978,9 +3744,7 @@ i915_gem_load(struct drm_device *dev)
dev_priv->num_fence_regs = 8;
/* Initialize fence registers to zero */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
- }
+ i915_gem_reset_fences(dev);
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -4268,7 +4032,7 @@ rescan:
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- if (i915_gpu_idle(dev, true) == 0)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index cc93cac242d6..a4f6aaabca99 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -114,22 +114,6 @@ i915_verify_lists(struct drm_device *dev)
}
}
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("freed pinned %p\n", obj);
- err++;
- break;
- } else if (!obj->pin_count || obj->active ||
- (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
- DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
- obj,
- obj->pin_count, obj->active,
- obj->base.write_domain);
- err++;
- }
- }
-
return warned = err;
}
#endif /* WATCH_INACTIVE */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 000000000000..aa308e1337db
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *sg = NULL;
+ int ret;
+ int nents;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret)
+ goto out;
+ }
+
+ /* link the pages into an SG then map the sg */
+ sg = drm_prime_pages_to_sg(obj->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+
+ if (obj->base.export_dma_buf == dma_buf) {
+ /* drop the reference on the export fd holds */
+ obj->base.export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(&obj->base);
+ }
+}
+
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (obj->dma_buf_vmapping) {
+ obj->vmapping_count++;
+ goto out_unlock;
+ }
+
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ }
+
+ obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+ if (!obj->dma_buf_vmapping) {
+ DRM_ERROR("failed to vmap object\n");
+ goto out_unlock;
+ }
+
+ obj->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return;
+
+ --obj->vmapping_count;
+ if (obj->vmapping_count == 0) {
+ vunmap(obj->dma_buf_vmapping);
+ obj->dma_buf_vmapping = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops = {
+ .map_dma_buf = i915_gem_map_dma_buf,
+ .unmap_dma_buf = i915_gem_unmap_dma_buf,
+ .release = i915_gem_dmabuf_release,
+ .kmap = i915_gem_dmabuf_kmap,
+ .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+ .kunmap = i915_gem_dmabuf_kunmap,
+ .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+ .mmap = i915_gem_dmabuf_mmap,
+ .vmap = i915_gem_dmabuf_vmap,
+ .vunmap = i915_gem_dmabuf_vunmap,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ return dma_buf_export(obj, &i915_dmabuf_ops,
+ obj->base.size, 0600);
+}
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct drm_i915_gem_object *obj;
+ int npages;
+ int size;
+ int ret;
+
+ /* is this one of our own objects? */
+ if (dma_buf->ops == &i915_dmabuf_ops) {
+ obj = dma_buf->priv;
+ /* is it from our device? */
+ if (obj->base.dev == dev) {
+ drm_gem_object_reference(&obj->base);
+ return &obj->base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ size = dma_buf->size;
+ npages = size / PAGE_SIZE;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail_unmap;
+ }
+
+ ret = drm_gem_private_object_init(dev, &obj->base, size);
+ if (ret) {
+ kfree(obj);
+ goto fail_unmap;
+ }
+
+ obj->sg_table = sg;
+ obj->base.import_attach = attach;
+
+ return &obj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
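
The import path above short-circuits when the dma-buf was exported by the same device: it just takes another reference instead of attaching and mapping. A standalone model of that check, using stand-in types rather than the real drm/dma-buf structures:

	/* Illustrative sketch only: the "is this our own buffer?" test made
	 * before attaching.  Every type here is a stand-in. */
	#include <stdio.h>

	struct fake_ops { int id; };
	struct fake_dev { int id; };
	struct fake_obj { struct fake_dev *dev; int refs; };
	struct fake_buf { const struct fake_ops *ops; void *priv; };

	static const struct fake_ops my_ops = { 1 };

	/* Returns the existing object when the buffer was exported by this
	 * device, otherwise NULL to signal that a real attach is needed. */
	static struct fake_obj *try_self_import(struct fake_buf *buf,
						struct fake_dev *dev)
	{
		if (buf->ops == &my_ops) {
			struct fake_obj *obj = buf->priv;
			if (obj->dev == dev) {
				obj->refs++;	/* just take another reference */
				return obj;
			}
		}
		return NULL;	/* foreign buffer: attach and map instead */
	}

	int main(void)
	{
		struct fake_dev dev = { 7 };
		struct fake_obj obj = { &dev, 1 };
		struct fake_buf buf = { &my_ops, &obj };

		printf("self import: %s, refs now %d\n",
		       try_self_import(&buf, &dev) ? "yes" : "no", obj.refs);
		return 0;
	}
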
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 21a82710f4b2..ae7c24e12e52 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -35,6 +35,9 @@
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
+ if (obj->pin_count)
+ return false;
+
list_add(&obj->exec_list, unwind);
return drm_mm_scan_add_block(obj->gtt_space);
}
@@ -90,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj->base.write_domain || obj->pin_count)
+ if (obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@@ -99,14 +102,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
- if (obj->pin_count)
- continue;
-
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (!obj->base.write_domain || obj->pin_count)
+ if (!obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@@ -166,8 +166,9 @@ int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ struct drm_i915_gem_object *obj, *next;
bool lists_empty;
+ int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
@@ -177,29 +178,24 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only);
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev, true);
+ /* The gpu_idle will flush everything in the write domain to the
+ * active list. Then we must move everything off the active list
+ * with retire requests.
+ */
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ i915_gem_retire_requests(dev);
- return i915_gem_evict_inactive(dev, purgeable_only);
-}
-
-/** Unbinds all inactive objects. */
-int
-i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj, *next;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ /* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
- int ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj));
}
}
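
The reworked eviction above relies on ordering: idling the GPU flushes outstanding writes onto the active list, retiring requests then empties it, and only afterwards are unpinned inactive objects unbound. A toy model of that ordering with plain counters, no real lists involved:

	/* Illustrative sketch only: the flush -> retire -> unbind ordering
	 * described above, modelled with counters instead of object lists. */
	#include <stdio.h>

	int main(void)
	{
		int flushing = 3, active = 2, inactive = 4, pinned = 1;

		/* 1: idling the GPU flushes pending writes onto the active list */
		active += flushing;
		flushing = 0;

		/* 2: retiring requests moves completed work to the inactive list */
		inactive += active;
		active = 0;

		/* 3: unbind everything on the inactive list except pinned objects */
		printf("objects unbound: %d (pinned objects left alone: %d)\n",
		       inactive - pinned, pinned);
		return 0;
	}
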
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index de431942ded4..974a9f1068a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -266,6 +266,12 @@ eb_destroy(struct eb_objects *eb)
kfree(eb);
}
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+ return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+ obj->cache_level != I915_CACHE_NONE);
+}
+
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
@@ -273,6 +279,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_i915_obj;
uint32_t target_offset;
int ret = -EINVAL;
@@ -281,7 +288,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (unlikely(target_obj == NULL))
return -ENOENT;
- target_offset = to_intel_bo(target_obj)->gtt_offset;
+ target_i915_obj = to_intel_bo(target_obj);
+ target_offset = target_i915_obj->gtt_offset;
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
@@ -352,11 +360,19 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
+ /* We can't wait for rendering with pagefaults disabled */
+ if (obj->active && in_atomic())
+ return -EFAULT;
+
reloc->delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ if (use_cpu_reloc(obj)) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
char *vaddr;
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ return ret;
+
vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
kunmap_atomic(vaddr);
@@ -365,11 +381,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
uint32_t __iomem *reloc_entry;
void __iomem *reloc_page;
- /* We can't wait for rendering with pagefaults disabled */
- if (obj->active && in_atomic())
- return -EFAULT;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@@ -383,6 +399,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
io_mapping_unmap_atomic(reloc_page);
}
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non_secure batchbuffers. */
+ if (unlikely(IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping)) {
+ i915_gem_gtt_bind_object(target_i915_obj,
+ target_i915_obj->cache_level);
+ }
+
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
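
The gen6 errata handling above binds the relocation target into the global GTT only for instruction-domain writes on SandyBridge when no global mapping exists yet. The predicate, restated as a standalone function with illustrative field names and an example domain value:

	/* Illustrative sketch only: the gen6 workaround condition quoted
	 * above; the field names and domain value are stand-ins. */
	#include <stdbool.h>
	#include <stdio.h>

	#define DOMAIN_INSTRUCTION	0x10	/* example value, not the real one */

	struct target {
		bool has_global_gtt_mapping;
	};

	/* True when the relocation target must additionally be bound into the
	 * global GTT so MI/pipe_control writes are not lost behind the PPGTT. */
	static bool needs_global_bind(int gen, unsigned write_domain,
				      const struct target *t)
	{
		return gen == 6 &&
		       write_domain == DOMAIN_INSTRUCTION &&
		       !t->has_global_gtt_mapping;
	}

	int main(void)
	{
		struct target t = { false };

		printf("gen6 instruction reloc -> bind globally: %d\n",
		       needs_global_bind(6, DOMAIN_INSTRUCTION, &t));
		printf("gen7 instruction reloc -> bind globally: %d\n",
		       needs_global_bind(7, DOMAIN_INSTRUCTION, &t));
		return 0;
	}
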
@@ -393,30 +419,46 @@ static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+ struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- int i, ret;
+ int remain, ret;
user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc)))
+ remain = entry->relocation_count;
+ while (remain) {
+ struct drm_i915_gem_relocation_entry *r = stack_reloc;
+ int count = remain;
+ if (count > ARRAY_SIZE(stack_reloc))
+ count = ARRAY_SIZE(stack_reloc);
+ remain -= count;
+
+ if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
return -EFAULT;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
- if (ret)
- return ret;
+ do {
+ u64 offset = r->presumed_offset;
- if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset)))
- return -EFAULT;
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ if (ret)
+ return ret;
+
+ if (r->presumed_offset != offset &&
+ __copy_to_user_inatomic(&user_relocs->presumed_offset,
+ &r->presumed_offset,
+ sizeof(r->presumed_offset))) {
+ return -EFAULT;
+ }
+
+ user_relocs++;
+ r++;
+ } while (--count);
}
return 0;
+#undef N_RELOC
}
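
The relocation loop above now stages entries through a small on-stack buffer instead of copying them one at a time. A standalone sketch of that chunked-copy pattern, with memcpy standing in for the user-copy primitives and arbitrary sizes:

	/* Illustrative sketch only: processing a long array through a small
	 * on-stack staging buffer, as the relocation loop does. */
	#include <stdio.h>
	#include <string.h>

	#define CHUNK 8

	static long process_all(const int *src, int total)
	{
		int stack_buf[CHUNK];
		long sum = 0;
		int i;

		while (total) {
			int count = total > CHUNK ? CHUNK : total;

			/* copy one chunk into the staging buffer ... */
			memcpy(stack_buf, src, count * sizeof(*src));
			src += count;
			total -= count;

			/* ... then handle each entry from the copy */
			for (i = 0; i < count; i++)
				sum += stack_buf[i];
		}
		return sum;
	}

	int main(void)
	{
		int data[20], i;

		for (i = 0; i < 20; i++)
			data[i] = i;
		printf("sum = %ld\n", process_all(data, 20));	/* prints 190 */
		return 0;
	}
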
static int
@@ -465,6 +507,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
+need_reloc_mappable(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ return entry->relocation_count && !use_cpu_reloc(obj);
+}
+
+static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
{
@@ -477,8 +526,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
@@ -486,18 +534,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- if (obj->tiling_mode) {
- ret = i915_gem_object_get_fence(obj, ring);
- if (ret)
- goto err_unpin;
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
+ if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
- i915_gem_object_pin_fence(obj);
- } else {
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto err_unpin;
- }
+
obj->pending_fenced_gpu_access = true;
}
}
@@ -535,8 +578,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
@@ -576,8 +618,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
- need_mappable =
- entry->relocation_count ? true : need_fence;
+ need_mappable = need_fence || need_reloc_mappable(obj);
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
@@ -798,64 +839,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
- if (INTEL_INFO(dev)->gen < 6)
- return 0;
-
- if (i915_semaphores >= 0)
- return i915_semaphores;
-
- /* Disable semaphores on SNB */
- if (INTEL_INFO(dev)->gen == 6)
- return 0;
-
- return 1;
-}
-
-static int
-i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *to)
-{
- struct intel_ring_buffer *from = obj->ring;
- u32 seqno;
- int ret, idx;
-
- if (from == NULL || to == from)
- return 0;
-
- /* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (!intel_enable_semaphores(obj->base.dev))
- return i915_gem_object_wait_rendering(obj);
-
- idx = intel_ring_sync_index(from, to);
-
- seqno = obj->last_rendering_seqno;
- if (seqno <= from->sync_seqno[idx])
- return 0;
-
- if (seqno == from->outstanding_lazy_request) {
- struct drm_i915_gem_request *request;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- ret = i915_add_request(from, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- seqno = request->seqno;
- }
-
- from->sync_seqno[idx] = seqno;
-
- return to->sync_to(to, from, seqno - 1);
-}
-
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@@ -917,7 +900,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
list_for_each_entry(obj, objects, exec_list) {
- ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
}
@@ -955,7 +938,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (fault_in_pages_readable(ptr, length))
+ if (fault_in_multipages_readable(ptr, length))
return -EFAULT;
}
@@ -984,11 +967,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
- intel_mark_busy(ring->dev, obj);
+ if (obj->pin_count) /* check for potential scanout */
+ intel_mark_busy(ring->dev, obj);
}
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
+
+ intel_mark_busy(ring->dev, NULL);
}
static void
@@ -1078,17 +1064,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->ring[RCS];
break;
case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
- DRM_DEBUG("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
ring = &dev_priv->ring[BCS];
break;
default:
@@ -1096,6 +1074,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
+ if (!intel_ring_initialized(ring)) {
+ DRM_DEBUG("execbuf with invalid ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
@@ -1133,11 +1116,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
+
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
@@ -1242,9 +1231,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev, true);
+ ret = i915_gpu_idle(dev);
if (ret)
goto err;
+ i915_gem_retire_requests(dev);
BUG_ON(ring->sync_seqno[i]);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a135c61f4119..9fd25a435536 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,11 +96,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
- }
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
PCI_DMA_BIDIRECTIONAL);
@@ -112,8 +111,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
}
ppgtt->pt_dma_addr[i] = pt_addr;
- } else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+ }
}
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
@@ -269,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
i915_ppgtt_insert_sg_entries(ppgtt,
@@ -319,7 +323,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev, false)) {
+ if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -346,48 +350,39 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
- i915_gem_gtt_rebind_object(obj, obj->cache_level);
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
}
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
- int ret;
-
- if (dev_priv->mm.gtt->needs_dmar) {
- ret = intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- if (ret != 0)
- return ret;
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
- return 0;
+ if (dev_priv->mm.gtt->needs_dmar)
+ return intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ else
+ return 0;
}
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ intel_gtt_insert_sg_entries(obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj->sg_list,
@@ -399,19 +394,26 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
+
+ obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+
+ obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible;
interruptible = do_idling(dev_priv);
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
-
if (obj->sg_list) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
@@ -419,3 +421,23 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
undo_idling(dev_priv, interruptible);
}
+
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Subtract the guard page ... */
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+
+ dev_priv->mm.gtt_start = start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
+ dev_priv->mm.gtt_end = end;
+ dev_priv->mm.gtt_total = end - start;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+
+ /* ... but ensure that we clear the entire range. */
+ intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
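
i915_gem_init_global_gtt above hands the allocator one page less than the full range (the guard page) but still clears PTEs for the whole range. The arithmetic, shown for an assumed 256 MiB aperture:

	/* Illustrative sketch only: the guard-page arithmetic from
	 * i915_gem_init_global_gtt, using an example 256 MiB range. */
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		unsigned long start = 0, end = 256UL << 20;

		printf("allocator manages %lu pages\n",
		       (end - start - PAGE_SIZE) / PAGE_SIZE);	/* one page held back */
		printf("PTEs cleared for  %lu pages\n",
		       (end - start) / PAGE_SIZE);		/* but clear everything */
		return 0;
	}
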
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 000000000000..ada2e90a2a60
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS and so the user finds that his system has less memory
+ * available than he put in. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is try to reuse that object for our own fbcon which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+#define PTE_ADDRESS_MASK 0xfffff000
+#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+#define PTE_MAPPING_TYPE_MASK (3 << 1)
+#define PTE_VALID (1 << 0)
+
+/**
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
+ * @dev: drm device
+ * @offset: address to translate
+ *
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
+ */
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so compute the base by subtracting the stolen memory
+ * from the Top of Low Usable DRAM which is where the BIOS places
+ * the graphics stolen memory.
+ */
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
+ } else {
+ /* XXX presume 8xx is the same as i915 */
+ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+ }
+#else
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ u16 val;
+ pci_read_config_word(pdev, 0xb0, &val);
+ base = val >> 4 << 20;
+ } else {
+ u8 val;
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+ }
+ base -= dev_priv->mm.gtt->stolen_size;
+#endif
+
+ return base + offset;
+}
+
+static void i915_warn_stolen(struct drm_device *dev)
+{
+ DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
+ DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+}
+
+static void i915_setup_compression(struct drm_device *dev, int size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ unsigned long cfb_base;
+ unsigned long ll_base = 0;
+
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
+
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
+
+ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
+
+ if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
+
+ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
+ }
+
+ dev_priv->cfb_size = size;
+
+ dev_priv->compressed_fb = compressed_fb;
+ if (HAS_PCH_SPLIT(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
+ I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ } else {
+ I915_WRITE(FBC_CFB_BASE, cfb_base);
+ I915_WRITE(FBC_LL_BASE, ll_base);
+ dev_priv->compressed_llb = compressed_llb;
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+ cfb_base, ll_base, size >> 20);
+ return;
+
+err_llb:
+ drm_mm_put_block(compressed_llb);
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
+}
+
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->compressed_llb)
+ drm_mm_put_block(dev_priv->compressed_llb);
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+ /* Try to set up FBC with a reasonable compressed buffer size */
+ if (I915_HAS_FBC(dev) && i915_powersave) {
+ int cfb_size;
+
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
+ else /* fall back to 7/8 of the stolen space */
+ cfb_size = prealloc_size * 7 / 8;
+ i915_setup_compression(dev, cfb_size);
+ }
+
+ return 0;
+}
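
The FBC setup above sizes the compressed buffer as 32 MiB when stolen memory exceeds 36 MiB, otherwise 7/8 of whatever is available. A standalone sketch evaluating that rule for two example stolen sizes:

	/* Illustrative sketch only: the compressed-buffer sizing rule used
	 * by i915_gem_init_stolen, with example inputs. */
	#include <stdio.h>

	static unsigned long cfb_size(unsigned long stolen)
	{
		if (stolen > 36UL * 1024 * 1024)
			return 32UL * 1024 * 1024;	/* plenty of room: take 32M */
		return stolen * 7 / 8;			/* otherwise 7/8 of stolen */
	}

	int main(void)
	{
		printf("64M stolen -> %luM compressed buffer\n",
		       cfb_size(64UL << 20) >> 20);
		printf("16M stolen -> %luM compressed buffer\n",
		       cfb_size(16UL << 20) >> 20);
		return 0;
	}
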
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 1a9306665987..b964df51cec7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -354,9 +354,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
- * need to ensure that any fence register is cleared.
+ * need to ensure that any fence register is updated before
+ * the next fenced (either through the GTT or by the BLT unit
+ * on older GPUs) access.
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
*/
- i915_gem_release_mmap(obj);
obj->map_and_fenceable =
obj->gtt_space == NULL ||
@@ -374,9 +380,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
if (ret == 0) {
- obj->tiling_changed = true;
+ obj->fence_dirty =
+ obj->fenced_gpu_access ||
+ obj->fence_reg != I915_FENCE_REG_NONE;
+
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
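
After a tiling change, the hunk above marks the fence dirty whenever the GPU has been using fenced access or a register is still assigned, so the register is rewritten before the next fenced access. The decision, restated as a standalone predicate with stand-in fields:

	/* Illustrative sketch only: the fence_dirty decision made after a
	 * tiling change, with hypothetical field names and values. */
	#include <stdbool.h>
	#include <stdio.h>

	#define FENCE_REG_NONE (-1)

	struct obj {
		bool fenced_gpu_access;	/* GPU used the fence recently */
		int fence_reg;		/* allocated register, or FENCE_REG_NONE */
	};

	static bool fence_dirty_after_tiling_change(const struct obj *o)
	{
		/* The fence must be rewritten before the next fenced access if
		 * the GPU has been using it or a register is still assigned. */
		return o->fenced_gpu_access || o->fence_reg != FENCE_REG_NONE;
	}

	int main(void)
	{
		struct obj a = { false, FENCE_REG_NONE };
		struct obj b = { false, 3 };

		printf("untracked object dirty: %d\n", fence_dirty_after_tiling_change(&a));
		printf("fenced object dirty:    %d\n", fence_dirty_after_tiling_change(&b));
		return 0;
	}
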
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 13b028994b2b..0e72abb9f701 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
+#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
@@ -181,7 +182,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
(unsigned long)request);
}
-drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -189,6 +190,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_ALLOC] = compat_i915_alloc
};
+#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
@@ -217,3 +219,4 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return ret;
}
+#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afd4e03e337e..ed3224c37423 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
@@ -35,35 +37,6 @@
#include "i915_trace.h"
#include "intel_drv.h"
-#define MAX_NOPID ((u32)~0)
-
-/**
- * Interrupts that are always left unmasked.
- *
- * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
- * we leave them always unmasked in IMR and then control enabling them through
- * PIPESTAT alone.
- */
-#define I915_INTERRUPT_ENABLE_FIX \
- (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-
-/** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
-
-#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
- PIPE_VBLANK_INTERRUPT_STATUS)
-
-#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
- PIPE_VBLANK_INTERRUPT_ENABLE)
-
-#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
- DRM_I915_VBLANK_PIPE_B)
-
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
@@ -118,6 +91,10 @@ void intel_enable_asle(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
+ /* FIXME: opregion/asle for VLV */
+ if (IS_VALLEYVIEW(dev))
+ return;
+
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
@@ -354,15 +331,12 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno;
if (ring->obj == NULL)
return;
- seqno = ring->get_seqno(ring);
- trace_i915_gem_request_complete(ring, seqno);
+ trace_i915_gem_request_complete(ring, ring->get_seqno(ring));
- ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
@@ -376,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work);
- u8 new_delay = dev_priv->cur_delay;
u32 pm_iir, pm_imr;
+ u8 new_delay;
spin_lock_irq(&dev_priv->rps_lock);
pm_iir = dev_priv->pm_iir;
@@ -386,51 +360,159 @@ static void gen6_pm_rps_work(struct work_struct *work)
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
- if (!pm_iir)
+ if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
mutex_lock(&dev_priv->dev->struct_mutex);
- if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
- } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
- gen6_gt_force_wake_get(dev_priv);
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->min_delay) {
- new_delay = dev_priv->min_delay;
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
- ((new_delay << 16) & 0x3f0000));
- } else {
- /* Make sure we continue to get down interrupts
- * until we hit the minimum frequency */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
- }
- gen6_gt_force_wake_put(dev_priv);
- }
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+ new_delay = dev_priv->cur_delay + 1;
+ else
+ new_delay = dev_priv->cur_delay - 1;
gen6_set_rps(dev_priv->dev, new_delay);
- dev_priv->cur_delay = new_delay;
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+static void snb_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+
+ if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
+ GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
+
+ if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_ERROR_INTERRUPT)) {
+ DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
+ i915_handle_error(dev, false);
+ }
+}
+
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+ u32 pm_iir)
+{
+ unsigned long flags;
/*
- * rps_lock not held here because clearing is non-destructive. There is
- * an *extremely* unlikely race with gen6_rps_enable() that is prevented
- * by holding struct_mutex for the duration of the write.
+ * IIR bits should never already be set because IMR should
+ * prevent an interrupt from being shown in IIR. The warning
+ * displays a case where we've unsafely cleared
+ * dev_priv->pm_iir. Although missing an interrupt of the same
+ * type is not a problem, it indicates a problem in the logic.
+ *
+ * The mask bit in IMR is cleared by rps_work.
*/
- mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ spin_lock_irqsave(&dev_priv->rps_lock, flags);
+ dev_priv->pm_iir |= pm_iir;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+ POSTING_READ(GEN6_PMIMR);
+ spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+
+ queue_work(dev_priv->wq, &dev_priv->rps_work);
}
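gen6_queue_rps_work() introduces a common shape: the hard-IRQ path records the PM events under the rps lock, leaves them masked, and defers the real work. A standalone model of that hand-off, assuming plain variables and a pthread mutex in place of the PMIIR/PMIMR registers and spinlock:

    /* Standalone model of the defer-to-work pattern: the interrupt path
     * records which events fired and masks them, and a worker later takes
     * the batch and unmasks. The "registers" here are plain variables. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t rps_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t pending_events;   /* software copy of the IIR bits */
    static uint32_t masked_events;    /* software copy of the IMR bits */

    static void queue_rps_event(uint32_t iir)
    {
        pthread_mutex_lock(&rps_lock);
        pending_events |= iir;            /* remember what fired */
        masked_events = pending_events;   /* keep it masked until the worker runs */
        pthread_mutex_unlock(&rps_lock);
        /* a real driver would queue_work() here */
    }

    static uint32_t rps_worker(void)
    {
        uint32_t batch;

        pthread_mutex_lock(&rps_lock);
        batch = pending_events;           /* take the whole batch */
        pending_events = 0;
        masked_events = 0;                /* unmask for the next interrupt */
        pthread_mutex_unlock(&rps_lock);
        return batch;
    }

    int main(void)
    {
        queue_rps_event(0x1);
        queue_rps_event(0x4);
        printf("worker saw 0x%x\n", rps_worker());   /* prints 0x5 */
        return 0;
    }
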
-static void pch_irq_handler(struct drm_device *dev)
+static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 pch_iir;
+ u32 iir, gt_iir, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long irqflags;
int pipe;
+ u32 pipe_stats[I915_MAX_PIPES];
+ u32 vblank_status;
+ int vblank = 0;
+ bool blc_event;
- pch_iir = I915_READ(SDEIIR);
+ atomic_inc(&dev_priv->irq_received);
+
+ vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS;
+
+ while (true) {
+ iir = I915_READ(VLV_IIR);
+ gt_iir = I915_READ(GTIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
+
+ if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+ goto out;
+
+ ret = IRQ_HANDLED;
+
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+
+ if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
+ drm_handle_vblank(dev, 0);
+ vblank++;
+ intel_finish_page_flip(dev, 0);
+ }
+
+ if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
+ drm_handle_vblank(dev, 1);
+ vblank++;
+ intel_finish_page_flip(dev, 1);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ I915_WRITE(GTIIR, gt_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ I915_WRITE(VLV_IIR, iir);
+ }
+
+out:
+ return ret;
+}
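The new Valleyview handler loops over its three interrupt sources until all of them read back zero, acknowledging each source only after it has been handled. A compact standalone model of that loop, with the registers replaced by a small array that drains as it is acked:

    /* Standalone sketch of the loop shape used by valleyview_irq_handler():
     * keep reading the pending sources, stop once all read back zero, and
     * clear each source only after handling it. Values are invented. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t sources[3] = { 0x3, 0x0, 0x10 };   /* iir, gt_iir, pm_iir */

    static uint32_t read_source(int i)               { return sources[i]; }
    static void     ack_source(int i, uint32_t bits) { sources[i] &= ~bits; }

    int main(void)
    {
        int handled = 0;

        for (;;) {
            uint32_t iir = read_source(0);
            uint32_t gt  = read_source(1);
            uint32_t pm  = read_source(2);

            if (!iir && !gt && !pm)
                break;                    /* nothing left pending */

            handled = 1;
            /* ...dispatch per-source handlers here... */
            ack_source(0, iir);
            ack_source(1, gt);
            ack_source(2, pm);
        }

        printf("%s\n", handled ? "IRQ_HANDLED" : "IRQ_NONE");
        return 0;
    }
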
+
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
@@ -467,95 +549,110 @@ static void pch_irq_handler(struct drm_device *dev)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+ SDE_AUDIO_POWER_SHIFT_CPT);
+
+ if (pch_iir & SDE_AUX_MASK_CPT)
+ DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+
+ if (pch_iir & SDE_GMBUS_CPT)
+ DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+ DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+ DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK_CPT)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+}
+
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
- struct drm_i915_master_private *master_priv;
+ u32 de_iir, gt_iir, de_ier, pm_iir;
+ irqreturn_t ret = IRQ_NONE;
+ int i;
atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
- de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
- pch_iir = I915_READ(SDEIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
-
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && pm_iir == 0)
- goto done;
-
- ret = IRQ_HANDLED;
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
+ if (gt_iir) {
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
}
- if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
-
- if (de_iir & DE_GSE_IVB)
- intel_opregion_gse_intr(dev);
-
- if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
+ de_iir = I915_READ(DEIIR);
+ if (de_iir) {
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_gse_intr(dev);
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ }
- if (de_iir & DE_PLANEB_FLIP_DONE_IVB) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
- }
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ u32 pch_iir = I915_READ(SDEIIR);
- if (de_iir & DE_PIPEA_VBLANK_IVB)
- drm_handle_vblank(dev, 0);
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ cpt_irq_handler(dev, pch_iir);
- if (de_iir & DE_PIPEB_VBLANK_IVB)
- drm_handle_vblank(dev, 1);
+ /* clear PCH hotplug event before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
- /* check event from PCH */
- if (de_iir & DE_PCH_EVENT_IVB) {
- if (pch_iir & SDE_HOTPLUG_MASK_CPT)
- queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev);
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
}
- if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
}
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
return ret;
}
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -563,14 +660,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
- struct drm_i915_master_private *master_priv;
- u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
atomic_inc(&dev_priv->irq_received);
- if (IS_GEN6(dev))
- bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
-
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -592,19 +684,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
ret = IRQ_HANDLED;
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & bsd_usr_interrupt)
- notify_ring(dev, &dev_priv->ring[VCS]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
+ if (IS_GEN5(dev))
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -629,7 +712,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev);
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
}
if (de_iir & DE_PCU_EVENT) {
@@ -637,25 +723,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
i915_handle_rps_change(dev);
}
- if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
- /*
- * IIR bits should never already be set because IMR should
- * prevent an interrupt from being shown in IIR. The warning
- * displays a case where we've unsafely cleared
- * dev_priv->pm_iir. Although missing an interrupt of the same
- * type is not a problem, it displays a problem in the logic.
- *
- * The mask bit in IMR is cleared by rps_work.
- */
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
- dev_priv->pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
- queue_work(dev_priv->wq, &dev_priv->rps_work);
- }
+ if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+ gen6_queue_rps_work(dev_priv, pm_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -691,7 +760,7 @@ static void i915_error_work_func(struct work_struct *work)
if (atomic_read(&dev_priv->mm.wedged)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
- if (!i915_reset(dev, GRDOM_RENDER)) {
+ if (!i915_reset(dev)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
@@ -727,7 +796,8 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
+ src->has_global_gtt_mapping) {
void __iomem *s;
/* Simply ignore tiling or any overlapping fence.
@@ -782,10 +852,11 @@ i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
-static void
-i915_error_state_free(struct drm_device *dev,
- struct drm_i915_error_state *error)
+void
+i915_error_state_free(struct kref *error_ref)
{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
int i;
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
@@ -798,37 +869,56 @@ i915_error_state_free(struct drm_device *dev,
kfree(error->overlay);
kfree(error);
}
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
-static u32 capture_bo_list(struct drm_i915_error_buffer *err,
- int count,
- struct list_head *head)
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
int i = 0;
list_for_each_entry(obj, head, mm_list) {
- err->size = obj->base.size;
- err->name = obj->base.name;
- err->seqno = obj->last_rendering_seqno;
- err->gtt_offset = obj->gtt_offset;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
- err->fence_reg = obj->fence_reg;
- err->pinned = 0;
- if (obj->pin_count > 0)
- err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
- err->tiling = obj->tiling_mode;
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : -1;
- err->cache_level = obj->cache_level;
-
+ capture_bo(err++, obj);
if (++i == count)
break;
+ }
+
+ return i;
+}
- err++;
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, gtt_list) {
+ if (obj->pin_count == 0)
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
}
return i;
@@ -901,7 +991,6 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -910,6 +999,7 @@ static void i915_record_ring_state(struct drm_device *dev,
}
if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
@@ -919,11 +1009,13 @@ static void i915_record_ring_state(struct drm_device *dev,
error->bbaddr = I915_READ64(BB_ADDR);
}
} else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
error->ipehr[ring->id] = I915_READ(IPEHR);
error->instdone[ring->id] = I915_READ(INSTDONE);
}
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
@@ -938,15 +1030,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
- if (ring->obj == NULL)
- continue;
-
+ for_each_ring(ring, dev_priv, i) {
i915_record_ring_state(dev, error, ring);
error->ring[i].batchbuffer =
@@ -1013,8 +1101,19 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
+ kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
@@ -1034,8 +1133,9 @@ static void i915_capture_error_state(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
- i++;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+ if (obj->pin_count)
+ i++;
error->pinned_bo_count = i - error->active_bo_count;
error->active_bo = NULL;
@@ -1050,15 +1150,15 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error->active_bo)
error->active_bo_count =
- capture_bo_list(error->active_bo,
- error->active_bo_count,
- &dev_priv->mm.active_list);
+ capture_active_bo(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
if (error->pinned_bo)
error->pinned_bo_count =
- capture_bo_list(error->pinned_bo,
- error->pinned_bo_count,
- &dev_priv->mm.pinned_list);
+ capture_pinned_bo(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.gtt_list);
do_gettimeofday(&error->time);
@@ -1073,7 +1173,7 @@ static void i915_capture_error_state(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
- i915_error_state_free(dev, error);
+ i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
@@ -1088,7 +1188,7 @@ void i915_destroy_error_state(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
- i915_error_state_free(dev, error);
+ kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
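The error-state lifetime now follows the kernel's kref pattern: initialise the count when the state is captured and free it from the release callback on the last put. A standalone model of that pattern in plain C, with ref and error_state standing in for kref and drm_i915_error_state:

    /* Standalone model of the kref-style lifetime used above: the object
     * embeds a counter, callers drop references through a release callback,
     * and the last drop frees it. Mirrors kref_init()/kref_put() but is
     * plain C so it compiles on its own. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct ref {
        unsigned int count;
    };

    struct error_state {
        struct ref ref;
        int dummy_payload;
    };

    static void ref_init(struct ref *r) { r->count = 1; }

    static void ref_put(struct ref *r, void (*release)(struct ref *r))
    {
        if (--r->count == 0)
            release(r);
    }

    static void error_state_free(struct ref *r)
    {
        struct error_state *e =
            (struct error_state *)((char *)r - offsetof(struct error_state, ref));
        printf("freeing error state (payload %d)\n", e->dummy_payload);
        free(e);
    }

    int main(void)
    {
        struct error_state *e = malloc(sizeof(*e));
        if (!e)
            return 1;
        ref_init(&e->ref);
        e->dummy_payload = 7;
        ref_put(&e->ref, error_state_free);   /* last reference -> freed */
        return 0;
    }
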
@@ -1103,33 +1203,26 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (!eir)
return;
- printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
- eir);
+ pr_err("render error detected, EIR: 0x%08x\n", eir);
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTDONE: 0x%08x\n",
I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
@@ -1138,53 +1231,42 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
- printk(KERN_ERR "page table error\n");
- printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
- pgtbl_err);
+ pr_err("page table error\n");
+ pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
- printk(KERN_ERR "memory refresh error:\n");
+ pr_err("memory refresh error:\n");
for_each_pipe(pipe)
- printk(KERN_ERR "pipe %c stat: 0x%08x\n",
+ pr_err("pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
- printk(KERN_ERR "instruction error\n");
- printk(KERN_ERR " INSTPM: 0x%08x\n",
- I915_READ(INSTPM));
+ pr_err("instruction error\n");
+ pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
- I915_READ(INSTDONE));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD));
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
+ pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
- printk(KERN_ERR " IPEIR: 0x%08x\n",
- I915_READ(IPEIR_I965));
- printk(KERN_ERR " IPEHR: 0x%08x\n",
- I915_READ(IPEHR_I965));
- printk(KERN_ERR " INSTDONE: 0x%08x\n",
+ pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+ pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+ pr_err(" INSTDONE: 0x%08x\n",
I915_READ(INSTDONE_I965));
- printk(KERN_ERR " INSTPS: 0x%08x\n",
- I915_READ(INSTPS));
- printk(KERN_ERR " INSTDONE1: 0x%08x\n",
- I915_READ(INSTDONE1));
- printk(KERN_ERR " ACTHD: 0x%08x\n",
- I915_READ(ACTHD_I965));
+ pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
+ pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
+ pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
@@ -1217,6 +1299,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i;
i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
@@ -1228,11 +1312,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- wake_up_all(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- wake_up_all(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- wake_up_all(&dev_priv->ring[BCS].irq_queue);
+ for_each_ring(ring, dev_priv, i)
+ wake_up_all(&ring->irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1265,7 +1346,8 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
- stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+ stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+ obj->gtt_offset;
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
@@ -1281,248 +1363,6 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
}
-static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv;
- u32 iir, new_iir;
- u32 pipe_stats[I915_MAX_PIPES];
- u32 vblank_status;
- int vblank = 0;
- unsigned long irqflags;
- int irq_received;
- int ret = IRQ_NONE, pipe;
- bool blc_event = false;
-
- atomic_inc(&dev_priv->irq_received);
-
- iir = I915_READ(IIR);
-
- if (INTEL_INFO(dev)->gen >= 4)
- vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
- else
- vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
-
- for (;;) {
- irq_received = iir != 0;
-
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- i915_handle_error(dev, false);
-
- for_each_pipe(pipe) {
- int reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff) {
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe %c underrun\n",
- pipe_name(pipe));
- I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = 1;
- }
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- if (!irq_received)
- break;
-
- ret = IRQ_HANDLED;
-
- /* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_status);
- if (hotplug_status & dev_priv->hotplug_supported_mask)
- queue_work(dev_priv->wq,
- &dev_priv->hotplug_work);
-
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_READ(PORT_HOTPLUG_STAT);
- }
-
- I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
-
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch =
- READ_BREADCRUMB(dev_priv);
- }
-
- if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
-
- if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 0);
- if (dev_priv->flip_pending_is_done)
- intel_finish_page_flip_plane(dev, 0);
- }
-
- if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
- intel_prepare_page_flip(dev, 1);
- if (dev_priv->flip_pending_is_done)
- intel_finish_page_flip_plane(dev, 1);
- }
-
- for_each_pipe(pipe) {
- if (pipe_stats[pipe] & vblank_status &&
- drm_handle_vblank(dev, pipe)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
- }
-
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
- }
-
-
- if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
-
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- iir = new_iir;
- }
-
- return ret;
-}
-
-static int i915_emit_irq(struct drm_device * dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
- i915_kernel_lost_context(dev);
-
- DRM_DEBUG_DRIVER("\n");
-
- dev_priv->counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
-
- if (BEGIN_LP_RING(4) == 0) {
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
-
- return dev_priv->counter;
-}
-
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- int ret = 0;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
- DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
- READ_BREADCRUMB(dev_priv));
-
- if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- return 0;
- }
-
- if (master_priv->sarea_priv)
- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
- if (ring->irq_get(ring)) {
- DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- ring->irq_put(ring);
- } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
- ret = -EBUSY;
-
- if (ret == -EBUSY) {
- DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
- }
-
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i915_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t *emit = data;
- int result;
-
- if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- mutex_lock(&dev->struct_mutex);
- result = i915_emit_irq(dev);
- mutex_unlock(&dev->struct_mutex);
-
- if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i915_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return i915_wait_irq(dev, irqwait->irq_seq);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -1544,7 +1384,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
/* maintain vblank delivery even in deep C-states */
if (dev_priv->info->gen == 3)
- I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
+ I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1575,8 +1415,34 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ ironlake_enable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ return 0;
+}
+
+static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 dpfl, imr;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dpfl = I915_READ(VLV_DPFLIPSTAT);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0) {
+ dpfl |= PIPEA_VBLANK_INT_EN;
+ imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ } else {
+ dpfl |= PIPEB_VBLANK_INT_EN;
+ imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+ }
+ I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+ I915_WRITE(VLV_IMR, imr);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1592,8 +1458,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->info->gen == 3)
- I915_WRITE(INSTPM,
- INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -1618,63 +1483,30 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
+ ironlake_disable_display_irq(dev_priv,
+ DE_PIPEA_VBLANK_IVB << (pipe * 5));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-/* Set the vblank monitor pipe
- */
-int i915_vblank_pipe_set(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i915_vblank_pipe_get(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t *pipe = data;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
+ u32 dpfl, imr;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dpfl = I915_READ(VLV_DPFLIPSTAT);
+ imr = I915_READ(VLV_IMR);
+ if (pipe == 0) {
+ dpfl &= ~PIPEA_VBLANK_INT_EN;
+ imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+ } else {
+ dpfl &= ~PIPEB_VBLANK_INT_EN;
+ imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
}
-
- pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
- return 0;
-}
-
-/**
- * Schedule buffer swap at given vertical blank.
- */
-int i915_vblank_swap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* The delayed swap mechanism was fundamentally racy, and has been
- * removed. The model was that the client requested a delayed flip/swap
- * from the kernel, then waited for vblank before continuing to perform
- * rendering. The problem was that the kernel might wake the client
- * up before it dispatched the vblank swap (since the lock has to be
- * held while touching the ringbuffer), in which case the client would
- * clear and start the next frame before the swap occurred, and
- * flicker would occur in addition to likely missing the vblank.
- *
- * In the absence of this ioctl, userland falls back to a correct path
- * of waiting for a vblank, then dispatching the swap on its own.
- * Context switching to userland and back is plenty fast enough for
- * meeting the requirements of vblank swapping.
- */
- return -EINVAL;
+ I915_WRITE(VLV_IMR, imr);
+ I915_WRITE(VLV_DPFLIPSTAT, dpfl);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
@@ -1689,11 +1521,9 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
if (list_empty(&ring->request_list) ||
i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
/* Issue a wake-up to catch stuck h/w. */
- if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
- DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
- ring->name,
- ring->waiting_seqno,
- ring->get_seqno(ring));
+ if (waitqueue_active(&ring->irq_queue)) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ ring->name);
wake_up_all(&ring->irq_queue);
*err = true;
}
@@ -1716,6 +1546,35 @@ static bool kick_ring(struct intel_ring_buffer *ring)
return false;
}
+static bool i915_hangcheck_hung(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->hangcheck_count++ > 1) {
+ bool hung = true;
+
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+
+ if (!IS_GEN2(dev)) {
+ struct intel_ring_buffer *ring;
+ int i;
+
+ /* Is the chip hanging on a WAIT_FOR_EVENT?
+ * If so we can simply poke the RB_WAIT bit
+ * and break the hang. This should work on
+ * all but the second generation chipsets.
+ */
+ for_each_ring(ring, dev_priv, i)
+ hung &= !kick_ring(ring);
+ }
+
+ return hung;
+ }
+
+ return false;
+}
+
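i915_hangcheck_hung() isolates the "no progress for several ticks" test that the periodic timer relies on. A standalone sketch of that counter logic, with hard-coded samples standing in for the ACTHD/INSTDONE reads:

    /* Standalone sketch of the hangcheck logic factored out above: sample a
     * progress value every tick, count consecutive ticks with no movement,
     * and declare a hang once the count passes a small threshold. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t last_sample;
    static int hangcheck_count;

    static int hangcheck_tick(uint32_t sample)
    {
        if (sample == last_sample) {
            if (hangcheck_count++ > 1)
                return 1;            /* no progress for several ticks: hung */
        } else {
            hangcheck_count = 0;     /* progress seen, start over */
            last_sample = sample;
        }
        return 0;
    }

    int main(void)
    {
        uint32_t samples[] = { 0x100, 0x180, 0x180, 0x180, 0x180 };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            if (hangcheck_tick(samples[i]))
                printf("hung at tick %u\n", i);   /* fires at tick 4 */
        return 0;
    }
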
/**
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. The first time this is called we simply record
@@ -1726,19 +1585,31 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
- bool err = false;
+ uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+ struct intel_ring_buffer *ring;
+ bool err = false, idle;
+ int i;
if (!i915_enable_hangcheck)
return;
+ memset(acthd, 0, sizeof(acthd));
+ idle = true;
+ for_each_ring(ring, dev_priv, i) {
+ idle &= i915_hangcheck_ring_idle(ring, &err);
+ acthd[i] = intel_ring_get_active_head(ring);
+ }
+
/* If all work is done then ACTHD clearly hasn't advanced. */
- if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
- i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
- i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
- dev_priv->hangcheck_count = 0;
- if (err)
+ if (idle) {
+ if (err) {
+ if (i915_hangcheck_hung(dev))
+ return;
+
goto repeat;
+ }
+
+ dev_priv->hangcheck_count = 0;
return;
}
@@ -1749,47 +1620,16 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone = I915_READ(INSTDONE_I965);
instdone1 = I915_READ(INSTDONE1);
}
- acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
- acthd_bsd = HAS_BSD(dev) ?
- intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
- acthd_blt = HAS_BLT(dev) ?
- intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
- if (dev_priv->last_acthd == acthd &&
- dev_priv->last_acthd_bsd == acthd_bsd &&
- dev_priv->last_acthd_blt == acthd_blt &&
+ if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
- if (dev_priv->hangcheck_count++ > 1) {
- DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
- i915_handle_error(dev, true);
-
- if (!IS_GEN2(dev)) {
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- if (kick_ring(&dev_priv->ring[RCS]))
- goto repeat;
-
- if (HAS_BSD(dev) &&
- kick_ring(&dev_priv->ring[VCS]))
- goto repeat;
-
- if (HAS_BLT(dev) &&
- kick_ring(&dev_priv->ring[BCS]))
- goto repeat;
- }
-
+ if (i915_hangcheck_hung(dev))
return;
- }
} else {
dev_priv->hangcheck_count = 0;
- dev_priv->last_acthd = acthd;
- dev_priv->last_acthd_bsd = acthd_bsd;
- dev_priv->last_acthd_blt = acthd_blt;
+ memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
dev_priv->last_instdone = instdone;
dev_priv->last_instdone1 = instdone1;
}
@@ -1808,10 +1648,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
atomic_set(&dev_priv->irq_received, 0);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
- INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
@@ -1832,6 +1668,38 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
+static void valleyview_irq_preinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ /* VLV magic */
+ I915_WRITE(VLV_IMR, 0);
+ I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+ I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+ I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+ /* and GT */
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ POSTING_READ(GTIER);
+
+ I915_WRITE(DPINVGTT, 0xff);
+
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
@@ -1861,13 +1729,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
u32 render_irqs;
u32 hotplug_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always can generate irq */
@@ -1884,8 +1745,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
if (IS_GEN6(dev))
render_irqs =
GT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
+ GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
else
render_irqs =
GT_USER_INTERRUPT |
@@ -1930,26 +1791,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
- DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB;
+ u32 display_mask =
+ DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+ DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB;
u32 render_irqs;
u32 hotplug_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
- if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
- if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
-
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always can generate irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
- DE_PIPEB_VBLANK_IVB);
+ I915_WRITE(DEIER,
+ display_mask |
+ DE_PIPEC_VBLANK_IVB |
+ DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
dev_priv->gt_irq_mask = ~0;
@@ -1957,8 +1816,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
+ render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+ GEN6_BLITTER_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
@@ -1978,15 +1837,496 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void i915_driver_irq_preinstall(struct drm_device * dev)
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 render_irqs;
+ u32 enable_mask;
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ u16 msid;
+
+ enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+ dev_priv->irq_mask = ~enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ /* Hack for broken MSIs on VLV */
+ pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
+ pci_read_config_word(dev->pdev, 0x98, &msid);
+ msid &= 0xff; /* mask out delivery bits */
+ msid |= (1<<14);
+ pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
+
+ I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+ I915_WRITE(VLV_IER, enable_mask);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(PIPESTAT(0), 0xffff);
+ I915_WRITE(PIPESTAT(1), 0xffff);
+ POSTING_READ(VLV_IER);
+
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+
+ render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
+ GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BLT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_RENDER_CS_ERROR_INTERRUPT |
+ GT_SYNC_STATUS |
+ GT_USER_INTERRUPT;
+
+ dev_priv->gt_irq_mask = ~render_irqs;
+
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, 0);
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
+
+ /* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+#if 0 /* FIXME: check register definitions; some have moved */
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+#endif
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+
+ return 0;
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (!dev_priv)
+ return;
+
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0xffff);
+ I915_WRITE(VLV_IIR, 0xffffffff);
+ I915_WRITE(VLV_IMR, 0xffffffff);
+ I915_WRITE(VLV_IER, 0x0);
+ POSTING_READ(VLV_IER);
+}
+
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ I915_WRITE(DEIMR, 0xffffffff);
+ I915_WRITE(DEIER, 0x0);
+ I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+ I915_WRITE(GTIMR, 0xffffffff);
+ I915_WRITE(GTIER, 0x0);
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+}
+
+static void i8xx_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
atomic_set(&dev_priv->irq_received, 0);
- INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
- INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ POSTING_READ16(IER);
+}
+
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE16(EMR,
+ ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+
+ I915_WRITE16(IER,
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT);
+ POSTING_READ16(IER);
+
+ return 0;
+}
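The *_postinstall hooks all follow the same rule: an event is delivered only if it is enabled in IER and left unmasked in IMR, so the always-wanted set is written to both. A standalone sketch with invented event bits:

    /* Standalone sketch of the IMR/IER relationship assumed by the
     * postinstall hooks; the bit names and values are invented for the demo. */
    #include <stdio.h>
    #include <stdint.h>

    #define EVT_PIPE_A  (1u << 0)
    #define EVT_PIPE_B  (1u << 1)
    #define EVT_USER    (1u << 2)
    #define EVT_HOTPLUG (1u << 3)

    int main(void)
    {
        uint32_t wanted = EVT_PIPE_A | EVT_PIPE_B | EVT_USER;
        uint32_t imr = ~wanted;     /* mask everything except the wanted set */
        uint32_t ier = wanted;      /* and enable exactly that set */

        uint32_t raised = EVT_USER | EVT_HOTPLUG;
        uint32_t delivered = raised & ier & ~imr;

        printf("delivered 0x%x\n", delivered);   /* only EVT_USER (0x4) */
        return 0;
    }
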
+
+static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u16 iir, new_iir;
+ u32 pipe_stats[2];
+ unsigned long irqflags;
+ int irq_received;
+ int pipe;
+ u16 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ return IRQ_NONE;
+
+ while (iir & ~flip_mask) {
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ I915_WRITE16(IIR, iir & ~flip_mask);
+ new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+ i915_update_dri1_breadcrumb(dev);
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 0)) {
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, 1)) {
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ }
+ }
+
+ iir = new_iir;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void i8xx_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE16(IMR, 0xffff);
+ I915_WRITE16(IER, 0x0);
+ I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+static void i915_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xeffe);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+ POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 enable_mask;
+
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ /* Enable in IER... */
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+ /* and unmask in IMR */
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ }
+
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ I915_WRITE(IER, enable_mask);
+ POSTING_READ(IER);
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
+
+ /* Ignore TV since it's buggy */
+
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ }
+
+ intel_opregion_enable_asle(dev);
+
+ return 0;
+}
+
+static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ u32 flip_mask =
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+ u32 flip[2] = {
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
+ };
+ int pipe, ret = IRQ_NONE;
+
+ atomic_inc(&dev_priv->irq_received);
+
+ iir = I915_READ(IIR);
+ do {
+ bool irq_received = (iir & ~flip_mask) != 0;
+ bool blc_event = false;
+
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
+
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /* Clear the PIPE*STAT regs before the IIR */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ POSTING_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir & ~flip_mask);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+
+ for_each_pipe(pipe) {
+ int plane = pipe;
+ if (IS_MOBILE(dev))
+ plane = !plane;
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ if (iir & flip[plane]) {
+ intel_prepare_page_flip(dev, plane);
+ intel_finish_page_flip(dev, pipe);
+ flip_mask &= ~flip[plane];
+ }
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ ret = IRQ_HANDLED;
+ iir = new_iir;
+ } while (iir & ~flip_mask);
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
+}
+
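Stripped of the per-source work, the loop above follows a fixed pattern: ack IIR before handling, and let the posting read double as the check for bits that arrived in the meantime, which is what keeps MSI's edge-triggered semantics from losing events. A reduced sketch (handle_events() is a hypothetical stand-in, not a function added by this patch):

	iir = I915_READ(IIR);
	do {
		I915_WRITE(IIR, iir & ~flip_mask);	/* ack what we are about to handle */
		new_iir = I915_READ(IIR);		/* posting read; also picks up new events */

		handle_events(dev, iir);		/* hypothetical per-source dispatch */

		iir = new_iir;
	} while (iir & ~flip_mask);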
+static void i915_irq_uninstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (I915_HAS_HOTPLUG(dev)) {
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ }
+
+ I915_WRITE16(HWSTAM, 0xffff);
+ for_each_pipe(pipe) {
+ /* Clear enable bits; then clear status bits */
+ I915_WRITE(PIPESTAT(pipe), 0);
+ I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+ }
+ I915_WRITE(IMR, 0xffffffff);
+ I915_WRITE(IER, 0x0);
+
+ I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ atomic_set(&dev_priv->irq_received, 0);
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -2001,20 +2341,25 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
POSTING_READ(IER);
}
-/*
- * Must be called after intel_modeset_init or hotplug interrupts won't be
- * enabled correctly.
- */
-static int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
+ u32 enable_mask;
u32 error_mask;
- dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
-
/* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
+ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+ enable_mask = ~dev_priv->irq_mask;
+ enable_mask |= I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -2081,31 +2426,124 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void ironlake_irq_uninstall(struct drm_device *dev)
+static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 iir, new_iir;
+ u32 pipe_stats[I915_MAX_PIPES];
+ unsigned long irqflags;
+ int irq_received;
+ int ret = IRQ_NONE, pipe;
- if (!dev_priv)
- return;
+ atomic_inc(&dev_priv->irq_received);
- dev_priv->vblank_pipe = 0;
+ iir = I915_READ(IIR);
- I915_WRITE(HWSTAM, 0xffffffff);
+ for (;;) {
+ bool blc_event = false;
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- I915_WRITE(DEIIR, I915_READ(DEIIR));
+ irq_received = iir != 0;
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
+ /* Can't rely on pipestat interrupt bit in iir as it might
+ * have been cleared after the pipestat interrupt was received.
+ * It doesn't set the bit in iir again, but it still produces
+ * interrupts (for non-MSI).
+ */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ i915_handle_error(dev, false);
- I915_WRITE(SDEIMR, 0xffffffff);
- I915_WRITE(SDEIER, 0x0);
- I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (!irq_received)
+ break;
+
+ ret = IRQ_HANDLED;
+
+ /* Consume port. Then clear IIR or we'll miss events */
+ if ((I915_HAS_HOTPLUG(dev)) &&
+ (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hotplug_status);
+ if (hotplug_status & dev_priv->hotplug_supported_mask)
+ queue_work(dev_priv->wq,
+ &dev_priv->hotplug_work);
+
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ I915_READ(PORT_HOTPLUG_STAT);
+ }
+
+ I915_WRITE(IIR, iir);
+ new_iir = I915_READ(IIR); /* Flush posted writes */
+
+ if (iir & I915_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 0);
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 1);
+
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+ drm_handle_vblank(dev, pipe)) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+ }
+
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev);
+
+ /* With MSI, interrupts are only generated when iir
+ * transitions from zero to nonzero. If another bit got
+ * set while we were handling the existing iir bits, then
+ * we would never get another interrupt.
+ *
+ * This is fine on non-MSI as well, as if we hit this path
+ * we avoid exiting the interrupt handler only to generate
+ * another one.
+ *
+ * Note that for MSI this could cause a stray interrupt report
+ * if an interrupt landed in the time between writing IIR and
+ * the posting read. This should be rare enough to never
+ * trigger the 99% of 100,000 interrupts test for disabling
+ * stray interrupts.
+ */
+ iir = new_iir;
+ }
+
+ i915_update_dri1_breadcrumb(dev);
+
+ return ret;
}
-static void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i965_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -2113,8 +2551,6 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- dev_priv->vblank_pipe = 0;
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -2134,9 +2570,15 @@ static void i915_driver_irq_uninstall(struct drm_device * dev)
void intel_irq_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
@@ -2147,7 +2589,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->get_vblank_timestamp = NULL;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_VALLEYVIEW(dev)) {
+ dev->driver->irq_handler = valleyview_irq_handler;
+ dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_postinstall = valleyview_irq_postinstall;
+ dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->enable_vblank = valleyview_enable_vblank;
+ dev->driver->disable_vblank = valleyview_disable_vblank;
+ } else if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2155,6 +2604,14 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (IS_HASWELL(dev)) {
+ /* Share interrupts handling with IVB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2163,10 +2620,25 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
} else {
- dev->driver->irq_preinstall = i915_driver_irq_preinstall;
- dev->driver->irq_postinstall = i915_driver_irq_postinstall;
- dev->driver->irq_uninstall = i915_driver_irq_uninstall;
- dev->driver->irq_handler = i915_driver_irq_handler;
+ if (INTEL_INFO(dev)->gen == 2) {
+ dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_postinstall = i8xx_irq_postinstall;
+ dev->driver->irq_handler = i8xx_irq_handler;
+ dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ } else if (INTEL_INFO(dev)->gen == 3) {
+ /* IIR "flip pending" means done if this bit is set */
+ I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+
+ dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_handler = i915_irq_handler;
+ } else {
+ dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_postinstall = i965_irq_postinstall;
+ dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_handler = i965_irq_handler;
+ }
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d24d65f0c3e..48d5e8e051cf 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -27,6 +27,11 @@
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
+
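These helpers are for registers whose upper 16 bits act as a per-bit write-enable mask over the lower 16, so single bits can be set or cleared without a read-modify-write. _PORT() mirrors the existing _PIPE() trick for per-port register pairs (e.g. DDI_BUF_CTL further down). The ECOSKPD write in intel_irq_init() above is one user of the masked-bit form; as a sketch:

	/* bits 31:16 select which of bits 15:0 the write actually touches */
	I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_FLIP_DONE));	/* set the bit */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));	/* clear the bit */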
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
@@ -77,6 +82,7 @@
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GRDOM_RESET_ENABLE (1<<0)
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
@@ -125,6 +131,13 @@
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+#define GAC_ECO_BITS 0x14090
+#define ECOBITS_PPGTT_CACHE64B (3<<8)
+#define ECOBITS_PPGTT_CACHE4B (0<<8)
+
+#define GAB_CTL 0x24000
+#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -197,6 +210,14 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -222,6 +243,7 @@
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -301,6 +323,61 @@
#define DEBUG_RESET_RENDER (1<<8)
#define DEBUG_RESET_DISPLAY (1<<9)
+/*
+ * DPIO - a special bus for various display related registers to hide behind:
+ * 0x800c: m1, m2, n, p1, p2, k dividers
+ * 0x8014: REF and SFR select
+ * 0x8014: N divider, VCO select
+ * 0x801c/3c: core clock bits
+ * 0x8048/68: low pass filter coefficients
+ * 0x8100: fast clock controls
+ */
+#define DPIO_PKT 0x2100
+#define DPIO_RID (0<<24)
+#define DPIO_OP_WRITE (1<<16)
+#define DPIO_OP_READ (0<<16)
+#define DPIO_PORTID (0x12<<8)
+#define DPIO_BYTE (0xf<<4)
+#define DPIO_BUSY (1<<0) /* status only */
+#define DPIO_DATA 0x2104
+#define DPIO_REG 0x2108
+#define DPIO_CTL 0x2110
+#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
+#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
+#define DPIO_SFR_BYPASS (1<<1)
+#define DPIO_RESET (1<<0)
+
+#define _DPIO_DIV_A 0x800c
+#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
+#define DPIO_K_SHIFT (24) /* 4 bits */
+#define DPIO_P1_SHIFT (21) /* 3 bits */
+#define DPIO_P2_SHIFT (16) /* 5 bits */
+#define DPIO_N_SHIFT (12) /* 4 bits */
+#define DPIO_ENABLE_CALIBRATION (1<<11)
+#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
+#define DPIO_M2DIV_MASK 0xff
+#define _DPIO_DIV_B 0x802c
+#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+
+#define _DPIO_REFSFR_A 0x8014
+#define DPIO_REFSEL_OVERRIDE 27
+#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
+#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
+#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
+#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
+#define _DPIO_REFSFR_B 0x8034
+#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+
+#define _DPIO_CORE_CLK_A 0x801c
+#define _DPIO_CORE_CLK_B 0x803c
+#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+
+#define _DPIO_LFP_COEFF_A 0x8048
+#define _DPIO_LFP_COEFF_B 0x8068
+#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
+
+#define DPIO_FASTCLK_DISABLE 0x8100
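The DPIO registers listed above are not mapped directly; accesses go through the DPIO_PKT/DPIO_REG/DPIO_DATA mailbox. A write is expected to go roughly as in the sketch below; the helper name and the exact busy-wait are assumptions for illustration, not part of this patch:

static void example_dpio_write(drm_i915_private_t *dev_priv, int reg, u32 val)
{
	int timeout = 100;

	/* wait for any previous transaction to complete */
	while ((I915_READ(DPIO_PKT) & DPIO_BUSY) && --timeout)
		udelay(10);
	if (!timeout)
		return;

	I915_WRITE(DPIO_DATA, val);
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | DPIO_BYTE);

	/* and for this one to be accepted */
	timeout = 100;
	while ((I915_READ(DPIO_PKT) & DPIO_BUSY) && --timeout)
		udelay(10);
}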
/*
* Fence registers
@@ -360,8 +437,6 @@
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
-#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
-#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
@@ -417,6 +492,7 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
@@ -432,6 +508,7 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
@@ -447,14 +524,16 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
-#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
-#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
-
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
+#define VLV_IIR_RW 0x182084
+#define VLV_IER 0x1820a0
+#define VLV_IIR 0x1820a4
+#define VLV_IMR 0x1820a8
+#define VLV_ISR 0x1820ac
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -500,7 +579,6 @@
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
-#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
 /* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
@@ -565,7 +643,6 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
-#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -579,7 +656,12 @@
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
-/* GEN6 interrupt control */
+#define CACHE_MODE_1 0x7004 /* IVB+ */
+#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+
+/* GEN6 interrupt control
+ * Note that the per-ring interrupt bits do alias with the global interrupt bits
+ * in GTIMR. */
#define GEN6_RENDER_HWSTAM 0x2098
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@@ -615,6 +697,21 @@
#define GEN6_BSD_RNCID 0x12198
+#define GEN7_FF_THREAD_MODE 0x20a0
+#define GEN7_FF_SCHED_MASK 0x0077070
+#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
+#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
+#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
+#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
+#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
+#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
+#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
+#define GEN7_FF_VS_SCHED_HW (0x0<<12)
+#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
+#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
+#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
+#define GEN7_FF_DS_SCHED_HW (0x0<<4)
+
/*
* Framebuffer compression (915+ only)
*/
@@ -743,9 +840,9 @@
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
- /* 6 reserved */
-#define GMBUS_PORT_DPD 7 /* HDMID */
-#define GMBUS_NUM_PORTS 8
+#define GMBUS_PORT_DPD 6 /* HDMID */
+#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
+#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
@@ -797,7 +894,9 @@
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -809,6 +908,7 @@
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@@ -904,6 +1004,7 @@
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
@@ -1044,6 +1145,9 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
+#define FW_BLC_SELF_VLV 0x6500
+#define FW_CSPWRDWNEN (1<<15)
+
/*
* Palette regs
*/
@@ -1601,9 +1705,12 @@
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
+/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29)
+#define VIDEO_DIP_PORT_D (3 << 29)
+#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
@@ -1614,6 +1721,10 @@
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
+#define VIDEO_DIP_FREQ_MASK (3 << 16)
+/* HSW and later: */
+#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
#define PP_STATUS 0x61200
@@ -2380,7 +2491,8 @@
/* Pipe A */
#define _PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
+#define DSL_LINEMASK_GEN2 0x00000fff
+#define DSL_LINEMASK_GEN3 0x00001fff
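The split mask reflects that gen2 reports a 12-bit scanline while gen3 and later report 13 bits. Reading the current scanline would presumably pick the mask by generation, along these lines (a sketch assuming the existing PIPEDSL(pipe) accessor):

	u32 mask = IS_GEN2(dev) ? DSL_LINEMASK_GEN2 : DSL_LINEMASK_GEN3;
	u32 scanline = I915_READ(PIPEDSL(pipe)) & mask;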
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
@@ -2422,23 +2534,30 @@
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
+#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
+#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
+#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -2463,6 +2582,40 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+#define VLV_DPFLIPSTAT 0x70028
+#define PIPEB_LINE_COMPARE_STATUS (1<<29)
+#define PIPEB_HLINE_INT_EN (1<<28)
+#define PIPEB_VBLANK_INT_EN (1<<27)
+#define SPRITED_FLIPDONE_INT_EN (1<<26)
+#define SPRITEC_FLIPDONE_INT_EN (1<<25)
+#define PLANEB_FLIPDONE_INT_EN (1<<24)
+#define PIPEA_LINE_COMPARE_STATUS (1<<21)
+#define PIPEA_HLINE_INT_EN (1<<20)
+#define PIPEA_VBLANK_INT_EN (1<<19)
+#define SPRITEB_FLIPDONE_INT_EN (1<<18)
+#define SPRITEA_FLIPDONE_INT_EN (1<<17)
+#define PLANEA_FLIPDONE_INT_EN (1<<16)
+
+#define DPINVGTT 0x7002c /* VLV only */
+#define CURSORB_INVALID_GTT_INT_EN (1<<23)
+#define CURSORA_INVALID_GTT_INT_EN (1<<22)
+#define SPRITED_INVALID_GTT_INT_EN (1<<21)
+#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
+#define PLANEB_INVALID_GTT_INT_EN (1<<19)
+#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
+#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
+#define PLANEA_INVALID_GTT_INT_EN (1<<16)
+#define DPINVGTT_EN_MASK 0xff0000
+#define CURSORB_INVALID_GTT_STATUS (1<<7)
+#define CURSORA_INVALID_GTT_STATUS (1<<6)
+#define SPRITED_INVALID_GTT_STATUS (1<<5)
+#define SPRITEC_INVALID_GTT_STATUS (1<<4)
+#define PLANEB_INVALID_GTT_STATUS (1<<3)
+#define SPRITEB_INVALID_GTT_STATUS (1<<2)
+#define SPRITEA_INVALID_GTT_STATUS (1<<1)
+#define PLANEA_INVALID_GTT_STATUS (1<<0)
+#define DPINVGTT_STATUS_MASK 0xff
+
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@@ -2492,11 +2645,28 @@
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
+/* drain latency register values*/
+#define DRAIN_LATENCY_PRECISION_32 32
+#define DRAIN_LATENCY_PRECISION_16 16
+#define VLV_DDL1 0x70050
+#define DDL_CURSORA_PRECISION_32 (1<<31)
+#define DDL_CURSORA_PRECISION_16 (0<<31)
+#define DDL_CURSORA_SHIFT 24
+#define DDL_PLANEA_PRECISION_32 (1<<7)
+#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define VLV_DDL2 0x70054
+#define DDL_CURSORB_PRECISION_32 (1<<31)
+#define DDL_CURSORB_PRECISION_16 (0<<31)
+#define DDL_CURSORB_SHIFT 24
+#define DDL_PLANEB_PRECISION_32 (1<<7)
+#define DDL_PLANEB_PRECISION_16 (0<<7)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
+#define VALLEYVIEW_FIFO_SIZE 255
#define G4X_FIFO_SIZE 127
#define I965_FIFO_SIZE 512
#define I945_FIFO_SIZE 127
@@ -2504,6 +2674,7 @@
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
+#define VALLEYVIEW_MAX_WM 0xff
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
@@ -2518,6 +2689,7 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+#define VALLEYVIEW_CURSOR_MAX_WM 64
#define I965_CURSOR_FIFO 64
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
@@ -2726,6 +2898,13 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK (0xfffff000)
+#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+ (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
+
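I915_MODIFY_DISPBASE() updates only the page-aligned surface address while re-using whatever control bits already sit in the low 12 bits of the register. A hedged usage sketch, where surf_addr stands for a page-aligned GTT address and is not a name from this patch:

	/* surf_addr: page-aligned GTT address of the new scanout buffer (assumed) */
	I915_MODIFY_DISPBASE(DSPSURF(plane), surf_addr);
	POSTING_READ(DSPSURF(plane));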
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
@@ -3058,25 +3237,38 @@
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
+#define DE_PIPEC_VBLANK_IVB (1<<10)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
-#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
-#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEB_VBLANK_IVB (1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
+#define MASTER_INTERRUPT_ENABLE (1<<31)
+
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
-/* GT interrupt */
-#define GT_PIPE_NOTIFY (1 << 4)
-#define GT_SYNC_STATUS (1 << 2)
-#define GT_USER_INTERRUPT (1 << 0)
-#define GT_BSD_USER_INTERRUPT (1 << 5)
-#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
-#define GT_BLT_USER_INTERRUPT (1 << 22)
+/* GT interrupt.
+ * Note that for gen6+ the ring-specific interrupt bits do alias with the
+ * corresponding bits in the per-ring interrupt control registers. */
+#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
+#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
+#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
+#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
+#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
+#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
+#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
+#define GT_PIPE_NOTIFY (1 << 4)
+#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
+#define GT_SYNC_STATUS (1 << 2)
+#define GT_USER_INTERRUPT (1 << 0)
#define GTISR 0x44010
#define GTIMR 0x44014
@@ -3129,7 +3321,7 @@
/* PCH */
-/* south display engine interrupt */
+/* south display engine interrupt: IBX */
#define SDE_AUDIO_POWER_D (1 << 27)
#define SDE_AUDIO_POWER_C (1 << 26)
#define SDE_AUDIO_POWER_B (1 << 25)
@@ -3165,15 +3357,44 @@
#define SDE_TRANSA_CRC_ERR (1 << 1)
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
-/* CPT */
-#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -3226,15 +3447,15 @@
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -3329,6 +3550,57 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_DATA_A 0x60208
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
+
+#define VLV_VIDEO_DIP_CTL_B 0x61170
+#define VLV_VIDEO_DIP_DATA_B 0x61174
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A 0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
+#define HSW_VIDEO_DIP_GCP_A 0x60210
+
+#define HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+#define HSW_VIDEO_DIP_GCP_B 0x61210
+
+#define HSW_TVIDEO_DIP_CTL(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(pipe) \
+ _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@@ -3489,6 +3761,9 @@
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
+/* LPT */
+#define FDI_PORT_WIDTH_2X_LPT (1<<19)
+#define FDI_PORT_WIDTH_1X_LPT (0<<19)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
@@ -3549,6 +3824,7 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
/* or SDVOB */
+#define VLV_HDMIB 0x61140
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
@@ -3714,6 +3990,8 @@
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
#define FORCEWAKE 0xA18C
+#define FORCEWAKE_VLV 0x1300b0
+#define FORCEWAKE_ACK_VLV 0x1300b4
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
@@ -3731,6 +4009,7 @@
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
@@ -3811,6 +4090,11 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define GEN6_GT_GFX_RC6_LOCKED 0x138104
+#define GEN6_GT_GFX_RC6 0x138108
+#define GEN6_GT_GFX_RC6p 0x13810C
+#define GEN6_GT_GFX_RC6pp 0x138110
+
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
@@ -3870,4 +4154,197 @@
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+/* HSW Power Wells */
+#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
+#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
+#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
+#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
+#define HSW_PWR_WELL_ENABLE (1<<31)
+#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_CTL5 0x45410
+#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
+#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
+#define HSW_PWR_WELL_FORCE_ON (1<<19)
+#define HSW_PWR_WELL_CTL6 0x45414
+
+/* Per-pipe DDI Function Control */
+#define PIPE_DDI_FUNC_CTL_A 0x60400
+#define PIPE_DDI_FUNC_CTL_B 0x61400
+#define PIPE_DDI_FUNC_CTL_C 0x62400
+#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
+#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
+ PIPE_DDI_FUNC_CTL_A, \
+ PIPE_DDI_FUNC_CTL_B)
+#define PIPE_DDI_FUNC_ENABLE (1<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define PIPE_DDI_PORT_MASK (0xf<<28)
+#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
+#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
+#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
+#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
+#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
+#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
+#define PIPE_DDI_BPC_8 (0<<20)
+#define PIPE_DDI_BPC_10 (1<<20)
+#define PIPE_DDI_BPC_6 (2<<20)
+#define PIPE_DDI_BPC_12 (3<<20)
+#define PIPE_DDI_BFI_ENABLE (1<<4)
+#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
+#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
+#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A 0x64040
+#define DP_TP_CTL_B 0x64140
+#define DP_TP_CTL(port) _PORT(port, \
+ DP_TP_CTL_A, \
+ DP_TP_CTL_B)
+#define DP_TP_CTL_ENABLE (1<<31)
+#define DP_TP_CTL_MODE_SST (0<<27)
+#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
+#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
+#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A 0x64044
+#define DP_TP_STATUS_B 0x64144
+#define DP_TP_STATUS(port) _PORT(port, \
+ DP_TP_STATUS_A, \
+ DP_TP_STATUS_B)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A 0x64000
+#define DDI_BUF_CTL_B 0x64100
+#define DDI_BUF_CTL(port) _PORT(port, \
+ DDI_BUF_CTL_A, \
+ DDI_BUF_CTL_B)
+#define DDI_BUF_CTL_ENABLE (1<<31)
+#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
+#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
+#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
+#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
+#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
+#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
+#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
+#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
+#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
+#define DDI_BUF_EMP_MASK (0xf<<24)
+#define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_PORT_WIDTH_X1 (0<<1)
+#define DDI_PORT_WIDTH_X2 (1<<1)
+#define DDI_PORT_WIDTH_X4 (3<<1)
+#define DDI_INIT_DISPLAY_DETECTED (1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A 0x64E00
+#define DDI_BUF_TRANS_B 0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, \
+ DDI_BUF_TRANS_A, \
+ DDI_BUF_TRANS_B)
+
+/* Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which contains the register offset; and SBI_DATA,
+ * which contains the payload */
+#define SBI_ADDR 0xC6000
+#define SBI_DATA 0xC6004
+#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_OP_CRRD (0x6<<8)
+#define SBI_CTL_OP_CRWR (0x7<<8)
+#define SBI_RESPONSE_FAIL (0x1<<1)
+#define SBI_RESPONSE_SUCCESS (0x0<<1)
+#define SBI_BUSY (0x1<<0)
+#define SBI_READY (0x0<<0)
+
+/* SBI offsets */
+#define SBI_SSCDIVINTPHASE6 0x0600
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
+#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
+#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
+#define SBI_SSCCTL 0x020c
+#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_DISABLE (1<<0)
+#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
+#define SBI_DBUFF0 0x2a00
+
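Spelled out, the indirect programming described above would go roughly as follows; the helper name is illustrative and the completion polling is an assumption rather than something this patch adds:

static void example_sbi_write(drm_i915_private_t *dev_priv, u16 reg, u32 value)
{
	/* wait for any in-flight transaction to finish */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return;
	}

	I915_WRITE(SBI_ADDR, (u32)reg << 16);	/* target offset in the high word */
	I915_WRITE(SBI_DATA, value);
	I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRWR | SBI_BUSY);

	wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100);
}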
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE 0xC6020
+#define PIXCLK_GATE_UNGATE (1<<0)
+#define PIXCLK_GATE_GATE (0<<0)
+
+/* SPLL */
+#define SPLL_CTL 0x46020
+#define SPLL_PLL_ENABLE (1<<31)
+#define SPLL_PLL_SCC (1<<28)
+#define SPLL_PLL_NON_SCC (2<<28)
+#define SPLL_PLL_FREQ_810MHz (0<<26)
+#define SPLL_PLL_FREQ_1350MHz (1<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1 0x46040
+#define WRPLL_CTL2 0x46060
+#define WRPLL_PLL_ENABLE (1<<31)
+#define WRPLL_PLL_SELECT_SSC (0x01<<28)
+#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+/* WRPLL divider programming */
+#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
+#define WRPLL_DIVIDER_POST(x) ((x)<<8)
+#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A 0x46100
+#define PORT_CLK_SEL_B 0x46104
+#define PORT_CLK_SEL(port) _PORT(port, \
+ PORT_CLK_SEL_A, \
+ PORT_CLK_SEL_B)
+#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
+#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
+#define PORT_CLK_SEL_LCPLL_810 (2<<29)
+#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL1 (4<<29)
+#define PORT_CLK_SEL_WRPLL2 (5<<29)
+
+/* Pipe clock selection */
+#define PIPE_CLK_SEL_A 0x46140
+#define PIPE_CLK_SEL_B 0x46144
+#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
+ PIPE_CLK_SEL_A, \
+ PIPE_CLK_SEL_B)
+/* For each pipe, we need to select the corresponding port clock */
+#define PIPE_CLK_SEL_DISABLED (0x0<<29)
+#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+
+/* LCPLL Control */
+#define LCPLL_CTL 0x130040
+#define LCPLL_PLL_DISABLE (1<<31)
+#define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CD_CLOCK_DISABLE (1<<25)
+#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A 0x45270
+#define PIPE_WM_LINETIME_B 0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
+ PIPE_WM_LINETIME_A, \
+ PIPE_WM_LINETIME_B)
+#define PIPE_WM_LINETIME_MASK (0x1ff)
+#define PIPE_WM_LINETIME_TIME(x) ((x))
+#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
+#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP 0xc2014
+#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
+#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
+#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2b5eb229ff2c..a748e5cabe14 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -40,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
return false;
if (HAS_PCH_SPLIT(dev))
- dpll_reg = PCH_DPLL(pipe);
+ dpll_reg = _PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
@@ -740,8 +740,11 @@ static void i915_restore_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+	 * otherwise we get a blank eDP screen after S3 on some machines
+ */
I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
@@ -876,22 +879,6 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
- mutex_unlock(&dev->struct_mutex);
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_init_clock_gating(dev);
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- intel_init_emon(dev);
- }
-
- if (INTEL_INFO(dev)->gen >= 6) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
- mutex_lock(&dev->struct_mutex);
/* Cache mode state */
I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 000000000000..79f83445afa0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "i915_drv.h"
+
+static u32 calc_residency(struct drm_device *dev, const u32 reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 raw_time; /* 32b value may overflow during fixed point math */
+
+ if (!intel_enable_rc6(dev))
+ return 0;
+
+ raw_time = I915_READ(reg) * 128ULL;
+ return DIV_ROUND_UP_ULL(raw_time, 100000);
+}
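The conversion encodes the fact that each residency counter tick is worth 1.28 us, so milliseconds fall out of count * 128 / 100000; the intermediate product is what can overflow 32 bits, hence the u64. A worked example with an illustrative counter value:

	/*
	 * raw counter of 781250 ticks:
	 *   781250 * 128       = 100000000
	 *   100000000 / 100000 = 1000   ->  1000 ms of RC6 residency
	 */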
+
+static ssize_t
+show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
+}
+
+static ssize_t
+show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
+}
+
+static ssize_t
+show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
+}
+
+static ssize_t
+show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+ return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
+}
+
+static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
+static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
+static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
+static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+
+static struct attribute *rc6_attrs[] = {
+ &dev_attr_rc6_enable.attr,
+ &dev_attr_rc6_residency_ms.attr,
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute_group rc6_attr_group = {
+ .name = power_group_name,
+ .attrs = rc6_attrs
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ int ret;
+
+ /* ILK doesn't have any residency information */
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+ if (ret)
+ DRM_ERROR("sysfs setup failed\n");
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+ sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+}
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index ead876eb6ea0..f1df2bd4ecf4 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -7,5 +7,7 @@
#include "i915_drv.h"
+#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "i915_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bae3edf956a4..f413899475e9 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -9,6 +9,7 @@
#include <acpi/acpi_drivers.h>
#include "drmP.h"
+#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
@@ -182,8 +183,6 @@ static void intel_dsm_platform_mux_info(void)
DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
intel_dsm_mux_type(info->buffer.pointer[3]));
}
- } else {
- DRM_ERROR("MUX INFO call failed\n");
}
out:
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b48fc2a8410c..353459362f6f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
}
+/* Get the lvds_fp_timing entry for the given panel index;
+ * this function may return NULL if the corresponding entry is invalid.
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+ const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs,
+ int index)
+{
+ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+ size_t ofs;
+
+ if (index >= ARRAY_SIZE(ptrs->ptr))
+ return NULL;
+ ofs = ptrs->ptr[index].fp_timing_offset;
+ if (ofs < data_ofs ||
+ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+ return NULL;
+ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct bdb_lvds_lfp_data *lvds_lfp_data;
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int i, downclock;
@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
"Normal Clock %dKHz, downclock %dKHz\n",
panel_fixed_mode->clock, 10*downclock);
}
+
+ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
+ if (fp_timing) {
+ /* check the resolution, just to be sure */
+ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+ fp_timing->y_res == panel_fixed_mode->vdisplay) {
+ dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
+ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+ dev_priv->bios_lvds_val);
+ }
+ }
}
/* Try to find sdvo panel data */
@@ -256,6 +292,11 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
int index;
index = i915_vbt_sdvo_panel_type;
+ if (index == -2) {
+ DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ return;
+ }
+
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
@@ -332,11 +373,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if (bus_pin >= 1 && bus_pin <= 6)
+ if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
- block_size);
+ block_size);
}
}
}
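The open-coded range check is replaced by intel_gmbus_is_port_valid(), which, given the GMBUS_PORT_* renumbering in i915_reg.h above, presumably reduces to something along these lines (a sketch, not the patch's own definition):

static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}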
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 90b9793fd5da..75a70c46ef1b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -55,18 +55,36 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
-static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 temp, reg;
+ u32 temp;
- if (HAS_PCH_SPLIT(dev))
- reg = PCH_ADPA;
- else
- reg = ADPA;
+ temp = I915_READ(PCH_ADPA);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* Just leave port enable cleared */
+ break;
+ }
+
+ I915_WRITE(PCH_ADPA, temp);
+}
- temp = I915_READ(reg);
+static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(ADPA);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
@@ -85,7 +103,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
- I915_WRITE(reg, temp);
+ I915_WRITE(ADPA, temp);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -278,9 +296,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
struct edid *edid;
bool is_digital = false;
+ struct i2c_adapter *i2c;
- edid = drm_get_edid(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ edid = drm_get_edid(connector, i2c);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
@@ -476,15 +495,16 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ struct i2c_adapter *i2c;
- ret = intel_ddc_get_modes(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ ret = intel_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- return intel_ddc_get_modes(connector,
- &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ return intel_ddc_get_modes(connector, i2c);
}
static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,12 +527,20 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
- .dpms = intel_crt_dpms,
+static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.prepare = intel_encoder_prepare,
.commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
+ .dpms = pch_crt_dpms,
+};
+
+static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
+ .mode_fixup = intel_crt_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .commit = intel_encoder_commit,
+ .mode_set = intel_crt_mode_set,
+ .dpms = gmch_crt_dpms,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -536,7 +564,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
+ DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
}
@@ -558,6 +586,7 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
@@ -586,14 +615,23 @@ void intel_crt_init(struct drm_device *dev)
crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
- crt->base.crtc_mask = (1 << 0) | (1 << 1);
+ if (IS_HASWELL(dev))
+ crt->base.crtc_mask = (1 << 0);
+ else
+ crt->base.crtc_mask = (1 << 0) | (1 << 1);
+
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
+ if (HAS_PCH_SPLIT(dev))
+ encoder_helper_funcs = &pch_encoder_funcs;
+ else
+ encoder_helper_funcs = &gmch_encoder_funcs;
+
+ drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 000000000000..46d1e886c692
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+ 0x00FFFFFF, 0x0006000E, /* DP parameters */
+ 0x00D75FFF, 0x0005000A,
+ 0x00C30FFF, 0x00040006,
+ 0x80AAAFFF, 0x000B0000,
+ 0x00FFFFFF, 0x0005000A,
+ 0x00D75FFF, 0x000C0004,
+ 0x80C30FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006,
+ 0x80D75FFF, 0x000B0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+ 0x00FFFFFF, 0x0007000E, /* FDI parameters */
+ 0x00D75FFF, 0x000F000A,
+ 0x00C30FFF, 0x00060006,
+ 0x00AAAFFF, 0x001E0000,
+ 0x00FFFFFF, 0x000F000A,
+ 0x00D75FFF, 0x00160004,
+ 0x00C30FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00060006,
+ 0x00D75FFF, 0x001E0000,
+ 0x00FFFFFF, 0x00040006 /* HDMI parameters */
+};
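+
+/* Each entry in the tables above is a pair of 32-bit values programmed
+ * back-to-back into the DDI_BUF_TRANS register block for one
+ * voltage-swing/pre-emphasis level; the last pair in each table is the
+ * shared HDMI/DVI entry, which is why both tables end with the same values.
+ */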
+
+/* On Haswell, DDI port buffers must be programmed with correct values
+ * in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those
+ */
+void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
+ int i;
+ const u32 *ddi_translations = ((use_fdi_mode) ?
+ hsw_ddi_translations_fdi :
+ hsw_ddi_translations_dp);
+
+ DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
+ port_name(port),
+ use_fdi_mode ? "FDI" : "DP");
+
+ WARN((use_fdi_mode && (port != PORT_E)),
+ "Programming port %c in FDI mode, this probably will not work.\n",
+ port_name(port));
+
+ for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ I915_WRITE(reg, ddi_translations[i]);
+ reg += 4;
+ }
+}
+
+/* Program DDI buffer translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+ int port;
+
+ if (IS_HASWELL(dev)) {
+ for (port = PORT_A; port < PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port, false);
+
+ /* DDI E is the suggested one to work in FDI mode, so program it as such by
+ * default. It will have to be re-programmed in case a DP output
+ * is detected on it.
+ */
+ intel_prepare_ddi_buffers(dev, PORT_E, true);
+ }
+}
+
+static const long hsw_ddi_buf_ctl_values[] = {
+ DDI_BUF_EMP_400MV_0DB_HSW,
+ DDI_BUF_EMP_400MV_3_5DB_HSW,
+ DDI_BUF_EMP_400MV_6DB_HSW,
+ DDI_BUF_EMP_400MV_9_5DB_HSW,
+ DDI_BUF_EMP_600MV_0DB_HSW,
+ DDI_BUF_EMP_600MV_3_5DB_HSW,
+ DDI_BUF_EMP_600MV_6DB_HSW,
+ DDI_BUF_EMP_800MV_0DB_HSW,
+ DDI_BUF_EMP_800MV_3_5DB_HSW
+};
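+
+/* These are the voltage-swing/emphasis selections that hsw_fdi_link_train()
+ * below steps through, lowest swing first, until the FDI auto-training
+ * status reports success.
+ */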
+
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lines with
+ * DDI A (which is used for eDP)
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp, i;
+
+ /* Configure CPU PLL, wait for warmup */
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE |
+ SPLL_PLL_FREQ_1350MHz |
+ SPLL_PLL_SCC);
+
+ /* Use SPLL to drive the output when in FDI mode */
+ I915_WRITE(PORT_CLK_SEL(PORT_E),
+ PORT_CLK_SEL_SPLL);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(PORT_E));
+
+ udelay(20);
+
+ /* Start the training iterating through available voltages and emphasis */
+ for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+ /* Configure DP_TP_CTL with auto-training */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 |
+ DP_TP_CTL_ENABLE);
+
+ /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp = (temp & ~DDI_BUF_EMP_MASK);
+ I915_WRITE(DDI_BUF_CTL(PORT_E),
+ temp |
+ DDI_BUF_CTL_ENABLE |
+ DDI_PORT_WIDTH_X2 |
+ hsw_ddi_buf_ctl_values[i]);
+
+ udelay(600);
+
+ /* Enable CPU FDI Receiver with auto-training */
+ reg = FDI_RX_CTL(pipe);
+ I915_WRITE(reg,
+ I915_READ(reg) |
+ FDI_LINK_TRAIN_AUTO |
+ FDI_RX_ENABLE |
+ FDI_LINK_TRAIN_PATTERN_1_CPT |
+ FDI_RX_ENHANCE_FRAME_ENABLE |
+ FDI_PORT_WIDTH_2X_LPT |
+ FDI_RX_PLL_ENABLE);
+ POSTING_READ(reg);
+ udelay(100);
+
+ temp = I915_READ(DP_TP_STATUS(PORT_E));
+ if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+ DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+
+ /* Enable normal pixel sending for FDI */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN |
+ DP_TP_CTL_LINK_TRAIN_NORMAL |
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+ DP_TP_CTL_ENABLE);
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
+ PIPE_DDI_MODE_SELECT_FDI |
+ PIPE_DDI_FUNC_ENABLE |
+ PIPE_DDI_PORT_WIDTH_X2;
+ I915_WRITE(DDI_FUNC_CTL(pipe),
+ temp);
+ break;
+ } else {
+ DRM_ERROR("Error training BUF_CTL %d\n", i);
+
+ /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+ I915_WRITE(DP_TP_CTL(PORT_E),
+ I915_READ(DP_TP_CTL(PORT_E)) &
+ ~DP_TP_CTL_ENABLE);
+ I915_WRITE(FDI_RX_CTL(pipe),
+ I915_READ(FDI_RX_CTL(pipe)) &
+ ~FDI_RX_PLL_ENABLE);
+ continue;
+ }
+ }
+
+ DRM_DEBUG_KMS("FDI train done.\n");
+}
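+
+/* Rough upper bound on the training time above, assuming every voltage/
+ * emphasis step has to be tried: 20us of SPLL warmup plus at most 9
+ * iterations of (600us buffer settle + 100us after FDI RX enable), i.e.
+ * roughly 6.5ms before the loop falls through.
+ */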
+
+/* For DDI connections, it is possible to support different outputs over the
+ * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
+ * the time the output is detected what exactly is on the other end of it. This
+ * function aims at providing support for this detection and proper output
+ * configuration.
+ */
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+ /* For now, we don't do any proper output detection and assume that we
+ * handle HDMI only */
+
+ switch (port) {
+ case PORT_A:
+ /* We don't handle eDP and DP yet */
+ DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
+ break;
+ /* Assume that the ports B, C and D are working in HDMI mode for now */
+ case PORT_B:
+ case PORT_C:
+ case PORT_D:
+ intel_hdmi_init(dev, DDI_BUF_CTL(port));
+ break;
+ default:
+ DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
+ port);
+ break;
+ }
+}
+
+/* WRPLL clock dividers */
+struct wrpll_tmds_clock {
+ u32 clock;
+ u16 p; /* Post divider */
+ u16 n2; /* Feedback divider */
+ u16 r2; /* Reference divider */
+};
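+
+/* The dividers below appear to follow the relation
+ *
+ *   WRPLL output (MHz) ~= 2 * 2700 * n2 / (r2 * p)
+ *
+ * with the 2700MHz LCPLL reference selected in intel_ddi_mode_set(), the
+ * output being roughly 10x the TMDS pixel clock in the first column (kHz).
+ * For example, {148500, 6, 33, 20} gives 2 * 2700 * 33 / (20 * 6) = 1485MHz,
+ * i.e. 10 * 148.5MHz.
+ */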
+
+/* Table of matching values for WRPLL clocks programming for each frequency */
+static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
+ {19750, 38, 25, 18},
+ {20000, 48, 32, 18},
+ {21000, 36, 21, 15},
+ {21912, 42, 29, 17},
+ {22000, 36, 22, 15},
+ {23000, 36, 23, 15},
+ {23500, 40, 40, 23},
+ {23750, 26, 16, 14},
+ {23750, 26, 16, 14},
+ {24000, 36, 24, 15},
+ {25000, 36, 25, 15},
+ {25175, 26, 40, 33},
+ {25200, 30, 21, 15},
+ {26000, 36, 26, 15},
+ {27000, 30, 21, 14},
+ {27027, 18, 100, 111},
+ {27500, 30, 29, 19},
+ {28000, 34, 30, 17},
+ {28320, 26, 30, 22},
+ {28322, 32, 42, 25},
+ {28750, 24, 23, 18},
+ {29000, 30, 29, 18},
+ {29750, 32, 30, 17},
+ {30000, 30, 25, 15},
+ {30750, 30, 41, 24},
+ {31000, 30, 31, 18},
+ {31500, 30, 28, 16},
+ {32000, 30, 32, 18},
+ {32500, 28, 32, 19},
+ {33000, 24, 22, 15},
+ {34000, 28, 30, 17},
+ {35000, 26, 32, 19},
+ {35500, 24, 30, 19},
+ {36000, 26, 26, 15},
+ {36750, 26, 46, 26},
+ {37000, 24, 23, 14},
+ {37762, 22, 40, 26},
+ {37800, 20, 21, 15},
+ {38000, 24, 27, 16},
+ {38250, 24, 34, 20},
+ {39000, 24, 26, 15},
+ {40000, 24, 32, 18},
+ {40500, 20, 21, 14},
+ {40541, 22, 147, 89},
+ {40750, 18, 19, 14},
+ {41000, 16, 17, 14},
+ {41500, 22, 44, 26},
+ {41540, 22, 44, 26},
+ {42000, 18, 21, 15},
+ {42500, 22, 45, 26},
+ {43000, 20, 43, 27},
+ {43163, 20, 24, 15},
+ {44000, 18, 22, 15},
+ {44900, 20, 108, 65},
+ {45000, 20, 25, 15},
+ {45250, 20, 52, 31},
+ {46000, 18, 23, 15},
+ {46750, 20, 45, 26},
+ {47000, 20, 40, 23},
+ {48000, 18, 24, 15},
+ {49000, 18, 49, 30},
+ {49500, 16, 22, 15},
+ {50000, 18, 25, 15},
+ {50500, 18, 32, 19},
+ {51000, 18, 34, 20},
+ {52000, 18, 26, 15},
+ {52406, 14, 34, 25},
+ {53000, 16, 22, 14},
+ {54000, 16, 24, 15},
+ {54054, 16, 173, 108},
+ {54500, 14, 24, 17},
+ {55000, 12, 22, 18},
+ {56000, 14, 45, 31},
+ {56250, 16, 25, 15},
+ {56750, 14, 25, 17},
+ {57000, 16, 27, 16},
+ {58000, 16, 43, 25},
+ {58250, 16, 38, 22},
+ {58750, 16, 40, 23},
+ {59000, 14, 26, 17},
+ {59341, 14, 40, 26},
+ {59400, 16, 44, 25},
+ {60000, 16, 32, 18},
+ {60500, 12, 39, 29},
+ {61000, 14, 49, 31},
+ {62000, 14, 37, 23},
+ {62250, 14, 42, 26},
+ {63000, 12, 21, 15},
+ {63500, 14, 28, 17},
+ {64000, 12, 27, 19},
+ {65000, 14, 32, 19},
+ {65250, 12, 29, 20},
+ {65500, 12, 32, 22},
+ {66000, 12, 22, 15},
+ {66667, 14, 38, 22},
+ {66750, 10, 21, 17},
+ {67000, 14, 33, 19},
+ {67750, 14, 58, 33},
+ {68000, 14, 30, 17},
+ {68179, 14, 46, 26},
+ {68250, 14, 46, 26},
+ {69000, 12, 23, 15},
+ {70000, 12, 28, 18},
+ {71000, 12, 30, 19},
+ {72000, 12, 24, 15},
+ {73000, 10, 23, 17},
+ {74000, 12, 23, 14},
+ {74176, 8, 100, 91},
+ {74250, 10, 22, 16},
+ {74481, 12, 43, 26},
+ {74500, 10, 29, 21},
+ {75000, 12, 25, 15},
+ {75250, 10, 39, 28},
+ {76000, 12, 27, 16},
+ {77000, 12, 53, 31},
+ {78000, 12, 26, 15},
+ {78750, 12, 28, 16},
+ {79000, 10, 38, 26},
+ {79500, 10, 28, 19},
+ {80000, 12, 32, 18},
+ {81000, 10, 21, 14},
+ {81081, 6, 100, 111},
+ {81624, 8, 29, 24},
+ {82000, 8, 17, 14},
+ {83000, 10, 40, 26},
+ {83950, 10, 28, 18},
+ {84000, 10, 28, 18},
+ {84750, 6, 16, 17},
+ {85000, 6, 17, 18},
+ {85250, 10, 30, 19},
+ {85750, 10, 27, 17},
+ {86000, 10, 43, 27},
+ {87000, 10, 29, 18},
+ {88000, 10, 44, 27},
+ {88500, 10, 41, 25},
+ {89000, 10, 28, 17},
+ {89012, 6, 90, 91},
+ {89100, 10, 33, 20},
+ {90000, 10, 25, 15},
+ {91000, 10, 32, 19},
+ {92000, 10, 46, 27},
+ {93000, 10, 31, 18},
+ {94000, 10, 40, 23},
+ {94500, 10, 28, 16},
+ {95000, 10, 44, 25},
+ {95654, 10, 39, 22},
+ {95750, 10, 39, 22},
+ {96000, 10, 32, 18},
+ {97000, 8, 23, 16},
+ {97750, 8, 42, 29},
+ {98000, 8, 45, 31},
+ {99000, 8, 22, 15},
+ {99750, 8, 34, 23},
+ {100000, 6, 20, 18},
+ {100500, 6, 19, 17},
+ {101000, 6, 37, 33},
+ {101250, 8, 21, 14},
+ {102000, 6, 17, 15},
+ {102250, 6, 25, 22},
+ {103000, 8, 29, 19},
+ {104000, 8, 37, 24},
+ {105000, 8, 28, 18},
+ {106000, 8, 22, 14},
+ {107000, 8, 46, 29},
+ {107214, 8, 27, 17},
+ {108000, 8, 24, 15},
+ {108108, 8, 173, 108},
+ {109000, 6, 23, 19},
+ {109000, 6, 23, 19},
+ {110000, 6, 22, 18},
+ {110013, 6, 22, 18},
+ {110250, 8, 49, 30},
+ {110500, 8, 36, 22},
+ {111000, 8, 23, 14},
+ {111264, 8, 150, 91},
+ {111375, 8, 33, 20},
+ {112000, 8, 63, 38},
+ {112500, 8, 25, 15},
+ {113100, 8, 57, 34},
+ {113309, 8, 42, 25},
+ {114000, 8, 27, 16},
+ {115000, 6, 23, 18},
+ {116000, 8, 43, 25},
+ {117000, 8, 26, 15},
+ {117500, 8, 40, 23},
+ {118000, 6, 38, 29},
+ {119000, 8, 30, 17},
+ {119500, 8, 46, 26},
+ {119651, 8, 39, 22},
+ {120000, 8, 32, 18},
+ {121000, 6, 39, 29},
+ {121250, 6, 31, 23},
+ {121750, 6, 23, 17},
+ {122000, 6, 42, 31},
+ {122614, 6, 30, 22},
+ {123000, 6, 41, 30},
+ {123379, 6, 37, 27},
+ {124000, 6, 51, 37},
+ {125000, 6, 25, 18},
+ {125250, 4, 13, 14},
+ {125750, 4, 27, 29},
+ {126000, 6, 21, 15},
+ {127000, 6, 24, 17},
+ {127250, 6, 41, 29},
+ {128000, 6, 27, 19},
+ {129000, 6, 43, 30},
+ {129859, 4, 25, 26},
+ {130000, 6, 26, 18},
+ {130250, 6, 42, 29},
+ {131000, 6, 32, 22},
+ {131500, 6, 38, 26},
+ {131850, 6, 41, 28},
+ {132000, 6, 22, 15},
+ {132750, 6, 28, 19},
+ {133000, 6, 34, 23},
+ {133330, 6, 37, 25},
+ {134000, 6, 61, 41},
+ {135000, 6, 21, 14},
+ {135250, 6, 167, 111},
+ {136000, 6, 62, 41},
+ {137000, 6, 35, 23},
+ {138000, 6, 23, 15},
+ {138500, 6, 40, 26},
+ {138750, 6, 37, 24},
+ {139000, 6, 34, 22},
+ {139050, 6, 34, 22},
+ {139054, 6, 34, 22},
+ {140000, 6, 28, 18},
+ {141000, 6, 36, 23},
+ {141500, 6, 22, 14},
+ {142000, 6, 30, 19},
+ {143000, 6, 27, 17},
+ {143472, 4, 17, 16},
+ {144000, 6, 24, 15},
+ {145000, 6, 29, 18},
+ {146000, 6, 47, 29},
+ {146250, 6, 26, 16},
+ {147000, 6, 49, 30},
+ {147891, 6, 23, 14},
+ {148000, 6, 23, 14},
+ {148250, 6, 28, 17},
+ {148352, 4, 100, 91},
+ {148500, 6, 33, 20},
+ {149000, 6, 48, 29},
+ {150000, 6, 25, 15},
+ {151000, 4, 19, 17},
+ {152000, 6, 27, 16},
+ {152280, 6, 44, 26},
+ {153000, 6, 34, 20},
+ {154000, 6, 53, 31},
+ {155000, 6, 31, 18},
+ {155250, 6, 50, 29},
+ {155750, 6, 45, 26},
+ {156000, 6, 26, 15},
+ {157000, 6, 61, 35},
+ {157500, 6, 28, 16},
+ {158000, 6, 65, 37},
+ {158250, 6, 44, 25},
+ {159000, 6, 53, 30},
+ {159500, 6, 39, 22},
+ {160000, 6, 32, 18},
+ {161000, 4, 31, 26},
+ {162000, 4, 18, 15},
+ {162162, 4, 131, 109},
+ {162500, 4, 53, 44},
+ {163000, 4, 29, 24},
+ {164000, 4, 17, 14},
+ {165000, 4, 22, 18},
+ {166000, 4, 32, 26},
+ {167000, 4, 26, 21},
+ {168000, 4, 46, 37},
+ {169000, 4, 104, 83},
+ {169128, 4, 64, 51},
+ {169500, 4, 39, 31},
+ {170000, 4, 34, 27},
+ {171000, 4, 19, 15},
+ {172000, 4, 51, 40},
+ {172750, 4, 32, 25},
+ {172800, 4, 32, 25},
+ {173000, 4, 41, 32},
+ {174000, 4, 49, 38},
+ {174787, 4, 22, 17},
+ {175000, 4, 35, 27},
+ {176000, 4, 30, 23},
+ {177000, 4, 38, 29},
+ {178000, 4, 29, 22},
+ {178500, 4, 37, 28},
+ {179000, 4, 53, 40},
+ {179500, 4, 73, 55},
+ {180000, 4, 20, 15},
+ {181000, 4, 55, 41},
+ {182000, 4, 31, 23},
+ {183000, 4, 42, 31},
+ {184000, 4, 30, 22},
+ {184750, 4, 26, 19},
+ {185000, 4, 37, 27},
+ {186000, 4, 51, 37},
+ {187000, 4, 36, 26},
+ {188000, 4, 32, 23},
+ {189000, 4, 21, 15},
+ {190000, 4, 38, 27},
+ {190960, 4, 41, 29},
+ {191000, 4, 41, 29},
+ {192000, 4, 27, 19},
+ {192250, 4, 37, 26},
+ {193000, 4, 20, 14},
+ {193250, 4, 53, 37},
+ {194000, 4, 23, 16},
+ {194208, 4, 23, 16},
+ {195000, 4, 26, 18},
+ {196000, 4, 45, 31},
+ {197000, 4, 35, 24},
+ {197750, 4, 41, 28},
+ {198000, 4, 22, 15},
+ {198500, 4, 25, 17},
+ {199000, 4, 28, 19},
+ {200000, 4, 37, 25},
+ {201000, 4, 61, 41},
+ {202000, 4, 112, 75},
+ {202500, 4, 21, 14},
+ {203000, 4, 146, 97},
+ {204000, 4, 62, 41},
+ {204750, 4, 44, 29},
+ {205000, 4, 38, 25},
+ {206000, 4, 29, 19},
+ {207000, 4, 23, 15},
+ {207500, 4, 40, 26},
+ {208000, 4, 37, 24},
+ {208900, 4, 48, 31},
+ {209000, 4, 48, 31},
+ {209250, 4, 31, 20},
+ {210000, 4, 28, 18},
+ {211000, 4, 25, 16},
+ {212000, 4, 22, 14},
+ {213000, 4, 30, 19},
+ {213750, 4, 38, 24},
+ {214000, 4, 46, 29},
+ {214750, 4, 35, 22},
+ {215000, 4, 43, 27},
+ {216000, 4, 24, 15},
+ {217000, 4, 37, 23},
+ {218000, 4, 42, 26},
+ {218250, 4, 42, 26},
+ {218750, 4, 34, 21},
+ {219000, 4, 47, 29},
+ {219000, 4, 47, 29},
+ {220000, 4, 44, 27},
+ {220640, 4, 49, 30},
+ {220750, 4, 36, 22},
+ {221000, 4, 36, 22},
+ {222000, 4, 23, 14},
+ {222525, 4, 28, 17},
+ {222750, 4, 33, 20},
+ {227000, 4, 37, 22},
+ {230250, 4, 29, 17},
+ {233500, 4, 38, 22},
+ {235000, 4, 40, 23},
+ {238000, 4, 30, 17},
+ {241500, 2, 17, 19},
+ {245250, 2, 20, 22},
+ {247750, 2, 22, 24},
+ {253250, 2, 15, 16},
+ {256250, 2, 18, 19},
+ {262500, 2, 31, 32},
+ {267250, 2, 66, 67},
+ {268500, 2, 94, 95},
+ {270000, 2, 14, 14},
+ {272500, 2, 77, 76},
+ {273750, 2, 57, 56},
+ {280750, 2, 24, 23},
+ {281250, 2, 23, 22},
+ {286000, 2, 17, 16},
+ {291750, 2, 26, 24},
+ {296703, 2, 56, 51},
+ {297000, 2, 22, 20},
+ {298000, 2, 21, 19},
+};
+
+void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
+ int pipe = intel_crtc->pipe;
+ int p, n2, r2, valid=0;
+ u32 temp, i;
+
+ /* On Haswell, we need to enable the clocks and prepare DDI function to
+ * work in HDMI mode for this pipe.
+ */
+ DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+
+ for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
+ if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
+ p = wrpll_tmds_clock_table[i].p;
+ n2 = wrpll_tmds_clock_table[i].n2;
+ r2 = wrpll_tmds_clock_table[i].r2;
+
+ DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
+ crtc->mode.clock,
+ p, n2, r2);
+
+ valid = 1;
+ break;
+ }
+ }
+
+ if (!valid) {
+ DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
+ crtc->mode.clock);
+ return;
+ }
+
+ /* Enable LCPLL if disabled */
+ temp = I915_READ(LCPLL_CTL);
+ if (temp & LCPLL_PLL_DISABLE)
+ I915_WRITE(LCPLL_CTL,
+ temp & ~LCPLL_PLL_DISABLE);
+
+ /* Configure WR PLL 1, program the correct divider values for
+ * the desired frequency and wait for warmup */
+ I915_WRITE(WRPLL_CTL1,
+ WRPLL_PLL_ENABLE |
+ WRPLL_PLL_SELECT_LCPLL_2700 |
+ WRPLL_DIVIDER_REFERENCE(r2) |
+ WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p));
+
+ udelay(20);
+
+ /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
+ * this port for connection.
+ */
+ I915_WRITE(PORT_CLK_SEL(port),
+ PORT_CLK_SEL_WRPLL1);
+ I915_WRITE(PIPE_CLK_SEL(pipe),
+ PIPE_CLK_SEL_PORT(port));
+
+ udelay(20);
+
+ if (intel_hdmi->has_audio) {
+ /* Proper support for digital audio needs new logic and a new set
+ * of registers, so we leave it for a future patch.
+ */
+ DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
+ pipe_name(intel_crtc->pipe));
+ }
+
+ /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+ temp = I915_READ(DDI_FUNC_CTL(pipe));
+ temp &= ~PIPE_DDI_PORT_MASK;
+ temp &= ~PIPE_DDI_BPC_12;
+ temp |= PIPE_DDI_SELECT_PORT(port) |
+ PIPE_DDI_MODE_SELECT_HDMI |
+ ((intel_crtc->bpp > 24) ?
+ PIPE_DDI_BPC_12 :
+ PIPE_DDI_BPC_8) |
+ PIPE_DDI_FUNC_ENABLE;
+
+ I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
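+
+/* The sequence above, in short: look up the WRPLL dividers for the mode's
+ * pixel clock, make sure LCPLL is running, program and warm up WRPLL1,
+ * route both the port and the pipe to that clock, and finally enable the
+ * DDI function in HDMI mode and send the AVI/SPD infoframes.
+ */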
+
+void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int port = intel_hdmi->ddi_port;
+ u32 temp;
+
+ temp = I915_READ(DDI_BUF_CTL(port));
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ } else {
+ temp |= DDI_BUF_CTL_ENABLE;
+ }
+
+ /* Update DDI_BUF_CTL. In HDMI/DVI mode, the port width and
+ * swing/emphasis values are ignored, so nothing special needs
+ * to be done besides enabling or disabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ temp);
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b1cf3b3ff51..a8538ac0299d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/cpufreq.h>
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -44,7 +44,6 @@
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -360,6 +359,88 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
+u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+ unsigned long flags;
+ u32 val = 0;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO idle wait timed out\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(DPIO_REG, reg);
+ I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+ DPIO_BYTE);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO read wait timed out\n");
+ goto out_unlock;
+ }
+ val = I915_READ(DPIO_DATA);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+ return val;
+}
+
+static void vlv_init_dpio(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Reset the DPIO config */
+ I915_WRITE(DPIO_CTL, 0);
+ POSTING_READ(DPIO_CTL);
+ I915_WRITE(DPIO_CTL, 1);
+ POSTING_READ(DPIO_CTL);
+}
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+ },
+ },
+ { } /* terminating entry */
+};
+
+static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
+ unsigned int reg)
+{
+ unsigned int val;
+
+ /* use the module option value if specified */
+ if (i915_lvds_channel_mode > 0)
+ return i915_lvds_channel_mode == 2;
+
+ if (dmi_check_system(intel_dual_link_lvds))
+ return true;
+
+ if (dev_priv->lvds_val)
+ val = dev_priv->lvds_val;
+ else {
+ /* BIOS should set the proper LVDS register value at boot, but
+ * in reality, it doesn't set the value when the lid is closed;
+ * we need to check "the value to be set" in VBT when LVDS
+ * register is uninitialized.
+ */
+ val = I915_READ(reg);
+ if (!(val & ~LVDS_DETECTED))
+ val = dev_priv->bios_lvds_val;
+ dev_priv->lvds_val = val;
+ }
+ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
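+
+/* Order of precedence above: the i915_lvds_channel_mode module parameter,
+ * then the DMI quirk list, then the cached/VBT-derived LVDS register value;
+ * dual-link is reported when the CLKB power bits indicate both channels are
+ * powered up.
+ */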
+
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
@@ -368,8 +449,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP) {
+ if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -397,8 +477,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (is_dual_link_lvds(dev_priv, LVDS))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
@@ -536,8 +615,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
* reliably set up different single/dual channel state, if we
* even can.
*/
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
+ if (is_dual_link_lvds(dev_priv, LVDS))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
@@ -706,6 +784,17 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
+static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 frame, frame_reg = PIPEFRAME(pipe);
+
+ frame = I915_READ(frame_reg);
+
+ if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
+ DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
/**
* intel_wait_for_vblank - wait for vblank on a given pipe
* @dev: drm device
@@ -719,6 +808,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ironlake_wait_for_vblank(dev, pipe);
+ return;
+ }
+
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
*
@@ -771,15 +865,20 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
- u32 last_line;
+ u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ if (IS_GEN2(dev))
+ line_mask = DSL_LINEMASK_GEN2;
+ else
+ line_mask = DSL_LINEMASK_GEN3;
+
/* Wait for the display line to settle */
do {
- last_line = I915_READ(reg) & DSL_LINEMASK;
+ last_line = I915_READ(reg) & line_mask;
mdelay(5);
- } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
+ } while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
@@ -811,34 +910,49 @@ static void assert_pll(struct drm_i915_private *dev_priv,
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+ struct intel_pch_pll *pll,
+ struct intel_crtc *crtc,
+ bool state)
{
- int reg;
u32 val;
bool cur_state;
- if (HAS_PCH_CPT(dev_priv->dev)) {
- u32 pch_dpll;
-
- pch_dpll = I915_READ(PCH_DPLL_SEL);
-
- /* Make sure the selected PLL is enabled to the transcoder */
- WARN(!((pch_dpll >> (4 * pipe)) & 8),
- "transcoder %d PLL not enabled\n", pipe);
-
- /* Convert the transcoder pipe number to a pll pipe number */
- pipe = (pch_dpll >> (4 * pipe)) & 1;
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
+ return;
}
- reg = PCH_DPLL(pipe);
- val = I915_READ(reg);
+ if (WARN(!pll,
+ "asserting PCH PLL %s with no PLL\n", state_string(state)))
+ return;
+
+ val = I915_READ(pll->pll_reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
WARN(cur_state != state,
- "PCH PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
+ "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+ pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+ /* Make sure the selected PLL is correctly attached to the transcoder */
+ if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
+ u32 pch_dpll;
+
+ pch_dpll = I915_READ(PCH_DPLL_SEL);
+ cur_state = pll->pll_reg == _PCH_DPLL_B;
+ if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+ "PLL[%d] not attached to this transcoder %d: %08x\n",
+ cur_state, crtc->pipe, pch_dpll)) {
+ cur_state = !!(val >> (4*crtc->pipe + 3));
+ WARN(cur_state != state,
+ "PLL[%d] not %s on this transcoder %d: %08x\n",
+ pll->pll_reg == _PCH_DPLL_B,
+ state_string(state),
+ crtc->pipe,
+ val);
+ }
+ }
}
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -847,9 +961,16 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_TX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_TX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev)) {
+ /* On Haswell, DDI is used instead of FDI_TX_CTL */
+ reg = DDI_FUNC_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+ } else {
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ }
WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -864,9 +985,14 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- reg = FDI_RX_CTL(pipe);
- val = I915_READ(reg);
- cur_state = !!(val & FDI_RX_ENABLE);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
+ return;
+ } else {
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ }
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
@@ -884,6 +1010,10 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
if (dev_priv->info->gen == 5)
return;
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (IS_HASWELL(dev_priv->dev))
+ return;
+
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
@@ -895,6 +1025,10 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
+ return;
+ }
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1000,6 +1134,11 @@ static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
u32 val;
bool enabled;
+ if (HAS_PCH_LPT(dev_priv->dev)) {
+ DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
+ return;
+ }
+
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
@@ -1198,6 +1337,69 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(reg);
}
+/* SBI access */
+static void
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR,
+ (reg << 16));
+ I915_WRITE(SBI_DATA,
+ value);
+ I915_WRITE(SBI_CTL_STAT,
+ SBI_BUSY |
+ SBI_CTL_OP_CRWR);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+ goto out_unlock;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+}
+
+static u32
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+{
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to become ready\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(SBI_ADDR,
+ (reg << 16));
+ I915_WRITE(SBI_CTL_STAT,
+ SBI_BUSY |
+ SBI_CTL_OP_CRRD);
+
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ 100)) {
+ DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+ goto out_unlock;
+ }
+
+ value = I915_READ(SBI_DATA);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+ return value;
+}
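+
+/* Typical read-modify-write usage of the SBI helpers above, as done in
+ * lpt_program_iclkip() below:
+ *
+ *   temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ *   temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ *   temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ *   intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp);
+ */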
+
/**
* intel_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
@@ -1206,60 +1408,88 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
-static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
int reg;
u32 val;
- if (pipe > 1)
+ /* PCH PLLs only available on ILK, SNB and IVB */
+ BUG_ON(dev_priv->info->gen < 5);
+ pll = intel_crtc->pch_pll;
+ if (pll == NULL)
return;
- /* PCH only available on ILK+ */
- BUG_ON(dev_priv->info->gen < 5);
+ if (WARN_ON(pll->refcount == 0))
+ return;
+
+ DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
- reg = PCH_DPLL(pipe);
+ if (pll->active++ && pll->on) {
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
+ return;
+ }
+
+ DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
+
+ reg = pll->pll_reg;
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
+
+ pll->on = true;
}
-static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
int reg;
- u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
- pll_sel = TRANSC_DPLL_ENABLE;
-
- if (pipe > 1)
- return;
+ u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
+ if (pll == NULL)
+ return;
- /* Make sure transcoder isn't still depending on us */
- assert_transcoder_disabled(dev_priv, pipe);
+ if (WARN_ON(pll->refcount == 0))
+ return;
- if (pipe == 0)
- pll_sel |= TRANSC_DPLLA_SEL;
- else if (pipe == 1)
- pll_sel |= TRANSC_DPLLB_SEL;
+ DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+ pll->pll_reg, pll->active, pll->on,
+ intel_crtc->base.base.id);
+ if (WARN_ON(pll->active == 0)) {
+ assert_pch_pll_disabled(dev_priv, pll, NULL);
+ return;
+ }
- if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+ if (--pll->active) {
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
+ }
+
+ DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
+
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
- reg = PCH_DPLL(pipe);
+ reg = pll->pll_reg;
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
+
+ pll->on = false;
}
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
@@ -1273,12 +1503,18 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
- assert_pch_pll_enabled(dev_priv, pipe);
+ assert_pch_pll_enabled(dev_priv,
+ to_intel_crtc(crtc)->pch_pll,
+ to_intel_crtc(crtc));
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
+ if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
+ DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
+ return;
+ }
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1415,7 +1651,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
@@ -1526,490 +1762,6 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
-static void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
-}
-
-static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int cfb_pitch;
- int plane, i;
- u32 fbc_ctl, fbc_ctl2;
-
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- if (fb->pitches[0] < cfb_pitch)
- cfb_pitch = fb->pitches[0];
-
- /* FBC_CTL wants 64B units */
- cfb_pitch = (cfb_pitch / 64) - 1;
- plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
-
- /* Clear old tags */
- for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG + (i * 4), 0);
-
- /* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
- fbc_ctl2 |= plane;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, crtc->y);
-
- /* enable it... */
- fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
- if (IS_I945GM(dev))
- fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- fbc_ctl |= obj->fence_reg;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
- cfb_pitch, crtc->y, intel_crtc->plane);
-}
-
-static bool i8xx_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
-}
-
-static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
-
- I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
-
- /* enable it... */
- I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
-
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void g4x_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool g4x_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-static void sandybridge_blit_fbc_update(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 blt_ecoskpd;
-
- /* Make sure blitter notifies FBC of writes */
- gen6_gt_force_wake_get(dev_priv);
- blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
- GEN6_BLITTER_LOCK_SHIFT);
- I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
- POSTING_READ(GEN6_BLITTER_ECOSKPD);
- gen6_gt_force_wake_put(dev_priv);
-}
-
-static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_framebuffer *fb = crtc->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
- unsigned long stall_watermark = 200;
- u32 dpfc_ctl;
-
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- dpfc_ctl &= DPFC_RESERVED;
- dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- /* Set persistent mode for front-buffer rendering, ala X. */
- dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
-
- I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
- (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
- (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
- I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
- /* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- if (IS_GEN6(dev)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | obj->fence_reg);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
- sandybridge_blit_fbc_update(dev);
- }
-
- DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
-}
-
-static void ironlake_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpfc_ctl;
-
- /* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-
- DRM_DEBUG_KMS("disabled FBC\n");
- }
-}
-
-static bool ironlake_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
-}
-
-bool intel_fbc_enabled(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.fbc_enabled)
- return false;
-
- return dev_priv->display.fbc_enabled(dev);
-}
-
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
- struct intel_fbc_work *work =
- container_of(to_delayed_work(__work),
- struct intel_fbc_work, work);
- struct drm_device *dev = work->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
- /* Double check that we haven't switched fb without cancelling
- * the prior work.
- */
- if (work->crtc->fb == work->fb) {
- dev_priv->display.enable_fbc(work->crtc,
- work->interval);
-
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
- }
-
- dev_priv->fbc_work = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
-
- kfree(work);
-}
-
-static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->fbc_work == NULL)
- return;
-
- DRM_DEBUG_KMS("cancelling pending FBC enable\n");
-
- /* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
- * entirely asynchronously.
- */
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
- /* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
-
- /* Mark the work as no longer wanted so that if it does
- * wake-up (because the work was already running and waiting
- * for our mutex), it will discover that is no longer
- * necessary to run.
- */
- dev_priv->fbc_work = NULL;
-}
-
-static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
-{
- struct intel_fbc_work *work;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->display.enable_fbc)
- return;
-
- intel_cancel_fbc_work(dev_priv);
-
- work = kzalloc(sizeof *work, GFP_KERNEL);
- if (work == NULL) {
- dev_priv->display.enable_fbc(crtc, interval);
- return;
- }
-
- work->crtc = crtc;
- work->fb = crtc->fb;
- work->interval = interval;
- INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
-
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
-
- /* Delay the actual enabling to let pageflipping cease and the
- * display to settle before starting the compression. Note that
- * this delay also serves a second purpose: it allows for a
- * vblank to pass after disabling the FBC before we attempt
- * to modify the control registers.
- *
- * A more complicated solution would involve tracking vblanks
- * following the termination of the page-flipping sequence
- * and indeed performing the enable as a co-routine and not
- * waiting synchronously upon the vblank.
- */
- schedule_delayed_work(&work->work, msecs_to_jiffies(50));
-}
-
-void intel_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_cancel_fbc_work(dev_priv);
-
- if (!dev_priv->display.disable_fbc)
- return;
-
- dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
-}
-
-/**
- * intel_update_fbc - enable/disable FBC as needed
- * @dev: the drm_device
- *
- * Set up the framebuffer compression hardware at mode set time. We
- * enable it if possible:
- * - plane A only (on pre-965)
- * - no pixel mulitply/line duplication
- * - no alpha buffer discard
- * - no dual wide
- * - framebuffer <= 2048 in width, 1536 in height
- *
- * We can't assume that any compression will take place (worst case),
- * so the compressed buffer has to be the same size as the uncompressed
- * one. It also must reside (along with the line length buffer) in
- * stolen memory.
- *
- * We need to enable/disable FBC on a global basis.
- */
-static void intel_update_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = NULL, *tmp_crtc;
- struct intel_crtc *intel_crtc;
- struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
- int enable_fbc;
-
- DRM_DEBUG_KMS("\n");
-
- if (!i915_powersave)
- return;
-
- if (!I915_HAS_FBC(dev))
- return;
-
- /*
- * If FBC is already on, we just have to verify that we can
- * keep it that way...
- * Need to disable if:
- * - more than one pipe is active
- * - changing FBC params (stride, fence, mode)
- * - new fb is too large to fit in compressed buffer
- * - going to an unsupported config (interlace, pixel multiply, etc.)
- */
- list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled && tmp_crtc->fb) {
- if (crtc) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
- goto out_disable;
- }
- crtc = tmp_crtc;
- }
- }
-
- if (!crtc || crtc->fb == NULL) {
- DRM_DEBUG_KMS("no output, disabling\n");
- dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
- goto out_disable;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- fb = crtc->fb;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 6)
- enable_fbc = 0;
- }
- if (!enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
- goto out_disable;
- }
- if (intel_fb->obj->base.size > dev_priv->cfb_size) {
- DRM_DEBUG_KMS("framebuffer too large, disabling "
- "compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- goto out_disable;
- }
- if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
- (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
- goto out_disable;
- }
- if ((crtc->mode.hdisplay > 2048) ||
- (crtc->mode.vdisplay > 1536)) {
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
- goto out_disable;
- }
- if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_BAD_PLANE;
- goto out_disable;
- }
-
- /* The use of a CPU fence is mandatory in order to detect writes
- * by the CPU to the scanout and trigger updates to the FBC.
- */
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_NOT_TILED;
- goto out_disable;
- }
-
- /* If the kernel debugger is active, always disable compression */
- if (in_dbg_master())
- goto out_disable;
-
- /* If the scanout has not changed, don't modify the FBC settings.
- * Note that we make the fundamental assumption that the fb->obj
- * cannot be unpinned (and have its GTT offset and fence revoked)
- * without first being decoupled from the scanout and FBC disabled.
- */
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- if (intel_fbc_enabled(dev)) {
- /* We update FBC along two paths, after changing fb/crtc
- * configuration (modeswitching) and after page-flipping
- * finishes. For the latter, we know that not only did
- * we disable the FBC at the start of the page-flip
- * sequence, but also more than one vblank has passed.
- *
- * For the former case of modeswitching, it is possible
- * to switch between two FBC valid configurations
- * instantaneously so we do need to disable the FBC
- * before we can modify its control registers. We also
- * have to wait for the next vblank for that to take
- * effect. However, since we delay enabling FBC we can
- * assume that a vblank has passed since disabling and
- * that we can safely alter the registers in the deferred
- * callback.
- *
- * In the scenario that we go from a valid to invalid
- * and then back to valid FBC configuration we have
- * no strict enforcement that a vblank occurred since
- * disabling the FBC. However, along all current pipe
- * disabling paths we do need to wait for a vblank at
- * some point. And we wait before enabling FBC anyway.
- */
- DRM_DEBUG_KMS("disabling active FBC for update\n");
- intel_disable_fbc(dev);
- }
-
- intel_enable_fbc(crtc, 500);
- return;
-
-out_disable:
- /* Multiple disables should be harmless */
- if (intel_fbc_enabled(dev)) {
- DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
- intel_disable_fbc(dev);
- }
-}
-
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -2050,13 +1802,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence(obj, pipelined);
- if (ret)
- goto err_unpin;
+ ret = i915_gem_object_get_fence(obj);
+ if (ret)
+ goto err_unpin;
- i915_gem_object_pin_fence(obj);
- }
+ i915_gem_object_pin_fence(obj);
dev_priv->mm.interruptible = true;
return 0;
@@ -2137,7 +1887,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
} else
@@ -2217,7 +1967,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
POSTING_READ(reg);
@@ -2232,16 +1982,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = dev_priv->display.update_plane(crtc, fb, x, y);
- if (ret)
- return ret;
- intel_update_fbc(dev);
+ if (dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
- return 0;
+ return dev_priv->display.update_plane(crtc, fb, x, y);
}
static int
@@ -2276,6 +2022,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
@@ -2286,16 +2033,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
- switch (intel_crtc->plane) {
- case 0:
- case 1:
- break;
- case 2:
- if (IS_IVYBRIDGE(dev))
- break;
- /* fall through otherwise */
- default:
- DRM_ERROR("no plane for crtc\n");
+ if (intel_crtc->plane > dev_priv->num_pipe) {
+ DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
+ intel_crtc->plane,
+ dev_priv->num_pipe);
return -EINVAL;
}
@@ -2312,8 +2053,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb)
intel_finish_fb(old_fb);
- ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
- LEAVE_ATOMIC_MODE_SET);
+ ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -2326,6 +2066,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
+ intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
if (!dev->primary->master)
@@ -2547,7 +2288,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
+ u32 reg, temp, i, retry;
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -2599,15 +2340,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(500);
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
- break;
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_BIT_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
@@ -2648,15 +2393,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(500);
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
- break;
+ for (retry = 0; retry < 5; retry++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ if (temp & FDI_RX_SYMBOL_LOCK) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done.\n");
+ break;
+ }
+ udelay(50);
}
+ if (retry < 5)
+ break;
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
@@ -2808,14 +2557,18 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(200);
- /* Enable CPU FDI TX PLL, always on for Ironlake */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ /* On Haswell, the PLL configuration for ports and pipes is handled
+ * separately, as part of DDI setup */
+ if (!IS_HASWELL(dev)) {
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
- udelay(100);
+ POSTING_READ(reg);
+ udelay(100);
+ }
}
}
@@ -2888,38 +2641,16 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
-/*
- * When we disable a pipe, we need to clear any pending scanline wait events
- * to avoid hanging the ring, which we assume we are waiting on.
- */
-static void intel_clear_scanline_wait(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- u32 tmp;
-
- if (IS_GEN2(dev))
- /* Can't break the hang on i8xx */
- return;
-
- ring = LP_RING(dev_priv);
- tmp = I915_READ_CTL(ring);
- if (tmp & RING_WAIT)
- I915_WRITE_CTL(ring, tmp);
-}
-
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
- struct drm_i915_gem_object *obj;
- struct drm_i915_private *dev_priv;
+ struct drm_device *dev = crtc->dev;
if (crtc->fb == NULL)
return;
- obj = to_intel_framebuffer(crtc->fb)->obj;
- dev_priv = crtc->dev->dev_private;
- wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj->pending_flip) == 0);
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -2936,6 +2667,22 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
if (encoder->base.crtc != crtc)
continue;
+ /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
+ * CPU handles all others */
+ if (IS_HASWELL(dev)) {
+ /* It is still unclear how this will work on PPT, so throw up a warning */
+ WARN_ON(!HAS_PCH_LPT(dev));
+
+ if (encoder->type == DRM_MODE_ENCODER_DAC) {
+ DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
+ return true;
+ } else {
+ DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
+ encoder->type);
+ return false;
+ }
+ }
+
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
@@ -2947,6 +2694,97 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ /* It is necessary to gate the pixclk before programming
+ * the divisors, and ungate it again when done.
+ */
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ /* Disable SSCCTL */
+ intel_sbi_write(dev_priv, SBI_SSCCTL6,
+ intel_sbi_read(dev_priv, SBI_SSCCTL6) |
+ SBI_SSCCTL_DISABLE);
+
+ /* 20MHz is a corner case which is out of range for the 7-bit divisor */
+ if (crtc->mode.clock == 20000) {
+ auxdiv = 1;
+ divsel = 0x41;
+ phaseinc = 0x20;
+ } else {
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but the crtc->mode.clock is in KHz. To get the divisors,
+ * it is necessary to divide one by the other, so we
+ * convert the virtual clock precision to KHz here for higher
+ * precision.
+ */
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor, msb_divisor_value, pi_value;
+
+ desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+ msb_divisor_value = desired_divisor / iclk_pi_range;
+ pi_value = desired_divisor % iclk_pi_range;
+
+ auxdiv = 0;
+ divsel = msb_divisor_value - 2;
+ phaseinc = pi_value;
+ }
+
+ /* This should not happen with any sane values */
+ WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ crtc->mode.clock,
+ auxdiv,
+ divsel,
+ phasedir,
+ phaseinc);
+
+ /* Program SSCDIVINTPHASE6 */
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+ temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+ temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+ temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+ temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+ temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+
+ intel_sbi_write(dev_priv,
+ SBI_SSCDIVINTPHASE6,
+ temp);
+
+ /* Program SSCAUXDIV */
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+ temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+ temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+ intel_sbi_write(dev_priv,
+ SBI_SSCAUXDIV6,
+ temp);
+
+ /* Enable modulator and associated divider */
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+ temp &= ~SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv,
+ SBI_SSCCTL6,
+ temp);
+
+ /* Wait for initialization time */
+ udelay(24);
+
+ I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+}
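The divisor arithmetic above can be checked with a small stand-alone sketch; the 108 MHz pixel clock below is a hypothetical input, and the printed divsel/phaseinc values simply follow the same integer math as lpt_program_iclkip() (register semantics are not modeled here).

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical 108000 kHz (108 MHz) pixel clock. */
		unsigned int clock = 108000;
		unsigned int iclk_virtual_root_freq = 172800 * 1000;
		unsigned int iclk_pi_range = 64;
		unsigned int desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = iclk_virtual_root_freq / clock;	/* 1600 */
		msb_divisor_value = desired_divisor / iclk_pi_range;	/* 25 */
		pi_value = desired_divisor % iclk_pi_range;		/* 0 */

		/* divsel = msb - 2, phaseinc = pi, auxdiv = 0, as in the patch */
		printf("divsel=0x%x phaseinc=0x%x\n",
		       msb_divisor_value - 2, pi_value);
		return 0;
	}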
+
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
@@ -2961,29 +2799,41 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, transc_sel;
+ u32 reg, temp;
+
+ assert_transcoder_disabled(dev_priv, pipe);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- intel_enable_pch_pll(dev_priv, pipe);
+ intel_enable_pch_pll(intel_crtc);
- if (HAS_PCH_CPT(dev)) {
- transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
- TRANSC_DPLLB_SEL;
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
+ lpt_program_iclkip(crtc);
+ } else if (HAS_PCH_CPT(dev)) {
+ u32 sel;
- /* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0) {
- temp &= ~(TRANSA_DPLLB_SEL);
- temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
- } else if (pipe == 1) {
- temp &= ~(TRANSB_DPLLB_SEL);
- temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
- } else if (pipe == 2) {
- temp &= ~(TRANSC_DPLLB_SEL);
- temp |= (TRANSC_DPLL_ENABLE | transc_sel);
+ switch (pipe) {
+ default:
+ case 0:
+ temp |= TRANSA_DPLL_ENABLE;
+ sel = TRANSA_DPLLB_SEL;
+ break;
+ case 1:
+ temp |= TRANSB_DPLL_ENABLE;
+ sel = TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ temp |= TRANSC_DPLL_ENABLE;
+ sel = TRANSC_DPLLB_SEL;
+ break;
}
+ if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
+ temp |= sel;
+ else
+ temp &= ~sel;
I915_WRITE(PCH_DPLL_SEL, temp);
}
@@ -2998,7 +2848,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
- intel_fdi_normal_train(crtc);
+ if (!IS_HASWELL(dev))
+ intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
@@ -3041,6 +2892,93 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_enable_transcoder(dev_priv, pipe);
}
+static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
+{
+ struct intel_pch_pll *pll = intel_crtc->pch_pll;
+
+ if (pll == NULL)
+ return;
+
+ if (pll->refcount == 0) {
+ WARN(1, "bad PCH PLL refcount\n");
+ return;
+ }
+
+ --pll->refcount;
+ intel_crtc->pch_pll = NULL;
+}
+
+static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
+{
+ struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct intel_pch_pll *pll;
+ int i;
+
+ pll = intel_crtc->pch_pll;
+ if (pll) {
+ DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto prepare;
+ }
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = intel_crtc->pipe;
+ pll = &dev_priv->pch_plls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+
+ goto found;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+
+ /* First pass: only consider PLLs that are already in use */
+ if (pll->refcount == 0)
+ continue;
+
+ if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
+ fp == I915_READ(pll->fp0_reg)) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
+ intel_crtc->base.base.id,
+ pll->pll_reg, pll->refcount, pll->active);
+
+ goto found;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ pll = &dev_priv->pch_plls[i];
+ if (pll->refcount == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
+ intel_crtc->base.base.id, pll->pll_reg);
+ goto found;
+ }
+ }
+
+ return NULL;
+
+found:
+ intel_crtc->pch_pll = pll;
+ pll->refcount++;
+ DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
+prepare: /* separate function? */
+ DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
+
+ /* Wait for the clocks to stabilize before rewriting the regs */
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(pll->pll_reg);
+ udelay(150);
+
+ I915_WRITE(pll->fp0_reg, fp);
+ I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+ pll->on = false;
+ return pll;
+}
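Conceptually, intel_get_pch_pll() above is a reference-counted allocator: reuse the CRTC's current PLL if it has one, honour the fixed pipe-to-PLL mapping on IBX, otherwise share an in-use PLL whose programmed dividers already match, and only then fall back to a free PLL. A minimal stand-alone model of that search order is sketched below; the struct and field names are hypothetical, not the driver's types.

	struct pll_model {
		unsigned int dpll;	/* programmed DPLL value */
		unsigned int fp;	/* programmed FP0 value */
		int refcount;		/* number of CRTCs using it */
	};

	/* Return an in-use PLL with matching settings, else a free one, else NULL. */
	struct pll_model *pll_model_get(struct pll_model *plls, int num,
					unsigned int dpll, unsigned int fp)
	{
		int i;

		for (i = 0; i < num; i++)
			if (plls[i].refcount && plls[i].dpll == dpll &&
			    plls[i].fp == fp)
				return &plls[i];

		for (i = 0; i < num; i++)
			if (plls[i].refcount == 0)
				return &plls[i];

		return NULL;
	}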
+
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3185,8 +3123,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
}
/* disable PCH DPLL */
- if (!intel_crtc->no_pll)
- intel_disable_pch_pll(dev_priv, pipe);
+ intel_disable_pch_pll(intel_crtc);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -3214,7 +3151,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_clear_scanline_wait(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -3242,6 +3178,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void ironlake_crtc_off(struct drm_crtc *crtc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ intel_put_pch_pll(intel_crtc);
+}
+
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
@@ -3313,7 +3255,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
- intel_clear_scanline_wait(dev);
}
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -3333,6 +3274,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
+static void i9xx_crtc_off(struct drm_crtc *crtc)
+{
+}
+
/**
* Sets the power management mode of the pipe and plane.
*/
@@ -3380,25 +3325,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
-
- /* Flush any pending WAITs before we disable the pipe. Note that
- * we need to drop the struct_mutex in order to acquire it again
- * during the lowlevel dpms routines around a couple of the
- * operations. It does not look trivial nor desirable to move
- * that locking higher. So instead we leave a window for the
- * submission of further commands on the fb before we can actually
- * disable it. This race with userspace exists anyway, and we can
- * only rely on the pipe being disabled by userspace after it
- * receives the hotplug notification and has flushed any pending
- * batches.
- */
- if (crtc->fb) {
- mutex_lock(&dev->struct_mutex);
- intel_finish_fb(crtc->fb);
- mutex_unlock(&dev->struct_mutex);
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ dev_priv->display.off(crtc);
+
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
@@ -3448,8 +3379,7 @@ void intel_encoder_commit(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_device *dev = encoder->dev;
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
/* lvds has its own version of commit see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3487,6 +3417,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+ return 400000; /* FIXME */
+}
+
static int i945_get_display_clock_speed(struct drm_device *dev)
{
return 400000;
@@ -3584,1342 +3519,6 @@ ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
-
-struct intel_watermark_params {
- unsigned long fifo_size;
- unsigned long max_wm;
- unsigned long default_wm;
- unsigned long guard_size;
- unsigned long cacheline_size;
-};
-
-/* Pineview has different values for various configs */
-static const struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params g4x_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i965_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- I915_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i855_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params i830_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params ironlake_display_wm_info = {
- ILK_DISPLAY_FIFO,
- ILK_DISPLAY_MAXWM,
- ILK_DISPLAY_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_wm_info = {
- ILK_CURSOR_FIFO,
- ILK_CURSOR_MAXWM,
- ILK_CURSOR_DFTWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_display_srwm_info = {
- ILK_DISPLAY_SR_FIFO,
- ILK_DISPLAY_MAX_SRWM,
- ILK_DISPLAY_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params ironlake_cursor_srwm_info = {
- ILK_CURSOR_SR_FIFO,
- ILK_CURSOR_MAX_SRWM,
- ILK_CURSOR_DFT_SRWM,
- 2,
- ILK_FIFO_LINE_SIZE
-};
-
-static const struct intel_watermark_params sandybridge_display_wm_info = {
- SNB_DISPLAY_FIFO,
- SNB_DISPLAY_MAXWM,
- SNB_DISPLAY_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_wm_info = {
- SNB_CURSOR_FIFO,
- SNB_CURSOR_MAXWM,
- SNB_CURSOR_DFTWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_display_srwm_info = {
- SNB_DISPLAY_SR_FIFO,
- SNB_DISPLAY_MAX_SRWM,
- SNB_DISPLAY_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
- SNB_CURSOR_SR_FIFO,
- SNB_CURSOR_MAX_SRWM,
- SNB_CURSOR_DFT_SRWM,
- 2,
- SNB_FIFO_LINE_SIZE
-};
-
-
-/**
- * intel_calculate_wm - calculate watermark level
- * @clock_in_khz: pixel clock
- * @wm: chip FIFO params
- * @pixel_size: display pixel size
- * @latency_ns: memory latency for the platform
- *
- * Calculate the watermark level (the level at which the display plane will
- * start fetching from memory again). Each chip has a different display
- * FIFO size and allocation, so the caller needs to figure that out and pass
- * in the correct intel_watermark_params structure.
- *
- * As the pixel clock runs, the FIFO will be drained at a rate that depends
- * on the pixel size. When it reaches the watermark level, it'll start
- * fetching FIFO line sized based chunks from memory until the FIFO fills
- * past the watermark point. If the FIFO drains completely, a FIFO underrun
- * will occur, and a display engine hang could result.
- */
-static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- const struct intel_watermark_params *wm,
- int fifo_size,
- int pixel_size,
- unsigned long latency_ns)
-{
- long entries_required, wm_size;
-
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
-
- DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
-
- wm_size = fifo_size - (entries_required + wm->guard_size);
-
- DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
-
- /* Don't promote wm_size to unsigned... */
- if (wm_size > (long)wm->max_wm)
- wm_size = wm->max_wm;
- if (wm_size <= 0)
- wm_size = wm->default_wm;
- return wm_size;
-}
-
-struct cxsr_latency {
- int is_desktop;
- int is_ddr3;
- unsigned long fsb_freq;
- unsigned long mem_freq;
- unsigned long display_sr;
- unsigned long display_hpll_disable;
- unsigned long cursor_sr;
- unsigned long cursor_hpll_disable;
-};
-
-static const struct cxsr_latency cxsr_latency_table[] = {
- {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
- {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
- {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
- {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
- {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
-
- {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
- {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
- {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
- {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
- {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
-
- {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
- {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
- {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
- {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
- {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
-
- {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
- {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
- {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
- {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
- {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
-
- {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
- {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
- {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
- {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
- {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
-
- {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
- {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
- {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
- {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
- {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
-};
-
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
- int fsb,
- int mem)
-{
- const struct cxsr_latency *latency;
- int i;
-
- if (fsb == 0 || mem == 0)
- return NULL;
-
- for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
- latency = &cxsr_latency_table[i];
- if (is_desktop == latency->is_desktop &&
- is_ddr3 == latency->is_ddr3 &&
- fsb == latency->fsb_freq && mem == latency->mem_freq)
- return latency;
- }
-
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-
- return NULL;
-}
-
-static void pineview_disable_cxsr(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
-}
-
-/*
- * Latency for FIFO fetches is dependent on several factors:
- * - memory configuration (speed, channels)
- * - chipset
- * - current MCH state
- * It can be fairly high in some situations, so here we assume a fairly
- * pessimal value. It's a tradeoff between extra memory fetches (if we
- * set this value too high, the FIFO will fetch frequently to stay full)
- * and power consumption (set it too low to save power and we might see
- * FIFO underruns and display "flicker").
- *
- * A value of 5us seems to be a good balance; safe for very low end
- * platforms but not overly aggressive on lower latency configs.
- */
-static const int latency_ns = 5000;
-
-static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- if (plane)
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static int i85x_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x1ff;
- if (plane)
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static int i845_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 2; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A",
- size);
-
- return size;
-}
-
-static int i830_get_fifo_size(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dsparb = I915_READ(DSPARB);
- int size;
-
- size = dsparb & 0x7f;
- size >>= 1; /* Convert to cachelines */
-
- DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
- plane ? "B" : "A", size);
-
- return size;
-}
-
-static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
-{
- struct drm_crtc *crtc, *enabled = NULL;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled && crtc->fb) {
- if (enabled)
- return NULL;
- enabled = crtc;
- }
- }
-
- return enabled;
-}
-
-static void pineview_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- const struct cxsr_latency *latency;
- u32 reg;
- unsigned long wm;
-
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
- dev_priv->fsb_freq, dev_priv->mem_freq);
- if (!latency) {
- DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
- return;
- }
-
- crtc = single_enabled_crtc(dev);
- if (crtc) {
- int clock = crtc->mode.clock;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Display SR */
- wm = intel_calculate_wm(clock, &pineview_display_wm,
- pineview_display_wm.fifo_size,
- pixel_size, latency->display_sr);
- reg = I915_READ(DSPFW1);
- reg &= ~DSPFW_SR_MASK;
- reg |= wm << DSPFW_SR_SHIFT;
- I915_WRITE(DSPFW1, reg);
- DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
-
- /* cursor SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_wm,
- pineview_display_wm.fifo_size,
- pixel_size, latency->cursor_sr);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_CURSOR_SR_MASK;
- reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
- I915_WRITE(DSPFW3, reg);
-
- /* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->display_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_HPLL_SR_MASK;
- reg |= wm & DSPFW_HPLL_SR_MASK;
- I915_WRITE(DSPFW3, reg);
-
- /* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
- pineview_display_hplloff_wm.fifo_size,
- pixel_size, latency->cursor_hpll_disable);
- reg = I915_READ(DSPFW3);
- reg &= ~DSPFW_HPLL_CURSOR_MASK;
- reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
- I915_WRITE(DSPFW3, reg);
- DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
-
- /* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
- DRM_DEBUG_KMS("Self-refresh is enabled\n");
- } else {
- pineview_disable_cxsr(dev);
- DRM_DEBUG_KMS("Self-refresh is disabled\n");
- }
-}
-
-static bool g4x_compute_wm0(struct drm_device *dev,
- int plane,
- const struct intel_watermark_params *display,
- int display_latency_ns,
- const struct intel_watermark_params *cursor,
- int cursor_latency_ns,
- int *plane_wm,
- int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int htotal, hdisplay, clock, pixel_size;
- int line_time_us, line_count;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *cursor_wm = cursor->guard_size;
- *plane_wm = display->guard_size;
- return false;
- }
-
- htotal = crtc->mode.htotal;
- hdisplay = crtc->mode.hdisplay;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- /* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *plane_wm = entries + display->guard_size;
- if (*plane_wm > (int)display->max_wm)
- *plane_wm = display->max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = ((htotal * 1000) / clock);
- line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * 64 * pixel_size;
- tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
- if (*cursor_wm > (int)cursor->max_wm)
- *cursor_wm = (int)cursor->max_wm;
-
- return true;
-}
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool g4x_check_srwm(struct drm_device *dev,
- int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
- display_wm, cursor_wm);
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
- display_wm, display->max_wm);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
- cursor_wm, cursor->max_wm);
- return false;
- }
-
- if (!(display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("SR latency is 0, disabling\n");
- return false;
- }
-
- return true;
-}
-
-static bool g4x_compute_srwm(struct drm_device *dev,
- int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- int hdisplay, htotal, pixel_size, clock;
- unsigned long line_time_us;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return g4x_check_srwm(dev,
- *display_wm, *cursor_wm,
- display, cursor);
-}
-
-#define single_plane_enabled(mask) is_power_of_2(mask)
-
-static void g4x_update_wm(struct drm_device *dev)
-{
- static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- unsigned int enabled = 0;
-
- if (g4x_compute_wm0(dev, 0,
- &g4x_wm_info, latency_ns,
- &g4x_cursor_wm_info, latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1;
-
- if (g4x_compute_wm0(dev, 1,
- &g4x_wm_info, latency_ns,
- &g4x_cursor_wm_info, latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 2;
-
- plane_sr = cursor_sr = 0;
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev, ffs(enabled) - 1,
- sr_latency_ns,
- &g4x_wm_info,
- &g4x_cursor_wm_info,
- &plane_sr, &cursor_sr))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- else
- I915_WRITE(FW_BLC_SELF,
- I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
-
- I915_WRITE(DSPFW1,
- (plane_sr << DSPFW_SR_SHIFT) |
- (cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) |
- planea_wm);
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
- (cursora_wm << DSPFW_CURSORA_SHIFT));
- /* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
- (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i965_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- int srwm = 1;
- int cursor_sr = 16;
-
- /* Calc sr entries for one plane configs */
- crtc = single_enabled_crtc(dev);
- if (crtc) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
- int clock = crtc->mode.clock;
- int htotal = crtc->mode.htotal;
- int hdisplay = crtc->mode.hdisplay;
- int pixel_size = crtc->fb->bits_per_pixel / 8;
- unsigned long line_time_us;
- int entries;
-
- line_time_us = ((htotal * 1000) / clock);
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
- entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
- srwm = I965_FIFO_SIZE - entries;
- if (srwm < 0)
- srwm = 1;
- srwm &= 0x1ff;
- DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
- entries, srwm);
-
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * 64;
- entries = DIV_ROUND_UP(entries,
- i965_cursor_wm_info.cacheline_size);
- cursor_sr = i965_cursor_wm_info.fifo_size -
- (entries + i965_cursor_wm_info.guard_size);
-
- if (cursor_sr > i965_cursor_wm_info.max_wm)
- cursor_sr = i965_cursor_wm_info.max_wm;
-
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", srwm, cursor_sr);
-
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
- srwm);
-
- /* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
- (8 << 16) | (8 << 8) | (8 << 0));
- I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
- /* update cursor SR watermark */
- I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-}
-
-static void i9xx_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct intel_watermark_params *wm_info;
- uint32_t fwater_lo;
- uint32_t fwater_hi;
- int cwm, srwm = 1;
- int fifo_size;
- int planea_wm, planeb_wm;
- struct drm_crtc *crtc, *enabled = NULL;
-
- if (IS_I945GM(dev))
- wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev))
- wm_info = &i915_wm_info;
- else
- wm_info = &i855_wm_info;
-
- fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- crtc = intel_get_crtc_for_plane(dev, 0);
- if (crtc->enabled && crtc->fb) {
- planea_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- enabled = crtc;
- } else
- planea_wm = fifo_size - wm_info->guard_size;
-
- fifo_size = dev_priv->display.get_fifo_size(dev, 1);
- crtc = intel_get_crtc_for_plane(dev, 1);
- if (crtc->enabled && crtc->fb) {
- planeb_wm = intel_calculate_wm(crtc->mode.clock,
- wm_info, fifo_size,
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- if (enabled == NULL)
- enabled = crtc;
- else
- enabled = NULL;
- } else
- planeb_wm = fifo_size - wm_info->guard_size;
-
- DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
-
- /*
- * Overlay gets an aggressive default since video jitter is bad.
- */
- cwm = 2;
-
- /* Play safe and disable self-refresh before adjusting watermarks. */
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
-
- /* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && enabled) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 6000;
- int clock = enabled->mode.clock;
- int htotal = enabled->mode.htotal;
- int hdisplay = enabled->mode.hdisplay;
- int pixel_size = enabled->fb->bits_per_pixel / 8;
- unsigned long line_time_us;
- int entries;
-
- line_time_us = (htotal * 1000) / clock;
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * hdisplay;
- entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
- srwm = wm_info->fifo_size - entries;
- if (srwm < 0)
- srwm = 1;
-
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else if (IS_I915GM(dev))
- I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
- planea_wm, planeb_wm, cwm, srwm);
-
- fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
- fwater_hi = (cwm & 0x1f);
-
- /* Set request length to 8 cachelines per fetch */
- fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
- fwater_hi = fwater_hi | (1 << 8);
-
- I915_WRITE(FW_BLC, fwater_lo);
- I915_WRITE(FW_BLC2, fwater_hi);
-
- if (HAS_FW_BLC(dev)) {
- if (enabled) {
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
- DRM_DEBUG_KMS("memory self refresh enabled\n");
- } else
- DRM_DEBUG_KMS("memory self refresh disabled\n");
- }
-}
-
-static void i830_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- uint32_t fwater_lo;
- int planea_wm;
-
- crtc = single_enabled_crtc(dev);
- if (crtc == NULL)
- return;
-
- planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
- dev_priv->display.get_fifo_size(dev, 0),
- crtc->fb->bits_per_pixel / 8,
- latency_ns);
- fwater_lo = I915_READ(FW_BLC) & ~0xfff;
- fwater_lo |= (3<<8) | planea_wm;
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
-
- I915_WRITE(FW_BLC, fwater_lo);
-}
-
-#define ILK_LP0_PLANE_LATENCY 700
-#define ILK_LP0_CURSOR_LATENCY 1300
-
-/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
- */
-static bool ironlake_check_srwm(struct drm_device *dev, int level,
- int fbc_wm, int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
- " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
-
- if (fbc_wm > SNB_FBC_MAX_SRWM) {
- DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
- fbc_wm, SNB_FBC_MAX_SRWM, level);
-
- /* fbc has it's own way to disable FBC WM */
- I915_WRITE(DISP_ARB_CTL,
- I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
- return false;
- }
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
- display_wm, SNB_DISPLAY_MAX_SRWM, level);
- return false;
- }
-
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
- cursor_wm, SNB_CURSOR_MAX_SRWM, level);
- return false;
- }
-
- if (!(fbc_wm || display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
- return false;
- }
-
- return true;
-}
-
-/*
- * Compute watermark values of WM[1-3],
- */
-static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *fbc_wm, int *display_wm, int *cursor_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int hdisplay, htotal, pixel_size, clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *fbc_wm = *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- hdisplay = crtc->mode.hdisplay;
- htotal = crtc->mode.htotal;
- clock = crtc->mode.clock;
- pixel_size = crtc->fb->bits_per_pixel / 8;
-
- line_time_us = (htotal * 1000) / clock;
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
-
- /*
- * Spec says:
- * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
- */
- *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
-
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * pixel_size * 64;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
-
- return ironlake_check_srwm(dev, level,
- *fbc_wm, *display_wm, *cursor_wm,
- display, cursor);
-}
-
-static void ironlake_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, 0,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1;
- }
-
- if (g4x_compute_wm0(dev, 1,
- &ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
- &ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
- &plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 2;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled))
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- ILK_READ_WM1_LATENCY() * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- ILK_READ_WM2_LATENCY() * 500,
- &ironlake_display_srwm_info,
- &ironlake_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /*
- * WM3 is unsupported on ILK, probably because we don't have latency
- * data for that power state
- */
-}
-
-void sandybridge_update_wm(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- u32 val;
- int fbc_wm, plane_wm, cursor_wm;
- unsigned int enabled;
-
- enabled = 0;
- if (g4x_compute_wm0(dev, 0,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEA_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEA_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
- " plane %d, " "cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 1;
- }
-
- if (g4x_compute_wm0(dev, 1,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEB_ILK);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEB_ILK, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 2;
- }
-
- /* IVB has 3 pipes */
- if (IS_IVYBRIDGE(dev) &&
- g4x_compute_wm0(dev, 2,
- &sandybridge_display_wm_info, latency,
- &sandybridge_cursor_wm_info, latency,
- &plane_wm, &cursor_wm)) {
- val = I915_READ(WM0_PIPEC_IVB);
- val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
- I915_WRITE(WM0_PIPEC_IVB, val |
- ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
- DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
- " plane %d, cursor: %d\n",
- plane_wm, cursor_wm);
- enabled |= 3;
- }
-
- /*
- * Calculate and update the self-refresh watermark only when one
- * display plane is used.
- *
- * SNB support 3 levels of watermark.
- *
- * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
- * and disabled in the descending order
- *
- */
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- if (!single_plane_enabled(enabled) ||
- dev_priv->sprite_scaling_enabled)
- return;
- enabled = ffs(enabled) - 1;
-
- /* WM1 */
- if (!ironlake_compute_srwm(dev, 1, enabled,
- SNB_READ_WM1_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM1_LP_ILK,
- WM1_LP_SR_EN |
- (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM2 */
- if (!ironlake_compute_srwm(dev, 2, enabled,
- SNB_READ_WM2_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM2_LP_ILK,
- WM2_LP_EN |
- (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-
- /* WM3 */
- if (!ironlake_compute_srwm(dev, 3, enabled,
- SNB_READ_WM3_LATENCY() * 500,
- &sandybridge_display_srwm_info,
- &sandybridge_cursor_srwm_info,
- &fbc_wm, &plane_wm, &cursor_wm))
- return;
-
- I915_WRITE(WM3_LP_ILK,
- WM3_LP_EN |
- (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
- (fbc_wm << WM1_LP_FBC_SHIFT) |
- (plane_wm << WM1_LP_SR_SHIFT) |
- cursor_wm);
-}
-
-static bool
-sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int display_latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- int clock;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled) {
- *sprite_wm = display->guard_size;
- return false;
- }
-
- clock = crtc->mode.clock;
-
- /* Use the small buffer method to calculate the sprite watermark */
- entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size -
- sprite_width * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
- if (*sprite_wm > (int)display->max_wm)
- *sprite_wm = display->max_wm;
-
- return true;
-}
-
-static bool
-sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
- uint32_t sprite_width, int pixel_size,
- const struct intel_watermark_params *display,
- int latency_ns, int *sprite_wm)
-{
- struct drm_crtc *crtc;
- unsigned long line_time_us;
- int clock;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *sprite_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev, plane);
- clock = crtc->mode.clock;
- if (!clock) {
- *sprite_wm = 0;
- return false;
- }
-
- line_time_us = (sprite_width * 1000) / clock;
- if (!line_time_us) {
- *sprite_wm = 0;
- return false;
- }
-
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = sprite_width * pixel_size;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
-
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *sprite_wm = entries + display->guard_size;
-
- return *sprite_wm > 0x3ff ? false : true;
-}
-
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- u32 val;
- int sprite_wm, reg;
- int ret;
-
- switch (pipe) {
- case 0:
- reg = WM0_PIPEA_ILK;
- break;
- case 1:
- reg = WM0_PIPEB_ILK;
- break;
- case 2:
- reg = WM0_PIPEC_IVB;
- break;
- default:
- return; /* bad pipe */
- }
-
- ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
- &sandybridge_display_wm_info,
- latency, &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
- pipe);
- return;
- }
-
- val = I915_READ(reg);
- val &= ~WM0_PIPE_SPRITE_MASK;
- I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
- DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
-
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM1_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM1S_LP_ILK, sprite_wm);
-
- /* Only IVB has two more LP watermarks for sprite */
- if (!IS_IVYBRIDGE(dev))
- return;
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM2_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM2S_LP_IVB, sprite_wm);
-
- ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
- pixel_size,
- &sandybridge_display_srwm_info,
- SNB_READ_WM3_LATENCY() * 500,
- &sprite_wm);
- if (!ret) {
- DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
- pipe);
- return;
- }
- I915_WRITE(WM3S_LP_IVB, sprite_wm);
-}
-
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-static void intel_update_watermarks(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_wm)
- dev_priv->display.update_wm(dev);
-}
-
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
- uint32_t sprite_width, int pixel_size)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
- pixel_size);
-}
-
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
@@ -5143,6 +3742,222 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
}
}
+static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 temp;
+
+ temp = I915_READ(LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+ if (pipe == 1) {
+ temp |= LVDS_PIPEB_SELECT;
+ } else {
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
+ /* set the corresponding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+ /* Set the B0-B3 data pairs corresponding to whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock->p2 == 7)
+ temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more thoroughly into how
+ * panels behave in the two modes.
+ */
+ /* set the dithering flag on LVDS as needed */
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (dev_priv->lvds_dither)
+ temp |= LVDS_ENABLE_DITHER;
+ else
+ temp &= ~LVDS_ENABLE_DITHER;
+ }
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ temp |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ temp |= LVDS_VSYNC_POLARITY;
+ I915_WRITE(LVDS, temp);
+}
+
+static void i9xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+ bool is_sdvo;
+
+ is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ if (is_sdvo) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (pixel_multiplier > 1) {
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+ }
+ dpll |= DPLL_DVO_HIGH_SPEED;
+ }
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ dpll |= DPLL_DVO_HIGH_SPEED;
+
+ /* compute bitmask from p1 value */
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ else {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (IS_G4X(dev) && reduced_clock)
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ }
+ switch (clock->p2) {
+ case 5:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+ break;
+ case 7:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+ break;
+ case 10:
+ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+ break;
+ case 14:
+ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ break;
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+ if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ dpll |= PLL_REF_INPUT_TVCLKINBC;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ u32 temp = 0;
+ if (is_sdvo) {
+ temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+ }
+ I915_WRITE(DPLL_MD(pipe), temp);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+ }
+}
+
+static void i8xx_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock,
+ int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll;
+
+ dpll = DPLL_VGA_MODE_DIS;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ } else {
+ if (clock->p1 == 2)
+ dpll |= PLL_P1_DIVIDE_BY_TWO;
+ else
+ dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ if (clock->p2 == 4)
+ dpll |= PLL_P2_DIVIDE_BY_4;
+ }
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
+ /* XXX: just matching BIOS for now */
+ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ else
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ I915_WRITE(DPLL(pipe), dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ intel_update_lvds(crtc, clock, adjusted_mode);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(DPLL(pipe), dpll);
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5156,15 +3971,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, dspcntr, pipeconf, vsyncshift;
- bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ u32 dspcntr, pipeconf, vsyncshift;
+ bool ok, has_reduced_clock = false, is_sdvo = false;
+ bool is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
- u32 temp;
- u32 lvds_sync = 0;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5180,15 +3993,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (encoder->needs_tv_clock)
is_tv = true;
break;
- case INTEL_OUTPUT_DVO:
- is_dvo = true;
- break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
@@ -5235,71 +4042,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
&reduced_clock : NULL);
- dpll = DPLL_VGA_MODE_DIS;
-
- if (!IS_GEN2(dev)) {
- if (is_lvds)
- dpll |= DPLLB_MODE_LVDS;
- else
- dpll |= DPLLB_MODE_DAC_SERIAL;
- if (is_sdvo) {
- int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (pixel_multiplier > 1) {
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- }
- dpll |= DPLL_DVO_HIGH_SPEED;
- }
- if (is_dp)
- dpll |= DPLL_DVO_HIGH_SPEED;
-
- /* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev) && has_reduced_clock)
- dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- }
- switch (clock.p2) {
- case 5:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
- break;
- case 7:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
- break;
- case 10:
- dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
- break;
- case 14:
- dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
- break;
- }
- if (INTEL_INFO(dev)->gen >= 4)
- dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
- } else {
- if (is_lvds) {
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- } else {
- if (clock.p1 == 2)
- dpll |= PLL_P1_DIVIDE_BY_TWO;
- else
- dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (clock.p2 == 4)
- dpll |= PLL_P2_DIVIDE_BY_4;
- }
- }
-
- if (is_sdvo && is_tv)
- dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (is_tv)
- /* XXX: just matching BIOS for now */
- /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
- dpll |= 3;
- else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
- dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+ if (IS_GEN2(dev))
+ i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
else
- dpll |= PLL_REF_INPUT_DREFCLK;
+ i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
+ has_reduced_clock ? &reduced_clock : NULL,
+ num_connectors);
/* setup pipeconf */
pipeconf = I915_READ(PIPECONF(pipe));
@@ -5336,97 +4084,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
- dpll |= DPLL_VCO_ENABLE;
-
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- /* The LVDS pin pair needs to be on before the DPLLs are enabled.
- * This is an exception to the general rule that mode_set doesn't turn
- * things on.
- */
- if (is_lvds) {
- temp = I915_READ(LVDS);
- temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (pipe == 1) {
- temp |= LVDS_PIPEB_SELECT;
- } else {
- temp &= ~LVDS_PIPEB_SELECT;
- }
-		/* set the corresponding LVDS_BORDER bit */
- temp |= dev_priv->lvds_border_bits;
- /* Set the B0-B3 data pairs corresponding to whether we're going to
- * set the DPLLs for dual-channel mode or not.
- */
- if (clock.p2 == 7)
- temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
-
- /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
- * appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
- */
- /* set the dithering flag on LVDS as needed */
- if (INTEL_INFO(dev)->gen >= 4) {
- if (dev_priv->lvds_dither)
- temp |= LVDS_ENABLE_DITHER;
- else
- temp &= ~LVDS_ENABLE_DITHER;
- }
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- lvds_sync |= LVDS_HSYNC_POLARITY;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- lvds_sync |= LVDS_VSYNC_POLARITY;
- if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
- != lvds_sync) {
- char flags[2] = "-+";
- DRM_INFO("Changing LVDS panel from "
- "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
- flags[!(temp & LVDS_HSYNC_POLARITY)],
- flags[!(temp & LVDS_VSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- temp |= lvds_sync;
- }
- I915_WRITE(LVDS, temp);
- }
-
- if (is_dp) {
- intel_dp_set_m_n(crtc, mode, adjusted_mode);
- }
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- if (INTEL_INFO(dev)->gen >= 4) {
- temp = 0;
- if (is_sdvo) {
- temp = intel_mode_get_pixel_multiplier(adjusted_mode);
- if (temp > 1)
- temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- else
- temp = 0;
- }
- I915_WRITE(DPLL_MD(pipe), temp);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
- }
-
if (HAS_PIPE_CXSR(dev)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
@@ -5492,7 +4152,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5668,17 +4327,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
+ struct intel_encoder *encoder, *edp_encoder = NULL;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
- u32 lvds_sync = 0;
int target_clock, pixel_multiplier, lane, link_bw, factor;
unsigned int pipe_bpp;
bool dither;
+ bool is_cpu_edp = false, is_pch_edp = false;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -5704,7 +4362,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
- has_edp_encoder = encoder;
+ is_dp = true;
+ if (intel_encoder_is_pch_edp(&encoder->base))
+ is_pch_edp = true;
+ else
+ is_cpu_edp = true;
+ edp_encoder = encoder;
break;
}
@@ -5767,15 +4430,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
lane = 0;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
- if (has_edp_encoder &&
- !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_cpu_edp) {
target_clock = mode->clock;
- intel_edp_link_config(has_edp_encoder,
- &lane, &link_bw);
+ intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
/* [e]DP over FDI requires target mode clock
instead of link clock */
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ if (is_dp)
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
@@ -5866,7 +4527,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
+ if (is_dp && !is_cpu_edp)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -5909,30 +4570,22 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
- /* PCH eDP needs FDI, but CPU eDP does not */
- if (!intel_crtc->no_pll) {
- if (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(PCH_FP0(pipe), fp);
- I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-
- POSTING_READ(PCH_DPLL(pipe));
- udelay(150);
- }
- } else {
- if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(0))) {
- intel_crtc->use_pll_a = true;
- DRM_DEBUG_KMS("using pipe a dpll\n");
- } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(1))) {
- intel_crtc->use_pll_a = false;
- DRM_DEBUG_KMS("using pipe b dpll\n");
- } else {
- DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
+ * pre-Haswell/LPT generation */
+ if (HAS_PCH_LPT(dev)) {
+ DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
+ pipe);
+ } else if (!is_cpu_edp) {
+ struct intel_pch_pll *pll;
+
+ pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+ pipe);
return -EINVAL;
}
- }
+ } else
+ intel_put_pch_pll(intel_crtc);
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
@@ -5965,22 +4618,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
- lvds_sync |= LVDS_HSYNC_POLARITY;
+ temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
- lvds_sync |= LVDS_VSYNC_POLARITY;
- if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
- != lvds_sync) {
- char flags[2] = "-+";
- DRM_INFO("Changing LVDS panel from "
- "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
- flags[!(temp & LVDS_HSYNC_POLARITY)],
- flags[!(temp & LVDS_VSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
- flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
- temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
- temp |= lvds_sync;
- }
+ temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(PCH_LVDS, temp);
}
@@ -5990,7 +4632,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
- if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
/* For non-DP output, clear any trans DP clock recovery setting.*/
@@ -6000,13 +4642,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
- if (!intel_crtc->no_pll &&
- (!has_edp_encoder ||
- intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
- I915_WRITE(PCH_DPLL(pipe), dpll);
+ if (intel_crtc->pch_pll) {
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pipe));
+ POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -6014,20 +4654,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pipe), dpll);
+ I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
intel_crtc->lowfreq_avail = false;
- if (!intel_crtc->no_pll) {
+ if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(PCH_FP1(pipe), fp2);
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
- I915_WRITE(PCH_FP1(pipe), fp);
+ I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
@@ -6080,10 +4720,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
- if (has_edp_encoder &&
- !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
- }
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
@@ -6097,6 +4735,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_update_watermarks(dev);
+ intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
return ret;
}
@@ -6451,7 +5091,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- if (IS_IVYBRIDGE(dev)) {
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
ivb_update_cursor(crtc, base);
} else {
@@ -6461,9 +5101,6 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
else
i9xx_update_cursor(crtc, base);
}
-
- if (visible)
- intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
@@ -6987,7 +5624,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
return mode;
}
@@ -7086,7 +5722,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
- u32 dpll;
+ int dpll;
DRM_DEBUG_DRIVER("downclocking LVDS\n");
@@ -7100,6 +5736,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
}
+
}
/**
@@ -7158,12 +5795,16 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy)
+ if (!dev_priv->busy) {
+ intel_sanitize_pm(dev);
dev_priv->busy = true;
- else
+ } else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ if (obj == NULL)
+ return;
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
@@ -7336,18 +5977,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret)
- goto out;
+ goto err_unpin;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
@@ -7356,15 +5998,19 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(0); /* aux display base address, unused */
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, 0); /* aux display base address, unused */
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7377,33 +6023,38 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret)
- goto out;
+ goto err_unpin;
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
-
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7415,24 +6066,25 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0]);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0]);
+ intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -7440,9 +6092,13 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7453,21 +6109,22 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitches[0] | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
+ intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(ring, obj->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7477,9 +6134,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- ADVANCE_LP_RING();
-out:
+ intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_advance(ring);
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7497,22 +6158,43 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ uint32_t plane_bit = 0;
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
- goto out;
+ goto err;
+
+	switch (intel_crtc->plane) {
+ case PLANE_A:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+ break;
+ case PLANE_B:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+ break;
+ case PLANE_C:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ ret = -ENODEV;
+ goto err;
+ }
ret = intel_ring_begin(ring, 4);
if (ret)
- goto out;
+ goto err_unpin;
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
-out:
+ return 0;
+
+err_unpin:
+ intel_unpin_fb_obj(obj);
+err:
return ret;
}
@@ -7589,6 +6271,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
intel_disable_fbc(dev);
+ intel_mark_busy(dev, obj);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -7617,10 +6300,11 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
+ int i;
/* Clear any frame start delays used for debugging left by the BIOS */
- for_each_pipe(pipe) {
- reg = PIPECONF(pipe);
+ for_each_pipe(i) {
+ reg = PIPECONF(i);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
}
@@ -7690,6 +6374,23 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_pch_pll_init(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->num_pch_pll == 0) {
+ DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
+ return;
+ }
+
+ for (i = 0; i < dev_priv->num_pch_pll; i++) {
+ dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
+ dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
+ dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
+ }
+}
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7727,8 +6428,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
- if (pipe == 2 && IS_IVYBRIDGE(dev))
- intel_crtc->no_pll = true;
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
@@ -7747,15 +6446,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -7828,12 +6524,31 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_crt_init(dev);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_HASWELL(dev)) {
+ int found;
+
+ /* Haswell uses DDI functions to detect digital outputs */
+ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+ /* DDI A only supports eDP */
+ if (found)
+ intel_ddi_init(dev, PORT_A);
+
+ /* DDI B, C and D detection is indicated by the SFUSE_STRAP
+ * register */
+ found = I915_READ(SFUSE_STRAP);
+
+ if (found & SFUSE_STRAP_DDIB_DETECTED)
+ intel_ddi_init(dev, PORT_B);
+ if (found & SFUSE_STRAP_DDIC_DETECTED)
+ intel_ddi_init(dev, PORT_C);
+ if (found & SFUSE_STRAP_DDID_DETECTED)
+ intel_ddi_init(dev, PORT_D);
+ } else if (HAS_PCH_SPLIT(dev)) {
int found;
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, PCH_SDVOB);
+ found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -7843,7 +6558,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(HDMIC) & PORT_DETECTED)
intel_hdmi_init(dev, HDMIC);
- if (I915_READ(HDMID) & PORT_DETECTED)
+ if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
intel_hdmi_init(dev, HDMID);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
@@ -7857,7 +6572,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
- found = intel_sdvo_init(dev, SDVOB);
+ found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
@@ -7873,7 +6588,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
- found = intel_sdvo_init(dev, SDVOC);
+ found = intel_sdvo_init(dev, SDVOC, false);
}
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
@@ -8002,882 +6717,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};
-static struct drm_i915_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
-{
- struct drm_i915_gem_object *ctx;
- int ret;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
- DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
- return NULL;
- }
-
- ret = i915_gem_object_pin(ctx, 4096, true);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n", ret);
- goto err_unref;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
- if (ret) {
- DRM_ERROR("failed to set-domain on power context: %d\n", ret);
- goto err_unpin;
- }
-
- return ctx;
-
-err_unpin:
- i915_gem_object_unpin(ctx);
-err_unref:
- drm_gem_object_unreference(&ctx->base);
- mutex_unlock(&dev->struct_mutex);
- return NULL;
-}
-
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl;
-
- rgvswctl = I915_READ16(MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
- }
-
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE16(MEMSWCTL, rgvswctl);
- POSTING_READ16(MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE16(MEMSWCTL, rgvswctl);
-
- return true;
-}
-
-void ironlake_enable_drps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 rgvmodectl = I915_READ(MEMMODECTL);
- u8 fmax, fmin, fstart, vstart;
-
- /* Enable temp reporting */
- I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
-
- /* 100ms RC evaluation intervals */
- I915_WRITE(RCUPEI, 100000);
- I915_WRITE(RCDNEI, 100000);
-
- /* Set max/min thresholds to 90ms and 80ms respectively */
- I915_WRITE(RCBMAXAVG, 90000);
- I915_WRITE(RCBMINAVG, 80000);
-
- I915_WRITE(MEMIHYST, 1);
-
- /* Set up min, max, and cur for interrupt handling */
- fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
- fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
-
- vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
-
- dev_priv->fmax = fmax; /* IPS callback will increase this */
- dev_priv->fstart = fstart;
-
- dev_priv->max_delay = fstart;
- dev_priv->min_delay = fmin;
- dev_priv->cur_delay = fstart;
-
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
-
- I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
- /*
- * Interrupts will be enabled in ironlake_irq_postinstall
- */
-
- I915_WRITE(VIDSTART, vstart);
- POSTING_READ(VIDSTART);
-
- rgvmodectl |= MEMMODE_SWMODE_EN;
- I915_WRITE(MEMMODECTL, rgvmodectl);
-
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
- msleep(1);
-
- ironlake_set_drps(dev, fstart);
-
- dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
- I915_READ(0x112e0);
- dev_priv->last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->last_count2 = I915_READ(0x112f4);
- getrawmonotonic(&dev_priv->last_time2);
-}
-
-void ironlake_disable_drps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
-
- /* Ack interrupts, disable EFC interrupt */
- I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
- I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
- I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
- I915_WRITE(DEIIR, DE_PCU_EVENT);
- I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
-
- /* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->fstart);
- msleep(1);
- rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
- msleep(1);
-
-}
-
-void gen6_set_rps(struct drm_device *dev, u8 val)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 swreq;
-
- swreq = (val & 0x3ff) << 25;
- I915_WRITE(GEN6_RPNSWREQ, swreq);
-}
-
-void gen6_disable_rps(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
- spin_lock_irq(&dev_priv->rps_lock);
- dev_priv->pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps_lock);
-
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
- unsigned long freq;
- int div = (vidfreq & 0x3f0000) >> 16;
- int post = (vidfreq & 0x3000) >> 12;
- int pre = (vidfreq & 0x7);
-
- if (!pre)
- return 0;
-
- freq = ((div * 133333) / ((1<<post) * pre));
-
- return freq;
-}
-
-void intel_init_emon(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 lcfuse;
- u8 pxw[16];
- int i;
-
- /* Disable to program */
- I915_WRITE(ECR, 0);
- POSTING_READ(ECR);
-
- /* Program energy weights for various events */
- I915_WRITE(SDEW, 0x15040d00);
- I915_WRITE(CSIEW0, 0x007f0000);
- I915_WRITE(CSIEW1, 0x1e220004);
- I915_WRITE(CSIEW2, 0x04000004);
-
- for (i = 0; i < 5; i++)
- I915_WRITE(PEW + (i * 4), 0);
- for (i = 0; i < 3; i++)
- I915_WRITE(DEW + (i * 4), 0);
-
- /* Program P-state weights to account for frequency power adjustment */
- for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
- unsigned long freq = intel_pxfreq(pxvidfreq);
- unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
- unsigned long val;
-
- val = vid * vid;
- val *= (freq / 1000);
- val *= 255;
- val /= (127*127*900);
- if (val > 0xff)
- DRM_ERROR("bad pxval: %ld\n", val);
- pxw[i] = val;
- }
- /* Render standby states get 0 weight */
- pxw[14] = 0;
- pxw[15] = 0;
-
- for (i = 0; i < 4; i++) {
- u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
- (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW + (i * 4), val);
- }
-
- /* Adjust magic regs to magic values (more experimental results) */
- I915_WRITE(OGW0, 0);
- I915_WRITE(OGW1, 0);
- I915_WRITE(EG0, 0x00007f00);
- I915_WRITE(EG1, 0x0000000e);
- I915_WRITE(EG2, 0x000e0000);
- I915_WRITE(EG3, 0x68000300);
- I915_WRITE(EG4, 0x42000000);
- I915_WRITE(EG5, 0x00140031);
- I915_WRITE(EG6, 0);
- I915_WRITE(EG7, 0);
-
- for (i = 0; i < 8; i++)
- I915_WRITE(PXWL + (i * 4), 0);
-
- /* Enable PMON + select events */
- I915_WRITE(ECR, 0x80000019);
-
- lcfuse = I915_READ(LCFUSE02);
-
- dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-static int intel_enable_rc6(struct drm_device *dev)
-{
- /*
- * Respect the kernel parameter if it is set
- */
- if (i915_enable_rc6 >= 0)
- return i915_enable_rc6;
-
- /*
- * Disable RC6 on Ironlake
- */
- if (INTEL_INFO(dev)->gen == 5)
- return 0;
-
- /*
- * Disable rc6 on Sandybridge
- */
- if (INTEL_INFO(dev)->gen == 6) {
- DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
- return INTEL_RC6_ENABLE;
- }
- DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
- return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
-}
-
-void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
- u32 pcu_mbox, rc6_mask = 0;
- u32 gtfifodbg;
- int cur_freq, min_freq, max_freq;
- int rc6_mode;
- int i;
-
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there might be some value in exposing these to
- * userspace...
- */
- I915_WRITE(GEN6_RC_STATE, 0);
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /* Clear the DBG now so we don't confuse earlier errors */
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- gen6_gt_force_wake_get(dev_priv);
-
- /* disable the counters and set deterministic thresholds */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for (i = 0; i < I915_NUM_RINGS; i++)
- I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
- I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
- rc6_mode = intel_enable_rc6(dev_priv->dev);
- if (rc6_mode & INTEL_RC6_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
-
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
-
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
-
- DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
-
- I915_WRITE(GEN6_RC_CONTROL,
- rc6_mask |
- GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
-
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(10) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN6_FREQUENCY(12));
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- 18 << 24 |
- 6 << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
- I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
- I915_WRITE(GEN6_PCODE_DATA, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
- min_freq = (rp_state_cap & 0xff0000) >> 16;
- max_freq = rp_state_cap & 0xff;
- cur_freq = (gt_perf_status & 0xff00) >> 8;
-
- /* Check for overclock support */
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
- pcu_mbox = I915_READ(GEN6_PCODE_DATA);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500))
- DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- if (pcu_mbox & (1<<31)) { /* OC supported */
- max_freq = pcu_mbox & 0xff;
- DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
- }
-
- /* In units of 100MHz */
- dev_priv->max_delay = max_freq;
- dev_priv->min_delay = min_freq;
- dev_priv->cur_delay = cur_freq;
-
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER,
- GEN6_PM_MBOX_EVENT |
- GEN6_PM_THERMAL_EVENT |
- GEN6_PM_RP_DOWN_TIMEOUT |
- GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_UP_EI_EXPIRED |
- GEN6_PM_RP_DOWN_EI_EXPIRED);
- spin_lock_irq(&dev_priv->rps_lock);
- WARN_ON(dev_priv->pm_iir != 0);
- I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps_lock);
- /* enable all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
-
- gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
-
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- int min_freq = 15;
- int gpu_freq, ia_freq, max_ia_freq;
- int scaling_factor = 180;
-
- max_ia_freq = cpufreq_quick_get_max(0);
- /*
- * Default to measured freq if none found, PCU will ensure we don't go
- * over
- */
- if (!max_ia_freq)
- max_ia_freq = tsc_khz;
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- mutex_lock(&dev_priv->dev->struct_mutex);
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
- gpu_freq--) {
- int diff = dev_priv->max_delay - gpu_freq;
-
- /*
- * For GPU frequencies less than 750MHz, just use the lowest
- * ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
-
- I915_WRITE(GEN6_PCODE_DATA,
- (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
- gpu_freq);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
- GEN6_PCODE_READY) == 0, 10)) {
- DRM_ERROR("pcode write of freq table timed out\n");
- continue;
- }
- }
-
- mutex_unlock(&dev_priv->dev->struct_mutex);
-}
-
-static void ironlake_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- /* Required for FBC */
- dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
- DPFCRUNIT_CLOCK_GATE_DISABLE |
- DPFDUNIT_CLOCK_GATE_DISABLE;
- /* Required for CxSR */
- dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_3DCGDIS0,
- MARIUNIT_CLOCK_GATE_DISABLE |
- SVSMUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(PCH_3DCGDIS1,
- VFMUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- /*
- * According to the spec the following bits should be set in
- * order to enable memory self-refresh
- * The bit 22/21 of 0x42004
- * The bit 5 of 0x42020
- * The bit 15 of 0x45000
- */
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- (I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL));
- I915_WRITE(ILK_DSPCLK_GATE,
- (I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE));
- I915_WRITE(DISP_ARB_CTL,
- (I915_READ(DISP_ARB_CTL) |
- DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- /*
- * Based on the document from hardware guys the following bits
- * should be set unconditionally in order to enable FBC.
- * The bit 22 of 0x42000
- * The bit 22 of 0x42004
- * The bit 7,8,9 of 0x42020.
- */
- if (IS_IRONLAKE_M(dev)) {
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPFC_DIS1 |
- ILK_DPFC_DIS2 |
- ILK_CLK_FBC);
- }
-
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_ELPIN_409_SELECT);
- I915_WRITE(_3D_CHICKEN2,
- _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
- _3D_CHICKEN2_WM_READ_PIPELINED);
-}
-
-static void gen6_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_ELPIN_409_SELECT);
-
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- I915_WRITE(GEN6_UCGCTL1,
- I915_READ(GEN6_UCGCTL1) |
- GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
-
- /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
- * gating disable must be set. Failure to set it results in
- * flickering pixels due to Z write ordering failures after
- * some amount of runtime in the Mesa "fire" demo, and Unigine
- * Sanctuary and Tropics, and apparently anything else with
- * alpha test or pixel discard.
- *
- * According to the spec, bit 11 (RCCUNIT) must also be set,
- * but we didn't debug actual testcases to find it out.
- */
- I915_WRITE(GEN6_UCGCTL2,
- GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
- GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * According to the spec the following bits should be
- * set in order to enable memory self-refresh and fbc:
- * The bit21 and bit22 of 0x42000
- * The bit21 and bit22 of 0x42004
- * The bit5 and bit7 of 0x42020
- * The bit14 of 0x70180
- * The bit14 of 0x71180
- */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
- I915_WRITE(ILK_DISPLAY_CHICKEN2,
- I915_READ(ILK_DISPLAY_CHICKEN2) |
- ILK_DPARB_GATE | ILK_VSDPFD_FULL);
- I915_WRITE(ILK_DSPCLK_GATE,
- I915_READ(ILK_DSPCLK_GATE) |
- ILK_DPARB_CLK_GATE |
- ILK_DPFD_CLK_GATE);
-
- for_each_pipe(pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
-}
-
-static void ivybridge_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
- uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
- I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
-
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
- * This implements the WaDisableRCZUnitClockGating workaround.
- */
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
-
- I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
- I915_WRITE(IVB_CHICKEN3,
- CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
- CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
- /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
- I915_WRITE(GEN7_L3CNTLREG1,
- GEN7_WA_FOR_GEN7_L3_CONTROL);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
-
- /* This is required by WaCatErrorRejectionIssue */
- I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
- GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
- for_each_pipe(pipe) {
- I915_WRITE(DSPCNTR(pipe),
- I915_READ(DSPCNTR(pipe)) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_display_plane(dev_priv, pipe);
- }
-}
-
-static void g4x_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dspclk_gate;
-
- I915_WRITE(RENCLK_GATE_D1, 0);
- I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
- GS_UNIT_CLOCK_GATE_DISABLE |
- CL_UNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(RAMCLK_GATE_D, 0);
- dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
- OVRUNIT_CLOCK_GATE_DISABLE |
- OVCUNIT_CLOCK_GATE_DISABLE;
- if (IS_GM45(dev))
- dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
-}
-
-static void crestline_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(DSPCLK_GATE_D, 0);
- I915_WRITE(RAMCLK_GATE_D, 0);
- I915_WRITE16(DEUC, 0);
-}
-
-static void broadwater_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
- I965_RCC_CLOCK_GATE_DISABLE |
- I965_RCPB_CLOCK_GATE_DISABLE |
- I965_ISC_CLOCK_GATE_DISABLE |
- I965_FBC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
-}
-
-static void gen3_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dstate = I915_READ(D_STATE);
-
- dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
- DSTATE_DOT_CLOCK_GATING;
- I915_WRITE(D_STATE, dstate);
-}
-
-static void i85x_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
-}
-
-static void i830_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
-static void ironlake_teardown_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->renderctx) {
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(&dev_priv->renderctx->base);
- dev_priv->renderctx = NULL;
- }
-
- if (dev_priv->pwrctx) {
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(&dev_priv->pwrctx->base);
- dev_priv->pwrctx = NULL;
- }
-}
-
-static void ironlake_disable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (I915_READ(PWRCTXA)) {
- /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
- wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
- 50);
-
- I915_WRITE(PWRCTXA, 0);
- POSTING_READ(PWRCTXA);
-
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- POSTING_READ(RSTDBYCTL);
- }
-
- ironlake_teardown_rc6(dev);
-}
-
-static int ironlake_setup_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (!dev_priv->renderctx)
- return -ENOMEM;
-
- if (dev_priv->pwrctx == NULL)
- dev_priv->pwrctx = intel_alloc_context_page(dev);
- if (!dev_priv->pwrctx) {
- ironlake_teardown_rc6(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void ironlake_enable_rc6(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- /* rc6 disabled by default due to repeated reports of hanging during
- * boot and resume.
- */
- if (!intel_enable_rc6(dev))
- return;
-
- mutex_lock(&dev->struct_mutex);
- ret = ironlake_setup_rc6(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- /*
- * GPU can automatically power down the render unit if given a page
- * to save state.
- */
- ret = BEGIN_LP_RING(6);
- if (ret) {
- ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
- OUT_RING(MI_SET_CONTEXT);
- OUT_RING(dev_priv->renderctx->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- OUT_RING(MI_SUSPEND_FLUSH);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_FLUSH);
- ADVANCE_LP_RING();
-
- /*
- * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
- * does an implicit flush, combined with MI_FLUSH above, it should be
- * safe to assume that renderctx is valid
- */
- ret = intel_wait_ring_idle(LP_RING(dev_priv));
- if (ret) {
- DRM_ERROR("failed to enable ironlake power power savings\n");
- ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
- return;
- }
-
- I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
- I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- mutex_unlock(&dev->struct_mutex);
-}
-
-void intel_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->display.init_clock_gating(dev);
-
- if (dev_priv->display.init_pch_clock_gating)
- dev_priv->display.init_pch_clock_gating(dev);
-}
-
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -8887,32 +6726,20 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
- if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
- dev_priv->display.enable_fbc = ironlake_enable_fbc;
- dev_priv->display.disable_fbc = ironlake_disable_fbc;
- } else if (IS_GM45(dev)) {
- dev_priv->display.fbc_enabled = g4x_fbc_enabled;
- dev_priv->display.enable_fbc = g4x_enable_fbc;
- dev_priv->display.disable_fbc = g4x_disable_fbc;
- } else if (IS_CRESTLINE(dev)) {
- dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
- dev_priv->display.enable_fbc = i8xx_enable_fbc;
- dev_priv->display.disable_fbc = i8xx_disable_fbc;
- }
- /* 855GM needs testing */
- }
-
/* Returns the core display clock speed */
- if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ if (IS_VALLEYVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ valleyview_get_display_clock_speed;
+ else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
@@ -8934,124 +6761,27 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- /* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured MT forcewake,
- * and if the device is in RC6, then force_wake_mt_get will not wake
- * the device and the ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT forcewake being
- * disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->display.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->display.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
-
- if (HAS_PCH_IBX(dev))
- dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
- else if (HAS_PCH_CPT(dev))
- dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
if (IS_GEN5(dev)) {
- if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
- dev_priv->display.update_wm = ironlake_update_wm;
- else {
- DRM_DEBUG_KMS("Failed to get proper latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
- if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.init_clock_gating = gen6_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- if (SNB_READ_WM0_LATENCY()) {
- dev_priv->display.update_wm = sandybridge_update_wm;
- dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
- } else {
- DRM_DEBUG_KMS("Failed to read display plane latency. "
- "Disable CxSR\n");
- dev_priv->display.update_wm = NULL;
- }
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
} else
dev_priv->display.update_wm = NULL;
- } else if (IS_PINEVIEW(dev)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
- dev_priv->is_ddr3,
- dev_priv->fsb_freq,
- dev_priv->mem_freq)) {
- DRM_INFO("failed to find known CxSR latency "
- "(found ddr%s fsb freq %d, mem freq %d), "
- "disabling CxSR\n",
- (dev_priv->is_ddr3 == 1) ? "3" : "2",
- dev_priv->fsb_freq, dev_priv->mem_freq);
- /* Disable CxSR and never update its watermark again */
- pineview_disable_cxsr(dev);
- dev_priv->display.update_wm = NULL;
- } else
- dev_priv->display.update_wm = pineview_update_wm;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.force_wake_get = vlv_force_wake_get;
+ dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
- dev_priv->display.update_wm = g4x_update_wm;
- dev_priv->display.init_clock_gating = g4x_init_clock_gating;
- } else if (IS_GEN4(dev)) {
- dev_priv->display.update_wm = i965_update_wm;
- if (IS_CRESTLINE(dev))
- dev_priv->display.init_clock_gating = crestline_init_clock_gating;
- else if (IS_BROADWATER(dev))
- dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
- } else if (IS_GEN3(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
- } else if (IS_I865G(dev)) {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- } else if (IS_I85X(dev)) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- } else {
- dev_priv->display.update_wm = i830_update_wm;
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
- if (IS_845G(dev))
- dev_priv->display.get_fifo_size = i845_get_fifo_size;
- else
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
/* Default just returns -ENODEV to indicate unsupported */
@@ -9090,7 +6820,7 @@ static void quirk_pipea_force(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
- DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
+ DRM_INFO("applying pipe a force quirk\n");
}
/*
@@ -9100,6 +6830,18 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
}
struct intel_quirk {
@@ -9109,7 +6851,7 @@ struct intel_quirk {
void (*hook)(struct drm_device *dev);
};
-struct intel_quirk intel_quirks[] = {
+static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -9134,6 +6876,9 @@ struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
};
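/* For illustration (decoded informally, not part of the patch): each table
 * entry is { PCI device id, subsystem vendor, subsystem device, hook }, so the
 * new line matches a GM45-class IGD (0x2a42) in an Acer (0x1025) system with
 * subsystem id 0x0459 and runs quirk_invert_brightness() on it.
 */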
static void intel_init_quirks(struct drm_device *dev)
@@ -9166,7 +6911,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_reg = VGACNTRL;
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(1, VGA_SR_INDEX);
+ outb(SR01, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1<<5, VGA_SR_DATA);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
@@ -9176,6 +6921,24 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_init_clock_gating(dev);
+
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ }
+
+ if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
+}
+
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9189,10 +6952,14 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ dev->mode_config.funcs = &intel_mode_funcs;
intel_init_quirks(dev);
+ intel_init_pm(dev);
+
+ intel_prepare_ddi(dev);
+
intel_init_display(dev);
if (IS_GEN2(dev)) {
@@ -9217,22 +6984,12 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
+ intel_pch_pll_init(dev);
+
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- intel_init_emon(dev);
- }
-
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
-
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -9240,8 +6997,7 @@ void intel_modeset_init(struct drm_device *dev)
void intel_modeset_gem_init(struct drm_device *dev)
{
- if (IS_IRONLAKE_M(dev))
- ironlake_enable_rc6(dev);
+ intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
}
@@ -9271,12 +7027,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
gen6_disable_rps(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_rc6(dev);
+ if (IS_VALLEYVIEW(dev))
+ vlv_init_dpio(dev);
+
mutex_unlock(&dev->struct_mutex);
/* Disable the irq before mode object teardown, for the irq might
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4b637919f74f..c0449324143c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,6 +32,7 @@
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -67,6 +68,8 @@ struct intel_dp {
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ struct edid *edid; /* cached EDID for eDP */
+ int edid_mode_count;
};
/**
@@ -266,6 +269,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -368,7 +374,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
- int try, precharge = 5;
+ int try, precharge;
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
@@ -388,6 +394,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ(ch_ctl);
@@ -688,7 +699,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp;
+ int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -702,24 +713,33 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = intel_dp->panel_fixed_mode->clock;
}
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
+ DRM_DEBUG_KMS("DP link computation with max lane count %i "
+ "max bw %02x pixel clock %iKHz\n",
+ max_lane_count, bws[max_clock], mode->clock);
+
if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+ mode_rate = intel_dp_link_required(mode->clock, bpp);
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(mode->clock, bpp)
- <= link_avail) {
+ if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("Display port link bw %02x lane "
- "count %d clock %d\n",
+ DRM_DEBUG_KMS("DP link bw %02x lane "
+ "count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
+ adjusted_mode->clock, bpp);
+ DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+ mode_rate, link_avail);
return true;
}
}
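/* Worked example (illustrative; the exact helpers are assumed to compute
 * roughly link_required = clock * bpp / 8 and max_data_rate =
 * link_clock * lanes * 8/10 for 8b/10b coding): a 148.5 MHz 1080p mode at
 * 24 bpp needs mode_rate ~= 148500 * 24 / 8 = 445500. Two lanes at 2.7 GHz
 * offer only ~432000, but four lanes at 1.62 GHz offer ~518400, so the loop
 * settles on bws[0] (DP_LINK_BW_1_62) with lane_count = 4 for that mode.
 */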
@@ -1148,10 +1168,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(dev_priv);
- pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -1259,18 +1279,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /* Make sure the panel is off before trying to change the mode. But also
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
- /* Wake up the sink first */
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
-
- /* Make sure the panel is off before trying to
- * change the mode
- */
}
static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1302,10 +1320,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
+ /* Switching the panel off requires vdd. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
@@ -1954,6 +1973,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
}
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+ u8 buf[3];
+
+ if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ ironlake_edp_panel_vdd_on(intel_dp);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+}
+
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
@@ -2088,10 +2128,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct edid *edid;
+ int size;
+
+ if (is_edp(intel_dp)) {
+ if (!intel_dp->edid)
+ return NULL;
+
+ size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+ edid = kmalloc(size, GFP_KERNEL);
+ if (!edid)
+ return NULL;
+
+ memcpy(edid, intel_dp->edid, size);
+ return edid;
+ }
- ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
return edid;
}
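/* For illustration: EDID_LENGTH is 128 bytes per block, so an eDP panel whose
 * cached EDID has one extension (edid->extensions == 1) gets a
 * (1 + 1) * 128 = 256 byte kmalloc'ed copy handed back to the caller.
 */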
@@ -2101,9 +2153,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
- ironlake_edp_panel_vdd_on(intel_dp);
+ if (is_edp(intel_dp)) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_dp->edid);
+ ret = drm_add_edid_modes(connector, intel_dp->edid);
+ drm_edid_to_eld(connector,
+ intel_dp->edid);
+ connector->display_info.raw_edid = NULL;
+ return intel_dp->edid_mode_count;
+ }
+
ret = intel_ddc_get_modes(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
@@ -2137,6 +2197,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (status != connector_status_connected)
return status;
+ intel_dp_probe_oui(intel_dp);
+
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@@ -2291,6 +2353,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
+ kfree(intel_dp->edid);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
@@ -2438,6 +2501,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -2473,16 +2537,26 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
bool ret;
struct edp_power_seq cur, vbt;
u32 pp_on, pp_off, pp_div;
+ struct edid *edid;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
+ if (!pp_on || !pp_off || !pp_div) {
+ DRM_INFO("bad panel power sequencing delays, disabling panel\n");
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ intel_dp_destroy(&intel_connector->base);
+ return;
+ }
+
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
@@ -2538,9 +2612,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_destroy(&intel_connector->base);
return;
}
- }
- intel_dp_i2c_init(intel_dp, intel_connector, name);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ intel_dp->edid_mode_count =
+ drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ intel_dp->edid = edid;
+ }
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ }
intel_encoder->hot_plug = intel_dp_hot_plug;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 715afa153025..3e0918834e7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -45,6 +45,18 @@
ret__; \
})
+#define wait_for_atomic_us(COND, US) ({ \
+ int i, ret__ = -ETIMEDOUT; \
+ for (i = 0; i < (US); i++) { \
+ if ((COND)) { \
+ ret__ = 0; \
+ break; \
+ } \
+ udelay(1); \
+ } \
+ ret__; \
+})
+
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
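/* Usage sketch (illustrative only; the register and timeout are placeholders,
 * not taken from this patch): wait_for_atomic_us() busy-waits in 1 us steps,
 * so it suits short waits in atomic context, e.g.
 *
 *	if (wait_for_atomic_us(I915_READ_NOTRACE(FORCEWAKE_ACK) & 1, 500))
 *		DRM_ERROR("timed out waiting for forcewake ack\n");
 */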
@@ -171,8 +183,8 @@ struct intel_crtc {
bool cursor_visible;
unsigned int bpp;
- bool no_pll; /* tertiary pipe for IVB */
- bool use_pll_a;
+ /* We can share PLLs across outputs if the timings match */
+ struct intel_pch_pll *pch_pll;
};
struct intel_plane {
@@ -196,6 +208,25 @@ struct intel_plane {
struct drm_intel_sprite_colorkey *key);
};
+struct intel_watermark_params {
+ unsigned long fifo_size;
+ unsigned long max_wm;
+ unsigned long default_wm;
+ unsigned long guard_size;
+ unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+ int is_desktop;
+ int is_ddr3;
+ unsigned long fsb_freq;
+ unsigned long mem_freq;
+ unsigned long display_sr;
+ unsigned long display_hpll_disable;
+ unsigned long cursor_sr;
+ unsigned long cursor_hpll_disable;
+};
+
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@@ -207,6 +238,8 @@ struct intel_plane {
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
+#define DIP_AVI_PR_1 0
+#define DIP_AVI_PR_2 1
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@@ -240,23 +273,36 @@ struct dip_infoframe {
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
- /* PB5 - PR 3:0 */
- uint8_t PR;
+ /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
+ uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
- } avi;
+ } __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
- } spd;
+ } __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
+struct intel_hdmi {
+ struct intel_encoder base;
+ u32 sdvox_reg;
+ int ddc_bus;
+ int ddi_port;
+ uint32_t color_range;
+ bool has_hdmi_sink;
+ bool has_audio;
+ enum hdmi_force_audio force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
+};
+
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@@ -296,8 +342,13 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
-extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
+extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
+ bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev,
@@ -311,6 +362,10 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane);
+
+void intel_sanitize_pm(struct drm_device *dev);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -368,12 +423,9 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void ironlake_disable_rc6(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -411,16 +463,43 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
+extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
+
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
+
+extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
+extern void intel_ddi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 020a7d7f744d..60ba50b956f2 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -243,7 +243,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* that's not the case.
*/
intel_ddc_get_modes(connector,
- &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
@@ -375,7 +375,7 @@ void intel_dvo_init(struct drm_device *dev)
* special cases, but otherwise default to what's defined
* in the spec.
*/
- if (dvo->gpio != 0)
+ if (intel_gmbus_is_port_valid(dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GMBUS_PORT_SSC;
@@ -386,7 +386,7 @@ void intel_dvo_init(struct drm_device *dev)
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- i2c = &dev_priv->gmbus[gpio].adapter;
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
intel_dvo->dev = *dvo;
if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 6e9ee33fd412..bf8690720a0c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2d7f47b56b6a..2ead3bf7c21d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,19 +37,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_hdmi {
- struct intel_encoder base;
- u32 sdvox_reg;
- int ddc_bus;
- uint32_t color_range;
- bool has_hdmi_sink;
- bool has_audio;
- enum hdmi_force_audio force_audio;
- void (*write_infoframe)(struct drm_encoder *encoder,
- struct dip_infoframe *frame);
-};
-
-static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
}
@@ -75,108 +63,246 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
frame->checksum = 0x100 - sum;
}
-static u32 intel_infoframe_index(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
- u32 flags = 0;
-
switch (frame->type) {
case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_SELECT_AVI;
- break;
+ return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_SELECT_SPD;
- break;
+ return VIDEO_DIP_SELECT_SPD;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
+ return 0;
}
-
- return flags;
}
-static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
{
- u32 flags = 0;
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return VIDEO_DIP_ENABLE_AVI;
+ case DIP_TYPE_SPD:
+ return VIDEO_DIP_ENABLE_SPD;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
+}
+static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+{
switch (frame->type) {
case DIP_TYPE_AVI:
- flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
- break;
+ return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
- flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
- break;
+ return VIDEO_DIP_ENABLE_SPD_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
- break;
+ return 0;
}
+}
- return flags;
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
+{
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ return HSW_TVIDEO_DIP_AVI_DATA(pipe);
+ case DIP_TYPE_SPD:
+ return HSW_TVIDEO_DIP_SPD_DATA(pipe);
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ return 0;
+ }
}
-static void i9xx_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
-
- /* XXX first guess at handling video port, is this corrent? */
+ val &= ~VIDEO_DIP_PORT_MASK;
if (intel_hdmi->sdvox_reg == SDVOB)
- port = VIDEO_DIP_PORT_B;
+ val |= VIDEO_DIP_PORT_B;
else if (intel_hdmi->sdvox_reg == SDVOC)
- port = VIDEO_DIP_PORT_C;
+ val |= VIDEO_DIP_PORT_C;
else
return;
- flags = intel_infoframe_index(frame);
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
- val &= ~VIDEO_DIP_SELECT_MASK;
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, val);
}
-static void ironlake_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
- u32 flags, val = I915_READ(reg);
+ u32 val = I915_READ(reg);
+
+ val &= ~VIDEO_DIP_PORT_MASK;
+ switch (intel_hdmi->sdvox_reg) {
+ case HDMIB:
+ val |= VIDEO_DIP_PORT_B;
+ break;
+ case HDMIC:
+ val |= VIDEO_DIP_PORT_C;
+ break;
+ case HDMID:
+ val |= VIDEO_DIP_PORT_D;
+ break;
+ default:
+ return;
+ }
intel_wait_for_vblank(dev, intel_crtc->pipe);
- flags = intel_infoframe_index(frame);
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ /* The DIP control register spec says that we need to update the AVI
+ * infoframe without clearing its enable bit */
+ if (frame->type == DIP_TYPE_AVI)
+ val |= VIDEO_DIP_ENABLE_AVI;
+ else
+ val &= ~g4x_infoframe_enable(frame);
+
+ val |= VIDEO_DIP_ENABLE;
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+ I915_WRITE(reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
- flags |= intel_infoframe_flags(frame);
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+ I915_WRITE(reg, val);
}
+
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(frame);
+
+ val &= ~g4x_infoframe_enable(frame);
+ val |= VIDEO_DIP_ENABLE;
+
+ I915_WRITE(reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ val |= g4x_infoframe_enable(frame);
+ val &= ~VIDEO_DIP_FREQ_MASK;
+ val |= VIDEO_DIP_FREQ_VSYNC;
+
+ I915_WRITE(reg, val);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
+ unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+ u32 val = I915_READ(ctl_reg);
+
+ if (data_reg == 0)
+ return;
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ val &= ~hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(data_reg + i, *data);
+ data++;
+ }
+
+ val |= hsw_infoframe_enable(frame);
+ I915_WRITE(ctl_reg, val);
+}
+
static void intel_set_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
@@ -189,7 +315,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
intel_hdmi->write_infoframe(encoder, frame);
}
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@@ -197,10 +324,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
.len = DIP_LEN_AVI,
};
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
intel_set_infoframe(encoder, &avi_if);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
@@ -221,8 +351,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
@@ -259,7 +388,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
- intel_hdmi_set_avi_infoframe(encoder);
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
@@ -334,7 +463,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -367,7 +497,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
*/
return intel_ddc_get_modes(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
}
static bool
@@ -379,7 +510,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
bool has_audio = false;
edid = drm_get_edid(connector,
- &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
@@ -393,8 +525,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
static int
intel_hdmi_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
+ struct drm_property *property,
+ uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
@@ -453,6 +585,14 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
+ .dpms = intel_ddi_dpms,
+ .mode_fixup = intel_hdmi_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_ddi_mode_set,
+ .commit = intel_encoder_commit,
+};
+
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
@@ -542,20 +682,60 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ intel_hdmi->ddi_port = PORT_B;
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ intel_hdmi->ddi_port = PORT_C;
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
+ } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
+ DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
+ intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
+ intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ intel_hdmi->ddi_port = PORT_D;
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
+ } else {
+ /* If we got an unknown sdvox_reg, things are pretty much broken
+ * in a way that we should let the kernel know about it */
+ BUG();
}
intel_hdmi->sdvox_reg = sdvox_reg;
if (!HAS_PCH_SPLIT(dev)) {
- intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ intel_hdmi->write_infoframe = g4x_write_infoframe;
I915_WRITE(VIDEO_DIP_CTL, 0);
+ } else if (IS_VALLEYVIEW(dev)) {
+ intel_hdmi->write_infoframe = vlv_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
+ } else if (IS_HASWELL(dev)) {
+ /* FIXME: Haswell has a new set of DIP frame registers, but we are
+ * just doing the minimal required for HDMI to work at this stage.
+ */
+ intel_hdmi->write_infoframe = hsw_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+ } else if (HAS_PCH_IBX(dev)) {
+ intel_hdmi->write_infoframe = ibx_write_infoframe;
+ for_each_pipe(i)
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
} else {
- intel_hdmi->write_infoframe = ironlake_write_infoframe;
+ intel_hdmi->write_infoframe = cpt_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
}
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+ if (IS_HASWELL(dev))
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
+ else
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8fdc95700218..1991a4408cf9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -35,6 +35,20 @@
#include "i915_drm.h"
#include "i915_drv.h"
+struct gmbus_port {
+ const char *name;
+ int reg;
+};
+
+static const struct gmbus_port gmbus_ports[] = {
+ { "ssc", GPIOB },
+ { "vga", GPIOA },
+ { "panel", GPIOC },
+ { "dpc", GPIOD },
+ { "dpb", GPIOE },
+ { "dpd", GPIOF },
+};
+
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 10
@@ -49,10 +63,7 @@ void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_PCH_SPLIT(dev))
- I915_WRITE(PCH_GMBUS0, 0);
- else
- I915_WRITE(GMBUS0, 0);
+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
}
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -140,63 +151,173 @@ static void set_data(void *data, int state_high)
POSTING_READ(bus->gpio_reg);
}
-static bool
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ intel_i2c_reset(dev_priv->dev);
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ udelay(I2C_RISEFALL_TIME);
+ return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ set_data(bus, 1);
+ set_clock(bus, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+}
+
+static void
intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- static const int map_pin_to_reg[] = {
- 0,
- GPIOB,
- GPIOA,
- GPIOC,
- GPIOD,
- GPIOE,
- 0,
- GPIOF,
- };
struct i2c_algo_bit_data *algo;
- if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
- return false;
-
algo = &bus->bit_algo;
- bus->gpio_reg = map_pin_to_reg[pin];
- if (HAS_PCH_SPLIT(dev_priv->dev))
- bus->gpio_reg += PCH_GPIOA - GPIOA;
+ /* -1 to map pin pair to gmbus index */
+ bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
algo->getsda = get_data;
algo->getscl = get_clock;
+ algo->pre_xfer = intel_gpio_pre_xfer;
+ algo->post_xfer = intel_gpio_post_xfer;
algo->udelay = I2C_RISEFALL_TIME;
algo->timeout = usecs_to_jiffies(2200);
algo->data = bus;
+}
- return true;
+static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+
+ I915_WRITE(GMBUS1 + reg_offset,
+ gmbus1_index |
+ GMBUS_CYCLE_WAIT |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 val, loop = 0;
+ u32 gmbus2;
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
+ if (ret)
+ return -ETIMEDOUT;
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+
+ val = I915_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ }
+
+ return 0;
}
static int
-intel_i2c_quirk_xfer(struct intel_gmbus *bus,
- struct i2c_msg *msgs,
- int num)
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u16 len = msg->len;
+ u8 *buf = msg->buf;
+ u32 val, loop;
+
+ val = loop = 0;
+ while (len && loop < 4) {
+ val |= *buf++ << (8 * loop++);
+ len -= 1;
+ }
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+ I915_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT |
+ (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ while (len) {
+ int ret;
+ u32 gmbus2;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ I915_WRITE(GMBUS3 + reg_offset, val);
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_RDY),
+ 50);
+ if (ret)
+ return -ETIMEDOUT;
+ if (gmbus2 & GMBUS_SATOER)
+ return -ENXIO;
+ }
+ return 0;
+}
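/* Packing illustration (not part of the patch): the loops above fill GMBUS3
 * least-significant byte first, so buf = { 0xAA, 0xBB, 0xCC, 0xDD } becomes
 * val = 0xDDCCBBAA and message byte 0 lands in register bits 7:0.
 */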
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
+ */
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+{
+ return (i + 1 < num &&
+ !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD));
+}
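/* For illustration (assumed caller, not part of the patch): a DDC/EDID fetch
 * has exactly this shape -- a short write of the offset followed by a read:
 *
 *	u8 offset = 0;
 *	u8 block[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0,        .len = 1,             .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(block), .buf = block },
 *	};
 *
 * gmbus_is_index_read() matches this pair, so gmbus_xfer() hands it to
 * gmbus_xfer_index_read() and the two messages become one INDEX cycle.
 */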
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ u32 gmbus1_index = 0;
+ u32 gmbus5 = 0;
int ret;
- intel_i2c_reset(dev_priv->dev);
+ if (msgs[0].len == 2)
+ gmbus5 = GMBUS_2BYTE_INDEX_EN |
+ msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+ if (msgs[0].len == 1)
+ gmbus1_index = GMBUS_CYCLE_INDEX |
+ (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
- intel_i2c_quirk_set(dev_priv, true);
- set_data(bus, 1);
- set_clock(bus, 1);
- udelay(I2C_RISEFALL_TIME);
+ /* GMBUS5 holds 16-bit index */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, gmbus5);
- ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num);
+ ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
- set_data(bus, 1);
- set_clock(bus, 1);
- intel_i2c_quirk_set(dev_priv, false);
+ /* Clear GMBUS5 after each index transfer */
+ if (gmbus5)
+ I915_WRITE(GMBUS5 + reg_offset, 0);
return ret;
}
@@ -210,117 +331,111 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int i, reg_offset, ret;
+ int i, reg_offset;
+ int ret = 0;
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
goto out;
}
- reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+ reg_offset = dev_priv->gpio_mmio_base;
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
- u16 len = msgs[i].len;
- u8 *buf = msgs[i].buf;
-
- if (msgs[i].flags & I2C_M_RD) {
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
- POSTING_READ(GMBUS2+reg_offset);
- do {
- u32 val, loop = 0;
-
- if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
- goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- goto clear_err;
-
- val = I915_READ(GMBUS3 + reg_offset);
- do {
- *buf++ = val & 0xff;
- val >>= 8;
- } while (--len && ++loop < 4);
- } while (len);
+ u32 gmbus2;
+
+ if (gmbus_is_index_read(msgs, i, num)) {
+ ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+ i += 1; /* set i to the index of the read xfer */
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
- u32 val, loop;
-
- val = loop = 0;
- do {
- val |= *buf++ << (8 * loop);
- } while (--len && ++loop < 4);
-
- I915_WRITE(GMBUS3 + reg_offset, val);
- I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT |
- (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
- (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
- (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
- POSTING_READ(GMBUS2+reg_offset);
-
- while (len) {
- if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
- goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
- goto clear_err;
-
- val = loop = 0;
- do {
- val |= *buf++ << (8 * loop);
- } while (--len && ++loop < 4);
-
- I915_WRITE(GMBUS3 + reg_offset, val);
- POSTING_READ(GMBUS2+reg_offset);
- }
+ ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
- if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ if (ret == -ETIMEDOUT)
+ goto timeout;
+ if (ret == -ENXIO)
+ goto clear_err;
+
+ ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+ (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
+ 50);
+ if (ret)
goto timeout;
- if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ if (gmbus2 & GMBUS_SATOER)
goto clear_err;
}
- goto done;
+ /* Generate a STOP condition on the bus. Note that gmbus can't generate
+ * a STOP on the very first cycle. To simplify the code we
+ * unconditionally generate the STOP condition with an additional gmbus
+ * cycle. */
+ I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+ * till then let it sleep.
+ */
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+ I915_WRITE(GMBUS0 + reg_offset, 0);
+ ret = ret ?: i;
+ goto out;
clear_err:
+ /*
+ * Wait for bus to IDLE before clearing NAK.
+ * If we clear the NAK while bus is still active, then it will stay
+ * active and the next transaction may fail.
+ *
+ * If no ACK is received during the address phase of a transaction, the
+ * adapter must report -ENXIO. It is not clear what to return if no ACK
+ * is received at other times. But we have to be careful to not return
+ * spurious -ENXIO because that will prevent i2c and drm edid functions
+ * from retrying. So return -ENXIO only when gmbus properly quiescents -
+ * timing out seems to happen when there _is_ a ddc chip present, but
+ * it's slow responding and only answers on the 2nd retry.
+ */
+ ret = -ENXIO;
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+ 10)) {
+ DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+ adapter->name);
+ ret = -ETIMEDOUT;
+ }
+
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
-
-done:
- /* Mark the GMBUS interface as disabled after waiting for idle.
- * We will re-enable it at the start of the next xfer,
- * till then let it sleep.
- */
- if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
- DRM_INFO("GMBUS timed out waiting for idle\n");
I915_WRITE(GMBUS0 + reg_offset, 0);
- ret = i;
+
+ DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
goto out;
timeout:
- DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
- bus->reg0 & 0xff, bus->adapter.name);
+ DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
- if (!bus->has_gpio) {
- ret = -EIO;
- } else {
- bus->force_bit = true;
- ret = intel_i2c_quirk_xfer(bus, msgs, num);
- }
+ bus->force_bit = true;
+ ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+
out:
mutex_unlock(&dev_priv->gmbus_mutex);
return ret;
@@ -346,35 +461,26 @@ static const struct i2c_algorithm gmbus_algorithm = {
*/
int intel_setup_gmbus(struct drm_device *dev)
{
- static const char *names[GMBUS_NUM_PORTS] = {
- "disabled",
- "ssc",
- "vga",
- "panel",
- "dpc",
- "dpb",
- "reserved",
- "dpd",
- };
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
- GFP_KERNEL);
- if (dev_priv->gmbus == NULL)
- return -ENOMEM;
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+ else
+ dev_priv->gpio_mmio_base = 0;
mutex_init(&dev_priv->gmbus_mutex);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ u32 port = i + 1; /* +1 to map gmbus index to pin pair */
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s",
- names[i]);
+ gmbus_ports[i].name);
bus->adapter.dev.parent = &dev->pdev->dev;
bus->dev_priv = dev_priv;
@@ -385,13 +491,13 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
/* By default use a conservative clock rate */
- bus->reg0 = i | GMBUS_RATE_100KHZ;
+ bus->reg0 = port | GMBUS_RATE_100KHZ;
- bus->has_gpio = intel_gpio_setup(bus, i);
-
- /* XXX force bit banging until GMBUS is fully debugged */
- if (bus->has_gpio)
+ /* gmbus seems to be broken on i830 */
+ if (IS_I830(dev))
bus->force_bit = true;
+
+ intel_gpio_setup(bus, port);
}
intel_i2c_reset(dev_priv->dev);
@@ -403,11 +509,18 @@ err:
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
- kfree(dev_priv->gmbus);
- dev_priv->gmbus = NULL;
return ret;
}
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+ unsigned port)
+{
+ WARN_ON(!intel_gmbus_is_port_valid(port));
+ /* -1 to map pin pair to gmbus index */
+ return (intel_gmbus_is_port_valid(port)) ?
+ &dev_priv->gmbus[port - 1].adapter : NULL;
+}
+
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
@@ -419,8 +532,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- if (bus->has_gpio)
- bus->force_bit = force_bit;
+ bus->force_bit = force_bit;
}
void intel_teardown_gmbus(struct drm_device *dev)
@@ -435,7 +547,4 @@ void intel_teardown_gmbus(struct drm_device *dev)
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
-
- kfree(dev_priv->gmbus);
- dev_priv->gmbus = NULL;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9c71183629c2..08eb04c787e8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -480,7 +480,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
+ DRM_INFO("Skipping forced modeset for %s\n", id->ident);
return 1;
}
@@ -628,7 +628,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
- DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
+ DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
@@ -747,6 +747,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Hewlett-Packard t5745",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
@@ -851,8 +859,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
child->device_type != DEVICE_TYPE_LFP)
continue;
- if (child->i2c_pin)
- *i2c_pin = child->i2c_pin;
+ if (intel_gmbus_is_port_valid(child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
* the VBT correctly. Since LVDS requires additional
@@ -993,7 +1001,8 @@ bool intel_lvds_init(struct drm_device *dev)
* preferred mode is the right one.
*/
intel_lvds->edid = drm_get_edid(connector,
- &dev_priv->gmbus[pin].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ pin));
if (intel_lvds->edid) {
if (drm_add_edid_modes(connector,
intel_lvds->edid)) {
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d1928e79d9b6..d67ec3a51e42 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -56,7 +56,8 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
}
};
- return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
+ return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
+ msgs, 2) == 2;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 289140bc83cb..18bd0af855dc 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,6 +25,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <acpi/video.h>
@@ -149,7 +151,7 @@ struct opregion_asle {
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
if (!(bclp & ASLE_BCLP_VALID))
@@ -161,7 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
max = intel_panel_get_max_backlight(dev);
intel_panel_set_backlight(dev, bclp * max / 255);
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+ iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
return 0;
}
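/* For illustration: 0x64 is decimal 100, so the 0-255 bclp range maps to a
 * 0-100 percentage, e.g. bclp = 255 writes back 100 | ASLE_CBLV_VALID.
 */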
@@ -198,14 +200,14 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -213,31 +215,31 @@ void intel_opregion_asle_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, asle->alsi);
+ asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, asle->pfit);
+ asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+ asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
- asle_req = asle->aslc & ASLE_REQ_MSK;
+ asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG_DRIVER("non asle set request??\n");
@@ -250,7 +252,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
+ asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (asle_req & ASLE_SET_PFIT) {
DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -262,7 +264,7 @@ void intel_opregion_gse_intr(struct drm_device *dev)
asle_stat |= ASLE_PWM_FREQ_FAILED;
}
- asle->aslc = asle_stat;
+ iowrite32(asle_stat, &asle->aslc);
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
@@ -272,15 +274,16 @@ void intel_opregion_gse_intr(struct drm_device *dev)
void intel_opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
if (asle) {
if (IS_MOBILE(dev))
intel_enable_asle(dev);
- asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
- ASLE_PFMB_EN;
- asle->ardy = 1;
+ iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+ ASLE_PFMB_EN,
+ &asle->tche);
+ iowrite32(1, &asle->ardy);
}
}
@@ -298,7 +301,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
Linux, these are handled by the dock, button and video drivers.
*/
- struct opregion_acpi *acpi;
+ struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
int ret = NOTIFY_OK;
@@ -310,10 +313,11 @@ static int intel_opregion_video_event(struct notifier_block *nb,
acpi = system_opregion->acpi;
- if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ if (event->type == 0x80 &&
+ (ioread32(&acpi->cevt) & 1) == 0)
ret = NOTIFY_BAD;
- acpi->csts = 0;
+ iowrite32(0, &acpi->csts);
return ret;
}
@@ -337,6 +341,7 @@ static void intel_didl_outputs(struct drm_device *dev)
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
+ u32 temp;
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
@@ -355,7 +360,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
- printk(KERN_WARNING "No ACPI video bus found\n");
+ pr_warn("No ACPI video bus found\n");
return;
}
@@ -371,7 +376,8 @@ static void intel_didl_outputs(struct drm_device *dev)
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
- opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ iowrite32((u32)(device_id & 0x0f0f),
+ &opregion->acpi->didl[i]);
i++;
}
}
@@ -379,7 +385,7 @@ static void intel_didl_outputs(struct drm_device *dev)
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
- opregion->acpi->didl[i] = 0;
+ iowrite32(0, &opregion->acpi->didl[i]);
return;
blind_set:
@@ -413,7 +419,9 @@ blind_set:
output_type = ACPI_LVDS_OUTPUT;
break;
}
- opregion->acpi->didl[i] |= (1<<31) | output_type | i;
+ temp = ioread32(&opregion->acpi->didl[i]);
+ iowrite32(temp | (1<<31) | output_type | i,
+ &opregion->acpi->didl[i]);
i++;
}
goto end;
@@ -434,8 +442,8 @@ void intel_opregion_init(struct drm_device *dev)
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
+ iowrite32(0, &opregion->acpi->csts);
+ iowrite32(1, &opregion->acpi->drdy);
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
@@ -454,7 +462,7 @@ void intel_opregion_fini(struct drm_device *dev)
return;
if (opregion->acpi) {
- opregion->acpi->drdy = 0;
+ iowrite32(0, &opregion->acpi->drdy);
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
@@ -474,8 +482,9 @@ int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
- void *base;
+ void __iomem *base;
u32 asls, mboxes;
+ char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
@@ -489,7 +498,9 @@ int intel_opregion_setup(struct drm_device *dev)
if (!base)
return -ENOMEM;
- if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memcpy_fromio(buf, base, sizeof(buf));
+
+ if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
@@ -499,7 +510,7 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->lid_state = base + ACPI_CLID;
- mboxes = opregion->header->mboxes;
+ mboxes = ioread32(&opregion->header->mboxes);
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 80b331c322fb..458743da3774 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,14 +187,14 @@ struct intel_overlay {
void (*flip_tail)(struct intel_overlay *);
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset);
@@ -203,7 +203,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
}
static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
io_mapping_unmap(regs);
@@ -215,20 +215,21 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
overlay->last_flip_req = 0;
return 0;
@@ -262,7 +263,7 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
- drm_mode_set_crtcinfo(mode, 0);
+
if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
@@ -287,6 +288,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}
- ret = BEGIN_LP_RING(4);
+ ret = intel_ring_begin(ring, 4);
if (ret) {
kfree(request);
goto out;
}
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- OUT_RING(overlay->flip_addr | OFC_UPDATE);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
@@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -351,16 +354,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- ret = BEGIN_LP_RING(2);
+ ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_advance(ring);
- ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -401,6 +404,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
int ret;
@@ -417,20 +421,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- ret = BEGIN_LP_RING(6);
+ ret = intel_ring_begin(ring, 6);
if (ret) {
kfree(request);
return ret;
}
/* wait for overlay to go idle */
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- OUT_RING(flip_addr);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_advance(ring);
return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
@@ -442,15 +446,16 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
- true);
+ ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
+ i915_gem_retire_requests(dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
@@ -467,6 +472,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@@ -483,15 +489,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;
- ret = BEGIN_LP_RING(2);
+ ret = intel_ring_begin(ring, 2);
if (ret) {
kfree(request);
return ret;
}
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
@@ -619,14 +625,15 @@ static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
0x3000, 0x0800, 0x3000
};
-static void update_polyphase_filter(struct overlay_registers *regs)
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
{
- memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
- memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+ memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+ sizeof(uv_static_hcoeffs));
}
static bool update_scaling_factors(struct intel_overlay *overlay,
- struct overlay_registers *regs,
+ struct overlay_registers __iomem *regs,
struct put_image_params *params)
{
/* fixed point with a 12 bit shift */
@@ -665,16 +672,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
overlay->old_xscale = xscale;
overlay->old_yscale = yscale;
- regs->YRGBSCALE = (((yscale & FRACT_MASK) << 20) |
- ((xscale >> FP_SHIFT) << 16) |
- ((xscale & FRACT_MASK) << 3));
+ iowrite32(((yscale & FRACT_MASK) << 20) |
+ ((xscale >> FP_SHIFT) << 16) |
+ ((xscale & FRACT_MASK) << 3),
+ &regs->YRGBSCALE);
- regs->UVSCALE = (((yscale_UV & FRACT_MASK) << 20) |
- ((xscale_UV >> FP_SHIFT) << 16) |
- ((xscale_UV & FRACT_MASK) << 3));
+ iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+ ((xscale_UV >> FP_SHIFT) << 16) |
+ ((xscale_UV & FRACT_MASK) << 3),
+ &regs->UVSCALE);
- regs->UVSCALEV = ((((yscale >> FP_SHIFT) << 16) |
- ((yscale_UV >> FP_SHIFT) << 0)));
+ iowrite32((((yscale >> FP_SHIFT) << 16) |
+ ((yscale_UV >> FP_SHIFT) << 0)),
+ &regs->UVSCALEV);
if (scale_changed)
update_polyphase_filter(regs);
@@ -683,30 +693,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
}
static void update_colorkey(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
u32 key = overlay->color_key;
switch (overlay->crtc->base.fb->bits_per_pixel) {
case 8:
- regs->DCLRKV = 0;
- regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ iowrite32(0, &regs->DCLRKV);
+ iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
case 16:
if (overlay->crtc->base.fb->depth == 15) {
- regs->DCLRKV = RGB15_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
} else {
- regs->DCLRKV = RGB16_TO_COLORKEY(key);
- regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
+ iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
+ &regs->DCLRKM);
}
break;
case 24:
case 32:
- regs->DCLRKV = key;
- regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ iowrite32(key, &regs->DCLRKV);
+ iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
break;
}
}
@@ -761,9 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct put_image_params *params)
{
int ret, tmp_width;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
+ u32 swidth, swidthsw, sheight, ostride;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
@@ -782,16 +795,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
if (!overlay->active) {
+ u32 oconfig;
regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
- regs->OCONFIG = OCONF_CC_OUT_8BIT;
+ oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
- regs->OCONFIG |= OCONF_CSC_MODE_BT709;
- regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ oconfig |= OCONF_CSC_MODE_BT709;
+ oconfig |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
+ iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
@@ -805,42 +820,46 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
- regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
- regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+ iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+ iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
if (params->format & I915_OVERLAY_YUV_PACKED)
tmp_width = packed_width_bytes(params->format, params->src_w);
else
tmp_width = params->src_w;
- regs->SWIDTH = params->src_w;
- regs->SWIDTHSW = calc_swidthsw(overlay->dev,
- params->offset_Y, tmp_width);
- regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
- regs->OSTRIDE = params->stride_Y;
+ swidth = params->src_w;
+ swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ sheight = params->src_h;
+ iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+ ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
int uv_hscale = uv_hsubsampling(params->format);
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
- regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ swidth |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
params->src_w/uv_hscale);
- regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
- regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
- regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
- regs->OSTRIDE |= params->stride_UV << 16;
+ swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
+ sheight |= (params->src_h/uv_vscale) << 16;
+ iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
+ iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+ ostride |= params->stride_UV << 16;
}
+ iowrite32(swidth, &regs->SWIDTH);
+ iowrite32(swidthsw, &regs->SWIDTHSW);
+ iowrite32(sheight, &regs->SHEIGHT);
+ iowrite32(ostride, &regs->OSTRIDE);
+
scale_changed = update_scaling_factors(overlay, regs, params);
update_colorkey(overlay, regs);
- regs->OCMD = overlay_cmd_reg(params);
+ iowrite32(overlay_cmd_reg(params), &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
@@ -860,7 +879,7 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
struct drm_device *dev = overlay->dev;
int ret;
@@ -879,7 +898,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
return ret;
regs = intel_overlay_map_regs(overlay);
- regs->OCMD = 0;
+ iowrite32(0, &regs->OCMD);
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_off(overlay);
@@ -1109,11 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1250,10 +1265,11 @@ out_free:
}
static void update_reg_attrs(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
- regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
- regs->OCLRC1 = overlay->saturation;
+ iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+ &regs->OCLRC0);
+ iowrite32(overlay->saturation, &regs->OCLRC1);
}
static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
@@ -1306,14 +1322,10 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct drm_intel_overlay_attrs *attrs = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
+ /* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@@ -1396,7 +1408,7 @@ void intel_setup_overlay(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
int ret;
if (!HAS_OVERLAY(dev))
@@ -1451,7 +1463,7 @@ void intel_setup_overlay(struct drm_device *dev)
if (!regs)
goto out_unpin_bo;
- memset(regs, 0, sizeof(struct overlay_registers));
+ memset_io(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
update_reg_attrs(overlay, regs);
@@ -1499,14 +1511,17 @@ struct intel_overlay_error_state {
u32 isr;
};
-static struct overlay_registers *
+static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
drm_i915_private_t *dev_priv = overlay->dev->dev_private;
- struct overlay_registers *regs;
+ struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- regs = overlay->reg_bo->phys_obj->handle->vaddr;
+ /* Cast to make sparse happy, but it's wc memory anyway, so
+ * equivalent to the wc io mapping on X86. */
+ regs = (struct overlay_registers __iomem *)
+ overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
overlay->reg_bo->gtt_offset);
@@ -1515,7 +1530,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
}
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
- struct overlay_registers *regs)
+ struct overlay_registers __iomem *regs)
{
if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
io_mapping_unmap_atomic(regs);
@@ -1540,9 +1555,9 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+ error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = (long) overlay->reg_bo->gtt_offset;
+ error->base = overlay->reg_bo->gtt_offset;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 48177ec4720e..2a1625d84a69 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -28,6 +28,9 @@
* Chris Wilson <chris@chris-wilson.co.uk>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
#include "intel_drv.h"
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@ -169,7 +172,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
- printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+ pr_warn_once("fixme: max PWM is zero\n");
return 1;
}
@@ -189,6 +192,27 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
return max;
}
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (i915_panel_invert_brightness < 0)
+ return val;
+
+ if (i915_panel_invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
+ return intel_panel_get_max_backlight(dev) - val;
+
+ return val;
+}
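As a hedged illustration of the inversion logic above (the 255 maximum and the helper name are assumptions for the sketch, not part of this patch), forcing inversion simply mirrors the requested level around the panel maximum:

/* Illustrative sketch only: mirrors intel_panel_compute_brightness()
 * with an assumed panel maximum of 255. */
static u32 example_invert_level(u32 level)
{
	const u32 max = 255;	/* assumed maximum backlight value */
	return max - level;	/* e.g. a requested 50 becomes 205 */
}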
+
u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -209,6 +233,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
}
}
+ val = intel_panel_compute_brightness(dev, val);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
@@ -226,6 +251,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
u32 tmp;
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+ level = intel_panel_compute_brightness(dev, level);
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
@@ -342,6 +368,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
else
return -ENODEV;
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = intel_panel_get_max_backlight(dev);
dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 000000000000..d0ce2a5b1d3f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,3820 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "../../../platform/x86/intel_ips.h"
+#include <linux/module.h>
+
+/* FBC, or Frame Buffer Compression, is a technique employed to compress the
+ * framebuffer contents in-memory, aiming at reducing the required bandwidth
+ * during in-memory transfers and, therefore, reduce power consumption.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns.
+ *
+ * FBC-related functionality can be enabled by means of the
+ * i915.i915_enable_fbc parameter
+ */
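A hedged sketch of how the tri-state enable parameter named above is interpreted later in this file by intel_update_fbc(); the helper name and the per-chip default argument are illustrative assumptions:

/* Illustrative only: decode a tri-state module parameter such as
 * i915_enable_fbc (-1 = per-chip default, 0 = force off, >0 = force on). */
static bool fbc_requested(int enable_fbc, bool chip_default)
{
	if (enable_fbc < 0)	/* leave the decision to the platform */
		return chip_default;
	return enable_fbc > 0;
}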
+
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
+ int plane, i;
+ u32 fbc_ctl, fbc_ctl2;
+
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
+
+ /* FBC_CTL wants 64B units */
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+ /* Clear old tags */
+ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+ I915_WRITE(FBC_TAG + (i * 4), 0);
+
+ /* Set it up... */
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
+ I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+ I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+ /* enable it... */
+ fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+ fbc_ctl |= obj->fence_reg;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
+ cfb_pitch, crtc->y, intel_crtc->plane);
+}
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+ /* enable it... */
+ I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void g4x_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 blt_ecoskpd;
+
+ /* Make sure blitter notifies FBC of writes */
+ gen6_gt_force_wake_get(dev_priv);
+ blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+ GEN6_BLITTER_LOCK_SHIFT);
+ I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+ POSTING_READ(GEN6_BLITTER_ECOSKPD);
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ /* Set persistent mode for front-buffer rendering, ala X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ sandybridge_blit_fbc_update(dev);
+ }
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ if (dpfc_ctl & DPFC_CTL_EN) {
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+ }
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.fbc_enabled)
+ return false;
+
+ return dev_priv->display.fbc_enabled(dev);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ /* tasklet was killed before being run, clean up */
+ kfree(dev_priv->fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc_work = NULL;
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->display.enable_fbc)
+ return;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_cancel_fbc_work(dev_priv);
+
+ if (!dev_priv->display.disable_fbc)
+ return;
+
+ dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time. We
+ * enable it if possible:
+ * - plane A only (on pre-965)
+ * - no pixel multiply/line duplication
+ * - no alpha buffer discard
+ * - no dual wide
+ * - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one. It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_update_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL, *tmp_crtc;
+ struct intel_crtc *intel_crtc;
+ struct drm_framebuffer *fb;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int enable_fbc;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!i915_powersave)
+ return;
+
+ if (!I915_HAS_FBC(dev))
+ return;
+
+ /*
+ * If FBC is already on, we just have to verify that we can
+ * keep it that way...
+ * Need to disable if:
+ * - more than one pipe is active
+ * - changing FBC params (stride, fence, mode)
+ * - new fb is too large to fit in compressed buffer
+ * - going to an unsupported config (interlace, pixel multiply, etc.)
+ */
+ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+ if (tmp_crtc->enabled && tmp_crtc->fb) {
+ if (crtc) {
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ goto out_disable;
+ }
+ crtc = tmp_crtc;
+ }
+ }
+
+ if (!crtc || crtc->fb == NULL) {
+ DRM_DEBUG_KMS("no output, disabling\n");
+ dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ goto out_disable;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ fb = crtc->fb;
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 6)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
+ dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ goto out_disable;
+ }
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ goto out_disable;
+ }
+ if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+ (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ goto out_disable;
+ }
+ if ((crtc->mode.hdisplay > 2048) ||
+ (crtc->mode.vdisplay > 1536)) {
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ goto out_disable;
+ }
+ if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ goto out_disable;
+ }
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ goto out_disable;
+ }
+
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
+ intel_enable_fbc(crtc, 500);
+ return;
+
+out_disable:
+ /* Multiple disables should be harmless */
+ if (intel_fbc_enabled(dev)) {
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ intel_disable_fbc(dev);
+ }
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ tmp = I915_READ(CLKCFG);
+
+ switch (tmp & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_533:
+ dev_priv->fsb_freq = 533; /* 133*4 */
+ break;
+ case CLKCFG_FSB_800:
+ dev_priv->fsb_freq = 800; /* 200*4 */
+ break;
+ case CLKCFG_FSB_667:
+ dev_priv->fsb_freq = 667; /* 167*4 */
+ break;
+ case CLKCFG_FSB_400:
+ dev_priv->fsb_freq = 400; /* 100*4 */
+ break;
+ }
+
+ switch (tmp & CLKCFG_MEM_MASK) {
+ case CLKCFG_MEM_533:
+ dev_priv->mem_freq = 533;
+ break;
+ case CLKCFG_MEM_667:
+ dev_priv->mem_freq = 667;
+ break;
+ case CLKCFG_MEM_800:
+ dev_priv->mem_freq = 800;
+ break;
+ }
+
+ /* detect pineview DDR3 setting */
+ tmp = I915_READ(CSHRDDR3CTL);
+ dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 ddrpll, csipll;
+
+ ddrpll = I915_READ16(DDRMPLL1);
+ csipll = I915_READ16(CSIPLL0);
+
+ switch (ddrpll & 0xff) {
+ case 0xc:
+ dev_priv->mem_freq = 800;
+ break;
+ case 0x10:
+ dev_priv->mem_freq = 1066;
+ break;
+ case 0x14:
+ dev_priv->mem_freq = 1333;
+ break;
+ case 0x18:
+ dev_priv->mem_freq = 1600;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+ ddrpll & 0xff);
+ dev_priv->mem_freq = 0;
+ break;
+ }
+
+ dev_priv->r_t = dev_priv->mem_freq;
+
+ switch (csipll & 0x3ff) {
+ case 0x00c:
+ dev_priv->fsb_freq = 3200;
+ break;
+ case 0x00e:
+ dev_priv->fsb_freq = 3733;
+ break;
+ case 0x010:
+ dev_priv->fsb_freq = 4266;
+ break;
+ case 0x012:
+ dev_priv->fsb_freq = 4800;
+ break;
+ case 0x014:
+ dev_priv->fsb_freq = 5333;
+ break;
+ case 0x016:
+ dev_priv->fsb_freq = 5866;
+ break;
+ case 0x018:
+ dev_priv->fsb_freq = 6400;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+ csipll & 0x3ff);
+ dev_priv->fsb_freq = 0;
+ break;
+ }
+
+ if (dev_priv->fsb_freq == 3200) {
+ dev_priv->c_m = 0;
+ } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+ dev_priv->c_m = 1;
+ } else {
+ dev_priv->c_m = 2;
+ }
+}
+
+static const struct cxsr_latency cxsr_latency_table[] = {
+ {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
+ {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
+ {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
+ {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
+ {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
+
+ {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
+ {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
+ {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
+ {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
+ {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
+
+ {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
+ {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
+ {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
+ {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
+ {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
+
+ {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
+ {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
+ {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
+ {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
+ {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
+
+ {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
+ {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
+ {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
+ {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
+ {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
+
+ {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
+ {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
+ {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
+ {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
+ {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+ int is_ddr3,
+ int fsb,
+ int mem)
+{
+ const struct cxsr_latency *latency;
+ int i;
+
+ if (fsb == 0 || mem == 0)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+ latency = &cxsr_latency_table[i];
+ if (is_desktop == latency->is_desktop &&
+ is_ddr3 == latency->is_ddr3 &&
+ fsb == latency->fsb_freq && mem == latency->mem_freq)
+ return latency;
+ }
+
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+ return NULL;
+}
+
+static void pineview_disable_cxsr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* deactivate cxsr */
+ I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ * - memory configuration (speed, channels)
+ * - chipset
+ * - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value. It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 2; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
+ size);
+
+ return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dsparb = I915_READ(DSPARB);
+ int size;
+
+ size = dsparb & 0x7f;
+ size >>= 1; /* Convert to cachelines */
+
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
+
+ return size;
+}
+
+/* Pineview has different values for various configs */
+static const struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+ G4X_FIFO_SIZE,
+ G4X_MAX_WM,
+ G4X_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+ VALLEYVIEW_FIFO_SIZE,
+ VALLEYVIEW_MAX_WM,
+ VALLEYVIEW_MAX_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ VALLEYVIEW_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+ I945_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+ I915_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i855_wm_info = {
+ I855GM_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+ I830_FIFO_SIZE,
+ I915_MAX_WM,
+ 1,
+ 2,
+ I830_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params ironlake_display_wm_info = {
+ ILK_DISPLAY_FIFO,
+ ILK_DISPLAY_MAXWM,
+ ILK_DISPLAY_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_display_srwm_info = {
+ ILK_DISPLAY_SR_FIFO,
+ ILK_DISPLAY_MAX_SRWM,
+ ILK_DISPLAY_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
+ ILK_CURSOR_SR_FIFO,
+ ILK_CURSOR_MAX_SRWM,
+ ILK_CURSOR_DFT_SRWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @fifo_size: size of the FIFO available to this plane, in cachelines
+ * @pixel_size: display pixel size
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again). Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size. When it reaches the watermark level, it'll start
+ * fetching FIFO line sized chunks from memory until the FIFO fills
+ * past the watermark point. If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
+ int pixel_size,
+ unsigned long latency_ns)
+{
+ long entries_required, wm_size;
+
+ /*
+ * Note: we need to make sure we don't overflow for various clock &
+ * latency values.
+ * clocks go from a few thousand to several hundred thousand.
+ * latency is usually a few thousand
+ */
+ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+ 1000;
+ entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
+
+ DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+ wm_size = fifo_size - (entries_required + wm->guard_size);
+
+ DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+ /* Don't promote wm_size to unsigned... */
+ if (wm_size > (long)wm->max_wm)
+ wm_size = wm->max_wm;
+ if (wm_size <= 0)
+ wm_size = wm->default_wm;
+ return wm_size;
+}
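A worked example of the calculation above, using assumed numbers rather than anything taken from this patch:

/*
 * Worked example (assumed values): clock_in_khz = 148500, pixel_size = 4,
 * latency_ns = 5000, cacheline_size = 64, fifo_size = 96, guard_size = 2.
 *
 *   entries_required = ((148500 / 1000) * 4 * 5000) / 1000 = 2960 bytes
 *   DIV_ROUND_UP(2960, 64)                                 = 47 cachelines
 *   wm_size = 96 - (47 + 2)                                = 47
 */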
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled && crtc->fb) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ const struct cxsr_latency *latency;
+ u32 reg;
+ unsigned long wm;
+
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ if (!latency) {
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
+ return;
+ }
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Display SR */
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->display_sr);
+ reg = I915_READ(DSPFW1);
+ reg &= ~DSPFW_SR_MASK;
+ reg |= wm << DSPFW_SR_SHIFT;
+ I915_WRITE(DSPFW1, reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+ /* cursor SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
+ pixel_size, latency->cursor_sr);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_CURSOR_SR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+
+ /* Display HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->display_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_SR_MASK;
+ reg |= wm & DSPFW_HPLL_SR_MASK;
+ I915_WRITE(DSPFW3, reg);
+
+ /* cursor HPLL off SR */
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
+ pixel_size, latency->cursor_hpll_disable);
+ reg = I915_READ(DSPFW3);
+ reg &= ~DSPFW_HPLL_CURSOR_MASK;
+ reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+ I915_WRITE(DSPFW3, reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+ /* activate cxsr */
+ I915_WRITE(DSPFW3,
+ I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ } else {
+ pineview_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ }
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
+ return false;
+ }
+
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
+
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
+
+ return true;
+}
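A worked cursor example for the large buffer method above, again with assumed values and ignoring the tlb_miss correction:

/*
 * Assumed: htotal = 2200, clock = 148500 kHz, cursor_latency_ns = 5000,
 * cursor pixel_size = 4, cacheline_size = 64, guard_size = 2.
 *
 *   line_time_us = (2200 * 1000) / 148500       = 14
 *   line_count   = (5000 / 14 + 1000) / 1000    = 1
 *   entries      = 1 * 64 * 4                   = 256
 *   cursor_wm    = DIV_ROUND_UP(256, 64) + 2    = 6
 */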
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
+
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static bool vlv_compute_drain_latency(struct drm_device *dev,
+ int plane,
+ int *plane_prec_mult,
+ int *plane_dl,
+ int *cursor_prec_mult,
+ int *cursor_dl)
+{
+ struct drm_crtc *crtc;
+ int clock, pixel_size;
+ int entries;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
+
+ clock = crtc->mode.clock; /* VESA DOT Clock */
+ pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
+
+ entries = (clock / 1000) * pixel_size;
+ *plane_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+ pixel_size);
+
+ entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
+ *cursor_prec_mult = (entries > 256) ?
+ DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+ *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+ return true;
+}
+
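+/*
+ * Worked example for vlv_compute_drain_latency(), using assumed values
+ * (a 148500 kHz dot clock and 4 bytes per pixel; the precision macros
+ * are taken to evaluate to 16 and 32): with integer division,
+ * entries = (148500 / 1000) * 4 = 148 * 4 = 592, which exceeds 256, so
+ * the 32x precision multiplier is selected and
+ * plane_dl = (64 * 32 * 4) / 592 = 13. The cursor uses the same
+ * arithmetic with its fixed 4 bytes per pixel and also ends up at 13.
+ */
+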
+/*
+ * Update drain latency registers of memory arbiter
+ *
+ * Valleyview SoC has a new memory arbiter and needs drain latency registers
+ * to be programmed. Each plane has a drain latency multiplier and a drain
+ * latency value.
+ */
+
+static void vlv_update_drain_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_prec, planea_dl, planeb_prec, planeb_dl;
+ int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+ int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
+ either 16 or 32 */
+
+ /* For plane A, Cursor A */
+ if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+ &cursor_prec_mult, &cursora_dl)) {
+ cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+ planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+ I915_WRITE(VLV_DDL1, cursora_prec |
+ (cursora_dl << DDL_CURSORA_SHIFT) |
+ planea_prec | planea_dl);
+ }
+
+ /* For plane B, Cursor B */
+ if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+ &cursor_prec_mult, &cursorb_dl)) {
+ cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+ planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+ DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+ I915_WRITE(VLV_DDL2, cursorb_prec |
+ (cursorb_dl << DDL_CURSORB_SHIFT) |
+ planeb_prec | planeb_dl);
+ }
+}
+
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
+static void valleyview_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ vlv_update_drain_latency(dev);
+
+ if (g4x_compute_wm0(dev, 0,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &valleyview_wm_info, latency_ns,
+ &valleyview_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &valleyview_wm_info,
+ &valleyview_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ else
+ I915_WRITE(FW_BLC_SELF_VLV,
+ I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ else
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
+ (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (cursora_wm << DSPFW_CURSORA_SHIFT));
+ /* HPLL off in SR has some issues on G4x... disable it */
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i965_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ int srwm = 1;
+ int cursor_sr = 16;
+
+ /* Calc sr entries for one plane configs */
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = ((htotal * 1000) / clock);
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
+
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ if (IS_CRESTLINE(dev))
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
+
+ /* 965 has limitations... */
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
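+/*
+ * Worked example for the i965 self-refresh calculation above, with an
+ * assumed 1920x1080 mode (htotal = 2200, clock = 148500 kHz, 4 bytes
+ * per pixel) and the FIFO constants taken to be 64 bytes per line and
+ * 512 FIFO entries: line_time_us = 2200000 / 148500 = 14, so
+ * (12000 / 14 + 1000) / 1000 = 1 line is needed to cover the latency,
+ * giving 1 * 4 * 1920 = 7680 bytes = 120 cachelines and
+ * srwm = 512 - 120 = 392.
+ */
+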
+static void i9xx_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
+ uint32_t fwater_lo;
+ uint32_t fwater_hi;
+ int cwm, srwm = 1;
+ int fifo_size;
+ int planea_wm, planeb_wm;
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
+ else if (!IS_GEN2(dev))
+ wm_info = &i915_wm_info;
+ else
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (crtc->enabled && crtc->fb) {
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (crtc->enabled && crtc->fb) {
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
+
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+ /*
+ * Overlay gets an aggressive default since video jitter is bad.
+ */
+ cwm = 2;
+
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
+ /* Calc sr entries for one plane configs */
+ if (HAS_FW_BLC(dev) && enabled) {
+ /* self-refresh has much higher latency */
+ static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
+
+ line_time_us = (htotal * 1000) / clock;
+
+ /* Use ns/us then divide to preserve precision */
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
+ if (srwm < 0)
+ srwm = 1;
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ }
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ planea_wm, planeb_wm, cwm, srwm);
+
+ fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+ fwater_hi = (cwm & 0x1f);
+
+ /* Set request length to 8 cachelines per fetch */
+ fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+ fwater_hi = fwater_hi | (1 << 8);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+ I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
+}
+
+static void i830_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
+ int planea_wm;
+
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
+
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ fwater_lo |= (3<<8) | planea_wm;
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+ I915_WRITE(FW_BLC, fwater_lo);
+}
+
+#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+ /* FBC has its own way to disable the FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+ display_wm, SNB_DISPLAY_MAX_SRWM, level);
+ return false;
+ }
+
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+ cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3],
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /*
+ * Spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return ironlake_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static void ironlake_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled))
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
+ &ironlake_display_srwm_info,
+ &ironlake_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /*
+ * WM3 is unsupported on ILK, probably because we don't have latency
+ * data for that power state
+ */
+}
+
+static void sandybridge_update_wm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
+
+ enabled = 0;
+ if (g4x_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1;
+ }
+
+ if (g4x_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 2;
+ }
+
+ if ((dev_priv->num_pipe == 3) &&
+ g4x_compute_wm0(dev, 2,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled |= 1 << 2;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermarks.
+ *
+ * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+ * and disabled in descending order.
+ *
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
+ return;
+ enabled = ffs(enabled) - 1;
+
+ /* WM1 */
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sandybridge_display_srwm_info,
+ &sandybridge_cursor_srwm_info,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
+
+static void
+haswell_update_linetime_wm(struct drm_device *dev, int pipe,
+ struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 temp;
+
+ temp = I915_READ(PIPE_WM_LINETIME(pipe));
+ temp &= ~PIPE_WM_LINETIME_MASK;
+
+ /* The watermarks are computed based on how long it takes to fill a
+ * single row at the given clock rate, multiplied by 8.
+ */
+ temp |= PIPE_WM_LINETIME_TIME(
+ ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
+
+ /* IPS watermarks are only used by pipe A, and are ignored by
+ * pipes B and C. They are calculated similarly to the common
+ * linetime values, except that we are using CD clock frequency
+ * in MHz instead of pixel rate for the division.
+ *
+ * This is a placeholder for the IPS watermark calculation code.
+ */
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
+}
+
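+/*
+ * Worked example for haswell_update_linetime_wm(): with an assumed
+ * 1920-wide mode at a 148500 kHz dot clock, the programmed field is
+ * ((1920 * 1000) / 148500) * 8 = 12 * 8 = 96, i.e. the line time
+ * expressed in 1/8 us units (roughly 12 us per row after integer
+ * truncation).
+ */
+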
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size * display->cacheline_size -
+ sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+ if (!clock) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_time_us = (sprite_width * 1000) / clock;
+ if (!line_time_us) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for the sprite */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+ return *sprite_wm <= 0x3ff;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
+}
+
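+/*
+ * Worked example of the formulas documented above, using assumed
+ * numbers (a 148.5 MHz dot clock, 4 bytes per pixel, htotal = 2200,
+ * hdisplay = 1920, a 2 us normal latency and a 12 us SR latency):
+ *
+ *   normal: 148.5e6 * 4 * 2e-6                  ~= 1188 bytes
+ *   SR:     line time = 2200 / 148.5e6          ~= 14.8 us
+ *           (trunc(12 / 14.8) + 1) * 1920 * 4    = 7680 bytes
+ *
+ * With 64-byte FIFO entries that is roughly 19 and 120 entries before
+ * the guard and rounding adjustments described above are applied.
+ */
+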
+void intel_update_linetime_watermarks(struct drm_device *dev,
+ int pipe, struct drm_display_mode *mode)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_linetime_wm)
+ dev_priv->display.update_linetime_wm(dev, pipe, mode);
+}
+
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+ struct drm_i915_gem_object *ctx;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ctx = i915_gem_alloc_object(dev, 4096);
+ if (!ctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ ret = i915_gem_object_pin(ctx, 4096, true);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+
+ return ctx;
+
+err_unpin:
+ i915_gem_object_unpin(ctx);
+err_unref:
+ drm_gem_object_unreference(&ctx->base);
+ return NULL;
+}
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl;
+
+ rgvswctl = I915_READ16(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+ POSTING_READ16(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE16(MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+ u8 fmax, fmin, fstart, vstart;
+
+ /* Enable temp reporting */
+ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->fmax = fmax; /* IPS callback will increase this */
+ dev_priv->fstart = fstart;
+
+ dev_priv->max_delay = fstart;
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ msleep(1);
+
+ ironlake_set_drps(dev, fstart);
+
+ dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+ I915_READ(0x112e0);
+ dev_priv->last_time1 = jiffies_to_msecs(jiffies);
+ dev_priv->last_count2 = I915_READ(0x112f4);
+ getrawmonotonic(&dev_priv->last_time2);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ ironlake_set_drps(dev, dev_priv->fstart);
+ msleep(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+
+}
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits;
+
+ limits = 0;
+ if (val >= dev_priv->max_delay)
+ val = dev_priv->max_delay;
+ else
+ limits |= dev_priv->max_delay << 24;
+
+ if (val <= dev_priv->min_delay)
+ val = dev_priv->min_delay;
+ else
+ limits |= dev_priv->min_delay << 16;
+
+ if (val == dev_priv->cur_delay)
+ return;
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+ dev_priv->cur_delay = val;
+}
+
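+/*
+ * Illustration of the GEN6_RP_INTERRUPT_LIMITS value built by
+ * gen6_set_rps() (the delay values are made-up examples): with
+ * max_delay = 0x14, min_delay = 0x07 and a requested val strictly
+ * between the two, both branches take the "else" path and
+ * limits = (0x14 << 24) | (0x07 << 16) = 0x14070000; a request at or
+ * beyond either bound is clamped and the matching limit field is left
+ * at zero.
+ */
+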
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ /* Complete PM interrupt masking here doesn't race with the rps work
+ * item again unmasking PM interrupts because that is using a different
+ * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+ * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+ spin_lock_irq(&dev_priv->rps_lock);
+ dev_priv->pm_iir = 0;
+ spin_unlock_irq(&dev_priv->rps_lock);
+
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /* Sorry Haswell, no RC6 for you for now. */
+ if (IS_HASWELL(dev))
+ return 0;
+
+ /*
+ * On Sandybridge, enable only plain RC6; deep RC6 stays disabled
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+ return INTEL_RC6_ENABLE;
+ }
+ DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+ return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
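+/*
+ * Sketch of how the mask returned by intel_enable_rc6() is consumed
+ * (see gen6_enable_rps() below): each INTEL_RC6*_ENABLE bit selects the
+ * matching GEN6_RC_CTL_RC6*_ENABLE bit in GEN6_RC_CONTROL. With the
+ * i915_enable_rc6 module parameter unset (negative), gen 6 ends up with
+ * plain RC6 only, while gen 7 (other than Haswell) gets
+ * INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE.
+ */
+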
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct intel_ring_buffer *ring;
+ u32 rp_state_cap;
+ u32 gt_perf_status;
+ u32 pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
+ int rc6_mode;
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /* Clear the DBG now so we don't get confused by earlier errors */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+ /* In units of 100MHz */
+ dev_priv->max_delay = rp_state_cap & 0xff;
+ dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->cur_delay = 0;
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ rc6_mode = intel_enable_rc6(dev_priv->dev);
+ if (rc6_mode & INTEL_RC6_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+
+ DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ dev_priv->max_delay << 24 |
+ dev_priv->min_delay << 16);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ /* Check for overclock support */
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+ pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+ if (pcu_mbox & (1<<31)) { /* OC supported */
+ dev_priv->max_delay = pcu_mbox & 0xff;
+ DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", (pcu_mbox & 0xff) * 50);
+ }
+
+ gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ spin_lock_irq(&dev_priv->rps_lock);
+ WARN_ON(dev_priv->pm_iir != 0);
+ I915_WRITE(GEN6_PMIMR, 0);
+ spin_unlock_irq(&dev_priv->rps_lock);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ gen6_gt_force_wake_put(dev_priv);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+ int min_freq = 15;
+ int gpu_freq, ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+
+ max_ia_freq = cpufreq_quick_get_max(0);
+ /*
+ * Default to measured freq if none found, PCU will ensure we don't go
+ * over
+ */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_khz;
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->max_delay - gpu_freq;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
+ }
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
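+/*
+ * Worked example for the ring/IA frequency table above, with assumed
+ * inputs (max_ia_freq = 3400 MHz after the kHz->MHz conversion,
+ * max_delay = 22): for gpu_freq = 15, diff = 22 - 15 = 7, so
+ * ia_freq = 3400 - (7 * 180) / 2 = 2770, and
+ * DIV_ROUND_CLOSEST(2770, 100) = 28 is written as the IA ratio.
+ * Below min_freq the fixed 800 value maps to a ratio of 8.
+ */
+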
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ i915_gem_object_unpin(dev_priv->renderctx);
+ drm_gem_object_unreference(&dev_priv->renderctx->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(&dev_priv->pwrctx->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
+void ironlake_disable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_READ(PWRCTXA)) {
+ /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+ wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+ 50);
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ POSTING_READ(RSTDBYCTL);
+ }
+
+ ironlake_teardown_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx == NULL)
+ dev_priv->renderctx = intel_alloc_context_page(dev);
+ if (!dev_priv->renderctx)
+ return -ENOMEM;
+
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
+ if (!dev_priv->pwrctx) {
+ ironlake_teardown_rc6(dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+ int ret;
+
+ /* rc6 disabled by default due to repeated reports of hanging during
+ * boot and resume.
+ */
+ if (!intel_enable_rc6(dev))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = ironlake_setup_rc6(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ ret = intel_ring_begin(ring, 6);
+ if (ret) {
+ ironlake_teardown_rc6(dev);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_advance(ring);
+
+ /*
+ * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+ * does an implicit flush; combined with the MI_FLUSH above, it should
+ * be safe to assume that renderctx is valid.
+ */
+ ret = intel_wait_ring_idle(ring);
+ if (ret) {
+ DRM_ERROR("failed to enable ironlake power savings\n");
+ ironlake_teardown_rc6(dev);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+
+ I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ unsigned long freq;
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ freq = ((div * 133333) / ((1<<post) * pre));
+
+ return freq;
+}
+
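+/*
+ * Worked example for intel_pxfreq() with a made-up register value:
+ * vidfreq = 0x000c1001 decodes to div = 12, post = 1, pre = 1, so
+ * freq = (12 * 133333) / ((1 << 1) * 1) = 799998.
+ */
+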
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+ u64 total_count, diff, ret;
+ u32 count1, count2, count3, m = 0, c = 0;
+ unsigned long now = jiffies_to_msecs(jiffies), diff1;
+ int i;
+
+ diff1 = now - dev_priv->last_time1;
+
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
+ count1 = I915_READ(DMIEC);
+ count2 = I915_READ(DDREC);
+ count3 = I915_READ(CSIEC);
+
+ total_count = count1 + count2 + count3;
+
+ /* FIXME: handle per-counter overflow */
+ if (total_count < dev_priv->last_count1) {
+ diff = ~0UL - dev_priv->last_count1;
+ diff += total_count;
+ } else {
+ diff = total_count - dev_priv->last_count1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == dev_priv->c_m &&
+ cparams[i].t == dev_priv->r_t) {
+ m = cparams[i].m;
+ c = cparams[i].c;
+ break;
+ }
+ }
+
+ diff = div_u64(diff, diff1);
+ ret = ((m * diff) + c);
+ ret = div_u64(ret, 10);
+
+ dev_priv->last_count1 = total_count;
+ dev_priv->last_time1 = now;
+
+ dev_priv->chipset_power = ret;
+
+ return ret;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long m, x, b;
+ u32 tsfs;
+
+ tsfs = I915_READ(TSFS);
+
+ m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+ x = I915_READ8(TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+
+ return ((m * x) / 127) - b;
+}
+
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+ struct timespec now, diff1;
+ u64 diff;
+ unsigned long diffms;
+ u32 count;
+
+ if (dev_priv->info->gen != 5)
+ return;
+
+ getrawmonotonic(&now);
+ diff1 = timespec_sub(now, dev_priv->last_time2);
+
+ /* Don't divide by 0 */
+ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+ if (!diffms)
+ return;
+
+ count = I915_READ(GFXEC);
+
+ if (count < dev_priv->last_count2) {
+ diff = ~0UL - dev_priv->last_count2;
+ diff += count;
+ } else {
+ diff = count - dev_priv->last_count2;
+ }
+
+ dev_priv->last_count2 = count;
+ dev_priv->last_time2 = now;
+
+ /* More magic constants... */
+ diff = diff * 1181;
+ diff = div_u64(diff, diffms * 10);
+ dev_priv->gfx_power = diff;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+ state1 = ext_v;
+
+ t = i915_mch_val(dev_priv);
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ if (t > 80)
+ corr = ((t * 2349) + 135940);
+ else if (t >= 50)
+ corr = ((t * 964) + 29317);
+ else /* < 50 */
+ corr = ((t * 301) + 1004);
+
+ corr = corr * ((150142 * state1) / 10000 - 78642);
+ corr /= 100000;
+ corr2 = (corr * dev_priv->corr);
+
+ state2 = (corr2 * state1) / 10000;
+ state2 /= 100; /* convert to mW */
+
+ i915_update_gfx_val(dev_priv);
+
+ return dev_priv->gfx_power + state2;
+}
+
+/* Global for IPS driver to get at the current i915 device */
+static struct drm_i915_private *i915_mch_dev;
+/*
+ * Lock protecting IPS related data structures
+ * - i915_mch_dev
+ * - dev_priv->max_delay
+ * - dev_priv->min_delay
+ * - dev_priv->fmax
+ * - dev_priv->gpu_busy
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *dev_priv;
+ unsigned long chipset_val, graphics_val, ret = 0;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ chipset_val = i915_chipset_val(dev_priv);
+ graphics_val = i915_gfx_val(dev_priv);
+
+ ret = chipset_val + graphics_val;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay > dev_priv->fmax)
+ dev_priv->max_delay--;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ if (dev_priv->max_delay < dev_priv->min_delay)
+ dev_priv->max_delay++;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = false;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev)
+ goto out_unlock;
+ dev_priv = i915_mch_dev;
+
+ ret = dev_priv->busy;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *dev_priv;
+ bool ret = true;
+
+ spin_lock(&mchdev_lock);
+ if (!i915_mch_dev) {
+ ret = false;
+ goto out_unlock;
+ }
+ dev_priv = i915_mch_dev;
+
+ dev_priv->max_delay = dev_priv->fstart;
+
+ if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
+ ret = false;
+
+out_unlock:
+ spin_unlock(&mchdev_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/**
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+{
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
+
+ ips_ping_for_i915_load();
+}
+
+void intel_gpu_ips_teardown(void)
+{
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = NULL;
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_init_emon(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 lcfuse;
+ u8 pxw[16];
+ int i;
+
+ /* Disable PMON while we program the event weights */
+ I915_WRITE(ECR, 0);
+ POSTING_READ(ECR);
+
+ /* Program energy weights for various events */
+ I915_WRITE(SDEW, 0x15040d00);
+ I915_WRITE(CSIEW0, 0x007f0000);
+ I915_WRITE(CSIEW1, 0x1e220004);
+ I915_WRITE(CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ I915_WRITE(PEW + (i * 4), 0);
+ for (i = 0; i < 3; i++)
+ I915_WRITE(DEW + (i * 4), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ unsigned long freq = intel_pxfreq(pxvidfreq);
+ unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+ unsigned long val;
+
+ val = vid * vid;
+ val *= (freq / 1000);
+ val *= 255;
+ val /= (127*127*900);
+ if (val > 0xff)
+ DRM_ERROR("bad pxval: %ld\n", val);
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+ (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+ I915_WRITE(PXW + (i * 4), val);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ I915_WRITE(OGW0, 0);
+ I915_WRITE(OGW1, 0);
+ I915_WRITE(EG0, 0x00007f00);
+ I915_WRITE(EG1, 0x0000000e);
+ I915_WRITE(EG2, 0x000e0000);
+ I915_WRITE(EG3, 0x68000300);
+ I915_WRITE(EG4, 0x42000000);
+ I915_WRITE(EG5, 0x00140031);
+ I915_WRITE(EG6, 0);
+ I915_WRITE(EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ I915_WRITE(PXWL + (i * 4), 0);
+
+ /* Enable PMON + select events */
+ I915_WRITE(ECR, 0x80000019);
+
+ lcfuse = I915_READ(LCFUSE02);
+
+ dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
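+/*
+ * Worked example for the P-state weight calculation in intel_init_emon()
+ * with assumed inputs: freq = 800000 (from intel_pxfreq()) and vid = 64
+ * give val = 64 * 64 * (800000 / 1000) * 255 / (127 * 127 * 900)
+ * = 835584000 / 14516100 = 57, which fits comfortably in the single
+ * byte written into the PXW registers.
+ */
+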
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ /* Required for FBC */
+ dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+ DPFCRUNIT_CLOCK_GATE_DISABLE |
+ DPFDUNIT_CLOCK_GATE_DISABLE;
+ /* Required for CxSR */
+ dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_3DCGDIS0,
+ MARIUNIT_CLOCK_GATE_DISABLE |
+ SVSMUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(PCH_3DCGDIS1,
+ VFMUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ /*
+ * According to the spec the following bits should be set in
+ * order to enable memory self-refresh:
+ * Bits 21/22 of 0x42004
+ * Bit 5 of 0x42020
+ * Bit 15 of 0x45000
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ (I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+ I915_WRITE(ILK_DSPCLK_GATE,
+ (I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE));
+ I915_WRITE(DISP_ARB_CTL,
+ (I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS));
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+ * Based on documentation from the hardware team, the following
+ * bits should be set unconditionally in order to enable FBC:
+ * Bit 22 of 0x42000
+ * Bit 22 of 0x42004
+ * Bits 7, 8 and 9 of 0x42020
+ */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPFC_DIS1 |
+ ILK_DPFC_DIS2 |
+ ILK_CLK_FBC);
+ }
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ I915_WRITE(CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) |
+ GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* Bspec says we need to always set all mask bits. */
+ I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
+ _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
+
+ /*
+ * According to the spec the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * Bits 21 and 22 of 0x42000
+ * Bits 21 and 22 of 0x42004
+ * Bits 5 and 7 of 0x42020
+ * Bit 14 of 0x70180
+ * Bit 14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+}
+
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+ uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+ reg &= ~GEN7_FF_SCHED_MASK;
+ reg |= GEN7_FF_TS_SCHED_HW;
+ reg |= GEN7_FF_VS_SCHED_HW;
+ reg |= GEN7_FF_DS_SCHED_HW;
+
+ I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dspclk_gate;
+
+ I915_WRITE(RENCLK_GATE_D1, 0);
+ I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+ GS_UNIT_CLOCK_GATE_DISABLE |
+ CL_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+ OVRUNIT_CLOCK_GATE_DISABLE |
+ OVCUNIT_CLOCK_GATE_DISABLE;
+ if (IS_GM45(dev))
+ dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ I915_WRITE(RAMCLK_GATE_D, 0);
+ I915_WRITE16(DEUC, 0);
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+ I965_RCC_CLOCK_GATE_DISABLE |
+ I965_RCPB_CLOCK_GATE_DISABLE |
+ I965_ISC_CLOCK_GATE_DISABLE |
+ I965_FBC_CLOCK_GATE_DISABLE);
+ I915_WRITE(RENCLK_GATE_D2, 0);
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dstate = I915_READ(D_STATE);
+
+ dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ DSTATE_DOT_CLOCK_GATING;
+ I915_WRITE(D_STATE, dstate);
+
+ if (IS_PINEVIEW(dev))
+ I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.init_clock_gating(dev);
+
+ if (dev_priv->display.init_pch_clock_gating)
+ dev_priv->display.init_pch_clock_gating(dev);
+}
+
+static void gen6_sanitize_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 limits, delay, old;
+
+ gen6_gt_force_wake_get(dev_priv);
+
+ old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
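+ /* The field at bit 24 holds the maximum and the field at bit 16 the
+ * minimum frequency limit; both are 6-bit codes, hence the 0x3f masks
+ * below. */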
+ limits &= ~(0x3f << 16 | 0x3f << 24);
+ delay = dev_priv->cur_delay;
+ if (delay < dev_priv->max_delay)
+ limits |= (dev_priv->max_delay & 0x3f) << 24;
+ if (delay > dev_priv->min_delay)
+ limits |= (dev_priv->min_delay & 0x3f) << 16;
+
+ if (old != limits) {
+ /* Note that the known failure case is to read back 0. */
+ DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+ "expected %08x, was %08x\n", limits, old);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+ }
+
+ gen6_gt_force_wake_put(dev_priv);
+}
+
+void intel_sanitize_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.sanitize_pm)
+ dev_priv->display.sanitize_pm(dev);
+}
+
+/* Starting with Haswell, we have different power wells for
+ * different parts of the GPU. This attempts to enable them all.
+ */
+void intel_init_power_wells(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long power_wells[] = {
+ HSW_PWR_WELL_CTL1,
+ HSW_PWR_WELL_CTL2,
+ HSW_PWR_WELL_CTL4
+ };
+ int i;
+
+ if (!IS_HASWELL(dev))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+
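+ /* Each well control register has an enable-request bit and a status
+ * bit: if the well reports powered off, request it on and give the
+ * status up to 20ms to acknowledge. */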
+ for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
+ int well = I915_READ(power_wells[i]);
+
+ if ((well & HSW_PWR_WELL_STATE) == 0) {
+ I915_WRITE(power_wells[i], well | HSW_PWR_WELL_ENABLE);
+ if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
+ DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
+ }
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/* Set up chip specific power management-related functions */
+void intel_init_pm(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (I915_HAS_FBC(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
+ dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+ dev_priv->display.enable_fbc = g4x_enable_fbc;
+ dev_priv->display.disable_fbc = g4x_disable_fbc;
+ } else if (IS_CRESTLINE(dev)) {
+ dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+ dev_priv->display.enable_fbc = i8xx_enable_fbc;
+ dev_priv->display.disable_fbc = i8xx_disable_fbc;
+ }
+ /* 855GM needs testing */
+ }
+
+ /* For cxsr */
+ if (IS_PINEVIEW(dev))
+ i915_pineview_get_mem_freq(dev);
+ else if (IS_GEN5(dev))
+ i915_ironlake_get_mem_freq(dev);
+
+ /* For FIFO watermark updates */
+ if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ u32 ecobus;
+
+ /* A small trick here - if the bios hasn't configured MT forcewake,
+ * and if the device is in RC6, then force_wake_mt_get will not wake
+ * the device and the ECOBUS read will return zero, which the
+ * test below (correctly) interprets as MT forcewake being
+ * disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
+ if (HAS_PCH_IBX(dev))
+ dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
+ else if (HAS_PCH_CPT(dev))
+ dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
+
+ if (IS_GEN5(dev)) {
+ if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ dev_priv->display.update_wm = ironlake_update_wm;
+ else {
+ DRM_DEBUG_KMS("Failed to get proper latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else if (IS_HASWELL(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.sanitize_pm = gen6_sanitize_pm;
+ } else
+ dev_priv->display.update_wm = NULL;
+ } else if (IS_VALLEYVIEW(dev)) {
+ dev_priv->display.update_wm = valleyview_update_wm;
+ dev_priv->display.init_clock_gating =
+ valleyview_init_clock_gating;
+ dev_priv->display.force_wake_get = vlv_force_wake_get;
+ dev_priv->display.force_wake_put = vlv_force_wake_put;
+ } else if (IS_PINEVIEW(dev)) {
+ if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->is_ddr3,
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq)) {
+ DRM_INFO("failed to find known CxSR latency "
+ "(found ddr%s fsb freq %d, mem freq %d), "
+ "disabling CxSR\n",
+ (dev_priv->is_ddr3 == 1) ? "3" : "2",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
+ /* Disable CxSR and never update its watermark again */
+ pineview_disable_cxsr(dev);
+ dev_priv->display.update_wm = NULL;
+ } else
+ dev_priv->display.update_wm = pineview_update_wm;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_G4X(dev)) {
+ dev_priv->display.update_wm = g4x_update_wm;
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ } else if (IS_GEN4(dev)) {
+ dev_priv->display.update_wm = i965_update_wm;
+ if (IS_CRESTLINE(dev))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ } else if (IS_GEN3(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ } else if (IS_I865G(dev)) {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ } else {
+ dev_priv->display.update_wm = i830_update_wm;
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ if (IS_845G(dev))
+ dev_priv->display.get_fifo_size = i845_get_fifo_size;
+ else
+ dev_priv->display.get_fifo_size = i830_get_fifo_size;
+ }
+
+ /* We attempt to init the necessary power wells early during
+ * initialization, so that the subsystems which expect power to be
+ * enabled can work.
+ */
+ intel_init_power_wells(dev);
+}
+
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 62892a826ede..e5b84ff89ca5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring)
}
static int
-render_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ u32 cmd;
+ int ret;
+
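+ /* This path uses a plain MI_FLUSH: skip the render cache write flush
+ * unless the render domain is involved, and request a read (sampler)
+ * flush when the sampler domain is being invalidated. */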
+ cmd = MI_FLUSH;
+ if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+ cmd |= MI_NO_WRITE_FLUSH;
+
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
struct drm_device *dev = ring->dev;
u32 cmd;
@@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring,
*/
cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
+ if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
- if (INTEL_INFO(dev)->gen < 4) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
@@ -249,10 +266,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
static int init_ring_common(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
u32 head;
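+ /* On parts with forcewake, GT registers may not respond while the GPU
+ * is power gated, so hold forcewake across the whole init sequence. */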
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
@@ -290,9 +312,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
- if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
- I915_READ_START(ring) != obj->gtt_offset ||
- (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+ I915_READ_START(ring) == obj->gtt_offset &&
+ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -300,7 +322,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -309,9 +332,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring_space(ring);
+ ring->last_retired_head = -1;
}
- return 0;
+out:
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_put(dev_priv);
+
+ return ret;
}
static int
@@ -384,12 +412,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
int ret = init_ring_common(ring);
if (INTEL_INFO(dev)->gen > 3) {
- int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- I915_WRITE(MI_MODE, mode);
+ I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
- GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
- GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+ _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
}
if (INTEL_INFO(dev)->gen >= 5) {
@@ -398,7 +425,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
-
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -406,13 +432,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
* policy is not supported."
*/
I915_WRITE(CACHE_MODE_0,
- CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+ _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6) {
- I915_WRITE(INSTPM,
- INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
- }
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return ret;
}
@@ -483,21 +507,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
* @seqno - seqno which the waiter will block on
*/
static int
-intel_ring_sync(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- int ring,
- u32 seqno)
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+ struct intel_ring_buffer *signaller,
+ u32 seqno)
{
int ret;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
+ /* Throughout the GEM code, a seqno is considered passed once the
+ * current seqno is >= the seqno being waited upon. For the hardware
+ * semaphore, however, the comparison is strictly greater than.
+ */
+ seqno -= 1;
+
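+ /* The signaller's semaphore_register[] is indexed by the waiter's ring
+ * id; the entry for a ring's own id is MI_SEMAPHORE_SYNC_INVALID, since
+ * a ring never waits on itself. */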
+ WARN_ON(signaller->semaphore_register[waiter->id] ==
+ MI_SEMAPHORE_SYNC_INVALID);
+
ret = intel_ring_begin(waiter, 4);
if (ret)
return ret;
- intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+ intel_ring_emit(waiter,
+ dw1 | signaller->semaphore_register[waiter->id]);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
@@ -506,47 +539,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
return 0;
}
-/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
-int
-render_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- RCS,
- seqno);
-}
-
-/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
-int
-gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- VCS,
- seqno);
-}
-
-/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
-int
-gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
- struct intel_ring_buffer *signaller,
- u32 seqno)
-{
- WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
- return intel_ring_sync(waiter,
- signaller,
- BCS,
- seqno);
-}
-
-
-
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
@@ -608,27 +600,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
return 0;
}
-static int
-render_ring_add_request(struct intel_ring_buffer *ring,
- u32 *result)
-{
- u32 seqno = i915_gem_next_request_seqno(ring);
- int ret;
-
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, seqno);
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- intel_ring_advance(ring);
-
- *result = seqno;
- return 0;
-}
-
static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
@@ -655,76 +626,115 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
return pc->cpu_page[0];
}
-static void
-ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
- dev_priv->gt_irq_mask &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
}
static void
-ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
- dev_priv->gt_irq_mask |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
-static void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
- dev_priv->irq_mask &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ return true;
}
static void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
- dev_priv->irq_mask |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
+ }
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_enable_irq(dev_priv,
- GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
- else
- i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- if (HAS_PCH_SPLIT(dev))
- ironlake_disable_irq(dev_priv,
- GT_USER_INTERRUPT |
- GT_PIPE_NOTIFY);
- else
- i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ dev_priv->irq_mask |= ring->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(IMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -776,7 +786,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
}
static int
-ring_add_request(struct intel_ring_buffer *ring,
+i9xx_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
u32 seqno;
@@ -799,10 +809,11 @@ ring_add_request(struct intel_ring_buffer *ring,
}
static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
if (!dev->irq_enabled)
return false;
@@ -812,120 +823,87 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
* blt/bsd rings on ivb. */
gen6_gt_force_wake_get(dev_priv);
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- ring->irq_mask &= ~rflag;
- I915_WRITE_IMR(ring, ring->irq_mask);
- ironlake_enable_irq(dev_priv, gflag);
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&ring->irq_lock);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- ring->irq_mask |= rflag;
- I915_WRITE_IMR(ring, ring->irq_mask);
- ironlake_disable_irq(dev_priv, gflag);
+ I915_WRITE_IMR(ring, ~0);
+ dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
- spin_unlock(&ring->irq_lock);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
gen6_gt_force_wake_put(dev_priv);
}
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
- struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (!dev->irq_enabled)
- return false;
+ int ret;
- spin_lock(&ring->irq_lock);
- if (ring->irq_refcount++ == 0) {
- if (IS_G4X(dev))
- i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
- else
- ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
- }
- spin_unlock(&ring->irq_lock);
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START |
+ MI_BATCH_GTT |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
- spin_lock(&ring->irq_lock);
- if (--ring->irq_refcount == 0) {
- if (IS_G4X(dev))
- i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
- else
- ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
- }
- spin_unlock(&ring->irq_lock);
+ return 0;
}
static int
-ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
int ret;
- ret = intel_ring_begin(ring, 2);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(ring, offset);
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
}
static int
-render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
- struct drm_device *dev = ring->dev;
int ret;
- if (IS_I830(dev) || IS_845G(dev)) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_BATCH_BUFFER);
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- intel_ring_emit(ring, offset + len - 8);
- intel_ring_emit(ring, 0);
- } else {
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- if (INTEL_INFO(dev)->gen >= 4) {
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(ring, offset);
- } else {
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START | (2 << 6));
- intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
- }
- }
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_advance(ring);
return 0;
@@ -933,7 +911,6 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
obj = ring->status_page.obj;
@@ -944,14 +921,11 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
-
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
static int init_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
@@ -972,7 +946,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
}
ring->status_page.obj = obj;
@@ -992,8 +965,8 @@ err:
return ret;
}
-int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_init_ring_buffer(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
int ret;
@@ -1002,10 +975,9 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
+ ring->size = 32 * PAGE_SIZE;
init_waitqueue_head(&ring->irq_queue);
- spin_lock_init(&ring->irq_lock);
- ring->irq_mask = ~0;
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
@@ -1026,20 +998,18 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unref;
- ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj->gtt_offset;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto err_unpin;
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
+ ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+ ring->size);
+ if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
goto err_unpin;
}
- ring->virtual_start = ring->map.handle;
ret = ring->init(ring);
if (ret)
goto err_unmap;
@@ -1055,7 +1025,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
return 0;
err_unmap:
- drm_core_ioremapfree(&ring->map, dev);
+ iounmap(ring->virtual_start);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
@@ -1083,7 +1053,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, 0);
- drm_core_ioremapfree(&ring->map, ring->dev);
+ iounmap(ring->virtual_start);
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
@@ -1097,7 +1067,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
- unsigned int *virt;
+ uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
if (ring->space < rem) {
@@ -1106,12 +1076,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return ret;
}
- virt = (unsigned int *)(ring->virtual_start + ring->tail);
- rem /= 8;
- while (rem--) {
- *virt++ = MI_NOOP;
- *virt++ = MI_NOOP;
- }
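+ /* Pad the rest of the ring with one-dword MI_NOOPs so the next command
+ * starts back at the top of the buffer. */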
+ virt = ring->virtual_start + ring->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
ring->tail = 0;
ring->space = ring_space(ring);
@@ -1132,9 +1100,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- ret = i915_wait_request(ring, seqno, true);
+ ret = i915_wait_request(ring, seqno);
dev_priv->mm.interruptible = was_interruptible;
+ if (!ret)
+ i915_gem_retire_requests_ring(ring);
return ret;
}
@@ -1208,15 +1178,12 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return ret;
trace_i915_ring_wait_begin(ring);
- if (drm_core_check_feature(dev, DRIVER_GEM))
- /* With GEM the hangcheck timer should kick us out of the loop,
- * leaving it early runs the risk of corrupting GEM state (due
- * to running on almost untested codepaths). But on resume
- * timers don't work yet, so prevent a complete hang in that
- * case by choosing an insanely large timeout. */
- end = jiffies + 60 * HZ;
- else
- end = jiffies + 3 * HZ;
+ /* With GEM the hangcheck timer should kick us out of the loop;
+ * leaving it early risks corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
@@ -1268,48 +1235,14 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
void intel_ring_advance(struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
ring->tail &= ring->size - 1;
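+ /* If this ring has been flagged as stopped, drop the tail update so the
+ * hardware never sees the newly emitted commands. */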
+ if (dev_priv->stop_rings & intel_ring_flag(ring))
+ return;
ring->write_tail(ring, ring->tail);
}
-static const struct intel_ring_buffer render_ring = {
- .name = "render ring",
- .id = RCS,
- .mmio_base = RENDER_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_render_ring,
- .write_tail = ring_write_tail,
- .flush = render_ring_flush,
- .add_request = render_ring_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = render_ring_get_irq,
- .irq_put = render_ring_put_irq,
- .dispatch_execbuffer = render_ring_dispatch_execbuffer,
- .cleanup = render_ring_cleanup,
- .sync_to = render_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_RV,
- MI_SEMAPHORE_SYNC_RB},
- .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
-};
-
-/* ring buffer for bit-stream decoder */
-
-static const struct intel_ring_buffer bsd_ring = {
- .name = "bsd ring",
- .id = VCS,
- .mmio_base = BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = bsd_ring_flush,
- .add_request = ring_add_request,
- .get_seqno = ring_get_seqno,
- .irq_get = bsd_ring_get_irq,
- .irq_put = bsd_ring_put_irq,
- .dispatch_execbuffer = ring_dispatch_execbuffer,
-};
-
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
@@ -1372,77 +1305,8 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_USER_INTERRUPT,
- GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_put_irq(ring,
- GT_USER_INTERRUPT,
- GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_GEN6_BSD_USER_INTERRUPT,
- GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_put_irq(ring,
- GT_GEN6_BSD_USER_INTERRUPT,
- GEN6_BSD_USER_INTERRUPT);
-}
-
-/* ring buffer for Video Codec for Gen6+ */
-static const struct intel_ring_buffer gen6_bsd_ring = {
- .name = "gen6 bsd ring",
- .id = VCS,
- .mmio_base = GEN6_BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = gen6_bsd_ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = gen6_bsd_ring_get_irq,
- .irq_put = gen6_bsd_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_bsd_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
- MI_SEMAPHORE_SYNC_INVALID,
- MI_SEMAPHORE_SYNC_VB},
- .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
-};
-
/* Blitter support (SandyBridge+) */
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
- return gen6_ring_get_irq(ring,
- GT_BLT_USER_INTERRUPT,
- GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
- gen6_ring_put_irq(ring,
- GT_BLT_USER_INTERRUPT,
- GEN6_BLITTER_USER_INTERRUPT);
-}
-
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
@@ -1464,42 +1328,63 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
-static const struct intel_ring_buffer gen6_blt_ring = {
- .name = "blt ring",
- .id = BCS,
- .mmio_base = BLT_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_ring_common,
- .write_tail = ring_write_tail,
- .flush = blt_ring_flush,
- .add_request = gen6_add_request,
- .get_seqno = gen6_ring_get_seqno,
- .irq_get = blt_ring_get_irq,
- .irq_put = blt_ring_put_irq,
- .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .sync_to = gen6_blt_ring_sync_to,
- .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
- MI_SEMAPHORE_SYNC_BV,
- MI_SEMAPHORE_SYNC_INVALID},
- .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
-};
-
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- *ring = render_ring;
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen6_render_ring_flush;
- ring->irq_get = gen6_render_ring_get_irq;
- ring->irq_put = gen6_render_ring_put_irq;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+ ring->signal_mbox[0] = GEN6_VRSYNC;
+ ring->signal_mbox[1] = GEN6_BRSYNC;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
+ ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+ } else {
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
}
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 6)
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
if (!I915_NEED_GFX_HWS(dev)) {
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1514,16 +1399,42 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- *ring = render_ring;
+ ring->name = "render ring";
+ ring->id = RCS;
+ ring->mmio_base = RENDER_RING_BASE;
+
if (INTEL_INFO(dev)->gen >= 6) {
- ring->add_request = gen6_add_request;
- ring->irq_get = gen6_render_ring_get_irq;
- ring->irq_put = gen6_render_ring_put_irq;
- } else if (IS_GEN5(dev)) {
- ring->add_request = pc_render_add_request;
- ring->get_seqno = pc_render_get_seqno;
+ /* non-kms not supported on gen6+ */
+ return -ENODEV;
}
+ /* Note: gem is not supported on gen5/ilk without kms (the corresponding
+ * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+ * the special gen5 functions. */
+ ring->add_request = i9xx_add_request;
+ if (INTEL_INFO(dev)->gen < 4)
+ ring->flush = gen2_render_ring_flush;
+ else
+ ring->flush = gen4_render_ring_flush;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN2(dev)) {
+ ring->irq_get = i8xx_ring_get_irq;
+ ring->irq_put = i8xx_ring_put_irq;
+ } else {
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->irq_enable_mask = I915_USER_INTERRUPT;
+ ring->write_tail = ring_write_tail;
+ if (INTEL_INFO(dev)->gen >= 4)
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev) || IS_845G(dev))
+ ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+ ring->init = init_render_ring;
+ ring->cleanup = render_ring_cleanup;
+
if (!I915_NEED_GFX_HWS(dev))
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1537,20 +1448,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
if (IS_I830(ring->dev))
ring->effective_size -= 128;
- ring->map.offset = start;
- ring->map.size = size;
- ring->map.type = 0;
- ring->map.flags = 0;
- ring->map.mtrr = 0;
-
- drm_core_ioremap_wc(&ring->map, dev);
- if (ring->map.handle == NULL) {
+ ring->virtual_start = ioremap_wc(start, size);
+ if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
- ring->virtual_start = (void __force __iomem *)ring->map.handle;
return 0;
}
@@ -1559,10 +1463,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
- if (IS_GEN6(dev) || IS_GEN7(dev))
- *ring = gen6_bsd_ring;
- else
- *ring = bsd_ring;
+ ring->name = "bsd ring";
+ ring->id = VCS;
+
+ ring->write_tail = ring_write_tail;
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ ring->mmio_base = GEN6_BSD_RING_BASE;
+ /* gen6 bsd needs a special wa for tail updates */
+ if (IS_GEN6(dev))
+ ring->write_tail = gen6_bsd_ring_write_tail;
+ ring->flush = gen6_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+ ring->signal_mbox[0] = GEN6_RVSYNC;
+ ring->signal_mbox[1] = GEN6_BVSYNC;
+ } else {
+ ring->mmio_base = BSD_RING_BASE;
+ ring->flush = bsd_ring_flush;
+ ring->add_request = i9xx_add_request;
+ ring->get_seqno = ring_get_seqno;
+ if (IS_GEN5(dev)) {
+ ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ ring->irq_get = gen5_ring_get_irq;
+ ring->irq_put = gen5_ring_put_irq;
+ } else {
+ ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ ring->irq_get = i9xx_ring_get_irq;
+ ring->irq_put = i9xx_ring_put_irq;
+ }
+ ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ }
+ ring->init = init_ring_common;
+
return intel_init_ring_buffer(dev, ring);
}
@@ -1572,7 +1512,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
- *ring = gen6_blt_ring;
+ ring->name = "blitter ring";
+ ring->id = BCS;
+
+ ring->mmio_base = BLT_RING_BASE;
+ ring->write_tail = ring_write_tail;
+ ring->flush = blt_ring_flush;
+ ring->add_request = gen6_add_request;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
+ ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ ring->sync_to = gen6_ring_sync;
+ ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->signal_mbox[0] = GEN6_RBSYNC;
+ ring->signal_mbox[1] = GEN6_VBSYNC;
+ ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bc0365b8fa4d..55d3da26bae7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
#define _INTEL_RINGBUFFER_H_
struct intel_hw_status_page {
- u32 __iomem *page_addr;
+ u32 *page_addr;
unsigned int gfx_addr;
struct drm_i915_gem_object *obj;
};
@@ -56,12 +56,9 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
- spinlock_t irq_lock;
- u32 irq_refcount;
- u32 irq_mask;
- u32 irq_seqno; /* last seq seem at irq time */
+ u32 irq_refcount; /* protected by dev_priv->irq_lock */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
- u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
@@ -118,11 +115,16 @@ struct intel_ring_buffer {
u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
- drm_local_map_t map;
void *private;
};
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+ return ring->obj != NULL;
+}
+
static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
@@ -152,7 +154,9 @@ static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
int reg)
{
- return ioread32(ring->status_page.page_addr + reg);
+ /* Ensure that the compiler doesn't optimize away the load. */
+ barrier();
+ return ring->status_page.page_addr[reg];
}
/**
@@ -170,10 +174,7 @@ intel_read_status_page(struct intel_ring_buffer *ring,
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
-#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ae5e748f39bb..b6a9d45fc3c6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -41,7 +41,7 @@
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
-#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
SDVO_TV_MASK)
@@ -74,7 +74,7 @@ struct intel_sdvo {
struct i2c_adapter ddc;
/* Register for the SDVO device: SDVOB or SDVOC */
- int sdvo_reg;
+ uint32_t sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
@@ -114,6 +114,9 @@ struct intel_sdvo {
*/
bool is_tv;
+ /* On different gens SDVOB lives at different register addresses. */
+
/* This is for current tv format name */
int tv_format_index;
@@ -403,8 +406,7 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
-#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
@@ -441,9 +443,17 @@ static const char *cmd_status_names[] = {
static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
- u8 buf[args_len*2 + 2], status;
- struct i2c_msg msgs[args_len + 3];
- int i, ret;
+ u8 *buf, status;
+ struct i2c_msg *msgs;
+ int i, ret = true;
+
+ buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
+ if (!buf)
+ return false;
+
+ msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+ if (!msgs) {
+ kfree(buf);
+ return false;
+ }
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
@@ -477,15 +487,19 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
- return false;
+ ret = false;
+ goto out;
}
if (ret != i+3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
- return false;
+ ret = false;
}
- return true;
+out:
+ kfree(msgs);
+ kfree(buf);
+ return ret;
}
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
@@ -733,18 +747,18 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
- width = mode->crtc_hdisplay;
- height = mode->crtc_vdisplay;
+ width = mode->hdisplay;
+ height = mode->vdisplay;
/* do some mode translations */
- h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
- h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ h_blank_len = mode->htotal - mode->hdisplay;
+ h_sync_len = mode->hsync_end - mode->hsync_start;
- v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
- v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ v_blank_len = mode->vtotal - mode->vdisplay;
+ v_sync_len = mode->vsync_end - mode->vsync_start;
- h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
- v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+ h_sync_offset = mode->hsync_start - mode->hdisplay;
+ v_sync_offset = mode->vsync_start - mode->vdisplay;
mode_clock = mode->clock;
mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@@ -769,10 +783,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
((v_sync_len & 0x30) >> 4);
dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- dtd->part2.dtd_flags |= 0x2;
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- dtd->part2.dtd_flags |= 0x4;
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -806,9 +822,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->clock = dtd->part1.clock * 10;
mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
- if (dtd->part2.dtd_flags & 0x2)
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
- if (dtd->part2.dtd_flags & 0x4)
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
@@ -873,17 +891,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
uint8_t set_buf_index[2] = { 1, 0 };
- uint64_t *data = (uint64_t *)&avi_if;
+ uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+ uint64_t *data = (uint64_t *)sdvo_data;
unsigned i;
intel_dip_infoframe_csum(&avi_if);
+ /* The sdvo spec says that the ecc is handled by the hw, so it looks
+ * like we must not send the ecc field ourselves. */
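+ /* sdvo_data layout: the 3 infoframe header bytes, then the checksum,
+ * then the AVI body. */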
+ memcpy(sdvo_data, &avi_if, 3);
+ sdvo_data[3] = avi_if.checksum;
+ memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2))
return false;
- for (i = 0; i < sizeof(avi_if); i += 8) {
+ for (i = 0; i < sizeof(sdvo_data); i += 8) {
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_DATA,
data, 8))
@@ -1260,10 +1285,11 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
struct drm_i915_private *dev_priv = connector->dev->dev_private;
return drm_get_edid(connector,
- &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ intel_gmbus_get_adapter(dev_priv,
+ dev_priv->crt_ddc_pin));
}
-enum drm_connector_status
+static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
@@ -1349,8 +1375,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
/* add 30ms delay when the output type might be TV */
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
mdelay(30);
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@@ -1570,9 +1595,6 @@ end:
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
- 0);
-
intel_sdvo->is_lvds = true;
break;
}
@@ -1901,7 +1923,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *mapping;
- if (IS_SDVOB(reg))
+ if (sdvo->is_sdvob)
mapping = &(dev_priv->sdvo_mappings[0]);
else
mapping = &(dev_priv->sdvo_mappings[1]);
@@ -1919,7 +1941,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
u8 pin;
- if (IS_SDVOB(reg))
+ if (sdvo->is_sdvob)
mapping = &dev_priv->sdvo_mappings[0];
else
mapping = &dev_priv->sdvo_mappings[1];
@@ -1928,12 +1950,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
if (mapping->initialized)
pin = mapping->i2c_pin;
- if (pin < GMBUS_NUM_PORTS) {
- sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ if (intel_gmbus_is_port_valid(pin)) {
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
} else {
- sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+ sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
}
}
@@ -1944,12 +1966,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
}
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
- if (IS_SDVOB(sdvo_reg)) {
+ if (sdvo->is_sdvob) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@@ -1974,7 +1996,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
- if (IS_SDVOB(sdvo_reg))
+ if (sdvo->is_sdvob)
return 0x70;
else
return 0x72;
@@ -2199,6 +2221,10 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
return false;
+ if (flags & SDVO_OUTPUT_YPRPB0)
+ if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+ return false;
+
if (flags & SDVO_OUTPUT_RGB0)
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
@@ -2490,7 +2516,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
return i2c_add_adapter(&sdvo->ddc) == 0;
}
-bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
@@ -2502,7 +2528,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return false;
intel_sdvo->sdvo_reg = sdvo_reg;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ intel_sdvo->is_sdvob = is_sdvob;
+ intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
kfree(intel_sdvo);
@@ -2519,13 +2546,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
- IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ DRM_DEBUG_KMS("No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
}
- if (IS_SDVOB(sdvo_reg))
+ if (intel_sdvo->is_sdvob)
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
@@ -2546,8 +2573,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
- DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
- IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 6b7b22f4d63e..9d030142ee43 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
u16 output_flags;
} __attribute__((packed));
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index e90dfb625c42..2a20fb0781d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -110,14 +110,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
- dev_priv->sprite_scaling_enabled = true;
- sandybridge_update_wm(dev);
- intel_wait_for_vblank(dev, pipe);
+ if (!dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = true;
+ intel_update_watermarks(dev);
+ intel_wait_for_vblank(dev, pipe);
+ }
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
- dev_priv->sprite_scaling_enabled = false;
- /* potentially re-enable LP watermarks */
- sandybridge_update_wm(dev);
+ if (dev_priv->sprite_scaling_enabled) {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ intel_update_watermarks(dev);
+ }
}
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -133,7 +137,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -149,8 +153,11 @@ ivb_disable_plane(struct drm_plane *plane)
/* Can't leave the scaler enabled... */
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
- I915_WRITE(SPRSURF(pipe), 0);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
+
+ dev_priv->sprite_scaling_enabled = false;
+ intel_update_watermarks(dev);
}
static int
@@ -208,7 +215,7 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
-snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -218,7 +225,7 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
- u32 dvscntr, dvsscale = 0;
+ u32 dvscntr, dvsscale;
dvscntr = I915_READ(DVSCNTR(pipe));
@@ -262,8 +269,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
- /* must disable */
- dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+ if (IS_GEN6(dev))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
/* Sizes are 0 based */
@@ -274,7 +281,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
- if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = 0;
+ if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -290,12 +298,12 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
POSTING_READ(DVSSURF(pipe));
}
static void
-snb_disable_plane(struct drm_plane *plane)
+ilk_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -306,7 +314,7 @@ snb_disable_plane(struct drm_plane *plane)
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
- I915_WRITE(DVSSURF(pipe), 0);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
}
@@ -333,7 +341,7 @@ intel_disable_primary(struct drm_crtc *crtc)
}
static int
-snb_update_colorkey(struct drm_plane *plane,
+ilk_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
@@ -362,7 +370,7 @@ snb_update_colorkey(struct drm_plane *plane,
}
static void
-snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -550,14 +558,13 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
- if (!dev_priv)
- return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -584,14 +591,13 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
- if (!dev_priv)
- return -EINVAL;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
mutex_lock(&dev->mode_config.mutex);
@@ -616,6 +622,14 @@ static const struct drm_plane_funcs intel_plane_funcs = {
.destroy = intel_destroy_plane,
};
+static uint32_t ilk_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
static uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@@ -630,34 +644,56 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
+ const uint32_t *plane_formats;
+ int num_plane_formats;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
- if (IS_GEN6(dev)) {
+ switch (INTEL_INFO(dev)->gen) {
+ case 5:
+ case 6:
intel_plane->max_downscale = 16;
- intel_plane->update_plane = snb_update_plane;
- intel_plane->disable_plane = snb_disable_plane;
- intel_plane->update_colorkey = snb_update_colorkey;
- intel_plane->get_colorkey = snb_get_colorkey;
- } else if (IS_GEN7(dev)) {
+ intel_plane->update_plane = ilk_update_plane;
+ intel_plane->disable_plane = ilk_disable_plane;
+ intel_plane->update_colorkey = ilk_update_colorkey;
+ intel_plane->get_colorkey = ilk_get_colorkey;
+
+ if (IS_GEN6(dev)) {
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ } else {
+ plane_formats = ilk_plane_formats;
+ num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+ }
+ break;
+
+ case 7:
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
+
+ plane_formats = snb_plane_formats;
+ num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ break;
+
+ default:
+ return -ENODEV;
}
intel_plane->pipe = pipe;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs, snb_plane_formats,
- ARRAY_SIZE(snb_plane_formats), false);
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ false);
if (ret)
kfree(intel_plane);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 05f765ef5464..a233a51fd7e6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -674,6 +674,54 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
{
+ .name = "480p",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
.name = "720p@60Hz",
.clock = 148800,
.refresh = 60000,
@@ -811,7 +859,7 @@ intel_tv_mode_lookup(const char *tv_format)
{
int i;
- for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
@@ -1153,6 +1201,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
DAC_B_0_7_V |
DAC_C_0_7_V);
+
+ /*
+ * The TV sense state should be cleared to zero on the Cantiga platform.
+ * Otherwise the TV is misdetected. This is a hardware requirement.
+ */
+ if (IS_GM45(dev))
+ tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
@@ -1185,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
I915_WRITE(TV_CTL, save_tv_ctl);
+ POSTING_READ(TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@@ -1240,11 +1302,8 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, 0);
- if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
- type = intel_tv_detect_type(intel_tv, connector);
- } else if (force) {
+ if (force) {
struct intel_load_detect_pipe tmp;
if (intel_get_load_detect_pipe(&intel_tv->base, connector,
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
new file mode 100644
index 000000000000..d63013497f66
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -0,0 +1,15 @@
+config DRM_MGAG200
+ tristate "Kernel modesetting driver for MGA G200 server engines"
+ depends on DRM && PCI && EXPERIMENTAL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ This is a KMS driver for the MGA G200 server chips; it
+ does not support the original MGA G200 or any of the desktop
+ chips. It requires 0.3.0 of the modesetting userspace driver,
+ and a version of the mga driver that will fail on KMS-enabled
+ devices.
+
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
new file mode 100644
index 000000000000..7db592eedbf1
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+mgag200-y := mgag200_main.o mgag200_mode.o \
+ mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+
+obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
new file mode 100644
index 000000000000..93e832d6c328
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+#include "drm_pciids.h"
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions.
+ */
+int mgag200_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, mgag200_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+ { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+ { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+ { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
+ { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
+ { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+ kfree(ap);
+}
+
+
+static int __devinit
+mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ mgag200_kick_out_firmware_fb(pdev);
+
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void mga_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations mgag200_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = mgag200_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+ .load = mgag200_driver_load,
+ .unload = mgag200_driver_unload,
+ .fops = &mgag200_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_init_object = mgag200_gem_init_object,
+ .gem_free_object = mgag200_gem_free_object,
+ .dumb_create = mgag200_dumb_create,
+ .dumb_map_offset = mgag200_dumb_mmap_offset,
+ .dumb_destroy = mgag200_dumb_destroy,
+};
+
+static struct pci_driver mgag200_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = mga_pci_probe,
+ .remove = mga_pci_remove,
+};
+
+static int __init mgag200_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && mgag200_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (mgag200_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &mgag200_pci_driver);
+}
+
+static void __exit mgag200_exit(void)
+{
+ drm_pci_exit(&driver, &mgag200_pci_driver);
+}
+
+module_init(mgag200_init);
+module_exit(mgag200_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
new file mode 100644
index 000000000000..6f13b3563234
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#ifndef __MGAG200_DRV_H__
+#define __MGAG200_DRV_H__
+
+#include <video/vga.h>
+
+#include "drm/drm_fb_helper.h"
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "mgag200_reg.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "mgag200"
+#define DRIVER_DESC "MGA G200 SE"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define MGAG200FB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
+
+#define ATTR_INDEX 0x1fc0
+#define ATTR_DATA 0x1fc1
+
+#define WREG_ATTR(reg, v) \
+ do { \
+ RREG8(0x1fda); \
+ WREG8(ATTR_INDEX, reg); \
+ WREG8(ATTR_DATA, v); \
+ } while (0) \
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(MGAREG_SEQ_INDEX, reg); \
+ WREG8(MGAREG_SEQ_DATA, v); \
+ } while (0) \
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTC_INDEX, reg); \
+ WREG8(MGAREG_CRTC_DATA, v); \
+ } while (0) \
+
+
+#define WREG_ECRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTCEXT_INDEX, reg); \
+ WREG8(MGAREG_CRTCEXT_DATA, v); \
+ } while (0) \
+
+#define GFX_INDEX 0x1fce
+#define GFX_DATA 0x1fcf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+ } while (0) \
+
+#define DAC_INDEX 0x3c00
+#define DAC_DATA 0x3c0a
+
+#define WREG_DAC(reg, v) \
+ do { \
+ WREG8(DAC_INDEX, reg); \
+ WREG8(DAC_DATA, v); \
+ } while (0) \
+
+#define MGA_MISC_OUT 0x1fc2
+#define MGA_MISC_IN 0x1fcc
+
+#define MGAG200_MAX_FB_HEIGHT 4096
+#define MGAG200_MAX_FB_WIDTH 4096
+
+#define MATROX_DPMS_CLEARED (-1)
+
+#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
+#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
+#define to_mga_connector(x) container_of(x, struct mga_connector, base)
+#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
+
+struct mga_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct mga_fbdev {
+ struct drm_fb_helper helper;
+ struct mga_framebuffer mfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+};
+
+struct mga_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct mga_mode_info {
+ bool mode_config_initialized;
+ struct mga_crtc *crtc;
+};
+
+struct mga_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+
+struct mga_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+ int data, clock;
+};
+
+struct mga_connector {
+ struct drm_connector base;
+ struct mga_i2c_chan *i2c;
+};
+
+
+struct mga_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+ resource_size_t vram_window;
+};
+
+enum mga_type {
+ G200_SE_A,
+ G200_SE_B,
+ G200_WB,
+ G200_EV,
+ G200_EH,
+ G200_ER,
+};
+
+#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+
+struct mga_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ drm_local_map_t *framebuffer;
+
+ struct mga_mc mc;
+ struct mga_mode_info mode_info;
+
+ struct mga_fbdev *mfbdev;
+
+ bool suspended;
+ int num_crtc;
+ enum mga_type type;
+ int has_sdram;
+ struct drm_display_mode mode;
+
+ int bpp_shifts[4];
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+ u32 reg_1e24; /* SE model number */
+};
+
+
+struct mgag200_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
+
+static inline struct mgag200_bo *
+mgag200_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct mgag200_bo, bo);
+}
+ /* mga_crtc.c */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ /* mgag200_mode.c */
+int mgag200_modeset_init(struct mga_device *mdev);
+void mgag200_modeset_fini(struct mga_device *mdev);
+
+ /* mga_fbdev.c */
+int mgag200_fbdev_init(struct mga_device *mdev);
+void mgag200_fbdev_fini(struct mga_device *mdev);
+
+ /* mgag200_main.c */
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *mfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
+int mgag200_driver_unload(struct drm_device *dev);
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int mgag200_gem_init_object(struct drm_gem_object *obj);
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int mgag200_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+void mgag200_gem_free_object(struct drm_gem_object *obj);
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+ /* mga_i2c.c */
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait);
+void mgag200_bo_unreserve(struct mgag200_bo *bo);
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pastbo);
+int mgag200_mm_init(struct mga_device *mdev);
+void mgag200_mm_fini(struct mga_device *mdev);
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int mgag200_bo_unpin(struct mgag200_bo *bo);
+int mgag200_bo_push_sysram(struct mgag200_bo *bo);
+#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
new file mode 100644
index 000000000000..880d3369760e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "mgag200_drv.h"
+
+static void mga_dirty_update(struct mga_fbdev *mfbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct mgag200_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = mfbdev->mfb.obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ mgag200_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_bo_unreserve(bo);
+}
+
+static void mga_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_fillrect(info, rect);
+ mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void mga_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_copyarea(info, area);
+ mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void mga_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_imageblit(info, image);
+ mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+
+static struct fb_ops mgag200fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = mga_fillrect,
+ .fb_copyarea = mga_copyarea,
+ .fb_imageblit = mga_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int mgag200fb_create_object(struct mga_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = mgag200_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int mgag200fb_create(struct mga_fbdev *mfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = mfbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct mga_device *mdev = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *gobj = NULL;
+ struct device *device = &dev->pdev->dev;
+ struct mgag200_bo *bo;
+ int ret;
+ void *sysram;
+ int size;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_mga_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = mfbdev;
+
+ ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ mfbdev->sysram = sysram;
+ mfbdev->size = size;
+
+ fb = &mfbdev->mfb.base;
+
+ /* setup helper */
+ mfbdev->helper.fb = fb;
+ mfbdev->helper.fbdev = info;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ strcpy(info->fix.id, "mgadrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &mgag200fb_ops;
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = mdev->mc.vram_size;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+ return 0;
+out:
+ return ret;
+}
+
+static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size
+ *sizes)
+{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = mgag200fb_create(mfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int mga_fbdev_destroy(struct drm_device *dev,
+ struct mga_fbdev *mfbdev)
+{
+ struct fb_info *info;
+ struct mga_framebuffer *mfb = &mfbdev->mfb;
+
+ if (mfbdev->helper.fbdev) {
+ info = mfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (mfb->obj) {
+ drm_gem_object_unreference_unlocked(mfb->obj);
+ mfb->obj = NULL;
+ }
+ drm_fb_helper_fini(&mfbdev->helper);
+ vfree(mfbdev->sysram);
+ drm_framebuffer_cleanup(&mfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+ .gamma_set = mga_crtc_fb_gamma_set,
+ .gamma_get = mga_crtc_fb_gamma_get,
+ .fb_probe = mga_fb_find_or_create_single,
+};
+
+int mgag200_fbdev_init(struct mga_device *mdev)
+{
+ struct mga_fbdev *mfbdev;
+ int ret;
+
+ mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
+ if (!mfbdev)
+ return -ENOMEM;
+
+ mdev->mfbdev = mfbdev;
+ mfbdev->helper.funcs = &mga_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+ mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+ if (ret) {
+ kfree(mfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+ drm_fb_helper_initial_config(&mfbdev->helper, 32);
+
+ return 0;
+}
+
+void mgag200_fbdev_fini(struct mga_device *mdev)
+{
+ if (!mdev->mfbdev)
+ return;
+
+ mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
+ kfree(mdev->mfbdev);
+ mdev->mfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
new file mode 100644
index 000000000000..dd3568a1b6b0
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+static int mga_i2c_read_gpio(struct mga_device *mdev)
+{
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ return RREG8(DAC_DATA);
+}
+
+static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
+{
+ int tmp;
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = (RREG8(DAC_DATA) & mask) | val;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0);
+}
+
+static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
+{
+ if (state)
+ state = 0;
+ else
+ state = mask;
+ mga_i2c_set_gpio(mdev, ~mask, state);
+}
+
+static void mga_gpio_setsda(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->data, state);
+}
+
+static void mga_gpio_setscl(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->clock, state);
+}
+
+static int mga_gpio_getsda(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
+}
+
+static int mga_gpio_getscl(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+}
+
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_i2c_chan *i2c;
+ int ret;
+ int data, clock;
+
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
+ WREG_DAC(MGA1064_GEN_IO_CTL, 0);
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ case G200_EV:
+ case G200_WB:
+ data = 1;
+ clock = 2;
+ break;
+ case G200_EH:
+ case G200_ER:
+ data = 2;
+ clock = 1;
+ break;
+ default:
+ data = 2;
+ clock = 8;
+ break;
+ }
+
+ i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->data = data;
+ i2c->clock = clock;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
+
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 10;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = mga_gpio_setsda;
+ i2c->bit.setscl = mga_gpio_setscl;
+ i2c->bit.getsda = mga_gpio_getsda;
+ i2c->bit.getscl = mga_gpio_getscl;
+
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ kfree(i2c);
+ i2c = NULL;
+ }
+ return i2c;
+}
+
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
new file mode 100644
index 000000000000..636a81cd2f37
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "mgag200_drv.h"
+
+static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
+ if (mga_fb->obj)
+ drm_gem_object_unreference_unlocked(mga_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs mga_fb_funcs = {
+ .destroy = mga_user_framebuffer_destroy,
+ .create_handle = mga_user_framebuffer_create_handle,
+};
+
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+mgag200_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
+ if (!mga_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(mga_fb);
+ return ERR_PTR(ret);
+ }
+ return &mga_fb->base;
+}
+
+static const struct drm_mode_config_funcs mga_mode_funcs = {
+ .fb_create = mgag200_user_framebuffer_create,
+};
+
+/* Unmap the framebuffer from the core and release the memory */
+static void mga_vram_fini(struct mga_device *mdev)
+{
+ pci_iounmap(mdev->dev->pdev, mdev->rmmio);
+ mdev->rmmio = NULL;
+ if (mdev->mc.vram_base)
+ release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
+}
+
+static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
+{
+ int offset;
+ int orig;
+ int test1, test2;
+ int orig1, orig2;
+
+ /* Probe */
+ orig = ioread16(mem);
+ iowrite16(0, mem);
+
+ for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+ orig1 = ioread8(mem + offset);
+ orig2 = ioread8(mem + offset + 0x100);
+
+ iowrite16(0xaa55, mem + offset);
+ iowrite16(0xaa55, mem + offset + 0x100);
+
+ test1 = ioread16(mem + offset);
+ test2 = ioread16(mem);
+
+ iowrite16(orig1, mem + offset);
+ iowrite16(orig2, mem + offset + 0x100);
+
+ if (test1 != 0xaa55) {
+ break;
+ }
+
+ if (test2) {
+ break;
+ }
+ }
+
+ iowrite16(orig, mem);
+ return offset - 65536;
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int mga_vram_init(struct mga_device *mdev)
+{
+ void __iomem *mem;
+ struct apertures_struct *aper = alloc_apertures(1);
+
+ /* BAR 0 is VRAM */
+ mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
+ mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+
+ aper->ranges[0].base = mdev->mc.vram_base;
+ aper->ranges[0].size = mdev->mc.vram_window;
+ aper->count = 1;
+
+ remove_conflicting_framebuffers(aper, "mgafb", true);
+
+ if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
+ "mgadrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ mem = pci_iomap(mdev->dev->pdev, 0, 0);
+
+ mdev->mc.vram_size = mga_probe_vram(mdev, mem);
+
+ pci_iounmap(mdev->dev->pdev, mem);
+
+ return 0;
+}
+
+static int mgag200_device_init(struct drm_device *dev,
+ uint32_t flags)
+{
+ struct mga_device *mdev = dev->dev_private;
+ int ret, option;
+
+ mdev->type = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ mdev->num_crtc = 1;
+
+ pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ mdev->has_sdram = !(option & (1 << 14));
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
+ mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+
+ if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
+ "mgadrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
+ if (mdev->rmmio == NULL)
+ return -ENOMEM;
+
+ /* stash G200 SE model number for later use */
+ if (IS_G200_SE(mdev))
+ mdev->reg_1e24 = RREG32(0x1e24);
+
+ ret = mga_vram_init(mdev);
+ if (ret) {
+ release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ return ret;
+ }
+
+ mdev->bpp_shifts[0] = 0;
+ mdev->bpp_shifts[1] = 1;
+ mdev->bpp_shifts[2] = 0;
+ mdev->bpp_shifts[3] = 2;
+ return 0;
+}
+
+void mgag200_device_fini(struct mga_device *mdev)
+{
+ release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ mga_vram_fini(mdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device.
+ */
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct mga_device *mdev;
+ int r;
+
+ mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
+ if (mdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)mdev;
+ mdev->dev = dev;
+
+ r = mgag200_device_init(dev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+ r = mgag200_mm_init(mdev);
+ if (r)
+ goto out;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = (void *)&mga_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ r = mgag200_modeset_init(mdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+out:
+ if (r)
+ mgag200_driver_unload(dev);
+ return r;
+}
+
+int mgag200_driver_unload(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+
+ if (mdev == NULL)
+ return 0;
+ mgag200_modeset_fini(mdev);
+ mgag200_fbdev_fini(mdev);
+ drm_mode_config_cleanup(dev);
+ mgag200_mm_fini(mdev);
+ mgag200_device_fini(mdev);
+ kfree(mdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct mgag200_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = mgag200_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = mgag200_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int mgag200_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int mgag200_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void mgag200_bo_unref(struct mgag200_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void mgag200_gem_free_object(struct drm_gem_object *obj)
+{
+ struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
+
+ if (!mgag200_bo)
+ return;
+ mgag200_bo_unref(&mgag200_bo);
+}
+
+
+static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct mgag200_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_mga_bo(obj);
+ *offset = mgag200_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
new file mode 100644
index 000000000000..d303061b251e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -0,0 +1,1533 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+
+#include <linux/delay.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "mgag200_drv.h"
+
+#define MGAG200_LUT_SIZE 256
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void mga_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
+ }
+}
+
+static inline void mga_wait_vsync(struct mga_device *mdev)
+{
+ unsigned int count = 0;
+ unsigned int status = 0;
+
+ do {
+ status = RREG32(MGAREG_Status);
+ count++;
+ } while ((status & 0x08) && (count < 250000));
+ count = 0;
+ status = 0;
+ do {
+ status = RREG32(MGAREG_Status);
+ count++;
+ } while (!(status & 0x08) && (count < 250000));
+}
+
+static inline void mga_wait_busy(struct mga_device *mdev)
+{
+ unsigned int count = 0;
+ unsigned int status = 0;
+ do {
+ status = RREG8(MGAREG_Status + 2);
+ count++;
+ } while ((status & 0x01) && (count < 500000));
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+
+ m = n = p = 0;
+ vcomax = 320000;
+ vcomin = 160000;
+ pllreffreq = 25000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 8; testp > 0; testp /= 2) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 17; testn < 256; testn++) {
+ for (testm = 1; testm < 32; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm - 1;
+ n = testn - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ if (delta > permitteddelta) {
+ printk(KERN_WARNING "PLL delta too large\n");
+ return 1;
+ }
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_PIX_PLLC_P, p);
+ return 0;
+}
+
+static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ bool pll_locked = false;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 1; testp < 9; testp++) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 17; testm++) {
+ for (testn = 1; testn < 151; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ if (i > 0) {
+ WREG8(MGAREG_CRTC_INDEX, 0x1e);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ if (tmp < 0xff)
+ WREG8(MGAREG_CRTC_DATA, tmp+1);
+ }
+
+ /* set pixclkdis to 1 */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ /* select PLL Set C */
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ /* reset the PLL */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(50);
+
+ /* program pixel pll register */
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ /* turn pll on */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(500);
+
+ /* select the pixel pll */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+ tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ /* reset dotclock rate bit */
+ WREG8(MGAREG_SEQ_INDEX, 1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+ tmp &= ~0x8;
+ WREG8(MGAREG_SEQ_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ return 0;
+}
+
+static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 50000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 1; testn < 257; testn++) {
+ for (testm = 1; testm < 17; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = testm - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2);
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ return 0;
+}
+
+static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ u8 tmp;
+ bool pll_locked = false;
+
+ m = n = p = 0;
+ vcomax = 800000;
+ vcomin = 400000;
+ pllreffreq = 3333;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 33; testm++) {
+ for (testn = 1; testn < 257; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ if ((clock * testp) >= 600000)
+ p |= 80;
+ }
+ }
+ }
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_EH_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+
+ return 0;
+}
+
+static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta;
+ unsigned int testr, testn, testm, testo;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int tmp;
+
+ m = n = p = 0;
+ vcomax = 1488000;
+ vcomin = 1056000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+
+ for (testr = 0; testr < 4; testr++) {
+ if (delta == 0)
+ break;
+ for (testn = 5; testn < 129; testn++) {
+ if (delta == 0)
+ break;
+ for (testm = 3; testm >= 0; testm--) {
+ if (delta == 0)
+ break;
+ for (testo = 5; testo < 33; testo++) {
+ computed = pllreffreq * (testn + 1) /
+ (testr + 1);
+ if (computed < vcomin)
+ continue;
+ if (computed > vcomax)
+ continue;
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm | (testo << 3);
+ n = testn;
+ p = testr | (testr << 3);
+ }
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3<<2) | 0xc0;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_ER_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ return 0;
+}
+
+static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
+{
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+		return mga_g200se_set_plls(mdev, clock);
+	case G200_WB:
+		return mga_g200wb_set_plls(mdev, clock);
+	case G200_EV:
+		return mga_g200ev_set_plls(mdev, clock);
+	case G200_EH:
+		return mga_g200eh_set_plls(mdev, clock);
+	case G200_ER:
+		return mga_g200er_set_plls(mdev, clock);
+	}
+ return 0;
+}
+
+static void mga_g200wb_prepare(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u8 tmp;
+ int iter_max;
+
+	/* 1- The first step is to warn the BMC of an upcoming mode change.
+	 * We are setting misc<0> as an output. */
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+ /* we are putting a 1 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+	/* 2- The second step is to mask any further scan request.
+	 * This is done by asserting the remfreqmsk bit (XSPAREREG<7>).
+	 */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x80;
+ WREG_DAC(MGA1064_SPAREREG, tmp);
+
+	/* 3a- The third step is to verify whether there is an active scan.
+	 * We are searching for a 0 on remhsyncsts (XSPAREREG<0>).
+	 */
+ iter_max = 300;
+ while (!(tmp & 0x1) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+
+	/* 3b- This step occurs only if the remote is actually scanning.
+	 * We are waiting for the end of the frame, which is a 1 on
+	 * remvsyncsts (XSPAREREG<1>).
+	 */
+ if (iter_max) {
+ iter_max = 300;
+ while ((tmp & 0x2) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+ }
+}
+
+static void mga_g200wb_commit(struct drm_crtc *crtc)
+{
+ u8 tmp;
+ struct mga_device *mdev = crtc->dev->dev_private;
+
+ /* 1- The first step is to ensure that the vrsten and hrsten are set */
+ WREG8(MGAREG_CRTCEXT_INDEX, 1);
+ tmp = RREG8(MGAREG_CRTCEXT_DATA);
+ WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+ /* 2- second step is to assert the rstlvl2 */
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x8;
+ WREG8(DAC_DATA, tmp);
+
+ /* wait 10 us */
+ udelay(10);
+
+ /* 3- deassert rstlvl2 */
+ tmp &= ~0x08;
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ WREG8(DAC_DATA, tmp);
+
+ /* 4- remove mask of scan request */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x80;
+ WREG8(DAC_DATA, tmp);
+
+ /* 5- put back a 0 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
+
+
+void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u32 addr;
+ int count;
+
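+	/*
+	 * Wait for the current vertical retrace to end and the next one to
+	 * begin (0x1fda bit 3 appears to be the retrace status flag), then
+	 * let two further scanlines pass before reprogramming the address.
+	 */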
+ while (RREG8(0x1fda) & 0x08);
+ while (!(RREG8(0x1fda) & 0x08));
+
+ count = RREG8(MGAREG_VCOUNT) + 2;
+ while (RREG8(MGAREG_VCOUNT) < count);
+
+ addr = offset >> 2;
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+ WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
+ WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+}
+
+
+/* VRAM is limited, so force the previously displayed buffer back out to system memory */
+static int mga_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ struct mgag200_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ mga_fb = to_mga_framebuffer(fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+
+ mga_fb = to_mga_framebuffer(crtc->fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ mgag200_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&mdev->mfbdev->mfb == mga_fb) {
+		/* if pushing the console framebuffer, kmap it so fbcon can draw into it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+
+ }
+ mgag200_bo_unreserve(bo);
+
+ DRM_INFO("mga base %llx\n", gpu_addr);
+
+ mga_set_start_address(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int mga_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int hdisplay, hsyncstart, hsyncend, htotal;
+ int vdisplay, vsyncstart, vsyncend, vtotal;
+ int pitch;
+ int option = 0, option2 = 0;
+ int i;
+ unsigned char misc = 0;
+ unsigned char ext_vga[6];
+ unsigned char ext_vga_index24;
+ unsigned char dac_index90 = 0;
+ u8 bppshift;
+
+ static unsigned char dacvalue[] = {
+ /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
+ /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
+ /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40,
+ /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
+ /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
+ /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ dacvalue[MGA1064_VREF_CTL] = 0x03;
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
+ MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ if (mdev->has_sdram)
+ option = 0x40049120;
+ else
+ option = 0x4004d120;
+ option2 = 0x00008000;
+ break;
+ case G200_WB:
+ dacvalue[MGA1064_VREF_CTL] = 0x07;
+ option = 0x41049120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EV:
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EH:
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_ER:
+ dac_index90 = 0;
+ break;
+ }
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
+ else
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
+ break;
+ case 24:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
+ break;
+ case 32:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
+ break;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= 0x40;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= 0x80;
+
+
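+	/*
+	 * Load the DAC register defaults, skipping indices that are reserved
+	 * or handled elsewhere (palette/cursor access and the per-chip PLL
+	 * registers programmed by mga_crtc_set_plls() below).
+	 */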
+ for (i = 0; i < sizeof(dacvalue); i++) {
+ if ((i <= 0x03) ||
+ (i == 0x07) ||
+ (i == 0x0b) ||
+ (i == 0x0f) ||
+ ((i >= 0x13) && (i <= 0x17)) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ if (IS_G200_SE(mdev) &&
+ ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
+ continue;
+ if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+ (i >= 0x44) && (i <= 0x4e))
+ continue;
+
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ if (mdev->type == G200_ER) {
+ WREG_DAC(0x90, dac_index90);
+ }
+
+
+ if (option)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ if (option2)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+
+ WREG_SEQ(2, 0xf);
+ WREG_SEQ(3, 0);
+ WREG_SEQ(4, 0xe);
+
+ pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
+	pitch = pitch >> (4 - bppshift);
+
+ hdisplay = mode->hdisplay / 8 - 1;
+ hsyncstart = mode->hsync_start / 8 - 1;
+ hsyncend = mode->hsync_end / 8 - 1;
+ htotal = mode->htotal / 8 - 1;
+
+ /* Work around hardware quirk */
+ if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
+ htotal++;
+
+ vdisplay = mode->vdisplay - 1;
+ vsyncstart = mode->vsync_start - 1;
+ vsyncend = mode->vsync_end - 1;
+ vtotal = mode->vtotal - 2;
+
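+	/* Program the standard VGA graphics and CRTC timing registers; the
+	 * horizontal values above are in 8-pixel character units. */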
+ WREG_GFX(0, 0);
+ WREG_GFX(1, 0);
+ WREG_GFX(2, 0);
+ WREG_GFX(3, 0);
+ WREG_GFX(4, 0);
+ WREG_GFX(5, 0x40);
+ WREG_GFX(6, 0x5);
+ WREG_GFX(7, 0xf);
+ WREG_GFX(8, 0xf);
+
+ WREG_CRT(0, htotal - 4);
+ WREG_CRT(1, hdisplay);
+ WREG_CRT(2, hdisplay);
+ WREG_CRT(3, (htotal & 0x1F) | 0x80);
+ WREG_CRT(4, hsyncstart);
+ WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
+ WREG_CRT(6, vtotal & 0xFF);
+ WREG_CRT(7, ((vtotal & 0x100) >> 8) |
+ ((vdisplay & 0x100) >> 7) |
+ ((vsyncstart & 0x100) >> 6) |
+ ((vdisplay & 0x100) >> 5) |
+ ((vdisplay & 0x100) >> 4) | /* linecomp */
+ ((vtotal & 0x200) >> 4)|
+ ((vdisplay & 0x200) >> 3) |
+ ((vsyncstart & 0x200) >> 2));
+ WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
+ ((vdisplay & 0x200) >> 3));
+ WREG_CRT(10, 0);
+ WREG_CRT(11, 0);
+ WREG_CRT(12, 0);
+ WREG_CRT(13, 0);
+ WREG_CRT(14, 0);
+ WREG_CRT(15, 0);
+ WREG_CRT(16, vsyncstart & 0xFF);
+ WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
+ WREG_CRT(18, vdisplay & 0xFF);
+ WREG_CRT(19, pitch & 0xFF);
+ WREG_CRT(20, 0);
+ WREG_CRT(21, vdisplay & 0xFF);
+ WREG_CRT(22, (vtotal + 1) & 0xFF);
+ WREG_CRT(23, 0xc3);
+ WREG_CRT(24, vdisplay & 0xFF);
+
+ ext_vga[0] = 0;
+ ext_vga[5] = 0;
+
+ /* TODO interlace */
+
+ ext_vga[0] |= (pitch & 0x300) >> 4;
+ ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
+ ((hdisplay & 0x100) >> 7) |
+ ((hsyncstart & 0x100) >> 6) |
+ (htotal & 0x40);
+ ext_vga[2] = ((vtotal & 0xc00) >> 10) |
+ ((vdisplay & 0x400) >> 8) |
+ ((vdisplay & 0xc00) >> 7) |
+ ((vsyncstart & 0xc00) >> 5) |
+ ((vdisplay & 0x400) >> 3);
+ if (crtc->fb->bits_per_pixel == 24)
+ ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
+ else
+ ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
+ ext_vga[4] = 0;
+ if (mdev->type == G200_WB)
+ ext_vga[1] |= 0x88;
+
+ ext_vga_index24 = 0x05;
+
+ /* Set pixel clocks */
+ misc = 0x2d;
+ WREG8(MGA_MISC_OUT, misc);
+
+ mga_crtc_set_plls(mdev, mode->clock);
+
+ for (i = 0; i < 6; i++) {
+ WREG_ECRT(i, ext_vga[i]);
+ }
+
+ if (mdev->type == G200_ER)
+ WREG_ECRT(24, ext_vga_index24);
+
+ if (mdev->type == G200_EV) {
+ WREG_ECRT(6, 0);
+ }
+
+ WREG_ECRT(0, ext_vga[0]);
+ /* Enable mga pixel clock */
+ misc = 0x2d;
+
+ WREG8(MGA_MISC_OUT, misc);
+
+ if (adjusted_mode)
+ memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
+
+ mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+ /* reset tagfifo */
+ if (mdev->type == G200_ER) {
+ u32 mem_ctl = RREG32(MGAREG_MEMCTL);
+ u8 seq1;
+
+ /* screen off */
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
+ WREG8(MGAREG_SEQ_DATA, seq1);
+
+ WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
+ udelay(1000);
+ WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
+
+ WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
+ }
+
+
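+	/*
+	 * On G200SE parts the high-priority request level written below is
+	 * derived from the mode's bandwidth (pixel clock * bytes per pixel);
+	 * faster modes end up with a smaller level value.
+	 */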
+ if (IS_G200_SE(mdev)) {
+ if (mdev->reg_1e24 >= 0x02) {
+ u8 hi_pri_lvl;
+ u32 bpp;
+ u32 mb;
+
+ if (crtc->fb->bits_per_pixel > 16)
+ bpp = 32;
+ else if (crtc->fb->bits_per_pixel > 8)
+ bpp = 16;
+ else
+ bpp = 8;
+
+ mb = (mode->clock * bpp) / 1000;
+ if (mb > 3100)
+ hi_pri_lvl = 0;
+ else if (mb > 2600)
+ hi_pri_lvl = 1;
+ else if (mb > 1900)
+ hi_pri_lvl = 2;
+ else if (mb > 1160)
+ hi_pri_lvl = 3;
+ else if (mb > 440)
+ hi_pri_lvl = 4;
+ else
+ hi_pri_lvl = 5;
+
+ WREG8(0x1fde, 0x06);
+ WREG8(0x1fdf, hi_pri_lvl);
+ } else {
+ if (mdev->reg_1e24 >= 0x01)
+ WREG8(0x1fdf, 0x03);
+ else
+ WREG8(0x1fdf, 0x04);
+ }
+ }
+ return 0;
+}
+
+#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
+static int mga_suspend(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ int option;
+
+ if (mdev->suspended)
+ return 0;
+
+ WREG_SEQ(1, 0x20);
+ WREG_ECRT(1, 0x30);
+ /* Disable the pixel clock */
+ WREG_DAC(0x1a, 0x05);
+ /* Power down the DAC */
+ WREG_DAC(0x1e, 0x18);
+ /* Power down the pixel PLL */
+ WREG_DAC(0x1a, 0x0d);
+
+ /* Disable PLLs and clocks */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x1F8024);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_disable_device(pdev);
+
+ mdev->suspended = true;
+
+ return 0;
+}
+
+static int mga_resume(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ int option;
+
+ if (!mdev->suspended)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_device(pdev);
+
+ /* Disable sysclk */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x4);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+
+ mdev->suspended = false;
+
+ return 0;
+}
+
+#endif
+
+static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 seq1 = 0, crtcext1 = 0;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ seq1 = 0;
+ crtcext1 = 0;
+ mga_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ seq1 = 0x20;
+ crtcext1 = 0x10;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ seq1 = 0x20;
+ crtcext1 = 0x20;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ seq1 = 0x20;
+ crtcext1 = 0x30;
+ break;
+ }
+
+#if 0
+ if (mode == DRM_MODE_DPMS_OFF) {
+ mga_suspend(crtc);
+ }
+#endif
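+	/*
+	 * SEQ1 bit 5 blanks the screen; the CRTCEXT1 bits set below appear
+	 * to gate hsync/vsync for the deeper power-saving states.
+	 */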
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
+ mga_wait_vsync(mdev);
+ mga_wait_busy(mdev);
+ WREG8(MGAREG_SEQ_DATA, seq1);
+ msleep(20);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
+ crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
+ WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
+
+#if 0
+ if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
+ mga_resume(crtc);
+ drm_helper_resume_force_mode(dev);
+ }
+#endif
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * blank the screen via DPMS while the mode is programmed, to avoid showing
+ * intermediate stages, but that's not relevant to us.
+ */
+static void mga_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 tmp;
+
+ /* mga_resume(crtc);*/
+
+ WREG8(MGAREG_CRTC_INDEX, 0x11);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ WREG_CRT(0x11, tmp | 0x80);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ WREG_SEQ(0, 1);
+ msleep(50);
+ WREG_SEQ(1, 0x20);
+ msleep(20);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ /* start sync reset */
+ WREG_SEQ(0, 1);
+ WREG_SEQ(1, tmp | 0x20);
+ }
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_prepare(crtc);
+
+ WREG_CRT(17, 0);
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void mga_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ u8 tmp;
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_commit(crtc);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ msleep(50);
+ WREG_SEQ(1, 0x0);
+ msleep(20);
+ WREG_SEQ(0, 0x3);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ tmp &= ~0x20;
+ WREG_SEQ(0x1, tmp);
+ WREG_SEQ(0, 3);
+ }
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode, so we can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function.
+ */
+static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
+ int i;
+
+ for (i = start; i < end; i++) {
+ mga_crtc->lut_r[i] = red[i] >> 8;
+ mga_crtc->lut_g[i] = green[i] >> 8;
+ mga_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ mga_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void mga_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(mga_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs mga_crtc_funcs = {
+ .gamma_set = mga_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mga_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .dpms = mga_crtc_dpms,
+ .mode_fixup = mga_crtc_mode_fixup,
+ .mode_set = mga_crtc_mode_set,
+ .mode_set_base = mga_crtc_mode_set_base,
+ .prepare = mga_crtc_prepare,
+ .commit = mga_crtc_commit,
+ .load_lut = mga_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void mga_crtc_init(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_crtc *mga_crtc;
+ int i;
+
+ mga_crtc = kzalloc(sizeof(struct mga_crtc) +
+ (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (mga_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
+ mdev->mode_info.crtc = mga_crtc;
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ mga_crtc->lut_r[i] = i;
+ mga_crtc->lut_g[i] = i;
+ mga_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ mga_crtc->lut_r[regno] = red >> 8;
+ mga_crtc->lut_g[regno] = green >> 8;
+ mga_crtc->lut_b[regno] = blue >> 8;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ *red = (u16)mga_crtc->lut_r[regno] << 8;
+ *green = (u16)mga_crtc->lut_g[regno] << 8;
+ *blue = (u16)mga_crtc->lut_b[regno] << 8;
+}
+
+/*
+ * The encoder comes after the CRTC in the output pipeline, but before
+ * the connector. It's responsible for ensuring that the digital
+ * stream is appropriately converted into the output format. Setup is
+ * very simple in this case - the integrated DAC needs nothing beyond what
+ * the CRTC code already programs, so the encoder hooks below are stubs.
+ */
+
+/*
+ * These functions are analogous to those in the CRTC code, but are intended
+ * to handle any encoder-specific limitations.
+ */
+static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mga_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+ return;
+}
+
+static void mga_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void mga_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void mga_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mga_encoder);
+}
+
+static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
+ .dpms = mga_encoder_dpms,
+ .mode_fixup = mga_encoder_mode_fixup,
+ .mode_set = mga_encoder_mode_set,
+ .prepare = mga_encoder_prepare,
+ .commit = mga_encoder_commit,
+};
+
+static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
+ .destroy = mga_encoder_destroy,
+};
+
+static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct mga_encoder *mga_encoder;
+
+ mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
+ if (!mga_encoder)
+ return NULL;
+
+ encoder = &mga_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+static int mga_vga_get_modes(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ return ret;
+}
+
+static int mga_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* FIXME: Add bandwidth and g200se limitations */
+
+ if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+ mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+ mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+ mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj =
+ drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status mga_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void mga_connector_destroy(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ mgag200_i2c_destroy(mga_connector->i2c);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+ .get_modes = mga_vga_get_modes,
+ .mode_valid = mga_vga_mode_valid,
+ .best_encoder = mga_connector_best_encoder,
+};
+
+struct drm_connector_funcs mga_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = mga_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mga_connector_destroy,
+};
+
+static struct drm_connector *mga_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct mga_connector *mga_connector;
+
+ mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
+ if (!mga_connector)
+ return NULL;
+
+ connector = &mga_connector->base;
+
+ drm_connector_init(dev, connector,
+ &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+
+ mga_connector->i2c = mgag200_i2c_create(dev);
+ if (!mga_connector->i2c)
+ DRM_ERROR("failed to add ddc bus\n");
+
+ return connector;
+}
+
+
+int mgag200_modeset_init(struct mga_device *mdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ mdev->mode_info.mode_config_initialized = true;
+
+ mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+ mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+
+ mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+
+ mga_crtc_init(mdev->dev);
+
+ encoder = mga_encoder_init(mdev->dev);
+ if (!encoder) {
+ DRM_ERROR("mga_encoder_init failed\n");
+ return -1;
+ }
+
+ connector = mga_vga_init(mdev->dev);
+ if (!connector) {
+ DRM_ERROR("mga_vga_init failed\n");
+ return -1;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = mgag200_fbdev_init(mdev);
+ if (ret) {
+ DRM_ERROR("mga_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void mgag200_modeset_fini(struct mga_device *mdev)
+{
+
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
new file mode 100644
index 000000000000..fb24d8655feb
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -0,0 +1,661 @@
+/*
+ * MGA Millennium (MGA2064W) functions
+ * MGA Mystique (MGA1064SG) functions
+ *
+ * Copyright 1996 The XFree86 Project, Inc.
+ *
+ * Authors
+ * Dirk Hohndel
+ * hohndel@XFree86.Org
+ * David Dawes
+ * dawes@XFree86.Org
+ * Contributors:
+ * Guy DESBIEF, Aix-en-provence, France
+ * g.desbief@aix.pacwan.net
+ * MGA1064SG Mystique register file
+ */
+
+
+#ifndef _MGA_REG_H_
+#define _MGA_REG_H_
+
+#define MGAREG_DWGCTL 0x1c00
+#define MGAREG_MACCESS 0x1c04
+/* the following is a mystique only register */
+#define MGAREG_MCTLWTST 0x1c08
+#define MGAREG_ZORG 0x1c0c
+
+#define MGAREG_PAT0 0x1c10
+#define MGAREG_PAT1 0x1c14
+#define MGAREG_PLNWT 0x1c1c
+
+#define MGAREG_BCOL 0x1c20
+#define MGAREG_FCOL 0x1c24
+
+#define MGAREG_SRC0 0x1c30
+#define MGAREG_SRC1 0x1c34
+#define MGAREG_SRC2 0x1c38
+#define MGAREG_SRC3 0x1c3c
+
+#define MGAREG_XYSTRT 0x1c40
+#define MGAREG_XYEND 0x1c44
+
+#define MGAREG_SHIFT 0x1c50
+/* the following is a mystique only register */
+#define MGAREG_DMAPAD 0x1c54
+#define MGAREG_SGN 0x1c58
+#define MGAREG_LEN 0x1c5c
+
+#define MGAREG_AR0 0x1c60
+#define MGAREG_AR1 0x1c64
+#define MGAREG_AR2 0x1c68
+#define MGAREG_AR3 0x1c6c
+#define MGAREG_AR4 0x1c70
+#define MGAREG_AR5 0x1c74
+#define MGAREG_AR6 0x1c78
+
+#define MGAREG_CXBNDRY 0x1c80
+#define MGAREG_FXBNDRY 0x1c84
+#define MGAREG_YDSTLEN 0x1c88
+#define MGAREG_PITCH 0x1c8c
+
+#define MGAREG_YDST 0x1c90
+#define MGAREG_YDSTORG 0x1c94
+#define MGAREG_YTOP 0x1c98
+#define MGAREG_YBOT 0x1c9c
+
+#define MGAREG_CXLEFT 0x1ca0
+#define MGAREG_CXRIGHT 0x1ca4
+#define MGAREG_FXLEFT 0x1ca8
+#define MGAREG_FXRIGHT 0x1cac
+
+#define MGAREG_XDST 0x1cb0
+
+#define MGAREG_DR0 0x1cc0
+#define MGAREG_DR1 0x1cc4
+#define MGAREG_DR2 0x1cc8
+#define MGAREG_DR3 0x1ccc
+
+#define MGAREG_DR4 0x1cd0
+#define MGAREG_DR5 0x1cd4
+#define MGAREG_DR6 0x1cd8
+#define MGAREG_DR7 0x1cdc
+
+#define MGAREG_DR8 0x1ce0
+#define MGAREG_DR9 0x1ce4
+#define MGAREG_DR10 0x1ce8
+#define MGAREG_DR11 0x1cec
+
+#define MGAREG_DR12 0x1cf0
+#define MGAREG_DR13 0x1cf4
+#define MGAREG_DR14 0x1cf8
+#define MGAREG_DR15 0x1cfc
+
+#define MGAREG_SRCORG 0x2cb4
+#define MGAREG_DSTORG 0x2cb8
+
+/* add or OR this to one of the previous "power registers" to start
+   the drawing engine */
+
+#define MGAREG_EXEC 0x0100
+
+#define MGAREG_FIFOSTATUS 0x1e10
+#define MGAREG_Status 0x1e14
+#define MGAREG_CACHEFLUSH 0x1fff
+#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_IEN 0x1e1c
+
+#define MGAREG_VCOUNT 0x1e20
+
+#define MGAREG_Reset 0x1e40
+
+#define MGAREG_OPMODE 0x1e54
+
+/* Warp Registers */
+#define MGAREG_WIADDR 0x1dc0
+#define MGAREG_WIADDR2 0x1dd8
+#define MGAREG_WGETMSB 0x1dc8
+#define MGAREG_WVRTXSZ 0x1dcc
+#define MGAREG_WACCEPTSEQ 0x1dd4
+#define MGAREG_WMISC 0x1e70
+
+#define MGAREG_MEMCTL 0x2e08
+
+/* OPMODE register additives */
+
+#define MGAOPM_DMA_GENERAL (0x00 << 2)
+#define MGAOPM_DMA_BLIT (0x01 << 2)
+#define MGAOPM_DMA_VECTOR (0x10 << 2)
+
+/* MACCESS register additives */
+#define MGAMAC_PW8 0x00
+#define MGAMAC_PW16 0x01
+#define MGAMAC_PW24 0x03 /* not a typo */
+#define MGAMAC_PW32 0x02 /* not a typo */
+#define MGAMAC_BYPASS332 0x10000000
+#define MGAMAC_NODITHER 0x40000000
+#define MGAMAC_DIT555 0x80000000
+
+/* DWGCTL register additives */
+
+/* Lines */
+
+#define MGADWG_LINE_OPEN 0x00
+#define MGADWG_AUTOLINE_OPEN 0x01
+#define MGADWG_LINE_CLOSE 0x02
+#define MGADWG_AUTOLINE_CLOSE 0x03
+
+/* Trapezoids */
+#define MGADWG_TRAP 0x04
+#define MGADWG_TEXTURE_TRAP 0x06
+
+/* BitBlts */
+
+#define MGADWG_BITBLT 0x08
+#define MGADWG_FBITBLT 0x0c
+#define MGADWG_ILOAD 0x09
+#define MGADWG_ILOAD_SCALE 0x0d
+#define MGADWG_ILOAD_FILTER 0x0f
+#define MGADWG_ILOAD_HIQH 0x07
+#define MGADWG_ILOAD_HIQHV 0x0e
+#define MGADWG_IDUMP 0x0a
+
+/* atype access to WRAM */
+
+#define MGADWG_RPL ( 0x00 << 4 )
+#define MGADWG_RSTR ( 0x01 << 4 )
+#define MGADWG_ZI ( 0x03 << 4 )
+#define MGADWG_BLK ( 0x04 << 4 )
+#define MGADWG_I ( 0x07 << 4 )
+
+/* specifies whether bit blits are linear or xy */
+#define MGADWG_LINEAR ( 0x01 << 7 )
+
+/* z drawing mode. use MGADWG_NOZCMP for always */
+
+#define MGADWG_NOZCMP ( 0x00 << 8 )
+#define MGADWG_ZE ( 0x02 << 8 )
+#define MGADWG_ZNE ( 0x03 << 8 )
+#define MGADWG_ZLT ( 0x04 << 8 )
+#define MGADWG_ZLTE ( 0x05 << 8 )
+#define MGADWG_GT ( 0x06 << 8 )
+#define MGADWG_GTE ( 0x07 << 8 )
+
+/* use this to force colour expansion circuitry to do its stuff */
+
+#define MGADWG_SOLID ( 0x01 << 11 )
+
+/* ar register at zero */
+
+#define MGADWG_ARZERO ( 0x01 << 12 )
+
+#define MGADWG_SGNZERO ( 0x01 << 13 )
+
+#define MGADWG_SHIFTZERO ( 0x01 << 14 )
+
+/* See table on 4-43 for bop ALU operations */
+
+/* See table on 4-44 for translucidity masks */
+
+#define MGADWG_BMONOLEF ( 0x00 << 25 )
+#define MGADWG_BMONOWF ( 0x04 << 25 )
+#define MGADWG_BPLAN ( 0x01 << 25 )
+
+/* note that if bfcol is specified and you're doing a bitblt, it causes
+ a fbitblt to be performed, so check that you obey the fbitblt rules */
+
+#define MGADWG_BFCOL ( 0x02 << 25 )
+#define MGADWG_BUYUV ( 0x0e << 25 )
+#define MGADWG_BU32BGR ( 0x03 << 25 )
+#define MGADWG_BU32RGB ( 0x07 << 25 )
+#define MGADWG_BU24BGR ( 0x0b << 25 )
+#define MGADWG_BU24RGB ( 0x0f << 25 )
+
+#define MGADWG_PATTERN ( 0x01 << 29 )
+#define MGADWG_TRANSC ( 0x01 << 30 )
+#define MGAREG_MISC_WRITE 0x3c2
+#define MGAREG_MISC_READ 0x3cc
+#define MGAREG_MEM_MISC_WRITE 0x1fc2
+#define MGAREG_MEM_MISC_READ 0x1fcc
+
+#define MGAREG_MISC_IOADSEL (0x1 << 0)
+#define MGAREG_MISC_RAMMAPEN (0x1 << 1)
+#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
+#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
+#define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
+#define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
+
+/* MMIO VGA registers */
+#define MGAREG_SEQ_INDEX 0x1fc4
+#define MGAREG_SEQ_DATA 0x1fc5
+#define MGAREG_CRTC_INDEX 0x1fd4
+#define MGAREG_CRTC_DATA 0x1fd5
+#define MGAREG_CRTCEXT_INDEX 0x1fde
+#define MGAREG_CRTCEXT_DATA 0x1fdf
+
+
+
+/* MGA bits for registers PCI_OPTION_REG */
+#define MGA1064_OPT_SYS_CLK_PCI ( 0x00 << 0 )
+#define MGA1064_OPT_SYS_CLK_PLL ( 0x01 << 0 )
+#define MGA1064_OPT_SYS_CLK_EXT ( 0x02 << 0 )
+#define MGA1064_OPT_SYS_CLK_MSK ( 0x03 << 0 )
+
+#define MGA1064_OPT_SYS_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_OPT_G_CLK_DIV_1 ( 0x01 << 3 )
+#define MGA1064_OPT_M_CLK_DIV_1 ( 0x01 << 4 )
+
+#define MGA1064_OPT_SYS_PLL_PDN ( 0x01 << 5 )
+#define MGA1064_OPT_VGA_ION ( 0x01 << 8 )
+
+/* MGA registers in PCI config space */
+#define PCI_MGA_INDEX 0x44
+#define PCI_MGA_DATA 0x48
+#define PCI_MGA_OPTION 0x40
+#define PCI_MGA_OPTION2 0x50
+#define PCI_MGA_OPTION3 0x54
+
+#define RAMDAC_OFFSET 0x3c00
+
+/* TVP3026 direct registers */
+
+#define TVP3026_INDEX 0x00
+#define TVP3026_WADR_PAL 0x00
+#define TVP3026_COL_PAL 0x01
+#define TVP3026_PIX_RD_MSK 0x02
+#define TVP3026_RADR_PAL 0x03
+#define TVP3026_CUR_COL_ADDR 0x04
+#define TVP3026_CUR_COL_DATA 0x05
+#define TVP3026_DATA 0x0a
+#define TVP3026_CUR_RAM 0x0b
+#define TVP3026_CUR_XLOW 0x0c
+#define TVP3026_CUR_XHI 0x0d
+#define TVP3026_CUR_YLOW 0x0e
+#define TVP3026_CUR_YHI 0x0f
+
+/* TVP3026 indirect registers */
+
+#define TVP3026_SILICON_REV 0x01
+#define TVP3026_CURSOR_CTL 0x06
+#define TVP3026_LATCH_CTL 0x0f
+#define TVP3026_TRUE_COLOR_CTL 0x18
+#define TVP3026_MUX_CTL 0x19
+#define TVP3026_CLK_SEL 0x1a
+#define TVP3026_PAL_PAGE 0x1c
+#define TVP3026_GEN_CTL 0x1d
+#define TVP3026_MISC_CTL 0x1e
+#define TVP3026_GEN_IO_CTL 0x2a
+#define TVP3026_GEN_IO_DATA 0x2b
+#define TVP3026_PLL_ADDR 0x2c
+#define TVP3026_PIX_CLK_DATA 0x2d
+#define TVP3026_MEM_CLK_DATA 0x2e
+#define TVP3026_LOAD_CLK_DATA 0x2f
+#define TVP3026_KEY_RED_LOW 0x32
+#define TVP3026_KEY_RED_HI 0x33
+#define TVP3026_KEY_GREEN_LOW 0x34
+#define TVP3026_KEY_GREEN_HI 0x35
+#define TVP3026_KEY_BLUE_LOW 0x36
+#define TVP3026_KEY_BLUE_HI 0x37
+#define TVP3026_KEY_CTL 0x38
+#define TVP3026_MCLK_CTL 0x39
+#define TVP3026_SENSE_TEST 0x3a
+#define TVP3026_TEST_DATA 0x3b
+#define TVP3026_CRC_LSB 0x3c
+#define TVP3026_CRC_MSB 0x3d
+#define TVP3026_CRC_CTL 0x3e
+#define TVP3026_ID 0x3f
+#define TVP3026_RESET 0xff
+
+
+/* MGA1064 DAC Register file */
+/* MGA1064 direct registers */
+
+#define MGA1064_INDEX 0x00
+#define MGA1064_WADR_PAL 0x00
+#define MGA1064_SPAREREG 0x00
+#define MGA1064_COL_PAL 0x01
+#define MGA1064_PIX_RD_MSK 0x02
+#define MGA1064_RADR_PAL 0x03
+#define MGA1064_DATA 0x0a
+
+#define MGA1064_CUR_XLOW 0x0c
+#define MGA1064_CUR_XHI 0x0d
+#define MGA1064_CUR_YLOW 0x0e
+#define MGA1064_CUR_YHI 0x0f
+
+/* MGA1064 indirect registers */
+#define MGA1064_DVI_PIPE_CTL 0x03
+#define MGA1064_CURSOR_BASE_ADR_LOW 0x04
+#define MGA1064_CURSOR_BASE_ADR_HI 0x05
+#define MGA1064_CURSOR_CTL 0x06
+#define MGA1064_CURSOR_COL0_RED 0x08
+#define MGA1064_CURSOR_COL0_GREEN 0x09
+#define MGA1064_CURSOR_COL0_BLUE 0x0a
+
+#define MGA1064_CURSOR_COL1_RED 0x0c
+#define MGA1064_CURSOR_COL1_GREEN 0x0d
+#define MGA1064_CURSOR_COL1_BLUE 0x0e
+
+#define MGA1064_CURSOR_COL2_RED 0x010
+#define MGA1064_CURSOR_COL2_GREEN 0x011
+#define MGA1064_CURSOR_COL2_BLUE 0x012
+
+#define MGA1064_VREF_CTL 0x018
+
+#define MGA1064_MUL_CTL 0x19
+#define MGA1064_MUL_CTL_8bits 0x0
+#define MGA1064_MUL_CTL_15bits 0x01
+#define MGA1064_MUL_CTL_16bits 0x02
+#define MGA1064_MUL_CTL_24bits 0x03
+#define MGA1064_MUL_CTL_32bits 0x04
+#define MGA1064_MUL_CTL_2G8V16bits 0x05
+#define MGA1064_MUL_CTL_G16V16bits 0x06
+#define MGA1064_MUL_CTL_32_24bits 0x07
+
+#define MGA1064_PIX_CLK_CTL 0x1a
+#define MGA1064_PIX_CLK_CTL_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_PIX_CLK_CTL_CLK_POW_DOWN ( 0x01 << 3 )
+#define MGA1064_PIX_CLK_CTL_SEL_PCI ( 0x00 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_PLL ( 0x01 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_EXT ( 0x02 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_MSK ( 0x03 << 0 )
+
+#define MGA1064_GEN_CTL 0x1d
+#define MGA1064_GEN_CTL_SYNC_ON_GREEN_DIS (0x01 << 5)
+#define MGA1064_MISC_CTL 0x1e
+#define MGA1064_MISC_CTL_DAC_EN ( 0x01 << 0 )
+#define MGA1064_MISC_CTL_VGA ( 0x01 << 1 )
+#define MGA1064_MISC_CTL_DIS_CON ( 0x03 << 1 )
+#define MGA1064_MISC_CTL_MAFC ( 0x02 << 1 )
+#define MGA1064_MISC_CTL_VGA8 ( 0x01 << 3 )
+#define MGA1064_MISC_CTL_DAC_RAM_CS ( 0x01 << 4 )
+
+#define MGA1064_GEN_IO_CTL2 0x29
+#define MGA1064_GEN_IO_CTL 0x2a
+#define MGA1064_GEN_IO_DATA 0x2b
+#define MGA1064_SYS_PLL_M 0x2c
+#define MGA1064_SYS_PLL_N 0x2d
+#define MGA1064_SYS_PLL_P 0x2e
+#define MGA1064_SYS_PLL_STAT 0x2f
+
+#define MGA1064_REMHEADCTL 0x30
+#define MGA1064_REMHEADCTL_CLKDIS ( 0x01 << 0 )
+#define MGA1064_REMHEADCTL_CLKSL_OFF ( 0x00 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PLL ( 0x01 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PCI ( 0x02 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_MSK ( 0x03 << 1 )
+
+#define MGA1064_REMHEADCTL2 0x31
+
+#define MGA1064_ZOOM_CTL 0x38
+#define MGA1064_SENSE_TST 0x3a
+
+#define MGA1064_CRC_LSB 0x3c
+#define MGA1064_CRC_MSB 0x3d
+#define MGA1064_CRC_CTL 0x3e
+#define MGA1064_COL_KEY_MSK_LSB 0x40
+#define MGA1064_COL_KEY_MSK_MSB 0x41
+#define MGA1064_COL_KEY_LSB 0x42
+#define MGA1064_COL_KEY_MSB 0x43
+#define MGA1064_PIX_PLLA_M 0x44
+#define MGA1064_PIX_PLLA_N 0x45
+#define MGA1064_PIX_PLLA_P 0x46
+#define MGA1064_PIX_PLLB_M 0x48
+#define MGA1064_PIX_PLLB_N 0x49
+#define MGA1064_PIX_PLLB_P 0x4a
+#define MGA1064_PIX_PLLC_M 0x4c
+#define MGA1064_PIX_PLLC_N 0x4d
+#define MGA1064_PIX_PLLC_P 0x4e
+
+#define MGA1064_PIX_PLL_STAT 0x4f
+
+/*Added for G450 dual head*/
+
+#define MGA1064_VID_PLL_STAT 0x8c
+#define MGA1064_VID_PLL_P 0x8D
+#define MGA1064_VID_PLL_M 0x8E
+#define MGA1064_VID_PLL_N 0x8F
+
+/* Modified PLL for G200 Winbond (G200WB) */
+#define MGA1064_WB_PIX_PLLC_M 0xb7
+#define MGA1064_WB_PIX_PLLC_N 0xb6
+#define MGA1064_WB_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200EV) */
+#define MGA1064_EV_PIX_PLLC_M 0xb6
+#define MGA1064_EV_PIX_PLLC_N 0xb7
+#define MGA1064_EV_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 EH */
+#define MGA1064_EH_PIX_PLLC_M 0xb6
+#define MGA1064_EH_PIX_PLLC_N 0xb7
+#define MGA1064_EH_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200ER) */
+#define MGA1064_ER_PIX_PLLC_M 0xb7
+#define MGA1064_ER_PIX_PLLC_N 0xb6
+#define MGA1064_ER_PIX_PLLC_P 0xb8
+
+#define MGA1064_DISP_CTL 0x8a
+#define MGA1064_DISP_CTL_DAC1OUTSEL_MASK 0x01
+#define MGA1064_DISP_CTL_DAC1OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC1OUTSEL_EN 0x01
+#define MGA1064_DISP_CTL_DAC2OUTSEL_MASK (0x03 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC1 (0x01 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC2 (0x02 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_TVE (0x03 << 2)
+#define MGA1064_DISP_CTL_PANOUTSEL_MASK (0x03 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC1 (0x01 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2RGB (0x02 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2656 (0x03 << 5)
+
+#define MGA1064_SYNC_CTL 0x8b
+
+#define MGA1064_PWR_CTL 0xa0
+#define MGA1064_PWR_CTL_DAC2_EN (0x01 << 0)
+#define MGA1064_PWR_CTL_VID_PLL_EN (0x01 << 1)
+#define MGA1064_PWR_CTL_PANEL_EN (0x01 << 2)
+#define MGA1064_PWR_CTL_RFIFO_EN (0x01 << 3)
+#define MGA1064_PWR_CTL_CFIFO_EN (0x01 << 4)
+
+#define MGA1064_PAN_CTL 0xa2
+
+/* Using crtc2 */
+#define MGAREG2_C2CTL 0x10
+#define MGAREG2_C2HPARAM 0x14
+#define MGAREG2_C2HSYNC 0x18
+#define MGAREG2_C2VPARAM 0x1c
+#define MGAREG2_C2VSYNC 0x20
+#define MGAREG2_C2STARTADD0 0x28
+
+#define MGAREG2_C2OFFSET 0x40
+#define MGAREG2_C2DATACTL 0x4c
+
+#define MGAREG_C2CTL 0x3c10
+#define MGAREG_C2CTL_C2_EN 0x01
+
+#define MGAREG_C2_HIPRILVL_M (0x07 << 4)
+#define MGAREG_C2_MAXHIPRI_M (0x07 << 8)
+
+#define MGAREG_C2CTL_PIXCLKSEL_MASK (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSELH_MASK (0x01 << 14)
+#define MGAREG_C2CTL_PIXCLKSEL_PCICLK 0x00
+#define MGAREG_C2CTL_PIXCLKSEL_VDOCLK (0x01 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_PIXELPLL (0x02 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VIDEOPLL (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VDCLK (0x01 << 14)
+
+#define MGAREG_C2CTL_PIXCLKSEL_CRISTAL ((0x01 << 1) | (0x01 << 14))
+#define MGAREG_C2CTL_PIXCLKSEL_SYSTEMPLL ((0x02 << 1) | (0x01 << 14))
+
+#define MGAREG_C2CTL_PIXCLKDIS_MASK (0x01 << 3)
+#define MGAREG_C2CTL_PIXCLKDIS_DISABLE (0x01 << 3)
+
+#define MGAREG_C2CTL_CRTCDACSEL_MASK (0x01 << 20)
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC1 0x00
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC2 (0x01 << 20)
+
+#define MGAREG_C2HPARAM 0x3c14
+#define MGAREG_C2HSYNC 0x3c18
+#define MGAREG_C2VPARAM 0x3c1c
+#define MGAREG_C2VSYNC 0x3c20
+#define MGAREG_C2STARTADD0 0x3c28
+
+#define MGAREG_C2OFFSET 0x3c40
+#define MGAREG_C2DATACTL 0x3c4c
+
+/* video register */
+
+#define MGAREG_BESA1C3ORG 0x3d60
+#define MGAREG_BESA1CORG 0x3d10
+#define MGAREG_BESA1ORG 0x3d00
+#define MGAREG_BESCTL 0x3d20
+#define MGAREG_BESGLOBCTL 0x3dc0
+#define MGAREG_BESHCOORD 0x3d28
+#define MGAREG_BESHISCAL 0x3d30
+#define MGAREG_BESHSRCEND 0x3d3c
+#define MGAREG_BESHSRCLST 0x3d50
+#define MGAREG_BESHSRCST 0x3d38
+#define MGAREG_BESLUMACTL 0x3d40
+#define MGAREG_BESPITCH 0x3d24
+#define MGAREG_BESV1SRCLST 0x3d54
+#define MGAREG_BESV1WGHT 0x3d48
+#define MGAREG_BESVCOORD 0x3d2c
+#define MGAREG_BESVISCAL 0x3d34
+
+/* texture engine registers */
+
+#define MGAREG_TMR0 0x2c00
+#define MGAREG_TMR1 0x2c04
+#define MGAREG_TMR2 0x2c08
+#define MGAREG_TMR3 0x2c0c
+#define MGAREG_TMR4 0x2c10
+#define MGAREG_TMR5 0x2c14
+#define MGAREG_TMR6 0x2c18
+#define MGAREG_TMR7 0x2c1c
+#define MGAREG_TMR8 0x2c20
+#define MGAREG_TEXORG 0x2c24
+#define MGAREG_TEXWIDTH 0x2c28
+#define MGAREG_TEXHEIGHT 0x2c2c
+#define MGAREG_TEXCTL 0x2c30
+# define MGA_TW4 (0x00000000)
+# define MGA_TW8 (0x00000001)
+# define MGA_TW15 (0x00000002)
+# define MGA_TW16 (0x00000003)
+# define MGA_TW12 (0x00000004)
+# define MGA_TW32 (0x00000006)
+# define MGA_TW8A (0x00000007)
+# define MGA_TW8AL (0x00000008)
+# define MGA_TW422 (0x0000000A)
+# define MGA_TW422UYVY (0x0000000B)
+# define MGA_PITCHLIN (0x00000100)
+# define MGA_NOPERSPECTIVE (0x00200000)
+# define MGA_TAKEY (0x02000000)
+# define MGA_TAMASK (0x04000000)
+# define MGA_CLAMPUV (0x18000000)
+# define MGA_TEXMODULATE (0x20000000)
+#define MGAREG_TEXCTL2 0x2c3c
+# define MGA_G400_TC2_MAGIC (0x00008000)
+# define MGA_TC2_DECALBLEND (0x00000001)
+# define MGA_TC2_IDECAL (0x00000002)
+# define MGA_TC2_DECALDIS (0x00000004)
+# define MGA_TC2_CKSTRANSDIS (0x00000010)
+# define MGA_TC2_BORDEREN (0x00000020)
+# define MGA_TC2_SPECEN (0x00000040)
+# define MGA_TC2_DUALTEX (0x00000080)
+# define MGA_TC2_TABLEFOG (0x00000100)
+# define MGA_TC2_BUMPMAP (0x00000200)
+# define MGA_TC2_SELECT_TMU1 (0x80000000)
+#define MGAREG_TEXTRANS 0x2c34
+#define MGAREG_TEXTRANSHIGH 0x2c38
+#define MGAREG_TEXFILTER 0x2c58
+# define MGA_MIN_NRST (0x00000000)
+# define MGA_MIN_BILIN (0x00000002)
+# define MGA_MIN_ANISO (0x0000000D)
+# define MGA_MAG_NRST (0x00000000)
+# define MGA_MAG_BILIN (0x00000020)
+# define MGA_FILTERALPHA (0x00100000)
+#define MGAREG_ALPHASTART 0x2c70
+#define MGAREG_ALPHAXINC 0x2c74
+#define MGAREG_ALPHAYINC 0x2c78
+#define MGAREG_ALPHACTRL 0x2c7c
+# define MGA_SRC_ZERO (0x00000000)
+# define MGA_SRC_ONE (0x00000001)
+# define MGA_SRC_DST_COLOR (0x00000002)
+# define MGA_SRC_ONE_MINUS_DST_COLOR (0x00000003)
+# define MGA_SRC_ALPHA (0x00000004)
+# define MGA_SRC_ONE_MINUS_SRC_ALPHA (0x00000005)
+# define MGA_SRC_DST_ALPHA (0x00000006)
+# define MGA_SRC_ONE_MINUS_DST_ALPHA (0x00000007)
+# define MGA_SRC_SRC_ALPHA_SATURATE (0x00000008)
+# define MGA_SRC_BLEND_MASK (0x0000000f)
+# define MGA_DST_ZERO (0x00000000)
+# define MGA_DST_ONE (0x00000010)
+# define MGA_DST_SRC_COLOR (0x00000020)
+# define MGA_DST_ONE_MINUS_SRC_COLOR (0x00000030)
+# define MGA_DST_SRC_ALPHA (0x00000040)
+# define MGA_DST_ONE_MINUS_SRC_ALPHA (0x00000050)
+# define MGA_DST_DST_ALPHA (0x00000060)
+# define MGA_DST_ONE_MINUS_DST_ALPHA (0x00000070)
+# define MGA_DST_BLEND_MASK (0x00000070)
+# define MGA_ALPHACHANNEL (0x00000100)
+# define MGA_VIDEOALPHA (0x00000200)
+# define MGA_DIFFUSEDALPHA (0x01000000)
+# define MGA_MODULATEDALPHA (0x02000000)
+#define MGAREG_TDUALSTAGE0 (0x2CF8)
+#define MGAREG_TDUALSTAGE1 (0x2CFC)
+# define MGA_TDS_COLOR_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ARG2_SPECULAR (0x00000001)
+# define MGA_TDS_COLOR_ARG2_FCOL (0x00000002)
+# define MGA_TDS_COLOR_ARG2_PREVSTAGE (0x00000003)
+# define MGA_TDS_COLOR_ALPHA_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ALPHA_FCOL (0x00000004)
+# define MGA_TDS_COLOR_ALPHA_CURRTEX (0x00000008)
+# define MGA_TDS_COLOR_ALPHA_PREVTEX (0x0000000c)
+# define MGA_TDS_COLOR_ALPHA_PREVSTAGE (0x00000010)
+# define MGA_TDS_COLOR_ARG1_REPLICATEALPHA (0x00000020)
+# define MGA_TDS_COLOR_ARG1_INV (0x00000040)
+# define MGA_TDS_COLOR_ARG2_REPLICATEALPHA (0x00000080)
+# define MGA_TDS_COLOR_ARG2_INV (0x00000100)
+# define MGA_TDS_COLOR_ALPHA1INV (0x00000200)
+# define MGA_TDS_COLOR_ALPHA2INV (0x00000400)
+# define MGA_TDS_COLOR_ARG1MUL_ALPHA1 (0x00000800)
+# define MGA_TDS_COLOR_ARG2MUL_ALPHA2 (0x00001000)
+# define MGA_TDS_COLOR_ARG1ADD_MULOUT (0x00002000)
+# define MGA_TDS_COLOR_ARG2ADD_MULOUT (0x00004000)
+# define MGA_TDS_COLOR_MODBRIGHT_2X (0x00008000)
+# define MGA_TDS_COLOR_MODBRIGHT_4X (0x00010000)
+# define MGA_TDS_COLOR_ADD_SUB (0x00000000)
+# define MGA_TDS_COLOR_ADD_ADD (0x00020000)
+# define MGA_TDS_COLOR_ADD2X (0x00040000)
+# define MGA_TDS_COLOR_ADDBIAS (0x00080000)
+# define MGA_TDS_COLOR_BLEND (0x00100000)
+# define MGA_TDS_COLOR_SEL_ARG1 (0x00000000)
+# define MGA_TDS_COLOR_SEL_ARG2 (0x00200000)
+# define MGA_TDS_COLOR_SEL_ADD (0x00400000)
+# define MGA_TDS_COLOR_SEL_MUL (0x00600000)
+# define MGA_TDS_ALPHA_ARG1_INV (0x00800000)
+# define MGA_TDS_ALPHA_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_ALPHA_ARG2_FCOL (0x01000000)
+# define MGA_TDS_ALPHA_ARG2_PREVTEX (0x02000000)
+# define MGA_TDS_ALPHA_ARG2_PREVSTAGE (0x03000000)
+# define MGA_TDS_ALPHA_ARG2_INV (0x04000000)
+# define MGA_TDS_ALPHA_ADD (0x08000000)
+# define MGA_TDS_ALPHA_ADDBIAS (0x10000000)
+# define MGA_TDS_ALPHA_ADD2X (0x20000000)
+# define MGA_TDS_ALPHA_SEL_ARG1 (0x00000000)
+# define MGA_TDS_ALPHA_SEL_ARG2 (0x40000000)
+# define MGA_TDS_ALPHA_SEL_ADD (0x80000000)
+# define MGA_TDS_ALPHA_SEL_MUL (0xc0000000)
+
+#define MGAREG_DWGSYNC 0x2c4c
+
+#define MGAREG_AGP_PLL 0x1e4c
+#define MGA_AGP2XPLL_ENABLE 0x1
+#define MGA_AGP2XPLL_DISABLE 0x0
+
+#endif
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
new file mode 100644
index 000000000000..b223dcb7a710
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "mgag200_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct mga_device *
+mgag200_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct mga_device, ttm.bdev);
+}
+
+static int
+mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int mgag200_ttm_global_init(struct mga_device *mdev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &mdev->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &mgag200_ttm_mem_global_init;
+	global_ref->release = &mgag200_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	mdev->ttm.bo_global_ref.mem_glob =
+		mdev->ttm.mem_global_ref.object;
+	global_ref = &mdev->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&mdev->ttm.mem_global_ref);
+		return r;
+	}
+	return 0;
+}
+
+void
+mgag200_ttm_global_release(struct mga_device *mdev)
+{
+	if (mdev->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&mdev->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&mdev->ttm.mem_global_ref);
+	mdev->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct mgag200_bo *bo;
+
+ bo = container_of(tbo, struct mgag200_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &mgag200_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+ if (!mgag200_ttm_bo_is_mgag200_bo(bo))
+ return;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
+ *pl = mgabo->placement;
+}
+
+static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct mga_device *mdev = mgag200_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int mgag200_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func mgag200_tt_backend_func = {
+ .destroy = &mgag200_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &mgag200_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver mgag200_bo_driver = {
+ .ttm_tt_create = mgag200_ttm_tt_create,
+ .ttm_tt_populate = mgag200_ttm_tt_populate,
+ .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
+ .init_mem_type = mgag200_bo_init_mem_type,
+ .evict_flags = mgag200_bo_evict_flags,
+ .move = mgag200_bo_move,
+ .verify_access = mgag200_bo_verify_access,
+ .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
+ .io_mem_free = &mgag200_ttm_io_mem_free,
+};
+
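+/*
+ * Bring up TTM for the device: take the global references, initialise the
+ * buffer-object device, size the VRAM manager from the probed amount of
+ * video memory, and cover the framebuffer BAR with a write-combining MTRR.
+ */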
+int mgag200_mm_init(struct mga_device *mdev)
+{
+ int ret;
+ struct drm_device *dev = mdev->dev;
+ struct ttm_bo_device *bdev = &mdev->ttm.bdev;
+
+ ret = mgag200_ttm_global_init(mdev);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&mdev->ttm.bdev,
+ mdev->ttm.bo_global_ref.ref.object,
+ &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+		DRM_ERROR("Error initialising bo driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void mgag200_mm_fini(struct mga_device *mdev)
+{
+ struct drm_device *dev = mdev->dev;
+ ttm_bo_device_release(&mdev->ttm.bdev);
+
+ mgag200_ttm_global_release(mdev);
+
+ if (mdev->fb_mtrr >= 0) {
+ drm_mtrr_del(mdev->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ mdev->fb_mtrr = -1;
+ }
+}
+
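+/*
+ * Translate a TTM_PL_FLAG_* domain mask into a placement list: VRAM is
+ * mapped write-combined/uncached, while system memory keeps the default
+ * caching mask.
+ */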
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void mgag200_bo_unreserve(struct mgag200_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pmgabo)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mgag200_bo *mgabo;
+ size_t acc_size;
+ int ret;
+
+ mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
+ if (!mgabo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &mgabo->gem, size);
+ if (ret) {
+ kfree(mgabo);
+ return ret;
+ }
+
+ mgabo->gem.driver_private = NULL;
+ mgabo->bo.bdev = &mdev->ttm.bdev;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
+ sizeof(struct mgag200_bo));
+
+ ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
+ ttm_bo_type_device, &mgabo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ NULL, mgag200_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pmgabo = mgabo;
+ return 0;
+}
+
+static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.offset;
+}
+
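+/*
+ * Pin a buffer object into the requested placement and optionally return
+ * its GPU offset; an already-pinned object just gets its pin count bumped.
+ */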
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = mgag200_bo_gpu_offset(bo);
+		return 0;
+	}
+
+ mgag200_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+}
+
+int mgag200_bo_unpin(struct mgag200_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int mgag200_bo_push_sysram(struct mgag200_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct mga_device *mdev;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ mdev = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
+}
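The pin/unpin helpers earlier in this file follow a simple reference-counting pattern: only the first pin validates the requested placement and marks the buffer unevictable, and only the last unpin clears that flag again. A minimal standalone sketch of that pattern, assuming invented names (struct obj, obj_pin, obj_unpin are illustrative only, not driver symbols):

#include <stdio.h>

struct obj {
	int pin_count;
	int no_evict;	/* stands in for TTM_PL_FLAG_NO_EVICT */
};

/* First pin fixes the placement; later pins only bump the count. */
static int obj_pin(struct obj *o)
{
	if (o->pin_count) {
		o->pin_count++;
		return 0;
	}
	o->no_evict = 1;	/* placement is validated here in the real driver */
	o->pin_count = 1;
	return 0;
}

/* Only the final unpin makes the object evictable again. */
static int obj_unpin(struct obj *o)
{
	if (!o->pin_count)
		return -1;	/* unbalanced unpin */
	if (--o->pin_count)
		return 0;
	o->no_evict = 0;
	return 0;
}

int main(void)
{
	struct obj o = { 0, 0 };

	obj_pin(&o);
	obj_pin(&o);
	obj_unpin(&o);
	printf("pins=%d no_evict=%d\n", o.pin_count, o.no_evict); /* pins=1 no_evict=1 */
	return 0;
}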
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1a2ad7eb1734..fe5267d06ab5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,10 +16,13 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
nv50_fb.o nvc0_fb.o \
- nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
+ nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv84_fifo.o nvc0_fifo.o nve0_fifo.o \
+ nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
+ nv04_software.o nv50_software.o nvc0_software.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
- nv40_graph.o nv50_graph.o nvc0_graph.o \
- nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+ nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
+ nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \
nv84_crypt.o nv98_crypt.o \
nva3_copy.o nvc0_copy.o \
nv31_mpeg.o nv50_mpeg.o \
@@ -37,7 +40,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv50_calc.o \
nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
nv50_vram.o nvc0_vram.o \
- nv50_vm.o nvc0_vm.o
+ nv50_vm.o nvc0_vm.o nouveau_prime.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 284bd25d5d21..fc841e87b343 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -338,7 +338,8 @@ void nouveau_switcheroo_optimus_dsm(void)
void nouveau_unregister_dsm_handler(void)
{
- vga_switcheroo_unregister_handler();
+ if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
+ vga_switcheroo_unregister_handler();
}
/* retrieve the ROM in 4k blocks */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0be4a815e706..2f11e16a81a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -30,6 +30,7 @@
#include "nouveau_gpio.h"
#include <linux/io-mapping.h>
+#include <linux/firmware.h>
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
@@ -195,35 +196,24 @@ static void
bios_shadow_acpi(struct nvbios *bios)
{
struct pci_dev *pdev = bios->dev->pdev;
- int ptr, len, ret;
- u8 data[3];
+ int cnt = 65536 / ROM_BIOS_PAGE;
+ int ret;
if (!nouveau_acpi_rom_supported(pdev))
return;
- ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data));
- if (ret != sizeof(data))
- return;
-
- bios->length = min(data[2] * 512, 65536);
- bios->data = kmalloc(bios->length, GFP_KERNEL);
+ bios->data = kmalloc(cnt * ROM_BIOS_PAGE, GFP_KERNEL);
if (!bios->data)
return;
- len = bios->length;
- ptr = 0;
- while (len) {
- int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len;
-
- ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size);
- if (ret != size) {
- kfree(bios->data);
- bios->data = NULL;
+ bios->length = 0;
+ while (cnt--) {
+ ret = nouveau_acpi_get_bios_chunk(bios->data, bios->length,
+ ROM_BIOS_PAGE);
+ if (ret != ROM_BIOS_PAGE)
return;
- }
- len -= size;
- ptr += size;
+ bios->length += ROM_BIOS_PAGE;
}
}
@@ -249,8 +239,12 @@ bios_shadow(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
struct methods *mthd, *best;
+ const struct firmware *fw;
+ char fname[32];
+ int ret;
if (nouveau_vbios) {
+ /* try to match one of the built-in methods */
mthd = shadow_methods;
do {
if (strcasecmp(nouveau_vbios, mthd->desc))
@@ -263,6 +257,22 @@ bios_shadow(struct drm_device *dev)
return true;
} while ((++mthd)->shadow);
+ /* attempt to load firmware image */
+ snprintf(fname, sizeof(fname), "nouveau/%s", nouveau_vbios);
+ ret = request_firmware(&fw, fname, &dev->pdev->dev);
+ if (ret == 0) {
+ bios->length = fw->size;
+ bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ release_firmware(fw);
+
+ NV_INFO(dev, "VBIOS image: %s\n", nouveau_vbios);
+ if (score_vbios(bios, 1))
+ return true;
+
+ kfree(bios->data);
+ bios->data = NULL;
+ }
+
NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
}
@@ -273,6 +283,7 @@ bios_shadow(struct drm_device *dev)
mthd->score = score_vbios(bios, mthd->rw);
mthd->size = bios->length;
mthd->data = bios->data;
+ bios->data = NULL;
} while (mthd->score != 3 && (++mthd)->shadow);
mthd = shadow_methods;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7d15a774f9c9..7f80ed523562 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -35,6 +35,8 @@
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
+#include "nouveau_fence.h"
+#include "nouveau_ramht.h"
#include <linux/log2.h>
#include <linux/slab.h>
@@ -89,12 +91,17 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ struct sg_table *sg,
struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
size_t acc_size;
int ret;
+ int type = ttm_bo_type_device;
+
+ if (sg)
+ type = ttm_bo_type_sg;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -120,8 +127,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
sizeof(struct nouveau_bo));
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
- ttm_bo_type_device, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ type, &nvbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -473,7 +480,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(chan, &fence, true);
+ ret = nouveau_fence_new(chan, &fence);
if (ret)
return ret;
@@ -484,6 +491,76 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
}
static int
+nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 10);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, new_mem->num_pages);
+ BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
+ }
+ return ret;
+}
+
+static int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ }
+ return ret;
+}
+
+static int
+nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 8191) ? 8191 : page_count;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, line_count);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING (chan, 0x00000110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
+
+static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -501,17 +578,17 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(dst_offset));
OUT_RING (chan, lower_32_bits(dst_offset));
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+ BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, lower_32_bits(src_offset));
OUT_RING (chan, PAGE_SIZE); /* src_pitch */
OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
OUT_RING (chan, PAGE_SIZE); /* line_length */
OUT_RING (chan, line_count);
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
OUT_RING (chan, 0x00100110);
page_count -= line_count;
@@ -523,6 +600,102 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
+nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
+ u32 page_count = new_mem->num_pages;
+ int ret;
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 8191) ? 8191 : page_count;
+
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+
+ BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
+ OUT_RING (chan, upper_32_bits(src_offset));
+ OUT_RING (chan, lower_32_bits(src_offset));
+ OUT_RING (chan, upper_32_bits(dst_offset));
+ OUT_RING (chan, lower_32_bits(dst_offset));
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, PAGE_SIZE);
+ OUT_RING (chan, line_count);
+ BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING (chan, 0x00000110);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return 0;
+}
+
+static int
+nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, 0x00000000 /* COPY */);
+ OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
+ }
+ return ret;
+}
+
+static int
+nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_mem *node = old_mem->mm_node;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
+ OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
+ OUT_RING (chan, upper_32_bits(node->vma[0].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[0].offset));
+ OUT_RING (chan, upper_32_bits(node->vma[1].offset));
+ OUT_RING (chan, lower_32_bits(node->vma[1].offset));
+ OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
+ }
+ return ret;
+}
+
+static int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+ &chan->m2mf_ntfy);
+ if (ret == 0) {
+ ret = RING_SPACE(chan, 6);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, NvDmaFB);
+ OUT_RING (chan, NvDmaFB);
+ } else {
+ nouveau_ramht_remove(chan, NvNotify0);
+ }
+ }
+
+ return ret;
+}
+
+static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
@@ -546,7 +719,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+ BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, stride);
@@ -559,7 +732,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+ BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
OUT_RING (chan, 1);
}
if (old_mem->mem_type == TTM_PL_VRAM &&
@@ -568,7 +741,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+ BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, stride);
@@ -581,7 +754,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+ BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
OUT_RING (chan, 1);
}
@@ -589,10 +762,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+ BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, upper_32_bits(dst_offset));
- BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+ BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
OUT_RING (chan, lower_32_bits(src_offset));
OUT_RING (chan, lower_32_bits(dst_offset));
OUT_RING (chan, stride);
@@ -601,7 +774,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, height);
OUT_RING (chan, 0x00000101);
OUT_RING (chan, 0x00000000);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
OUT_RING (chan, 0);
length -= amount;
@@ -612,6 +785,24 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return 0;
}
+static int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+ int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+ &chan->m2mf_ntfy);
+ if (ret == 0) {
+ ret = RING_SPACE(chan, 4);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+ OUT_RING (chan, NvNotify0);
+ }
+ }
+
+ return ret;
+}
+
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
@@ -634,7 +825,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
@@ -646,7 +837,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
if (ret)
return ret;
- BEGIN_RING(chan, NvSubM2MF,
+ BEGIN_NV04(chan, NvSubCopy,
NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
OUT_RING (chan, src_offset);
OUT_RING (chan, dst_offset);
@@ -656,7 +847,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, line_count);
OUT_RING (chan, 0x00000101);
OUT_RING (chan, 0x00000000);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
OUT_RING (chan, 0);
page_count -= line_count;
@@ -716,13 +907,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
goto out;
}
- if (dev_priv->card_type < NV_50)
- ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- else
- if (dev_priv->card_type < NV_C0)
- ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
- else
- ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+ ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
no_wait_reserve,
@@ -734,6 +919,49 @@ out:
return ret;
}
+void
+nouveau_bo_move_init(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ static const struct {
+ const char *name;
+ int engine;
+ u32 oclass;
+ int (*exec)(struct nouveau_channel *,
+ struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+ int (*init)(struct nouveau_channel *, u32 handle);
+ } _methods[] = {
+ { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
+ { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
+ { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+ { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+ { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+ {},
+ { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
+ }, *mthd = _methods;
+ const char *name = "CPU";
+ int ret;
+
+ do {
+ u32 handle = (mthd->engine << 16) | mthd->oclass;
+ ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+ if (ret == 0) {
+ ret = mthd->init(chan, handle);
+ if (ret == 0) {
+ dev_priv->ttm.move = mthd->exec;
+ name = mthd->name;
+ break;
+ }
+ }
+ } while ((++mthd)->exec);
+
+ NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+}
+
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
@@ -817,9 +1045,14 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
} else
if (new_mem && new_mem->mem_type == TTM_PL_TT &&
nvbo->page_shift == vma->vm->spg_shift) {
- nouveau_vm_map_sg(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
+ if (((struct nouveau_mem *)new_mem->mm_node)->sg)
+ nouveau_vm_map_sg_table(vma, 0,
+ new_mem->num_pages << PAGE_SHIFT,
+ new_mem->mm_node);
+ else
+ nouveau_vm_map_sg(vma, 0,
+ new_mem->num_pages << PAGE_SHIFT,
+ new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -885,8 +1118,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
}
- /* Software copy if the card isn't up and running yet. */
- if (!dev_priv->channel) {
+ /* CPU copy if we have no accelerated method available */
+ if (!dev_priv->ttm.move) {
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -1030,26 +1263,10 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
- nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
return nouveau_bo_validate(nvbo, false, true, false);
}
-void
-nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
-{
- struct nouveau_fence *old_fence;
-
- if (likely(fence))
- nouveau_fence_ref(fence);
-
- spin_lock(&nvbo->bo.bdev->fence_lock);
- old_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = fence;
- spin_unlock(&nvbo->bo.bdev->fence_lock);
-
- nouveau_fence_unref(&old_fence);
-}
-
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
@@ -1058,10 +1275,19 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
struct drm_device *dev;
unsigned i;
int r;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
+ if (slave && ttm->sg) {
+ /* make userspace faulting work */
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ ttm_dma->dma_address, ttm->num_pages);
+ ttm->state = tt_unbound;
+ return 0;
+ }
+
dev_priv = nouveau_bdev(ttm->bdev);
dev = dev_priv->dev;
@@ -1106,6 +1332,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct drm_nouveau_private *dev_priv;
struct drm_device *dev;
unsigned i;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+ if (slave)
+ return;
dev_priv = nouveau_bdev(ttm->bdev);
dev = dev_priv->dev;
@@ -1134,6 +1364,52 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
ttm_pool_unpopulate(ttm);
}
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+ struct nouveau_fence *old_fence = NULL;
+
+ if (likely(fence))
+ nouveau_fence_ref(fence);
+
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ old_fence = nvbo->bo.sync_obj;
+ nvbo->bo.sync_obj = fence;
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+ nouveau_fence_unref(&old_fence);
+}
+
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+ nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
+static void *
+nouveau_bo_fence_ref(void *sync_obj)
+{
+ return nouveau_fence_ref(sync_obj);
+}
+
+static bool
+nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+{
+ return nouveau_fence_done(sync_obj);
+}
+
+static int
+nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+{
+ return nouveau_fence_wait(sync_obj, lazy, intr);
+}
+
+static int
+nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+{
+ return 0;
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
@@ -1144,11 +1420,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
.move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .sync_obj_signaled = __nouveau_fence_signalled,
- .sync_obj_wait = __nouveau_fence_wait,
- .sync_obj_flush = __nouveau_fence_flush,
- .sync_obj_unref = __nouveau_fence_unref,
- .sync_obj_ref = __nouveau_fence_ref,
+ .sync_obj_signaled = nouveau_bo_fence_signalled,
+ .sync_obj_wait = nouveau_bo_fence_wait,
+ .sync_obj_flush = nouveau_bo_fence_flush,
+ .sync_obj_unref = nouveau_bo_fence_unref,
+ .sync_obj_ref = nouveau_bo_fence_ref,
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
@@ -1181,9 +1457,12 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- nouveau_vm_map_sg(vma, 0, size, node);
+ else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+ if (node->sg)
+ nouveau_vm_map_sg_table(vma, 0, size, node);
+ else
+ nouveau_vm_map_sg(vma, 0, size, node);
+ }
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
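The new nouveau_bo_move_init() above walks a table of candidate copy engines ordered from newest to oldest, keeps the first one whose init succeeds in dev_priv->ttm.move, and otherwise leaves it unset so nouveau_bo_move() falls back to a CPU memcpy. A minimal standalone sketch of that fallback-selection pattern, with invented names (copy_method, init_ok, exec_copy and friends are illustrative, not driver symbols):

#include <stdio.h>

struct copy_method {
	const char *name;
	int (*init)(void);	/* returns 0 on success */
	int (*exec)(void);	/* the accelerated copy */
};

static int init_fail(void) { return -1; }
static int init_ok(void)   { return 0; }
static int exec_copy(void) { printf("accelerated copy\n"); return 0; }

/* Ordered from most to least preferred; terminated by a NULL exec. */
static const struct copy_method methods[] = {
	{ "NEWEST", init_fail, exec_copy },
	{ "OLDER",  init_ok,   exec_copy },
	{ NULL,     NULL,      NULL },
};

int main(void)
{
	int (*move)(void) = NULL;	/* stands in for dev_priv->ttm.move */
	const char *name = "CPU";
	const struct copy_method *m;

	for (m = methods; m->exec; m++) {
		if (m->init() == 0) {
			move = m->exec;
			name = m->name;
			break;
		}
	}
	printf("using %s for buffer copies\n", name);
	if (move)
		move();
	return 0;
}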
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 846afb0bfef4..629d8a2df5bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,7 +27,10 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
static int
nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
@@ -38,7 +41,7 @@ nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
int ret;
/* allocate buffer object */
- ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
+ ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
if (ret)
goto out;
@@ -117,8 +120,9 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
struct drm_file *file_priv,
uint32_t vram_handle, uint32_t gart_handle)
{
+ struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
unsigned long flags;
@@ -155,10 +159,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
}
NV_DEBUG(dev, "initialising channel %d\n", chan->id);
- INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
- INIT_LIST_HEAD(&chan->nvsw.flip);
- INIT_LIST_HEAD(&chan->fence.pending);
- spin_lock_init(&chan->fence.lock);
/* setup channel's memory and vm */
ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
@@ -188,20 +188,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
chan->user_put = 0x40;
chan->user_get = 0x44;
if (dev_priv->card_type >= NV_50)
- chan->user_get_hi = 0x60;
+ chan->user_get_hi = 0x60;
- /* disable the fifo caches */
- pfifo->reassign(dev, false);
-
- /* Construct initial RAMFC for new channel */
- ret = pfifo->create_context(chan);
+ /* create fifo context */
+ ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
if (ret) {
nouveau_channel_put(&chan);
return ret;
}
- pfifo->reassign(dev, true);
-
/* Insert NOPs for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret) {
@@ -211,9 +206,28 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
OUT_RING (chan, 0x00000000);
+
+ ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
+ if (ret) {
+ nouveau_channel_put(&chan);
+ return ret;
+ }
+
+ if (dev_priv->card_type < NV_C0) {
+ ret = RING_SPACE(chan, 2);
+ if (ret) {
+ nouveau_channel_put(&chan);
+ return ret;
+ }
+
+ BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
+ OUT_RING (chan, NvSw);
+ FIRE_RING (chan);
+ }
+
FIRE_RING(chan);
- ret = nouveau_fence_channel_init(chan);
+ ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
if (ret) {
nouveau_channel_put(&chan);
return ret;
@@ -268,7 +282,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
struct nouveau_channel *chan = *pchan;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
unsigned long flags;
int i;
@@ -285,24 +298,12 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
/* give it chance to idle */
nouveau_channel_idle(chan);
- /* ensure all outstanding fences are signaled. they should be if the
- * above attempts at idling were OK, but if we failed this'll tell TTM
- * we're done with the buffers.
- */
- nouveau_fence_channel_fini(chan);
-
- /* boot it off the hardware */
- pfifo->reassign(dev, false);
-
/* destroy the engine specific contexts */
- pfifo->destroy_context(chan);
- for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
+ for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
if (chan->engctx[i])
dev_priv->eng[i]->context_del(chan, i);
}
- pfifo->reassign(dev, true);
-
/* aside from its resources, the channel should now be dead,
* remove it from the channel list
*/
@@ -354,38 +355,37 @@ nouveau_channel_ref(struct nouveau_channel *chan,
*pchan = chan;
}
-void
+int
nouveau_channel_idle(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_fence *fence = NULL;
int ret;
- nouveau_fence_update(chan);
-
- if (chan->fence.sequence != chan->fence.sequence_ack) {
- ret = nouveau_fence_new(chan, &fence, true);
- if (!ret) {
- ret = nouveau_fence_wait(fence, false, false);
- nouveau_fence_unref(&fence);
- }
-
- if (ret)
- NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ ret = nouveau_fence_new(chan, &fence);
+ if (!ret) {
+ ret = nouveau_fence_wait(fence, false, false);
+ nouveau_fence_unref(&fence);
}
+
+ if (ret)
+ NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ return ret;
}
/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan;
int i;
+ if (!pfifo)
+ return;
+
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
- for (i = 0; i < engine->fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = nouveau_channel_get(file_priv, i);
if (IS_ERR(chan))
continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fa860358add1..7b11edb077d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -654,7 +654,13 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
if (nv_connector->edid && connector->display_info.bpc)
return;
- /* if not, we're out of options unless we're LVDS, default to 8bpc */
+ /* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
+ if (nv_connector->type == DCB_CONNECTOR_eDP) {
+ connector->display_info.bpc = 6;
+ return;
+ }
+
+ /* we're out of options unless we're LVDS, default to 8bpc */
if (nv_encoder->dcb->type != OUTPUT_LVDS) {
connector->display_info.bpc = 8;
return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index fa2ec491f6a7..188c92b327e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -67,8 +67,6 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
nvchan_rd32(chan, 0x8c));
}
- seq_printf(m, "last fence : %d\n", chan->fence.sequence);
- seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a85e112863d1..69688ef5cf46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -33,7 +33,9 @@
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nouveau_connector.h"
+#include "nouveau_software.h"
#include "nouveau_gpio.h"
+#include "nouveau_fence.h"
#include "nv50_display.h"
static void
@@ -300,7 +302,7 @@ nouveau_display_create(struct drm_device *dev)
disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
}
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+ dev->mode_config.funcs = &nouveau_mode_config_funcs;
dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
dev->mode_config.min_width = 0;
@@ -325,14 +327,21 @@ nouveau_display_create(struct drm_device *dev)
ret = disp->create(dev);
if (ret)
- return ret;
+ goto disp_create_err;
if (dev->mode_config.num_crtc) {
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret)
- return ret;
+ goto vblank_err;
}
+ return 0;
+
+vblank_err:
+ disp->destroy(dev);
+disp_create_err:
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
return ret;
}
@@ -425,6 +434,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
+ struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
unsigned long flags;
@@ -432,7 +442,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
/* Queue it to the pending list */
spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&s->head, &chan->nvsw.flip);
+ list_add_tail(&s->head, &swch->flip);
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Synchronize with the old framebuffer */
@@ -446,17 +456,17 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
goto fail;
if (dev_priv->card_type < NV_C0) {
- BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
OUT_RING (chan, 0x00000000);
OUT_RING (chan, 0x00000000);
} else {
- BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
- OUT_RING (chan, ++chan->fence.sequence);
- BEGIN_NVC0(chan, 8, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
+ BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+ OUT_RING (chan, 0);
+ BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
}
FIRE_RING (chan);
- ret = nouveau_fence_new(chan, pfence, true);
+ ret = nouveau_fence_new(chan, pfence);
if (ret)
goto fail;
@@ -477,7 +487,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan;
+ struct nouveau_channel *chan = NULL;
struct nouveau_fence *fence;
int ret;
@@ -500,7 +510,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
new_bo->bo.offset };
/* Choose the channel the flip will be handled in */
- chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+ fence = new_bo->bo.sync_obj;
+ if (fence)
+ chan = nouveau_channel_get_unlocked(fence->channel);
if (!chan)
chan = nouveau_channel_get_unlocked(dev_priv->channel);
mutex_lock(&chan->mutex);
@@ -540,20 +552,20 @@ int
nouveau_finish_page_flip(struct nouveau_channel *chan,
struct nouveau_page_flip_state *ps)
{
+ struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
struct drm_device *dev = chan->dev;
struct nouveau_page_flip_state *s;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (list_empty(&chan->nvsw.flip)) {
+ if (list_empty(&swch->flip)) {
NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
spin_unlock_irqrestore(&dev->event_lock, flags);
return -EINVAL;
}
- s = list_first_entry(&chan->nvsw.flip,
- struct nouveau_page_flip_state, head);
+ s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head);
if (s->event) {
struct drm_pending_vblank_event *e = s->event;
struct timeval now;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 23d4edf992b7..8db68be9544f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -48,12 +48,12 @@ void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
- NvSubM2MF = 0,
+ NvSubCtxSurf2D = 0,
NvSubSw = 1,
- NvSub2D = 2,
- NvSubCtxSurf2D = 2,
+ NvSubImageBlit = 2,
+ NvSub2D = 3,
NvSubGdiRect = 3,
- NvSubImageBlit = 4
+ NvSubCopy = 4,
};
/* Object handles. */
@@ -73,6 +73,7 @@ enum {
NvSema = 0x8000000f,
NvEvoSema0 = 0x80000010,
NvEvoSema1 = 0x80000011,
+ NvNotify1 = 0x80000012,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
@@ -127,15 +128,33 @@ extern void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
static inline void
-BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
- OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+ OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
}
static inline void
-BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
+BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
{
- OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
+ OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
+}
+
+static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+ OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
+BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+ OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
+BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
+{
+ OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2));
}
#define WRITE_PUT(val) do { \
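The new BEGIN_* helpers in nouveau_dma.h all do the same job: pack an opcode, subchannel, method and size (or immediate data) into a single 32-bit command word, differing only in the per-generation bit layout. A minimal standalone sketch reproducing the two packings shown in the hunk above; it is host-side only and simply prints the words it would emit (hdr_nv04/hdr_nvc0 are illustrative names):

#include <stdio.h>
#include <stdint.h>

/* NV04-style header: method in the low bits, size at bit 18. */
static uint32_t hdr_nv04(int subc, int mthd, int size)
{
	return 0x00000000u | (subc << 13) | (size << 18) | mthd;
}

/* NVC0-style header: size at bit 16, method stored in dwords (>> 2). */
static uint32_t hdr_nvc0(int subc, int mthd, int size)
{
	return 0x20000000u | (size << 16) | (subc << 13) | (mthd >> 2);
}

int main(void)
{
	/* e.g. subchannel 4 (NvSubCopy), method 0x030c, 8 data words */
	printf("nv04: 0x%08x\n", hdr_nv04(4, 0x030c, 8));
	printf("nvc0: 0x%08x\n", hdr_nvc0(4, 0x030c, 8));
	return 0;
}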
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index d996134b1b28..7e289d2ad8e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -510,6 +510,25 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
nouveau_dp_link_train(encoder, datarate, func);
}
+static void
+nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_chan *auxch,
+ u8 *dpcd)
+{
+ u8 buf[3];
+
+ if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (!auxch_tx(dev, auxch->drive, 9, DP_SINK_OUI, buf, 3))
+ NV_DEBUG_KMS(dev, "Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (!auxch_tx(dev, auxch->drive, 9, DP_BRANCH_OUI, buf, 3))
+ NV_DEBUG_KMS(dev, "Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+}
+
bool
nouveau_dp_detect(struct drm_encoder *encoder)
{
@@ -544,6 +563,8 @@ nouveau_dp_detect(struct drm_encoder *encoder)
NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
+ nouveau_dp_probe_oui(dev, auxch, dpcd);
+
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 4f2030bd5676..cad254c8e387 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -33,6 +33,7 @@
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_pm.h"
+#include "nouveau_fifo.h"
#include "nv50_display.h"
#include "drm_pciids.h"
@@ -175,7 +176,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan;
struct drm_crtc *crtc;
int ret, i, e;
@@ -214,17 +215,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
NV_INFO(dev, "Idling channels...\n");
- for (i = 0; i < pfifo->channels; i++) {
+ for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
chan = dev_priv->channels.ptr[i];
if (chan && chan->pushbuf_bo)
nouveau_channel_idle(chan);
}
- pfifo->reassign(dev, false);
- pfifo->disable(dev);
- pfifo->unload_context(dev);
-
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (!dev_priv->eng[e])
continue;
@@ -265,8 +262,6 @@ out_abort:
if (dev_priv->eng[e])
dev_priv->eng[e]->init(dev, e);
}
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
return ret;
}
@@ -274,6 +269,7 @@ int
nouveau_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
@@ -321,7 +317,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (dev_priv->eng[i])
dev_priv->eng[i]->init(dev, i);
}
- engine->fifo.init(dev);
nouveau_irq_postinstall(dev);
@@ -330,7 +325,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct nouveau_channel *chan;
int j;
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->pushbuf_bo)
continue;
@@ -408,7 +403,7 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_MODESET,
+ DRIVER_MODESET | DRIVER_PRIME,
.load = nouveau_load,
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
@@ -430,6 +425,12 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = &nouveau_driver_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = nouveau_gem_prime_export,
+ .gem_prime_import = nouveau_gem_prime_import,
+
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3aef353a926c..8613cb23808c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -70,7 +70,7 @@ struct nouveau_mem;
#define MAX_NUM_DCB_ENTRIES 16
-#define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_CHANNEL_NR 4096
#define NOUVEAU_MAX_TILE_NR 15
struct nouveau_mem {
@@ -86,6 +86,7 @@ struct nouveau_mem {
u32 memtype;
u64 offset;
u64 size;
+ struct sg_table *sg;
};
struct nouveau_tile_reg {
@@ -122,6 +123,9 @@ struct nouveau_bo {
struct drm_gem_object *gem;
int pin_refcnt;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define nouveau_bo_tile_layout(nvbo) \
@@ -164,8 +168,10 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
#define NVOBJ_ENGINE_BSP 6
#define NVOBJ_ENGINE_VP 7
-#define NVOBJ_ENGINE_DISPLAY 15
+#define NVOBJ_ENGINE_FIFO 14
+#define NVOBJ_ENGINE_FENCE 15
#define NVOBJ_ENGINE_NR 16
+#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/
#define NVOBJ_FLAG_DONT_MAP (1 << 0)
#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
@@ -233,17 +239,6 @@ struct nouveau_channel {
uint32_t user_get_hi;
uint32_t user_put;
- /* Fencing */
- struct {
- /* lock protects the pending list only */
- spinlock_t lock;
- struct list_head pending;
- uint32_t sequence;
- uint32_t sequence_ack;
- atomic_t last_sequence_irq;
- struct nouveau_vma vma;
- } fence;
-
/* DMA push buffer */
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
@@ -257,8 +252,6 @@ struct nouveau_channel {
/* PFIFO context */
struct nouveau_gpuobj *ramfc;
- struct nouveau_gpuobj *cache;
- void *fifo_priv;
/* Execution engine contexts */
void *engctx[NVOBJ_ENGINE_NR];
@@ -292,18 +285,6 @@ struct nouveau_channel {
int ib_put;
} dma;
- uint32_t sw_subchannel[8];
-
- struct nouveau_vma dispc_vma[4];
- struct {
- struct nouveau_gpuobj *vblsem;
- uint32_t vblsem_head;
- uint32_t vblsem_offset;
- uint32_t vblsem_rval;
- struct list_head vbl_wait;
- struct list_head flip;
- } nvsw;
-
struct {
bool active;
char name[32];
@@ -366,30 +347,6 @@ struct nouveau_fb_engine {
void (*free_tile_region)(struct drm_device *dev, int i);
};
-struct nouveau_fifo_engine {
- void *priv;
- int channels;
-
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-
- int (*init)(struct drm_device *);
- void (*takedown)(struct drm_device *);
-
- void (*disable)(struct drm_device *);
- void (*enable)(struct drm_device *);
- bool (*reassign)(struct drm_device *, bool enable);
- bool (*cache_pull)(struct drm_device *dev, bool enable);
-
- int (*channel_id)(struct drm_device *);
-
- int (*create_context)(struct nouveau_channel *);
- void (*destroy_context)(struct nouveau_channel *);
- int (*load_context)(struct nouveau_channel *);
- int (*unload_context)(struct drm_device *);
- void (*tlb_flush)(struct drm_device *dev);
-};
-
struct nouveau_display_engine {
void *priv;
int (*early_init)(struct drm_device *);
@@ -597,7 +554,6 @@ struct nouveau_engine {
struct nouveau_mc_engine mc;
struct nouveau_timer_engine timer;
struct nouveau_fb_engine fb;
- struct nouveau_fifo_engine fifo;
struct nouveau_display_engine display;
struct nouveau_gpio_engine gpio;
struct nouveau_pm_engine pm;
@@ -740,6 +696,9 @@ struct drm_nouveau_private {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
atomic_t validate_sequence;
+ int (*move)(struct nouveau_channel *,
+ struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
} ttm;
struct {
@@ -977,7 +936,7 @@ extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
extern void nouveau_channel_put(struct nouveau_channel **);
extern void nouveau_channel_ref(struct nouveau_channel *chan,
struct nouveau_channel **pchan);
-extern void nouveau_channel_idle(struct nouveau_channel *chan);
+extern int nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
#define NVOBJ_ENGINE_ADD(d, e, p) do { \
@@ -1209,56 +1168,6 @@ extern void nv50_fb_vm_trap(struct drm_device *, int display);
extern int nvc0_fb_init(struct drm_device *);
extern void nvc0_fb_takedown(struct drm_device *);
-/* nv04_fifo.c */
-extern int nv04_fifo_init(struct drm_device *);
-extern void nv04_fifo_fini(struct drm_device *);
-extern void nv04_fifo_disable(struct drm_device *);
-extern void nv04_fifo_enable(struct drm_device *);
-extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
-extern int nv04_fifo_channel_id(struct drm_device *);
-extern int nv04_fifo_create_context(struct nouveau_channel *);
-extern void nv04_fifo_destroy_context(struct nouveau_channel *);
-extern int nv04_fifo_load_context(struct nouveau_channel *);
-extern int nv04_fifo_unload_context(struct drm_device *);
-extern void nv04_fifo_isr(struct drm_device *);
-
-/* nv10_fifo.c */
-extern int nv10_fifo_init(struct drm_device *);
-extern int nv10_fifo_channel_id(struct drm_device *);
-extern int nv10_fifo_create_context(struct nouveau_channel *);
-extern int nv10_fifo_load_context(struct nouveau_channel *);
-extern int nv10_fifo_unload_context(struct drm_device *);
-
-/* nv40_fifo.c */
-extern int nv40_fifo_init(struct drm_device *);
-extern int nv40_fifo_create_context(struct nouveau_channel *);
-extern int nv40_fifo_load_context(struct nouveau_channel *);
-extern int nv40_fifo_unload_context(struct drm_device *);
-
-/* nv50_fifo.c */
-extern int nv50_fifo_init(struct drm_device *);
-extern void nv50_fifo_takedown(struct drm_device *);
-extern int nv50_fifo_channel_id(struct drm_device *);
-extern int nv50_fifo_create_context(struct nouveau_channel *);
-extern void nv50_fifo_destroy_context(struct nouveau_channel *);
-extern int nv50_fifo_load_context(struct nouveau_channel *);
-extern int nv50_fifo_unload_context(struct drm_device *);
-extern void nv50_fifo_tlb_flush(struct drm_device *dev);
-
-/* nvc0_fifo.c */
-extern int nvc0_fifo_init(struct drm_device *);
-extern void nvc0_fifo_takedown(struct drm_device *);
-extern void nvc0_fifo_disable(struct drm_device *);
-extern void nvc0_fifo_enable(struct drm_device *);
-extern bool nvc0_fifo_reassign(struct drm_device *, bool);
-extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
-extern int nvc0_fifo_channel_id(struct drm_device *);
-extern int nvc0_fifo_create_context(struct nouveau_channel *);
-extern void nvc0_fifo_destroy_context(struct nouveau_channel *);
-extern int nvc0_fifo_load_context(struct nouveau_channel *);
-extern int nvc0_fifo_unload_context(struct drm_device *);
-
/* nv04_graph.c */
extern int nv04_graph_create(struct drm_device *);
extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
@@ -1277,18 +1186,23 @@ extern int nv20_graph_create(struct drm_device *);
/* nv40_graph.c */
extern int nv40_graph_create(struct drm_device *);
-extern void nv40_grctx_init(struct nouveau_grctx *);
+extern void nv40_grctx_init(struct drm_device *, u32 *size);
+extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
/* nv50_graph.c */
extern int nv50_graph_create(struct drm_device *);
-extern int nv50_grctx_init(struct nouveau_grctx *);
extern struct nouveau_enum nv50_data_error_names[];
extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
+extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
+extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
/* nvc0_graph.c */
extern int nvc0_graph_create(struct drm_device *);
extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
+/* nve0_graph.c */
+extern int nve0_graph_create(struct drm_device *);
+
/* nv84_crypt.c */
extern int nv84_crypt_create(struct drm_device *);
@@ -1414,9 +1328,12 @@ extern int nv04_crtc_create(struct drm_device *, int index);
/* nouveau_bo.c */
extern struct ttm_bo_driver nouveau_bo_driver;
+extern void nouveau_bo_move_init(struct nouveau_channel *);
extern int nouveau_bo_new(struct drm_device *, int size, int align,
uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **);
+ uint32_t tile_flags,
+ struct sg_table *sg,
+ struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1437,50 +1354,6 @@ extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
struct nouveau_vma *);
extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
-/* nouveau_fence.c */
-struct nouveau_fence;
-extern int nouveau_fence_init(struct drm_device *);
-extern void nouveau_fence_fini(struct drm_device *);
-extern int nouveau_fence_channel_init(struct nouveau_channel *);
-extern void nouveau_fence_channel_fini(struct nouveau_channel *);
-extern void nouveau_fence_update(struct nouveau_channel *);
-extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
- bool emit);
-extern int nouveau_fence_emit(struct nouveau_fence *);
-extern void nouveau_fence_work(struct nouveau_fence *fence,
- void (*work)(void *priv, bool signalled),
- void *priv);
-struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-
-extern bool __nouveau_fence_signalled(void *obj, void *arg);
-extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
-extern int __nouveau_fence_flush(void *obj, void *arg);
-extern void __nouveau_fence_unref(void **obj);
-extern void *__nouveau_fence_ref(void *obj);
-
-static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
-{
- return __nouveau_fence_signalled(obj, NULL);
-}
-static inline int
-nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
-{
- return __nouveau_fence_wait(obj, NULL, lazy, intr);
-}
-extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-static inline int nouveau_fence_flush(struct nouveau_fence *obj)
-{
- return __nouveau_fence_flush(obj, NULL);
-}
-static inline void nouveau_fence_unref(struct nouveau_fence **obj)
-{
- __nouveau_fence_unref((void **)obj);
-}
-static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
-{
- return __nouveau_fence_ref(obj);
-}
-
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, int size, int align,
uint32_t domain, uint32_t tile_mode,
@@ -1501,6 +1374,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
+extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
/* nouveau_display.c */
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
@@ -1772,6 +1650,7 @@ nv44_graph_class(struct drm_device *dev)
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
+#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
#define NV10_SUBCHAN_REF_CNT 0x00000050
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8113e9201ed9..1074bc5dd418 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -153,7 +153,7 @@ nouveau_fbcon_sync(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- int ret, i;
+ int ret;
if (!chan || !chan->accel_done || in_interrupt() ||
info->state != FBINFO_STATE_RUNNING ||
@@ -163,38 +163,8 @@ nouveau_fbcon_sync(struct fb_info *info)
if (!mutex_trylock(&chan->mutex))
return 0;
- ret = RING_SPACE(chan, 4);
- if (ret) {
- mutex_unlock(&chan->mutex);
- nouveau_fbcon_gpu_lockup(info);
- return 0;
- }
-
- if (dev_priv->card_type >= NV_C0) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
- OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
- OUT_RING (chan, 0);
- } else {
- BEGIN_RING(chan, 0, 0x0104, 1);
- OUT_RING (chan, 0);
- BEGIN_RING(chan, 0, 0x0100, 1);
- OUT_RING (chan, 0);
- }
-
- nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
- FIRE_RING(chan);
+ ret = nouveau_channel_idle(chan);
mutex_unlock(&chan->mutex);
-
- ret = -EBUSY;
- for (i = 0; i < 100000; i++) {
- if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
- ret = 0;
- break;
- }
- DRM_UDELAY(1);
- }
-
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return 0;
@@ -497,7 +467,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
ret = drm_fb_helper_init(dev, &nfbdev->helper,
- nv_two_heads(dev) ? 2 : 1, 4);
+ dev->mode_config.num_crtc, 4);
if (ret) {
kfree(nfbdev);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c1dc20f6cb85..3c180493dab8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,220 +32,100 @@
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
#include "nouveau_dma.h"
-#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
-
-struct nouveau_fence {
- struct nouveau_channel *channel;
- struct kref refcount;
- struct list_head entry;
-
- uint32_t sequence;
- bool signalled;
-
- void (*work)(void *priv, bool signalled);
- void *priv;
-};
-
-struct nouveau_semaphore {
- struct kref ref;
- struct drm_device *dev;
- struct drm_mm_node *mem;
-};
-
-static inline struct nouveau_fence *
-nouveau_fence(void *sync_obj)
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
- return (struct nouveau_fence *)sync_obj;
+ struct nouveau_fence *fence, *fnext;
+ spin_lock(&fctx->lock);
+ list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ if (fence->work)
+ fence->work(fence->priv, false);
+ fence->channel = NULL;
+ list_del(&fence->head);
+ nouveau_fence_unref(&fence);
+ }
+ spin_unlock(&fctx->lock);
}
-static void
-nouveau_fence_del(struct kref *ref)
+void
+nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence =
- container_of(ref, struct nouveau_fence, refcount);
-
- nouveau_channel_ref(NULL, &fence->channel);
- kfree(fence);
+ INIT_LIST_HEAD(&fctx->pending);
+ spin_lock_init(&fctx->lock);
}
void
nouveau_fence_update(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
- struct nouveau_fence *tmp, *fence;
- uint32_t sequence;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ struct nouveau_fence *fence, *fnext;
- spin_lock(&chan->fence.lock);
-
- /* Fetch the last sequence if the channel is still up and running */
- if (likely(!list_empty(&chan->fence.pending))) {
- if (USE_REFCNT(dev))
- sequence = nvchan_rd32(chan, 0x48);
- else
- sequence = atomic_read(&chan->fence.last_sequence_irq);
-
- if (chan->fence.sequence_ack == sequence)
- goto out;
- chan->fence.sequence_ack = sequence;
- }
-
- list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
- if (fence->sequence > chan->fence.sequence_ack)
+ spin_lock(&fctx->lock);
+ list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ if (priv->read(chan) < fence->sequence)
break;
- fence->signalled = true;
- list_del(&fence->entry);
if (fence->work)
fence->work(fence->priv, true);
-
- kref_put(&fence->refcount, nouveau_fence_del);
- }
-
-out:
- spin_unlock(&chan->fence.lock);
-}
-
-int
-nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
- bool emit)
-{
- struct nouveau_fence *fence;
- int ret = 0;
-
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (!fence)
- return -ENOMEM;
- kref_init(&fence->refcount);
- nouveau_channel_ref(chan, &fence->channel);
-
- if (emit)
- ret = nouveau_fence_emit(fence);
-
- if (ret)
+ fence->channel = NULL;
+ list_del(&fence->head);
nouveau_fence_unref(&fence);
- *pfence = fence;
- return ret;
-}
-
-struct nouveau_channel *
-nouveau_fence_channel(struct nouveau_fence *fence)
-{
- return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+ }
+ spin_unlock(&fctx->lock);
}
int
-nouveau_fence_emit(struct nouveau_fence *fence)
+nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_channel *chan = fence->channel;
struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
int ret;
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
- if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
- nouveau_fence_update(chan);
+ fence->channel = chan;
+ fence->timeout = jiffies + (3 * DRM_HZ);
+ fence->sequence = ++fctx->sequence;
- BUG_ON(chan->fence.sequence ==
- chan->fence.sequence_ack - 1);
+ ret = priv->emit(fence);
+ if (!ret) {
+ kref_get(&fence->kref);
+ spin_lock(&fctx->lock);
+ list_add_tail(&fence->head, &fctx->pending);
+ spin_unlock(&fctx->lock);
}
- fence->sequence = ++chan->fence.sequence;
-
- kref_get(&fence->refcount);
- spin_lock(&chan->fence.lock);
- list_add_tail(&fence->entry, &chan->fence.pending);
- spin_unlock(&chan->fence.lock);
-
- if (USE_REFCNT(dev)) {
- if (dev_priv->card_type < NV_C0)
- BEGIN_RING(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
- else
- BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
- } else {
- BEGIN_RING(chan, NvSubSw, 0x0150, 1);
- }
- OUT_RING (chan, fence->sequence);
- FIRE_RING(chan);
-
- return 0;
-}
-
-void
-nouveau_fence_work(struct nouveau_fence *fence,
- void (*work)(void *priv, bool signalled),
- void *priv)
-{
- BUG_ON(fence->work);
-
- spin_lock(&fence->channel->fence.lock);
-
- if (fence->signalled) {
- work(priv, true);
- } else {
- fence->work = work;
- fence->priv = priv;
- }
-
- spin_unlock(&fence->channel->fence.lock);
-}
-
-void
-__nouveau_fence_unref(void **sync_obj)
-{
- struct nouveau_fence *fence = nouveau_fence(*sync_obj);
-
- if (fence)
- kref_put(&fence->refcount, nouveau_fence_del);
- *sync_obj = NULL;
-}
-
-void *
-__nouveau_fence_ref(void *sync_obj)
-{
- struct nouveau_fence *fence = nouveau_fence(sync_obj);
-
- kref_get(&fence->refcount);
- return sync_obj;
+ return ret;
}
bool
-__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_fence_done(struct nouveau_fence *fence)
{
- struct nouveau_fence *fence = nouveau_fence(sync_obj);
- struct nouveau_channel *chan = fence->channel;
-
- if (fence->signalled)
- return true;
-
- nouveau_fence_update(chan);
- return fence->signalled;
+ if (fence->channel)
+ nouveau_fence_update(fence->channel);
+ return !fence->channel;
}
int
-__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
- unsigned long timeout = jiffies + (3 * DRM_HZ);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
int ret = 0;
- while (1) {
- if (__nouveau_fence_signalled(sync_obj, sync_arg))
- break;
-
- if (time_after_eq(jiffies, timeout)) {
+ while (!nouveau_fence_done(fence)) {
+ if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
break;
}
- __set_current_state(intr ? TASK_INTERRUPTIBLE
- : TASK_UNINTERRUPTIBLE);
+ __set_current_state(intr ? TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
if (lazy) {
t = ktime_set(0, sleep_time);
schedule_hrtimeout(&t, HRTIMER_MODE_REL);
@@ -261,354 +141,72 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
__set_current_state(TASK_RUNNING);
-
return ret;
}
-static struct nouveau_semaphore *
-semaphore_alloc(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_semaphore *sema;
- int size = (dev_priv->chipset < 0x84) ? 4 : 16;
- int ret, i;
-
- if (!USE_SEMA(dev))
- return NULL;
-
- sema = kmalloc(sizeof(*sema), GFP_KERNEL);
- if (!sema)
- goto fail;
-
- ret = drm_mm_pre_get(&dev_priv->fence.heap);
- if (ret)
- goto fail;
-
- spin_lock(&dev_priv->fence.lock);
- sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
- if (sema->mem)
- sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
- spin_unlock(&dev_priv->fence.lock);
-
- if (!sema->mem)
- goto fail;
-
- kref_init(&sema->ref);
- sema->dev = dev;
- for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
- nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
-
- return sema;
-fail:
- kfree(sema);
- return NULL;
-}
-
-static void
-semaphore_free(struct kref *ref)
-{
- struct nouveau_semaphore *sema =
- container_of(ref, struct nouveau_semaphore, ref);
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
- spin_lock(&dev_priv->fence.lock);
- drm_mm_put_block(sema->mem);
- spin_unlock(&dev_priv->fence.lock);
-
- kfree(sema);
-}
-
-static void
-semaphore_work(void *priv, bool signalled)
-{
- struct nouveau_semaphore *sema = priv;
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
- if (unlikely(!signalled))
- nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
-
- kref_put(&sema->ref, semaphore_free);
-}
-
-static int
-semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *fence = NULL;
- u64 offset = chan->fence.vma.offset + sema->mem->start;
- int ret;
-
- if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 4);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, offset);
- OUT_RING (chan, 1);
- } else
- if (dev_priv->chipset < 0xc0) {
- ret = RING_SPACE(chan, 7);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram_handle);
- BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 1); /* ACQUIRE_EQ */
- } else {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
- }
-
- /* Delay semaphore destruction until its work is done */
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret)
- return ret;
-
- kref_get(&sema->ref);
- nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref(&fence);
- return 0;
-}
-
-static int
-semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *fence = NULL;
- u64 offset = chan->fence.vma.offset + sema->mem->start;
- int ret;
-
- if (dev_priv->chipset < 0x84) {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvSema);
- OUT_RING (chan, offset);
- BEGIN_RING(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
- OUT_RING (chan, 1);
- } else
- if (dev_priv->chipset < 0xc0) {
- ret = RING_SPACE(chan, 7);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram_handle);
- BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 2); /* RELEASE */
- } else {
- ret = RING_SPACE(chan, 5);
- if (ret)
- return ret;
-
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(offset));
- OUT_RING (chan, lower_32_bits(offset));
- OUT_RING (chan, 1);
- OUT_RING (chan, 0x1002); /* RELEASE */
- }
-
- /* Delay semaphore destruction until its work is done */
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret)
- return ret;
-
- kref_get(&sema->ref);
- nouveau_fence_work(fence, semaphore_work, sema);
- nouveau_fence_unref(&fence);
- return 0;
-}
-
int
-nouveau_fence_sync(struct nouveau_fence *fence,
- struct nouveau_channel *wchan)
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_channel *chan = nouveau_fence_channel(fence);
- struct drm_device *dev = wchan->dev;
- struct nouveau_semaphore *sema;
+ struct drm_device *dev = chan->dev;
+ struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+ struct nouveau_channel *prev;
int ret = 0;
- if (likely(!chan || chan == wchan ||
- nouveau_fence_signalled(fence)))
- goto out;
-
- sema = semaphore_alloc(dev);
- if (!sema) {
- /* Early card or broken userspace, fall back to
- * software sync. */
- ret = nouveau_fence_wait(fence, true, false);
- goto out;
- }
-
- /* try to take chan's mutex, if we can't take it right away
- * we have to fallback to software sync to prevent locking
- * order issues
- */
- if (!mutex_trylock(&chan->mutex)) {
- ret = nouveau_fence_wait(fence, true, false);
- goto out_unref;
+ prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+ if (prev) {
+ if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
+ ret = priv->sync(fence, prev, chan);
+ if (unlikely(ret))
+ ret = nouveau_fence_wait(fence, true, false);
+ }
+ nouveau_channel_put_unlocked(&prev);
}
- /* Make wchan wait until it gets signalled */
- ret = semaphore_acquire(wchan, sema);
- if (ret)
- goto out_unlock;
-
- /* Signal the semaphore from chan */
- ret = semaphore_release(chan, sema);
-
-out_unlock:
- mutex_unlock(&chan->mutex);
-out_unref:
- kref_put(&sema->ref, semaphore_free);
-out:
- if (chan)
- nouveau_channel_put_unlocked(&chan);
return ret;
}
-int
-__nouveau_fence_flush(void *sync_obj, void *sync_arg)
+static void
+nouveau_fence_del(struct kref *kref)
{
- return 0;
+ struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
+ kfree(fence);
}
-int
-nouveau_fence_channel_init(struct nouveau_channel *chan)
+void
+nouveau_fence_unref(struct nouveau_fence **pfence)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- if (dev_priv->card_type < NV_C0) {
- /* Create an NV_SW object for various sync purposes */
- ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
- if (ret)
- return ret;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
- OUT_RING (chan, NvSw);
- FIRE_RING (chan);
- }
-
- /* Setup area of memory shared between all channels for x-chan sync */
- if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
- struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
-
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
- mem->start << PAGE_SHIFT,
- mem->size, NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &obj);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(chan, NvSema, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret)
- return ret;
- } else
- if (USE_SEMA(dev)) {
- /* map fence bo into channel's vm */
- ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
- &chan->fence.vma);
- if (ret)
- return ret;
- }
-
- atomic_set(&chan->fence.last_sequence_irq, 0);
- return 0;
+ if (*pfence)
+ kref_put(&(*pfence)->kref, nouveau_fence_del);
+ *pfence = NULL;
}
-void
-nouveau_fence_channel_fini(struct nouveau_channel *chan)
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *fence)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_fence *tmp, *fence;
-
- spin_lock(&chan->fence.lock);
- list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
- fence->signalled = true;
- list_del(&fence->entry);
-
- if (unlikely(fence->work))
- fence->work(fence->priv, false);
-
- kref_put(&fence->refcount, nouveau_fence_del);
- }
- spin_unlock(&chan->fence.lock);
-
- nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
+ kref_get(&fence->kref);
+ return fence;
}
int
-nouveau_fence_init(struct drm_device *dev)
+nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
- int ret;
-
- /* Create a shared VRAM heap for cross-channel sync. */
- if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
- 0, 0, &dev_priv->fence.bo);
- if (ret)
- return ret;
+ struct nouveau_fence *fence;
+ int ret = 0;
- ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
- if (ret)
- goto fail;
+ if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
+ return -ENODEV;
- ret = nouveau_bo_map(dev_priv->fence.bo);
- if (ret)
- goto fail;
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return -ENOMEM;
+ kref_init(&fence->kref);
- ret = drm_mm_init(&dev_priv->fence.heap, 0,
- dev_priv->fence.bo->bo.mem.size);
+ if (chan) {
+ ret = nouveau_fence_emit(fence, chan);
if (ret)
- goto fail;
-
- spin_lock_init(&dev_priv->fence.lock);
+ nouveau_fence_unref(&fence);
}
- return 0;
-fail:
- nouveau_bo_unmap(dev_priv->fence.bo);
- nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+ *pfence = fence;
return ret;
}
-
-void
-nouveau_fence_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (USE_SEMA(dev)) {
- drm_mm_takedown(&dev_priv->fence.heap);
- nouveau_bo_unmap(dev_priv->fence.bo);
- nouveau_bo_unpin(dev_priv->fence.bo);
- nouveau_bo_ref(NULL, &dev_priv->fence.bo);
- }
-}
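
Editor's note — for orientation, a minimal, hypothetical sketch of what a per-chipset fence backend looks like after the rework above: the common nouveau_fence_emit()/nouveau_fence_update() paths now only call the emit/read/sync hooks of a nouveau_fence_priv engine (declared in the new nouveau_fence.h below). The nvxx_* names are illustrative and do not appear in this patch.

	/* Illustrative sketch only -- nvxx_* names are hypothetical, not from
	 * this patch.  A backend wires these hooks into a nouveau_fence_priv
	 * registered as the NVOBJ_ENGINE_FENCE engine. */

	static int
	nvxx_fence_emit(struct nouveau_fence *fence)
	{
		/* push fence->sequence through the channel, e.g. via a
		 * REF_CNT method or a semaphore release */
		return 0;
	}

	static u32
	nvxx_fence_read(struct nouveau_channel *chan)
	{
		/* return the highest sequence number chan has completed;
		 * nouveau_fence_update() compares this against each
		 * pending fence->sequence */
		return 0;
	}

	static int
	nvxx_fence_sync(struct nouveau_fence *fence,
			struct nouveau_channel *prev, struct nouveau_channel *chan)
	{
		/* make chan wait for a fence emitted on prev (semaphores on
		 * chipsets that have them); returning an error makes the
		 * core fall back to a CPU wait in nouveau_fence_sync() */
		return -ENODEV;
	}
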
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
new file mode 100644
index 000000000000..82ba733393ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -0,0 +1,52 @@
+#ifndef __NOUVEAU_FENCE_H__
+#define __NOUVEAU_FENCE_H__
+
+struct nouveau_fence {
+ struct list_head head;
+ struct kref kref;
+
+ struct nouveau_channel *channel;
+ unsigned long timeout;
+ u32 sequence;
+
+ void (*work)(void *priv, bool signalled);
+ void *priv;
+};
+
+int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *);
+void nouveau_fence_unref(struct nouveau_fence **);
+
+int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+bool nouveau_fence_done(struct nouveau_fence *);
+int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
+int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
+void nouveau_fence_idle(struct nouveau_channel *);
+void nouveau_fence_update(struct nouveau_channel *);
+
+struct nouveau_fence_chan {
+ struct list_head pending;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+struct nouveau_fence_priv {
+ struct nouveau_exec_engine engine;
+ int (*emit)(struct nouveau_fence *);
+ int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+ struct nouveau_channel *);
+ u32 (*read)(struct nouveau_channel *);
+};
+
+void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_del(struct nouveau_fence_chan *);
+
+int nv04_fence_create(struct drm_device *dev);
+int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
+
+int nv10_fence_create(struct drm_device *dev);
+int nv84_fence_create(struct drm_device *dev);
+int nvc0_fence_create(struct drm_device *dev);
+
+#endif
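
Editor's note — a minimal usage sketch of the fence lifecycle exposed by this header (allocate/emit, wait, drop the reference). Error paths are trimmed and the surrounding channel setup is assumed; this mirrors the pattern in-tree callers follow after this change rather than quoting any one of them.

	struct nouveau_fence *fence = NULL;
	int ret;

	/* allocate and immediately emit on chan; fails with -ENODEV if the
	 * channel has no fence engine context */
	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	/* lazy (hrtimer-backed), interruptible wait; returns -EBUSY once
	 * fence->timeout (3 seconds after emit) has expired */
	ret = nouveau_fence_wait(fence, true, true);
	nouveau_fence_unref(&fence);
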
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
new file mode 100644
index 000000000000..ce99cab2f257
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fifo.h
@@ -0,0 +1,32 @@
+#ifndef __NOUVEAU_FIFO_H__
+#define __NOUVEAU_FIFO_H__
+
+struct nouveau_fifo_priv {
+ struct nouveau_exec_engine base;
+ u32 channels;
+};
+
+struct nouveau_fifo_chan {
+};
+
+bool nv04_fifo_cache_pull(struct drm_device *, bool);
+void nv04_fifo_context_del(struct nouveau_channel *, int);
+int nv04_fifo_fini(struct drm_device *, int, bool);
+int nv04_fifo_init(struct drm_device *, int);
+void nv04_fifo_isr(struct drm_device *);
+void nv04_fifo_destroy(struct drm_device *, int);
+
+void nv50_fifo_playlist_update(struct drm_device *);
+void nv50_fifo_destroy(struct drm_device *, int);
+void nv50_fifo_tlb_flush(struct drm_device *, int);
+
+int nv04_fifo_create(struct drm_device *);
+int nv10_fifo_create(struct drm_device *);
+int nv17_fifo_create(struct drm_device *);
+int nv40_fifo_create(struct drm_device *);
+int nv50_fifo_create(struct drm_device *);
+int nv84_fifo_create(struct drm_device *);
+int nvc0_fifo_create(struct drm_device *);
+int nve0_fifo_create(struct drm_device *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ed52a6f41613..30f542316944 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,12 +23,14 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/dma-buf.h>
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_fence.h"
#define nouveau_gem_pushbuf_sync(chan) 0
@@ -53,6 +55,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
nouveau_bo_unpin(nvbo);
}
+ if (gem->import_attach)
+ drm_prime_gem_destroy(gem, nvbo->bo.sg);
+
ttm_bo_unref(&bo);
drm_gem_object_release(gem);
@@ -139,7 +144,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
flags |= TTM_PL_FLAG_SYSTEM;
ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
- tile_flags, pnvbo);
+ tile_flags, NULL, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
@@ -704,7 +709,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
if (chan->dma.ib_max) {
- ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
+ ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
NV_INFO(dev, "nv50cal_space: %d\n", ret);
goto out;
@@ -774,7 +779,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
}
- ret = nouveau_fence_new(chan, &fence, true);
+ ret = nouveau_fence_new(chan, &fence);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
index a580cc62337a..82c19e82ff02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -387,7 +387,7 @@ nouveau_gpio_reset(struct drm_device *dev)
if (dev_priv->card_type >= NV_D0) {
nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
if (unk1--)
- nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+ nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
} else
if (dev_priv->card_type >= NV_50) {
static const u32 regs[] = { 0xe100, 0xe28c };
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
index 86c2e374e938..b0795ececbda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -18,7 +18,6 @@ struct nouveau_grctx {
uint32_t ctxvals_base;
};
-#ifdef CP_CTX
static inline void
cp_out(struct nouveau_grctx *ctx, uint32_t inst)
{
@@ -88,10 +87,8 @@ _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
(state ? 0 : CP_BRA_IF_CLEAR));
}
#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
-#ifdef CP_BRA_MOD
#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
-#endif
static inline void
_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
@@ -128,6 +125,5 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
nv_wo32(ctx->data, reg * 4, val);
}
-#endif
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index ba896e54b799..b87ad3bd7739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -1018,11 +1018,6 @@ nv_load_state_ext(struct drm_device *dev, int head,
}
NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
-
- /* Enable vblank interrupts. */
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
- (dev->vblank_enabled[head] ? 1 : 0));
- NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index b08065f981df..5b498ea32e14 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -39,6 +39,8 @@
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
+#include "nouveau_fifo.h"
+#include "nouveau_fence.h"
/*
* NV10-NV40 tiling helpers
@@ -50,7 +52,6 @@ nv10_mem_update_tile_region(struct drm_device *dev,
uint32_t size, uint32_t pitch, uint32_t flags)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
int i = tile - dev_priv->tile.reg, j;
unsigned long save;
@@ -64,8 +65,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
pfb->init_tile_region(dev, i, addr, size, pitch, flags);
spin_lock_irqsave(&dev_priv->context_switch_lock, save);
- pfifo->reassign(dev, false);
- pfifo->cache_pull(dev, false);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ nv04_fifo_cache_pull(dev, false);
nouveau_wait_for_idle(dev);
@@ -75,8 +76,8 @@ nv10_mem_update_tile_region(struct drm_device *dev,
dev_priv->eng[j]->set_tile_region(dev, i);
}
- pfifo->cache_pull(dev, true);
- pfifo->reassign(dev, true);
+ nv04_fifo_cache_pull(dev, true);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
@@ -89,7 +90,7 @@ nv10_mem_get_tile_region(struct drm_device *dev, int i)
spin_lock(&dev_priv->tile.lock);
if (!tile->used &&
- (!tile->fence || nouveau_fence_signalled(tile->fence)))
+ (!tile->fence || nouveau_fence_done(tile->fence)))
tile->used = true;
else
tile = NULL;
@@ -416,7 +417,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (dev_priv->card_type < NV_50) {
ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, &dev_priv->vga_ram);
+ 0, 0, NULL, &dev_priv->vga_ram);
if (ret == 0)
ret = nouveau_bo_pin(dev_priv->vga_ram,
TTM_PL_FLAG_VRAM);
@@ -843,6 +844,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
break;
case NV_C0:
+ case NV_D0:
ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
break;
default:
@@ -977,6 +979,8 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
break;
case NV_MEM_TYPE_DDR3:
tDLLK = 12000;
+ tCKSRE = 2000;
+ tXS = 1000;
mr1_dlloff = 0x00000001;
break;
case NV_MEM_TYPE_GDDR3:
@@ -1023,6 +1027,7 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
exec->refresh_self(exec, false);
exec->refresh_auto(exec, true);
exec->wait(exec, tXS);
+ exec->wait(exec, tXS);
/* update MRs */
if (mr[2] != info->mr[2]) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index cc419fae794b..b190cc01c820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -34,9 +34,10 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
+#include "nouveau_software.h"
#include "nouveau_vm.h"
-#include "nv50_display.h"
struct nouveau_gpuobj_method {
struct list_head head;
@@ -120,12 +121,13 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
u32 class, u32 mthd, u32 data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_channel *chan = NULL;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
+ if (chid >= 0 && chid < pfifo->channels)
chan = dev_priv->channels.ptr[chid];
if (chan)
ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -133,37 +135,6 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
return ret;
}
-/* NVidia uses context objects to drive drawing operations.
-
- Context objects can be selected into 8 subchannels in the FIFO,
- and then used via DMA command buffers.
-
- A context object is referenced by a user defined handle (CARD32). The HW
- looks up graphics objects in a hash table in the instance RAM.
-
- An entry in the hash table consists of 2 CARD32. The first CARD32 contains
- the handle, the second one a bitfield, that contains the address of the
- object in instance RAM.
-
- The format of the second CARD32 seems to be:
-
- NV4 to NV30:
-
- 15: 0 instance_addr >> 4
- 17:16 engine (here uses 1 = graphics)
- 28:24 channel id (here uses 0)
- 31 valid (use 1)
-
- NV40:
-
- 15: 0 instance_addr >> 4 (maybe 19-0)
- 21:20 engine (here uses 1 = graphics)
- I'm unsure about the other bits, but using 0 seems to work.
-
- The key into the hash table depends on the object handle and channel id and
- is given as:
-*/
-
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
uint32_t size, int align, uint32_t flags,
@@ -267,7 +238,7 @@ nouveau_gpuobj_takedown(struct drm_device *dev)
kfree(oc);
}
- BUG_ON(!list_empty(&dev_priv->gpuobj_list));
+ WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}
@@ -361,34 +332,6 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
return 0;
}
-/*
- DMA objects are used to reference a piece of memory in the
- framebuffer, PCI or AGP address space. Each object is 16 bytes big
- and looks as follows:
-
- entry[0]
- 11:0 class (seems like I can always use 0 here)
- 12 page table present?
- 13 page entry linear?
- 15:14 access: 0 rw, 1 ro, 2 wo
- 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
- 31:20 dma adjust (bits 0-11 of the address)
- entry[1]
- dma limit (size of transfer)
- entry[X]
- 1 0 readonly, 1 readwrite
- 31:12 dma frame address of the page (bits 12-31 of the address)
- entry[N]
- page table terminator, same value as the first pte, as does nvidia
- rivatv uses 0xffffffff
-
- Non linear page tables need a list of frame addresses afterwards,
- the rivatv project has some info on this.
-
- The method below creates a DMA object in instance RAM and returns a handle
- to it that can be used to set up context objects.
-*/
-
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
u64 base, u64 size, int target, int access,
@@ -540,82 +483,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
return 0;
}
-/* Context objects in the instance RAM have the following structure.
- * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
-
- NV4 - NV30:
-
- entry[0]
- 11:0 class
- 12 chroma key enable
- 13 user clip enable
- 14 swizzle enable
- 17:15 patch config:
- scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
- 18 synchronize enable
- 19 endian: 1 big, 0 little
- 21:20 dither mode
- 23 single step enable
- 24 patch status: 0 invalid, 1 valid
- 25 context_surface 0: 1 valid
- 26 context surface 1: 1 valid
- 27 context pattern: 1 valid
- 28 context rop: 1 valid
- 29,30 context beta, beta4
- entry[1]
- 7:0 mono format
- 15:8 color format
- 31:16 notify instance address
- entry[2]
- 15:0 dma 0 instance address
- 31:16 dma 1 instance address
- entry[3]
- dma method traps
-
- NV40:
- No idea what the exact format is. Here's what can be deducted:
-
- entry[0]:
- 11:0 class (maybe uses more bits here?)
- 17 user clip enable
- 21:19 patch config
- 25 patch status valid ?
- entry[1]:
- 15:0 DMA notifier (maybe 20:0)
- entry[2]:
- 15:0 DMA 0 instance (maybe 20:0)
- 24 big endian
- entry[3]:
- 15:0 DMA 1 instance (maybe 20:0)
- entry[4]:
- entry[5]:
- set to 0?
-*/
-static int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_gpuobj *gpuobj;
- int ret;
-
- gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
- if (!gpuobj)
- return -ENOMEM;
- gpuobj->dev = chan->dev;
- gpuobj->engine = NVOBJ_ENGINE_SW;
- gpuobj->class = class;
- kref_init(&gpuobj->refcount);
- gpuobj->cinst = 0x40;
-
- spin_lock(&dev_priv->ramin_lock);
- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
- spin_unlock(&dev_priv->ramin_lock);
-
- ret = nouveau_ramht_insert(chan, handle, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return ret;
-}
-
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
@@ -632,9 +499,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
if (oc->id != class)
continue;
- if (oc->engine == NVOBJ_ENGINE_SW)
- return nouveau_gpuobj_sw_new(chan, handle, class);
-
if (!chan->engctx[oc->engine]) {
ret = eng->context_new(chan, oc->engine);
if (ret)
@@ -644,7 +508,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
return eng->object_new(chan, oc->engine, handle, class);
}
- NV_ERROR(dev, "illegal object class: 0x%x\n", class);
return -EINVAL;
}
@@ -693,11 +556,10 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *pgd = NULL;
struct nouveau_vm_pgd *vpgd;
- int ret, i;
+ int ret;
ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
if (ret)
@@ -722,19 +584,6 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
nv_wo32(chan->ramin, 0x0208, 0xffffffff);
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
- /* map display semaphore buffers into channel's vm */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo;
- if (dev_priv->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(dev, i);
- else
- bo = nv50_display(dev)->crtc[i].sem.bo;
-
- ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -747,7 +596,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret, i;
+ int ret;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
if (dev_priv->card_type >= NV_C0)
@@ -795,25 +644,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_gpuobj *sem = NULL;
- struct nv50_display_crtc *dispc =
- &nv50_display(dev)->crtc[i];
- u64 offset = dispc->sem.bo->bo.offset;
-
- ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &sem);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
- nouveau_gpuobj_ref(NULL, &sem);
- if (ret)
- return ret;
- }
}
/* VRAM ctxdma */
@@ -873,25 +703,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (dev_priv->card_type >= NV_D0) {
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
- nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
- }
- } else
- if (dev_priv->card_type >= NV_50) {
- struct nv50_display *disp = nv50_display(dev);
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
- }
- }
+ NV_DEBUG(chan->dev, "ch%d\n", chan->id);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
@@ -956,6 +768,17 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
if (init->handle == ~0)
return -EINVAL;
+ /* compatibility with userspace that assumes 506e for all chipsets */
+ if (init->class == 0x506e) {
+ init->class = nouveau_software_class(dev);
+ if (init->class == 0x906e)
+ return 0;
+ } else
+ if (init->class == 0x906e) {
+ NV_ERROR(dev, "906e not supported yet\n");
+ return -EINVAL;
+ }
+
chan = nouveau_channel_get(file_priv, init->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 69a528d106e6..ea6acf1c4a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -83,7 +83,7 @@ nouveau_perf_entry(struct drm_device *dev, int idx,
return NULL;
}
-static u8 *
+u8 *
nouveau_perf_rammap(struct drm_device *dev, u32 freq,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 3f82dfea61dd..07cac72c72b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -61,8 +61,10 @@ int nouveau_voltage_gpio_set(struct drm_device *, int voltage);
/* nouveau_perf.c */
void nouveau_perf_init(struct drm_device *);
void nouveau_perf_fini(struct drm_device *);
-u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
+ u8 *hdr, u8 *cnt, u8 *len);
u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
/* nouveau_mem.c */
void nouveau_mem_timing_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
new file mode 100644
index 000000000000..a25cf2cb931f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct nouveau_bo *nvbo = attachment->dmabuf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+ int npages = nvbo->bo.num_pages;
+ struct sg_table *sg;
+ int nents;
+
+ mutex_lock(&dev->struct_mutex);
+ sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+
+ if (nvbo->gem->export_dma_buf == dma_buf) {
+ nvbo->gem->export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(nvbo->gem);
+ }
+}
+
+static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ if (nvbo->vmapping_count) {
+ nvbo->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+ &nvbo->dma_buf_vmap);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ nvbo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ nvbo->vmapping_count--;
+ if (nvbo->vmapping_count == 0) {
+ ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static const struct dma_buf_ops nouveau_dmabuf_ops = {
+ .map_dma_buf = nouveau_gem_map_dma_buf,
+ .unmap_dma_buf = nouveau_gem_unmap_dma_buf,
+ .release = nouveau_gem_dmabuf_release,
+ .kmap = nouveau_gem_kmap,
+ .kmap_atomic = nouveau_gem_kmap_atomic,
+ .kunmap = nouveau_gem_kunmap,
+ .kunmap_atomic = nouveau_gem_kunmap_atomic,
+ .mmap = nouveau_gem_prime_mmap,
+ .vmap = nouveau_gem_prime_vmap,
+ .vunmap = nouveau_gem_prime_vunmap,
+};
+
+static int
+nouveau_prime_new(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ u32 flags = 0;
+ int ret;
+
+ flags = TTM_PL_FLAG_TT;
+
+ ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
+ sg, pnvbo);
+ if (ret)
+ return ret;
+ nvbo = *pnvbo;
+
+ /* we restrict allowed domains on nv50+ to only the types
+ * that were requested at creation time. not possible on
+ * earlier chips without busting the ABI.
+ */
+ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
+ nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+ if (!nvbo->gem) {
+ nouveau_bo_ref(NULL, pnvbo);
+ return -ENOMEM;
+ }
+
+ nvbo->gem->driver_private = nvbo;
+ return 0;
+}
+
+struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+ int ret = 0;
+
+ /* pin buffer into GTT */
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ if (dma_buf->ops == &nouveau_dmabuf_ops) {
+ nvbo = dma_buf->priv;
+ if (nvbo->gem) {
+ if (nvbo->gem->dev == dev) {
+ drm_gem_object_reference(nvbo->gem);
+ return nvbo->gem;
+ }
+ }
+ }
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(PTR_ERR(attach));
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
+ if (ret)
+ goto fail_unmap;
+
+ nvbo->gem->import_attach = attach;
+
+ return nvbo->gem;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
+
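Editor's note — the export/import helpers above only become reachable once they are wired into the driver's struct drm_driver. That hookup is not part of this hunk; the following is a hedged sketch of what it typically looks like with the PRIME core helpers of this kernel generation (field and flag names come from the DRM PRIME interface, not from this patch).

	static struct drm_driver example_driver = {
		.driver_features    = DRIVER_GEM | DRIVER_PRIME /* | ... */,
		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
		.gem_prime_export   = nouveau_gem_prime_export,
		.gem_prime_import   = nouveau_gem_prime_import,
		/* ... the rest of the nouveau hooks ... */
	};
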
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 47f245edf538..38483a042bc2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -290,7 +290,10 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
struct nouveau_mem *node = mem->mm_node;
/* noop: bound in move_notify() */
- node->pages = nvbe->ttm.dma_address;
+ if (ttm->sg) {
+ node->sg = ttm->sg;
+ } else
+ node->pages = nvbe->ttm.dma_address;
return 0;
}
@@ -338,10 +341,10 @@ nouveau_sgdma_init(struct drm_device *dev)
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
+ if (dev_priv->card_type >= NV_40)
aper_size = 512 * 1024 * 1024;
else
- aper_size = 64 * 1024 * 1024;
+ aper_size = 128 * 1024 * 1024;
/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
* christmas. The cards before it have them, the cards after
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
new file mode 100644
index 000000000000..e60bc6ce9003
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_software.h
@@ -0,0 +1,69 @@
+#ifndef __NOUVEAU_SOFTWARE_H__
+#define __NOUVEAU_SOFTWARE_H__
+
+struct nouveau_software_priv {
+ struct nouveau_exec_engine base;
+ struct list_head vblank;
+};
+
+struct nouveau_software_chan {
+ struct list_head flip;
+ struct {
+ struct list_head list;
+ struct nouveau_bo *bo;
+ u32 offset;
+ u32 value;
+ u32 head;
+ } vblank;
+};
+
+static inline void
+nouveau_software_vblank(struct drm_device *dev, int crtc)
+{
+ struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+ struct nouveau_software_chan *pch, *tmp;
+
+ list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
+ if (pch->vblank.head != crtc)
+ continue;
+
+ nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset,
+ pch->vblank.value);
+ list_del(&pch->vblank.list);
+ drm_vblank_put(dev, crtc);
+ }
+}
+
+static inline void
+nouveau_software_context_new(struct nouveau_software_chan *pch)
+{
+ INIT_LIST_HEAD(&pch->flip);
+}
+
+static inline void
+nouveau_software_create(struct nouveau_software_priv *psw)
+{
+ INIT_LIST_HEAD(&psw->vblank);
+}
+
+static inline u16
+nouveau_software_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->card_type <= NV_04)
+ return 0x006e;
+ if (dev_priv->card_type <= NV_40)
+ return 0x016e;
+ if (dev_priv->card_type <= NV_50)
+ return 0x506e;
+ if (dev_priv->card_type <= NV_E0)
+ return 0x906e;
+ return 0x0000;
+}
+
+int nv04_software_create(struct drm_device *);
+int nv50_software_create(struct drm_device *);
+int nvc0_software_create(struct drm_device *);
+u64 nvc0_software_crtc(struct nouveau_channel *, int crtc);
+
+#endif
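
Editor's note — a brief, illustrative sketch of how the vblank helper above is meant to be driven: a channel first queues a (bo, offset, value, head) tuple on psw->vblank, and the display interrupt path then releases every waiter for that head. The handler name below is hypothetical; only nouveau_software_vblank() itself comes from this header.

	/* Hypothetical IRQ-path helper -- not part of this patch. */
	static void example_handle_vblank(struct drm_device *dev, int crtc)
	{
		drm_handle_vblank(dev, crtc);		/* core vblank accounting */
		nouveau_software_vblank(dev, crtc);	/* wake channels waiting on this head */
	}
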
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index c2a8511e855a..19706f0532ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -39,6 +39,9 @@
#include "nouveau_gpio.h"
#include "nouveau_pm.h"
#include "nv50_display.h"
+#include "nouveau_fifo.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
@@ -66,18 +69,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv04_fb_init;
engine->fb.takedown = nv04_fb_takedown;
- engine->fifo.channels = 16;
- engine->fifo.init = nv04_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv04_fifo_channel_id;
- engine->fifo.create_context = nv04_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv04_fifo_load_context;
- engine->fifo.unload_context = nv04_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -111,18 +102,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv10_fb_init_tile_region;
engine->fb.set_tile_region = nv10_fb_set_tile_region;
engine->fb.free_tile_region = nv10_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -162,18 +141,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv20_fb_init_tile_region;
engine->fb.set_tile_region = nv20_fb_set_tile_region;
engine->fb.free_tile_region = nv20_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -209,18 +176,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv30_fb_init_tile_region;
engine->fb.set_tile_region = nv10_fb_set_tile_region;
engine->fb.free_tile_region = nv30_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv10_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv10_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv10_fifo_load_context;
- engine->fifo.unload_context = nv10_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -259,18 +214,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fb.init_tile_region = nv30_fb_init_tile_region;
engine->fb.set_tile_region = nv40_fb_set_tile_region;
engine->fb.free_tile_region = nv30_fb_free_tile_region;
- engine->fifo.channels = 32;
- engine->fifo.init = nv40_fifo_init;
- engine->fifo.takedown = nv04_fifo_fini;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.cache_pull = nv04_fifo_cache_pull;
- engine->fifo.channel_id = nv10_fifo_channel_id;
- engine->fifo.create_context = nv40_fifo_create_context;
- engine->fifo.destroy_context = nv04_fifo_destroy_context;
- engine->fifo.load_context = nv40_fifo_load_context;
- engine->fifo.unload_context = nv40_fifo_unload_context;
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
@@ -317,18 +260,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv50_fb_init;
engine->fb.takedown = nv50_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nv50_fifo_init;
- engine->fifo.takedown = nv50_fifo_takedown;
- engine->fifo.disable = nv04_fifo_disable;
- engine->fifo.enable = nv04_fifo_enable;
- engine->fifo.reassign = nv04_fifo_reassign;
- engine->fifo.channel_id = nv50_fifo_channel_id;
- engine->fifo.create_context = nv50_fifo_create_context;
- engine->fifo.destroy_context = nv50_fifo_destroy_context;
- engine->fifo.load_context = nv50_fifo_load_context;
- engine->fifo.unload_context = nv50_fifo_unload_context;
- engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
@@ -392,17 +323,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nvc0_fifo_init;
- engine->fifo.takedown = nvc0_fifo_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.channel_id = nvc0_fifo_channel_id;
- engine->fifo.create_context = nvc0_fifo_create_context;
- engine->fifo.destroy_context = nvc0_fifo_destroy_context;
- engine->fifo.load_context = nvc0_fifo_load_context;
- engine->fifo.unload_context = nvc0_fifo_unload_context;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
@@ -445,17 +365,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 128;
- engine->fifo.init = nvc0_fifo_init;
- engine->fifo.takedown = nvc0_fifo_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.channel_id = nvc0_fifo_channel_id;
- engine->fifo.create_context = nvc0_fifo_create_context;
- engine->fifo.destroy_context = nvc0_fifo_destroy_context;
- engine->fifo.load_context = nvc0_fifo_load_context;
- engine->fifo.unload_context = nvc0_fifo_unload_context;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
@@ -496,13 +405,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nvc0_fb_init;
engine->fb.takedown = nvc0_fb_takedown;
- engine->fifo.channels = 0;
- engine->fifo.init = nouveau_stub_init;
- engine->fifo.takedown = nouveau_stub_takedown;
- engine->fifo.disable = nvc0_fifo_disable;
- engine->fifo.enable = nvc0_fifo_enable;
- engine->fifo.reassign = nvc0_fifo_reassign;
- engine->fifo.unload_context = nouveau_stub_init;
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
@@ -607,61 +509,24 @@ nouveau_card_channel_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
- int ret, oclass;
+ int ret;
ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
dev_priv->channel = chan;
if (ret)
return ret;
-
mutex_unlock(&dev_priv->channel->mutex);
- if (dev_priv->card_type <= NV_50) {
- if (dev_priv->card_type < NV_50)
- oclass = 0x0039;
- else
- oclass = 0x5039;
-
- ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
- if (ret)
- goto error;
-
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
- &chan->m2mf_ntfy);
- if (ret)
- goto error;
-
- ret = RING_SPACE(chan, 6);
- if (ret)
- goto error;
-
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
- OUT_RING (chan, NvM2MF);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
- OUT_RING (chan, NvNotify0);
- OUT_RING (chan, chan->vram_handle);
- OUT_RING (chan, chan->gart_handle);
- } else
- if (dev_priv->card_type <= NV_D0) {
- ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
- if (ret)
- goto error;
-
- ret = RING_SPACE(chan, 2);
- if (ret)
- goto error;
-
- BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
- OUT_RING (chan, 0x00009039);
- }
-
- FIRE_RING (chan);
-error:
- if (ret)
- nouveau_card_channel_fini(dev);
- return ret;
+ nouveau_bo_move_init(chan);
+ return 0;
}
+static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
+ .set_gpu_state = nouveau_switcheroo_set_state,
+ .reprobe = nouveau_switcheroo_reprobe,
+ .can_switch = nouveau_switcheroo_can_switch,
+};
+
int
nouveau_card_init(struct drm_device *dev)
{
@@ -670,9 +535,7 @@ nouveau_card_init(struct drm_device *dev)
int ret, e = 0;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
- vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
- nouveau_switcheroo_reprobe,
- nouveau_switcheroo_can_switch);
+ vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
@@ -745,6 +608,81 @@ nouveau_card_init(struct drm_device *dev)
if (!dev_priv->noaccel) {
switch (dev_priv->card_type) {
case NV_04:
+ nv04_fifo_create(dev);
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ if (dev_priv->chipset < 0x17)
+ nv10_fifo_create(dev);
+ else
+ nv17_fifo_create(dev);
+ break;
+ case NV_40:
+ nv40_fifo_create(dev);
+ break;
+ case NV_50:
+ if (dev_priv->chipset == 0x50)
+ nv50_fifo_create(dev);
+ else
+ nv84_fifo_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ nvc0_fifo_create(dev);
+ break;
+ case NV_E0:
+ nve0_fifo_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ nv04_fence_create(dev);
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ case NV_50:
+ if (dev_priv->chipset < 0x84)
+ nv10_fence_create(dev);
+ else
+ nv84_fence_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
+ nvc0_fence_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ nv04_software_create(dev);
+ break;
+ case NV_50:
+ nv50_software_create(dev);
+ break;
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
+ nvc0_software_create(dev);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->card_type) {
+ case NV_04:
nv04_graph_create(dev);
break;
case NV_10:
@@ -764,6 +702,9 @@ nouveau_card_init(struct drm_device *dev)
case NV_D0:
nvc0_graph_create(dev);
break;
+ case NV_E0:
+ nve0_graph_create(dev);
+ break;
default:
break;
}
@@ -796,8 +737,9 @@ nouveau_card_init(struct drm_device *dev)
}
break;
case NV_C0:
- nvc0_copy_create(dev, 0);
nvc0_copy_create(dev, 1);
+ case NV_D0:
+ nvc0_copy_create(dev, 0);
break;
default:
break;
@@ -830,16 +772,11 @@ nouveau_card_init(struct drm_device *dev)
goto out_engine;
}
}
-
- /* PFIFO */
- ret = engine->fifo.init(dev);
- if (ret)
- goto out_engine;
}
ret = nouveau_irq_init(dev);
if (ret)
- goto out_fifo;
+ goto out_engine;
ret = nouveau_display_create(dev);
if (ret)
@@ -848,14 +785,10 @@ nouveau_card_init(struct drm_device *dev)
nouveau_backlight_init(dev);
nouveau_pm_init(dev);
- ret = nouveau_fence_init(dev);
- if (ret)
- goto out_pm;
-
if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
ret = nouveau_card_channel_init(dev);
if (ret)
- goto out_fence;
+ goto out_pm;
}
if (dev->mode_config.num_crtc) {
@@ -870,17 +803,12 @@ nouveau_card_init(struct drm_device *dev)
out_chan:
nouveau_card_channel_fini(dev);
-out_fence:
- nouveau_fence_fini(dev);
out_pm:
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
out_irq:
nouveau_irq_fini(dev);
-out_fifo:
- if (!dev_priv->noaccel)
- engine->fifo.takedown(dev);
out_engine:
if (!dev_priv->noaccel) {
for (e = e - 1; e >= 0; e--) {
@@ -912,6 +840,7 @@ out_bios:
out_display_early:
engine->display.late_takedown(dev);
out:
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
return ret;
}
@@ -928,13 +857,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
nouveau_card_channel_fini(dev);
- nouveau_fence_fini(dev);
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
if (!dev_priv->noaccel) {
- engine->fifo.takedown(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (dev_priv->eng[e]) {
dev_priv->eng[e]->fini(dev, e, false);
@@ -969,6 +896,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_irq_fini(dev);
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1176,7 +1104,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
goto err_priv;
}
- NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+ NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
/* map the mmio regs, limiting the amount to preserve vmap space */
@@ -1219,6 +1147,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
if (nouveau_noaccel == -1) {
switch (dev_priv->chipset) {
case 0xd9: /* known broken */
+ case 0xe4: /* needs binary driver firmware */
+ case 0xe7: /* needs binary driver firmware */
NV_INFO(dev, "acceleration disabled by default, pass "
"noaccel=0 to force enable\n");
dev_priv->noaccel = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 2bf6c0350b4b..11edd5e91a0a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -77,6 +77,63 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
}
void
+nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ unsigned m, sglen;
+ u32 end, len;
+ int i;
+ struct scatterlist *sg;
+
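+ /* walk each DMA segment of the scatterlist, writing one small-page
+ * PTE at a time and stepping on to the next page table once the
+ * current one fills up
+ */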
+ for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+ sglen = sg_dma_len(sg) >> PAGE_SHIFT;
+
+ end = pte + sglen;
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+
+ if (num == 0)
+ goto finish;
+ }
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ if (m < sglen) {
+ for (; m < sglen; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+ if (num == 0)
+ goto finish;
+ }
+ }
+
+ }
+finish:
+ vm->flush(vm);
+}
+
+void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 4fb6e728734d..a8246e7e4a89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -72,6 +72,9 @@ struct nouveau_vm {
u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+
+ void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
@@ -90,7 +93,8 @@ void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
struct nouveau_mem *);
-
+void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 728d07584d39..4c31c63e5528 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1047,7 +1047,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 7047d37e8dab..44488e3a257d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -98,6 +98,13 @@ nv04_display_early_init(struct drm_device *dev)
NVSetOwner(dev, 0);
}
+ /* ensure vblank interrupts are off, they can't be enabled until
+ * drm_vblank has been initialised
+ */
+ NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+ if (nv_two_heads(dev))
+ NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
+
return 0;
}
@@ -246,6 +253,10 @@ nv04_display_init(struct drm_device *dev)
void
nv04_display_fini(struct drm_device *dev)
{
+ /* disable vblank interrupts */
+ NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+ if (nv_two_heads(dev))
+ NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 7a1189371096..7cd7857347ef 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -41,7 +41,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3);
OUT_RING(chan, (region->sy << 16) | region->sx);
OUT_RING(chan, (region->dy << 16) | region->dx);
OUT_RING(chan, (region->height << 16) | region->width);
@@ -62,15 +62,15 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
- BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING(chan, rect->color);
- BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2);
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
FIRE_RING(chan);
@@ -110,7 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
bg = image->bg_color;
}
- BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
OUT_RING(chan, ((image->dy + image->height) << 16) |
((image->dx + image->width) & 0xffff));
@@ -127,7 +127,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
if (ret)
return ret;
- BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len);
OUT_RINGp(chan, data, iter_len);
data += iter_len;
dsize -= iter_len;
@@ -209,25 +209,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
return 0;
}
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, sub, 0x0184, 2);
+ BEGIN_NV04(chan, sub, 0x0184, 2);
OUT_RING(chan, NvDmaFB);
OUT_RING(chan, NvDmaFB);
- BEGIN_RING(chan, sub, 0x0300, 4);
+ BEGIN_NV04(chan, sub, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, sub, 0x0300, 1);
+ BEGIN_NV04(chan, sub, 0x0300, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvImagePatt);
- BEGIN_RING(chan, sub, 0x0300, 8);
+ BEGIN_NV04(chan, sub, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
OUT_RING(chan, 2);
@@ -241,31 +241,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
OUT_RING(chan, ~0);
- BEGIN_RING(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, sub, 0x0000, 1);
OUT_RING(chan, NvClipRect);
- BEGIN_RING(chan, sub, 0x0300, 2);
+ BEGIN_NV04(chan, sub, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
- BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
OUT_RING(chan, NvImageBlit);
- BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
OUT_RING(chan, 3);
- BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
OUT_RING(chan, NvGdiRect);
- BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
OUT_RING(chan, NvImagePatt);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
OUT_RING(chan, rect_fmt);
- BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, 3);
FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
new file mode 100644
index 000000000000..abe89db6de24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv04_fence_chan {
+ struct nouveau_fence_chan base;
+ atomic_t sequence;
+};
+
+struct nv04_fence_priv {
+ struct nouveau_fence_priv base;
+};
+
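+/* completion on these chips is signalled via a software method: the 0x0150
+ * write emitted below is trapped by the SW engine (registered against
+ * nv04_fence_mthd() in nv04_software.c), which stores the sequence that
+ * nv04_fence_read() later returns
+ */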
+static int
+nv04_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
+ OUT_RING (chan, fence->sequence);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static int
+nv04_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ return -ENODEV;
+}
+
+int
+nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ atomic_set(&fctx->sequence, data);
+ return 0;
+}
+
+static u32
+nv04_fence_read(struct nouveau_channel *chan)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ return atomic_read(&fctx->sequence);
+}
+
+static void
+nv04_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv04_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (fctx) {
+ nouveau_fence_context_new(&fctx->base);
+ atomic_set(&fctx->sequence, 0);
+ chan->engctx[engine] = fctx;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static int
+nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv04_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv04_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fence_priv *priv = nv_engine(dev, engine);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv04_fence_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fence_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv04_fence_destroy;
+ priv->base.engine.init = nv04_fence_init;
+ priv->base.engine.fini = nv04_fence_fini;
+ priv->base.engine.context_new = nv04_fence_context_new;
+ priv->base.engine.context_del = nv04_fence_context_del;
+ priv->base.emit = nv04_fence_emit;
+ priv->base.sync = nv04_fence_sync;
+ priv->base.read = nv04_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index db465a3ee1b2..a6295cd00ec7 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,49 +27,38 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
+#include "nouveau_fifo.h"
#include "nouveau_util.h"
-
-#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
-#define NV04_RAMFC__SIZE 32
-#define NV04_RAMFC_DMA_PUT 0x00
-#define NV04_RAMFC_DMA_GET 0x04
-#define NV04_RAMFC_DMA_INSTANCE 0x08
-#define NV04_RAMFC_DMA_STATE 0x0C
-#define NV04_RAMFC_DMA_FETCH 0x10
-#define NV04_RAMFC_ENGINE 0x14
-#define NV04_RAMFC_PULL1_ENGINE 0x18
-
-#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
-#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
-
-void
-nv04_fifo_disable(struct drm_device *dev)
-{
- uint32_t tmp;
-
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
-}
-
-void
-nv04_fifo_enable(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-}
-
-bool
-nv04_fifo_reassign(struct drm_device *dev, bool enable)
-{
- uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
-
- nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
- return (reassign == 1);
-}
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
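+/* each entry describes how one field of per-channel PFIFO state moves
+ * between the hardware register "regp" (a field "bits" wide at bit "regs")
+ * and the channel's RAMFC object (byte offset "ctxp", bit "ctxs") when a
+ * context is saved or restored
+ */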
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv04_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+struct nv04_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv04_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
@@ -86,13 +75,13 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
* invalidate the most recently calculated instance.
*/
if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
- NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
- NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
+ NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
}
@@ -100,242 +89,182 @@ nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
return pull & 1;
}
-int
-nv04_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
-}
-
-#ifdef __BIG_ENDIAN
-#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
-#else
-#define DMA_FETCH_ENDIANNESS 0
-#endif
-
-int
-nv04_fifo_create_context(struct nouveau_channel *chan)
+static int
+nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv04_fifo_chan *fctx;
unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
- NV04_RAMFC__SIZE,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE,
- &chan->ramfc);
- if (ret)
- return ret;
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ /* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- /* Setup initial state */
- RAMFC_WR(DMA_PUT, chan->pushbuf_base);
- RAMFC_WR(DMA_GET, chan->pushbuf_base);
- RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
- RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
- DMA_FETCH_ENDIANNESS));
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 32, ~0, 32,
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x14, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- return 0;
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
void
-nv04_fifo_destroy_context(struct nouveau_channel *chan)
+nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv04_fifo_chan *fctx = chan->engctx[engine];
+ struct ramfc_desc *c = priv->ramfc_desc;
unsigned long flags;
+ int chid;
+ /* prevent fifo context switches */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
-
- /* Unload the context if it's the currently active one */
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
+ if (chid == chan->id) {
+ nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+ do {
+ u32 mask = ((1ULL << c->bits) - 1) << c->regs;
+ nv_mask(dev, c->regp, mask, 0x00000000);
+ } while ((++c)->bits);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
- /* Keep it from being rescheduled */
+ /* restore normal operation, after disabling dma mode */
nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
-
- pfifo->reassign(dev, true);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- /* Free the channel resources */
+ /* clean up */
+ nouveau_gpuobj_ref(NULL, &fctx->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
-static void
-nv04_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV04_RAMFC(chid), tmp;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- tmp = nv_ri32(dev, fc + 8);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv04_fifo_load_context(struct nouveau_channel *chan)
-{
- uint32_t tmp;
-
- nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
- NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv04_fifo_do_load_context(chan->dev, chan->id);
- nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
-
- /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
}
int
-nv04_fifo_unload_context(struct drm_device *dev)
+nv04_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_channel *chan = NULL;
- uint32_t tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
-
- chan = dev_priv->channels.ptr[chid];
- if (!chan) {
- NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
- return -EINVAL;
- }
-
- RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
- tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
- RAMFC_WR(DMA_INSTANCE, tmp);
- RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
- RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-
- nv04_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
- return 0;
-}
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
-static void
-nv04_fifo_init_reset(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x002044, 0x0101ffff);
- nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003000, 0x00000000);
- nv_wr32(dev, 0x003050, 0x00000000);
- nv_wr32(dev, 0x003200, 0x00000000);
- nv_wr32(dev, 0x003250, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000000);
-
- nv_wr32(dev, 0x003250, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
- nv_wr32(dev, 0x003210, 0x00000000);
-}
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-static void
-nv04_fifo_init_ramxx(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->pinst >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
-}
-static void
-nv04_fifo_init_intr(struct drm_device *dev)
-{
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
}
int
-nv04_fifo_init(struct drm_device *dev)
+nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv04_fifo_init_reset(dev);
- nv04_fifo_init_ramxx(dev);
-
- nv04_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nouveau_channel *chan;
+ int chid;
- nv04_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
+ /* prevent context switches and halt fifo operation */
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
+ /* store current fifo context in ramfc */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
+ chan = dev_priv->channels.ptr[chid];
+ if (suspend && chid != priv->base.channels && chan) {
+ struct nv04_fifo_chan *fctx = chan->engctx[engine];
+ struct nouveau_gpuobj *ctx = fctx->ramfc;
+ struct ramfc_desc *c = priv->ramfc_desc;
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
+ u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
+ nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
}
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
return 0;
}
-void
-nv04_fifo_fini(struct drm_device *dev)
-{
- nv_wr32(dev, 0x2140, 0x00000000);
- nouveau_irq_unregister(dev, 8);
-}
-
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct nouveau_gpuobj *obj;
@@ -346,7 +275,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
u32 engine;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+ if (likely(chid >= 0 && chid < pfifo->channels))
chan = dev_priv->channels.ptr[chid];
if (unlikely(!chan))
goto out;
@@ -357,7 +286,6 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
break;
- chan->sw_subchannel[subc] = obj->class;
engine = 0x0000000f << (subc * 4);
nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
@@ -368,7 +296,7 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
- if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+ if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
mthd, data))
handled = true;
break;
@@ -391,8 +319,8 @@ static const char *nv_dma_state_err(u32 state)
void
nv04_fifo_isr(struct drm_device *dev)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
uint32_t status, reassign;
int cnt = 0;
@@ -402,7 +330,7 @@ nv04_fifo_isr(struct drm_device *dev)
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
- chid = engine->fifo.channel_id(dev);
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
@@ -541,3 +469,38 @@ nv04_fifo_isr(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
+
+void
+nv04_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 8);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv04_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv04_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv04_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
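+ /* 15 is both the CHID mask and the id of the reserved "null" slot
+ * that PFIFO is pointed at while no real channel is active
+ */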
+ priv->base.channels = 15;
+ priv->ramfc_desc = nv04_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index dbdea8ed3925..72f1a62903b3 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -356,12 +356,12 @@ static struct nouveau_channel *
nv04_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = dev_priv->engine.fifo.channels;
+ int chid = 15;
if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
- if (chid >= dev_priv->engine.fifo.channels)
+ if (chid > 15)
return NULL;
return dev_priv->channels.ptr[chid];
@@ -404,7 +404,6 @@ nv04_graph_load_context(struct nouveau_channel *chan)
static int
nv04_graph_unload_context(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct graph_state *ctx;
uint32_t tmp;
@@ -420,7 +419,7 @@ nv04_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 15 << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -495,7 +494,6 @@ nv04_graph_object_new(struct nouveau_channel *chan, int engine,
static int
nv04_graph_init(struct drm_device *dev, int engine)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -527,7 +525,7 @@ nv04_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 15 << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
/* These don't belong here, they're part of a per-channel context */
@@ -550,28 +548,6 @@ nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
return 0;
}
-static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- atomic_set(&chan->fence.last_sequence_irq, data);
- return 0;
-}
-
-int
-nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_page_flip_state s;
-
- if (!nouveau_finish_page_flip(chan, &s))
- nv_set_crtc_base(dev, s.crtc,
- s.offset + s.y * s.pitch + s.x * s.bpp / 8);
-
- return 0;
-}
-
/*
* Software methods, why they are needed, and how they all work:
*
@@ -1020,7 +996,8 @@ nv04_graph_context_switch(struct drm_device *dev)
nv04_graph_unload_context(dev);
/* Load context for next channel */
- chid = dev_priv->engine.fifo.channel_id(dev);
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
chan = dev_priv->channels.ptr[chid];
if (chan)
nv04_graph_load_context(chan);
@@ -1345,9 +1322,5 @@ nv04_graph_create(struct drm_device *dev)
NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index c1248e0740a3..ef7a934a499a 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -1,6 +1,8 @@
#include "drmP.h"
#include "drm.h"
+
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
/* returns the size of fifo context */
@@ -10,12 +12,15 @@ nouveau_fifo_ctx_size(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (dev_priv->chipset >= 0x40)
- return 128;
+ return 128 * 32;
else
if (dev_priv->chipset >= 0x17)
- return 64;
+ return 64 * 32;
+ else
+ if (dev_priv->chipset >= 0x10)
+ return 32 * 32;
- return 32;
+ return 32 * 16;
}
int nv04_instmem_init(struct drm_device *dev)
@@ -39,14 +44,10 @@ int nv04_instmem_init(struct drm_device *dev)
else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
else rsvd = 0x4a40 * vs;
rsvd += 16 * 1024;
- rsvd *= dev_priv->engine.fifo.channels;
-
- /* pciegart table */
- if (pci_is_pcie(dev->pdev))
- rsvd += 512 * 1024;
+ rsvd *= 32; /* per-channel */
- /* object storage */
- rsvd += 512 * 1024;
+ rsvd += 512 * 1024; /* pci(e)gart table */
+ rsvd += 512 * 1024; /* object storage */
dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
} else {
@@ -71,7 +72,7 @@ int nv04_instmem_init(struct drm_device *dev)
return ret;
/* And RAMFC */
- length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
+ length = nouveau_fifo_ctx_size(dev);
switch (dev_priv->card_type) {
case NV_40:
offset = 0x20000;
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
new file mode 100644
index 000000000000..0c41abf48774
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_software.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+#include "nouveau_software.h"
+#include "nouveau_hw.h"
+
+struct nv04_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nv04_software_chan {
+ struct nouveau_software_chan base;
+};
+
+static int
+mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nouveau_page_flip_state state;
+
+ if (!nouveau_finish_page_flip(chan, &state)) {
+ nv_set_crtc_base(chan->dev, state.crtc, state.offset +
+ state.y * state.pitch +
+ state.x * state.bpp / 8);
+ }
+
+ return 0;
+}
+
+static int
+nv04_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_software_chan *pch;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ chan->engctx[engine] = pch;
+ return 0;
+}
+
+static void
+nv04_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv04_software_chan *pch = chan->engctx[engine];
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nv04_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 0;
+ obj->class = class;
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static int
+nv04_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nv04_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nv04_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nv04_software_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_software_priv *psw;
+
+ psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nv04_software_destroy;
+ psw->base.base.init = nv04_software_init;
+ psw->base.base.fini = nv04_software_fini;
+ psw->base.base.context_new = nv04_software_context_new;
+ psw->base.base.context_del = nv04_software_context_del;
+ psw->base.base.object_new = nv04_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+ if (dev_priv->card_type <= NV_04) {
+ NVOBJ_CLASS(dev, 0x006e, SW);
+ NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
+ NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
+ } else {
+ NVOBJ_CLASS(dev, 0x016e, SW);
+ NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
new file mode 100644
index 000000000000..8a1b75009185
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv10_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+ spinlock_t lock;
+ u32 sequence;
+};
+
+static int
+nv10_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+ OUT_RING (chan, fence->sequence);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static int
+nv10_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ return -ENODEV;
+}
+
+static int
+nv17_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ u32 value;
+ int ret;
+
+ if (!mutex_trylock(&prev->mutex))
+ return -EBUSY;
+
+ spin_lock(&priv->lock);
+ value = priv->sequence;
+ priv->sequence += 2;
+ spin_unlock(&priv->lock);
+
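+ /* synchronise the two channels through the shared NvSema buffer:
+ * "prev" releases the semaphore with value + 1 when it reaches this
+ * point, while "chan" acquires value + 1 (stalling until prev gets
+ * there) and then releases value + 2 ready for the next sync
+ */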
+ ret = RING_SPACE(prev, 5);
+ if (!ret) {
+ BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (prev, NvSema);
+ OUT_RING (prev, 0);
+ OUT_RING (prev, value + 0);
+ OUT_RING (prev, value + 1);
+ FIRE_RING (prev);
+ }
+
+ if (!ret && !(ret = RING_SPACE(chan, 5))) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+ OUT_RING (chan, NvSema);
+ OUT_RING (chan, 0);
+ OUT_RING (chan, value + 1);
+ OUT_RING (chan, value + 2);
+ FIRE_RING (chan);
+ }
+
+ mutex_unlock(&prev->mutex);
+ return 0;
+}
+
+static u32
+nv10_fence_read(struct nouveau_channel *chan)
+{
+ return nvchan_rd32(chan, 0x0048);
+}
+
+static void
+nv10_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv10_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv10_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nv10_fence_chan *fctx;
+ struct nouveau_gpuobj *obj;
+ int ret = 0;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
+ if (priv->bo) {
+ struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+ mem->start * PAGE_SIZE, mem->size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (!ret) {
+ ret = nouveau_ramht_insert(chan, NvSema, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ }
+ }
+
+ if (ret)
+ nv10_fence_context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv10_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv10_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_bo_ref(NULL, &priv->bo);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv10_fence_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fence_priv *priv;
+ int ret = 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv10_fence_destroy;
+ priv->base.engine.init = nv10_fence_init;
+ priv->base.engine.fini = nv10_fence_fini;
+ priv->base.engine.context_new = nv10_fence_context_new;
+ priv->base.engine.context_del = nv10_fence_context_del;
+ priv->base.emit = nv10_fence_emit;
+ priv->base.read = nv10_fence_read;
+ priv->base.sync = nv10_fence_sync;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+ spin_lock_init(&priv->lock);
+
+ if (dev_priv->chipset >= 0x17) {
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, NULL, &priv->bo);
+ if (!ret) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret == 0) {
+ nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+ priv->base.sync = nv17_fence_sync;
+ }
+ }
+
+ if (ret)
+ nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index d2ecbff4bee1..f1fe7d758241 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,220 +27,112 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
#include "nouveau_ramht.h"
-#define NV10_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV10_RAMFC__SIZE))
-#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
-
-int
-nv10_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
-}
-
-int
-nv10_fifo_create_context(struct nouveau_channel *chan)
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv10_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+struct nv10_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv10_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
- uint32_t fc = NV10_RAMFC(chan->id);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv10_fifo_chan *fctx;
+ unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
- NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ /* map channel control registers */
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- /* Fill entries that are seen filled in dumps of nvidia driver just
- * after channel's is put into DMA mode
- */
- nv_wi32(dev, fc + 0, chan->pushbuf_base);
- nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
- nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 32, ~0, 32,
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x08, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x10, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
- 0);
-
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
- return 0;
-}
-
-static void
-nv10_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV10_RAMFC(chid), tmp;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
-
- tmp = nv_ri32(dev, fc + 12);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
-
- if (dev_priv->chipset < 0x17)
- goto out;
-
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
- tmp = nv_ri32(dev, fc + 36);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
- nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
-
-out:
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv10_fifo_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- uint32_t tmp;
-
- nv10_fifo_do_load_context(dev, chan->id);
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
int
-nv10_fifo_unload_context(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- uint32_t fc, tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
- fc = NV10_RAMFC(chid);
-
- nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
- tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
- nv_wi32(dev, fc + 12, tmp);
- nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
- nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
-
- if (dev_priv->chipset < 0x17)
- goto out;
-
- nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
- tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
- nv_wi32(dev, fc + 36, tmp);
- nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
- nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
- nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
-
-out:
- nv10_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
- return 0;
-}
-
-static void
-nv10_fifo_init_reset(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x002044, 0x0101ffff);
- nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003000, 0x00000000);
- nv_wr32(dev, 0x003050, 0x00000000);
-
- nv_wr32(dev, 0x003258, 0x00000000);
- nv_wr32(dev, 0x003210, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
-}
-
-static void
-nv10_fifo_init_ramxx(struct drm_device *dev)
+nv10_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fifo_priv *priv;
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->pinst >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- if (dev_priv->chipset < 0x17) {
- nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
- } else {
- nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc->pinst >> 8) |
- (1 << 16) /* 64 Bytes entry*/);
- /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
- }
-}
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv04_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv10_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv10_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-static void
-nv10_fifo_init_intr(struct drm_device *dev)
-{
nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
-}
-
-int
-nv10_fifo_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv10_fifo_init_reset(dev);
- nv10_fifo_init_ramxx(dev);
-
- nv10_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-
- nv10_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 7255e4a4d3f3..fb1d88a951de 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -759,7 +759,6 @@ static int
nv10_graph_unload_context(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct graph_state *ctx;
uint32_t tmp;
@@ -782,7 +781,7 @@ nv10_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (pfifo->channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -822,12 +821,12 @@ struct nouveau_channel *
nv10_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = dev_priv->engine.fifo.channels;
+ int chid = 31;
if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
- if (chid >= dev_priv->engine.fifo.channels)
+ if (chid >= 31)
return NULL;
return dev_priv->channels.ptr[chid];
@@ -948,7 +947,7 @@ nv10_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
@@ -1153,10 +1152,6 @@ nv10_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv10_graph_isr);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c
new file mode 100644
index 000000000000..d9e482e4abee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fifo.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
+#include "nouveau_ramht.h"
+
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv17_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ {}
+};
+
+struct nv17_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv17_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv17_fifo_chan *fctx;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 64, ~0, 64,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv17_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
+ dev_priv->ramfc->pinst >> 8);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
+}
+
+int
+nv17_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv17_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv17_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv17_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
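[Illustrative note, not part of the patch] The nv17_ramfc[] table above describes each saved PFIFO field as (width, RAMFC shift, RAMFC byte offset, register shift, register), which lets one generic loop copy state between MMIO and the per-channel RAMFC instead of hand-written per-register code. The consumer of the table presumably lives in the shared nv04 FIFO code and is not shown in this file; the sketch below is a guess at its shape, with toy backing stores standing in for the MMIO and RAMFC accessors.

	/* illustrative sketch only -- table-driven register -> RAMFC save */
	#include <stdint.h>

	struct ramfc_desc {
		unsigned bits:6;   /* field width */
		unsigned ctxs:5;   /* shift within the RAMFC word */
		unsigned ctxp:8;   /* byte offset within RAMFC */
		unsigned regs:5;   /* shift within the MMIO register */
		unsigned regp;     /* MMIO register offset */
	};

	/* toy backing stores in place of nv_rd32()/nv_ro32()/nv_wo32() */
	static uint32_t mmio[0x4000];
	static uint32_t ramfc[32];

	static uint32_t reg_rd(uint32_t reg)            { return mmio[reg >> 2]; }
	static uint32_t fc_rd(uint32_t off)             { return ramfc[off >> 2]; }
	static void     fc_wr(uint32_t off, uint32_t v) { ramfc[off >> 2] = v; }

	/* save live PFIFO state into RAMFC, one descriptor at a time,
	 * stopping at the empty {} sentinel entry (bits == 0) */
	void ramfc_save(const struct ramfc_desc *c)
	{
		for (; c->bits; c++) {
			uint32_t mask = (uint32_t)((1ULL << c->bits) - 1);
			uint32_t rval = (reg_rd(c->regp) >> c->regs) & mask;
			uint32_t cval = fc_rd(c->ctxp) & ~(mask << c->ctxs);

			fc_wr(c->ctxp, cval | (rval << c->ctxs));
		}
	}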
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 183e37512ef9..e34ea30758f6 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -43,8 +43,6 @@ struct nv20_graph_engine {
int
nv20_graph_unload_context(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan;
struct nouveau_gpuobj *grctx;
u32 tmp;
@@ -62,7 +60,7 @@ nv20_graph_unload_context(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= (pfifo->channels - 1) << 24;
+ tmp |= 31 << 24;
nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
return 0;
}
@@ -796,10 +794,6 @@ nv20_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv20_graph_isr);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
index 6f06a0713f00..5f239bf658c4 100644
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv31_mpeg.c
@@ -24,6 +24,7 @@
#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
struct nv31_mpeg_engine {
@@ -208,6 +209,7 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
static int
nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ctx;
unsigned long flags;
@@ -218,7 +220,7 @@ nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
return 0;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
if (!dev_priv->channels.ptr[i])
continue;

diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 68cb2d991c88..cdc818479b0a 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -25,215 +25,123 @@
*/
#include "drmP.h"
+#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
+#include "nouveau_util.h"
#include "nouveau_ramht.h"
-#define NV40_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV40_RAMFC__SIZE))
-#define NV40_RAMFC__SIZE 128
-
-int
-nv40_fifo_create_context(struct nouveau_channel *chan)
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv40_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 2, 28, 0x18, 28, 0x002058 },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
+ { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
+ { 32, 0, 0x40, 0, 0x0032e4 },
+ { 32, 0, 0x44, 0, 0x0032e8 },
+ { 32, 0, 0x4c, 0, 0x002088 },
+ { 32, 0, 0x50, 0, 0x003300 },
+ { 32, 0, 0x54, 0, 0x00330c },
+ {}
+};
+
+struct nv40_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv40_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV40_RAMFC(chan->id);
+ struct nv40_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv40_fifo_chan *fctx;
unsigned long flags;
int ret;
- ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
- NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV40_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
return -ENOMEM;
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- nv_wi32(dev, fc + 0, chan->pushbuf_base);
- nv_wi32(dev, fc + 4, chan->pushbuf_base);
- nv_wi32(dev, fc + 12, chan->pushbuf->pinst >> 4);
- nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 128, ~0, 128,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
- 0x30000000 /* no idea.. */);
- nv_wi32(dev, fc + 60, 0x0001FFFF);
-
- /* enable the fifo dma operation */
- nv_wr32(dev, NV04_PFIFO_MODE,
- nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- return 0;
-}
-
-static void
-nv40_fifo_do_load_context(struct drm_device *dev, int chid)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
- nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
-
- /* No idea what 0x2058 is.. */
- tmp = nv_ri32(dev, fc + 24);
- tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
- tmp2 |= (tmp & 0x30000000);
- nv_wr32(dev, 0x2058, tmp2);
- tmp &= ~0x30000000;
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
- tmp = nv_ri32(dev, fc + 40);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
- nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
- nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
- nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
+ /*XXX: remove this later, need fifo engine context commit hook */
+ nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
- /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
- tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
- tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
-
- nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
- /* NVIDIA does this next line twice... */
- nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
- nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
- nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
- nv_wr32(dev, 0x330c, nv_ri32(dev, fc + 84));
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
-}
-
-int
-nv40_fifo_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- uint32_t tmp;
-
- nv40_fifo_do_load_context(dev, chan->id);
-
- /* Set channel active, and in DMA mode */
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
-
- /* Reset DMA_CTL_AT_INFO to INVALID */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
-
- return 0;
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
-int
-nv40_fifo_unload_context(struct drm_device *dev)
+static int
+nv40_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- uint32_t fc, tmp;
- int chid;
-
- chid = pfifo->channel_id(dev);
- if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
- return 0;
- fc = NV40_RAMFC(chid);
-
- nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
- nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
- nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
- nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
- tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
- nv_wi32(dev, fc + 24, tmp);
- nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
- nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
- nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
- tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
- nv_wi32(dev, fc + 40, tmp);
- nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
- nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
- /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
- * more involved depending on the value of 0x3228?
- */
- nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
- nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
- nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
- /* No idea what the below is for exactly, ripped from a mmio-trace */
- nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
- /* NVIDIA do this next line twice.. bug? */
- nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
- nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
- nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
-#if 0 /* no real idea which is PUT/GET in UNK_48.. */
- tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
- tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
- nv_wi32(dev, fc + 72, tmp);
-#endif
- nv_wi32(dev, fc + 84, nv_rd32(dev, 0x330c));
-
- nv40_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
- NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
- return 0;
-}
-
-static void
-nv40_fifo_init_reset(struct drm_device *dev)
-{
+ struct nv40_fifo_priv *priv = nv_engine(dev, engine);
int i;
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
- nv_wr32(dev, 0x003224, 0x000f0078);
- nv_wr32(dev, 0x003210, 0x00000000);
- nv_wr32(dev, 0x003270, 0x00000000);
- nv_wr32(dev, 0x003240, 0x00000000);
- nv_wr32(dev, 0x003244, 0x00000000);
- nv_wr32(dev, 0x003258, 0x00000000);
- nv_wr32(dev, 0x002504, 0x00000000);
- for (i = 0; i < 16; i++)
- nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
- nv_wr32(dev, 0x00250c, 0x0000ffff);
- nv_wr32(dev, 0x002048, 0x00000000);
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x0032e8, 0x00000000);
- nv_wr32(dev, 0x002410, 0x00000000);
- nv_wr32(dev, 0x002420, 0x00000000);
- nv_wr32(dev, 0x002058, 0x00000001);
- nv_wr32(dev, 0x00221c, 0x00000000);
- /* something with 0x2084, read/modify/write, no change */
nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002500, 0x00000000);
- nv_wr32(dev, 0x003200, 0x00000000);
-
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
-}
-
-static void
-nv40_fifo_init_ramxx(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv_wr32(dev, 0x002044, 0x2101ffff);
+ nv_wr32(dev, 0x002058, 0x00000001);
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
@@ -244,64 +152,59 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
case 0x47:
case 0x49:
case 0x4b:
- nv_wr32(dev, 0x2230, 1);
- break;
- default:
- break;
- }
-
- switch (dev_priv->chipset) {
+ nv_wr32(dev, 0x002230, 0x00000001);
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x45:
- case 0x47:
case 0x48:
- case 0x49:
- case 0x4b:
- nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
+ nv_wr32(dev, 0x002220, 0x00030002);
break;
default:
- nv_wr32(dev, 0x2230, 0);
- nv_wr32(dev, NV40_PFIFO_RAMFC,
- ((dev_priv->vram_size - 512 * 1024 +
- dev_priv->ramfc->pinst) >> 16) | (3 << 16));
+ nv_wr32(dev, 0x002230, 0x00000000);
+ nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
+ dev_priv->ramfc->pinst) >> 16) |
+ 0x00030000);
break;
}
-}
-static void
-nv40_fifo_init_intr(struct drm_device *dev)
-{
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
}
int
-nv40_fifo_init(struct drm_device *dev)
+nv40_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int i;
-
- nv40_fifo_init_reset(dev);
- nv40_fifo_init_ramxx(dev);
+ struct nv40_fifo_priv *priv;
- nv40_fifo_do_load_context(dev, pfifo->channels - 1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
-
- nv40_fifo_init_intr(dev);
- pfifo->enable(dev);
- pfifo->reassign(dev, true);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (dev_priv->channels.ptr[i]) {
- uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
- nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
- }
- }
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv40_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv40_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv40_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
return 0;
}
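[Illustrative note, not part of the patch] Several hunks in this series replace open-coded nv_rd32()/nv_wr32() read-modify-write pairs with nv_mask() (the PMC_ENABLE pulses and per-channel PFIFO_MODE bits above, for example). The nv50 kickoff hunk further down also saves and restores nv_mask()'s return value, implying the helper returns the previous register contents. A small model of such a helper against a toy register file:

	/* illustrative sketch only -- read-modify-write helper in the
	 * style of nv_mask(), returning the old register contents */
	#include <stdint.h>

	static uint32_t regfile[0x1000];        /* toy register file */

	static uint32_t reg_rd(uint32_t r)             { return regfile[r >> 2]; }
	static void     reg_wr(uint32_t r, uint32_t v) { regfile[r >> 2] = v; }

	uint32_t reg_mask(uint32_t r, uint32_t mask, uint32_t val)
	{
		uint32_t tmp = reg_rd(r);

		reg_wr(r, (tmp & ~mask) | val);
		return tmp;                      /* old value, for later restore */
	}

	/* e.g. pulsing an engine's enable bit, as nv40_fifo_init() now does:
	 *   reg_mask(enable_reg, engine_bit, 0);
	 *   reg_mask(enable_reg, engine_bit, engine_bit);
	 */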
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index ba14a93d8afa..aa9e2df64a26 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -27,7 +27,7 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
struct nv40_graph_engine {
@@ -42,7 +42,6 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx = NULL;
- struct nouveau_grctx ctx = {};
unsigned long flags;
int ret;
@@ -52,11 +51,7 @@ nv40_graph_context_new(struct nouveau_channel *chan, int engine)
return ret;
/* Initialise default context values */
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = grctx;
- nv40_grctx_init(&ctx);
-
+ nv40_grctx_fill(dev, grctx);
nv_wo32(grctx, 0, grctx->vinst);
/* init grctx pointer in ramfc, and on PFIFO if channel is
@@ -184,8 +179,7 @@ nv40_graph_init(struct drm_device *dev, int engine)
struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_grctx ctx = {};
- uint32_t vramsz, *cp;
+ uint32_t vramsz;
int i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -193,22 +187,8 @@ nv40_graph_init(struct drm_device *dev, int engine)
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
- cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
-
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = cp;
- ctx.ctxprog_max = 256;
- nv40_grctx_init(&ctx);
- pgraph->grctx_size = ctx.ctxvals_pos * 4;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
-
- kfree(cp);
+ /* generate and upload context program */
+ nv40_grctx_init(dev, &pgraph->grctx_size);
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -366,13 +346,14 @@ nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
static int
nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
if (!dev_priv->channels.ptr[i])
continue;
grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
@@ -460,7 +441,6 @@ nv40_graph_create(struct drm_device *dev)
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
nouveau_irq_register(dev, 12, nv40_graph_isr);
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
@@ -483,8 +463,5 @@ nv40_graph_create(struct drm_device *dev)
else
NVOBJ_CLASS(dev, 0x4097, GR);
- /* nvsw */
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
index f70447d131d7..be0a74750fb1 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -595,8 +595,8 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
}
}
-void
-nv40_grctx_init(struct nouveau_grctx *ctx)
+static void
+nv40_grctx_generate(struct nouveau_grctx *ctx)
{
/* decide whether we're loading/unloading the context */
cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
@@ -660,3 +660,31 @@ nv40_grctx_init(struct nouveau_grctx *ctx)
cp_out (ctx, CP_END);
}
+void
+nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+ nv40_grctx_generate(&(struct nouveau_grctx) {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_VALS,
+ .data = mem,
+ });
+}
+
+void
+nv40_grctx_init(struct drm_device *dev, u32 *size)
+{
+ u32 ctxprog[256], i;
+ struct nouveau_grctx ctx = {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_PROG,
+ .data = ctxprog,
+ .ctxprog_max = ARRAY_SIZE(ctxprog)
+ };
+
+ nv40_grctx_generate(&ctx);
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+ *size = ctx.ctxvals_pos * 4;
+}
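[Illustrative note, not part of the patch] nv40_grctx_fill() above builds its generator argument as a C99 compound literal rather than declaring and filling a named temporary, as the old call sites did. A tiny standalone illustration of that idiom, with generic names:

	/* illustrative sketch only -- compound literal as a throwaway,
	 * stack-allocated argument valid for the duration of the call */
	#include <stdio.h>

	struct genctx {
		int   mode;
		void *data;
	};

	static void generate(struct genctx *ctx)
	{
		printf("mode=%d data=%p\n", ctx->mode, ctx->data);
	}

	void fill(void *mem)
	{
		generate(&(struct genctx){
			.mode = 1,
			.data = mem,
		});
	}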
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index c7615381c5d9..e66273aff493 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -27,6 +27,7 @@
#include "nouveau_bios.h"
#include "nouveau_pm.h"
#include "nouveau_hw.h"
+#include "nouveau_fifo.h"
#define min2(a,b) ((a) < (b) ? (a) : (b))
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 701b927998bf..97a477b3d52d 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -79,15 +79,15 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
NV_ERROR(dev, "no space while blanking crtc\n");
return ret;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
OUT_RING(evo, 0);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
} else {
if (nv_crtc->cursor.visible)
@@ -100,20 +100,20 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
NV_ERROR(dev, "no space while unblanking crtc\n");
return ret;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
OUT_RING(evo, nv_crtc->lut.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF :
NV50_EVO_CRTC_CLUT_MODE_ON);
OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
OUT_RING(evo, nv_crtc->fb.offset >> 8);
OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
if (dev_priv->chipset != 0x50)
if (nv_crtc->fb.tile_flags == 0x7a00 ||
nv_crtc->fb.tile_flags == 0xfe00)
@@ -158,10 +158,10 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
if (ret == 0) {
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
OUT_RING (evo, mode);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
FIRE_RING (evo);
}
@@ -193,11 +193,11 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
OUT_RING (evo, (hue << 20) | (vib << 8));
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
FIRE_RING (evo);
}
@@ -311,9 +311,9 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
OUT_RING (evo, ctrl);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
OUT_RING (evo, oY << 16 | oX);
OUT_RING (evo, oY << 16 | oX);
@@ -383,23 +383,15 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
- struct drm_device *dev;
- struct nouveau_crtc *nv_crtc;
-
- if (!crtc)
- return;
-
- dev = crtc->dev;
- nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- drm_crtc_cleanup(&nv_crtc->base);
+ NV_DEBUG_KMS(crtc->dev, "\n");
nouveau_bo_unmap(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ drm_crtc_cleanup(&nv_crtc->base);
kfree(nv_crtc);
}
@@ -593,7 +585,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
OUT_RING (evo, fb->r_dma);
}
@@ -601,18 +593,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
OUT_RING (evo, nv_crtc->fb.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
OUT_RING (evo, fb->r_pitch);
OUT_RING (evo, fb->r_format);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
OUT_RING (evo, fb->base.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
OUT_RING (evo, (y << 16) | x);
if (nv_crtc->lut.depth != fb->base.depth) {
@@ -672,23 +664,23 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
ret = RING_SPACE(evo, 18);
if (ret == 0) {
- BEGIN_RING(evo, 0, 0x0804 + head, 2);
+ BEGIN_NV04(evo, 0, 0x0804 + head, 2);
OUT_RING (evo, 0x00800000 | mode->clock);
OUT_RING (evo, (ilace == 2) ? 2 : 0);
- BEGIN_RING(evo, 0, 0x0810 + head, 6);
+ BEGIN_NV04(evo, 0, 0x0810 + head, 6);
OUT_RING (evo, 0x00000000); /* border colour */
OUT_RING (evo, (vactive << 16) | hactive);
OUT_RING (evo, ( vsynce << 16) | hsynce);
OUT_RING (evo, (vblanke << 16) | hblanke);
OUT_RING (evo, (vblanks << 16) | hblanks);
OUT_RING (evo, (vblan2e << 16) | vblan2s);
- BEGIN_RING(evo, 0, 0x082c + head, 1);
+ BEGIN_NV04(evo, 0, 0x082c + head, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0900 + head, 1);
+ BEGIN_NV04(evo, 0, 0x0900 + head, 1);
OUT_RING (evo, 0x00000311); /* makes sync channel work */
- BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+ BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
- BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+ BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
OUT_RING (evo, 0x00000000); /* screen position */
}
@@ -755,21 +747,25 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (!nv_crtc)
return -ENOMEM;
+ nv_crtc->index = index;
+ nv_crtc->set_dither = nv50_crtc_set_dither;
+ nv_crtc->set_scale = nv50_crtc_set_scale;
+ nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
nv_crtc->color_vibrance = 50;
nv_crtc->vibrant_hue = 0;
-
- /* Default CLUT parameters, will be activated on the hw upon
- * first mode set.
- */
+ nv_crtc->lut.depth = 0;
for (i = 0; i < 256; i++) {
nv_crtc->lut.r[i] = i << 8;
nv_crtc->lut.g[i] = i << 8;
nv_crtc->lut.b[i] = i << 8;
}
- nv_crtc->lut.depth = 0;
+
+ drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+ drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->lut.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -778,24 +774,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
}
- if (ret) {
- kfree(nv_crtc);
- return ret;
- }
-
- nv_crtc->index = index;
+ if (ret)
+ goto out;
- /* set function pointers */
- nv_crtc->set_dither = nv50_crtc_set_dither;
- nv_crtc->set_scale = nv50_crtc_set_scale;
- nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
-
- drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
- drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
- drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -804,6 +788,12 @@ nv50_crtc_create(struct drm_device *dev, int index)
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
}
+ if (ret)
+ goto out;
+
nv50_cursor_init(nv_crtc);
- return 0;
+out:
+ if (ret)
+ nv50_crtc_destroy(&nv_crtc->base);
+ return ret;
}
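[Illustrative note, not part of the patch] The nv50_crtc_create()/nv50_crtc_destroy() hunks above collapse the piecemeal error handling into a single teardown path: any mid-construction failure jumps to out: and funnels through the destroy routine, which must therefore tolerate a partially initialised object. A generic sketch of that shape:

	/* illustrative sketch only -- single-teardown error path */
	#include <errno.h>
	#include <stdlib.h>

	struct obj {
		void *buf_a;
		void *buf_b;
	};

	static void obj_destroy(struct obj *o)
	{
		free(o->buf_a);         /* free(NULL) is a no-op, so partial init is fine */
		free(o->buf_b);
		free(o);
	}

	int obj_create(struct obj **out)
	{
		struct obj *o = calloc(1, sizeof(*o));
		int ret = 0;

		if (!o)
			return -ENOMEM;

		o->buf_a = malloc(4096);
		if (!o->buf_a) {
			ret = -ENOMEM;
			goto out;
		}

		o->buf_b = malloc(4096);
		if (!o->buf_b)
			ret = -ENOMEM;
	out:
		if (ret) {
			obj_destroy(o);
			return ret;
		}
		*out = o;
		return 0;
	}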
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index adfc9b607a50..af4ec7bf3670 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -53,15 +53,15 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
}
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
OUT_RING(evo, nv_crtc->cursor.offset >> 8);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
nv_crtc->cursor.visible = true;
@@ -86,16 +86,16 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
NV_ERROR(dev, "no space while hiding cursor\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
OUT_RING(evo, 0);
if (dev_priv->chipset != 0x50) {
- BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
}
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
FIRE_RING(evo);
nv_crtc->cursor.visible = false;
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 55c56330be6d..eb216a446b89 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -55,9 +55,9 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
NV_ERROR(dev, "no space while disconnecting DAC\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
nv_encoder->crtc = NULL;
@@ -240,7 +240,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
NV_ERROR(dev, "no space while connecting DAC\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
OUT_RING(evo, mode_ctl);
OUT_RING(evo, mode_ctl2);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b78b9cfa383..5c41612723b4 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -32,6 +32,7 @@
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
+#include "nouveau_software.h"
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
@@ -140,11 +141,11 @@ nv50_display_sync(struct drm_device *dev)
ret = RING_SPACE(evo, 6);
if (ret == 0) {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x80000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
nv_wo32(disp->ntfy, 0x000, 0x00000000);
@@ -267,7 +268,7 @@ nv50_display_init(struct drm_device *dev)
ret = RING_SPACE(evo, 3);
if (ret)
return ret;
- BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+ BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
OUT_RING (evo, NvEvoSync);
@@ -292,7 +293,7 @@ nv50_display_fini(struct drm_device *dev)
ret = RING_SPACE(evo, 2);
if (ret == 0) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING(evo, 0);
}
FIRE_RING(evo);
@@ -358,8 +359,11 @@ nv50_display_create(struct drm_device *dev)
dev_priv->engine.display.priv = priv;
/* Create CRTC objects */
- for (i = 0; i < 2; i++)
- nv50_crtc_create(dev, i);
+ for (i = 0; i < 2; i++) {
+ ret = nv50_crtc_create(dev, i);
+ if (ret)
+ return ret;
+ }
/* We setup the encoders from the BIOS table */
for (i = 0 ; i < dcb->entries; i++) {
@@ -438,13 +442,13 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
return;
}
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0094, 1);
+ BEGIN_NV04(evo, 0, 0x0094, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x00c0, 1);
+ BEGIN_NV04(evo, 0, 0x00c0, 1);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
}
@@ -474,28 +478,28 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
if (dev_priv->chipset < 0xc0) {
- BEGIN_RING(chan, 0, 0x0060, 2);
+ BEGIN_NV04(chan, 0, 0x0060, 2);
OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, dispc->sem.offset);
- BEGIN_RING(chan, 0, 0x006c, 1);
+ BEGIN_NV04(chan, 0, 0x006c, 1);
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
- BEGIN_RING(chan, 0, 0x0064, 2);
+ BEGIN_NV04(chan, 0, 0x0064, 2);
OUT_RING (chan, dispc->sem.offset ^ 0x10);
OUT_RING (chan, 0x74b1e000);
- BEGIN_RING(chan, 0, 0x0060, 1);
+ BEGIN_NV04(chan, 0, 0x0060, 1);
if (dev_priv->chipset < 0x84)
OUT_RING (chan, NvSema);
else
OUT_RING (chan, chan->vram_handle);
} else {
- u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+ u64 offset = nvc0_software_crtc(chan, nv_crtc->index);
offset += dispc->sem.offset;
- BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+ BEGIN_NVC0(chan, 0, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
@@ -508,40 +512,40 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
/* queue the flip on the crtc's "display sync" channel */
- BEGIN_RING(evo, 0, 0x0100, 1);
+ BEGIN_NV04(evo, 0, 0x0100, 1);
OUT_RING (evo, 0xfffe0000);
if (chan) {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000100);
} else {
- BEGIN_RING(evo, 0, 0x0084, 1);
+ BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000010);
/* allows gamma somehow, PDISP will bitch at you if
* you don't wait for vblank before changing this..
*/
- BEGIN_RING(evo, 0, 0x00e0, 1);
+ BEGIN_NV04(evo, 0, 0x00e0, 1);
OUT_RING (evo, 0x40000000);
}
- BEGIN_RING(evo, 0, 0x0088, 4);
+ BEGIN_NV04(evo, 0, 0x0088, 4);
OUT_RING (evo, dispc->sem.offset);
OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
OUT_RING (evo, 0x74b1e000);
OUT_RING (evo, NvEvoSync);
- BEGIN_RING(evo, 0, 0x00a0, 2);
+ BEGIN_NV04(evo, 0, 0x00a0, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x00c0, 1);
+ BEGIN_NV04(evo, 0, 0x00c0, 1);
OUT_RING (evo, nv_fb->r_dma);
- BEGIN_RING(evo, 0, 0x0110, 2);
+ BEGIN_NV04(evo, 0, 0x0110, 2);
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
- BEGIN_RING(evo, 0, 0x0800, 5);
+ BEGIN_NV04(evo, 0, 0x0800, 5);
OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (fb->height << 16) | fb->width);
OUT_RING (evo, nv_fb->r_pitch);
OUT_RING (evo, nv_fb->r_format);
- BEGIN_RING(evo, 0, 0x0080, 1);
+ BEGIN_NV04(evo, 0, 0x0080, 1);
OUT_RING (evo, 0x00000000);
FIRE_RING (evo);
@@ -642,20 +646,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan, *tmp;
-
- list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
- nvsw.vbl_wait) {
- if (chan->nvsw.vblsem_head != crtc)
- continue;
-
- nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
- chan->nvsw.vblsem_rval);
- list_del(&chan->nvsw.vbl_wait);
- drm_vblank_put(dev, crtc);
- }
-
+ nouveau_software_vblank(dev, crtc);
drm_handle_vblank(dev, crtc);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 5d3dd14d2837..e9db9b97f041 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -33,6 +33,7 @@
#include "nouveau_dma.h"
#include "nouveau_reg.h"
#include "nouveau_crtc.h"
+#include "nouveau_software.h"
#include "nv50_evo.h"
struct nv50_display_crtc {
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 9b962e989d7c..ddcd55595824 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -117,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
evo->user_get = 4;
evo->user_put = 0;
- ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
&evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -333,7 +333,7 @@ nv50_evo_create(struct drm_device *dev)
goto err;
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &dispc->sem.bo);
+ 0, 0x0000, NULL, &dispc->sem.bo);
if (!ret) {
ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index bdd2afe29205..f1e4b9e07d14 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -2,6 +2,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_fifo.h"
struct nv50_fb_priv {
struct page *r100c08_page;
@@ -212,6 +213,7 @@ static struct nouveau_enum vm_fault[] = {
void
nv50_fb_vm_trap(struct drm_device *dev, int display)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
const struct nouveau_enum *en, *cl;
unsigned long flags;
@@ -236,7 +238,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display)
/* lookup channel id */
chinst = (trap[2] << 16) | trap[1];
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+ for (ch = 0; ch < pfifo->channels; ch++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
if (!chan || !chan->ramin)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index dc75a7206524..e3c8b05dcae4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -43,22 +43,22 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
return ret;
if (rect->rop != ROP_COPY) {
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 1);
}
- BEGIN_RING(chan, NvSub2D, 0x0588, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0588, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING(chan, rect->color);
- BEGIN_RING(chan, NvSub2D, 0x0600, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0600, 4);
OUT_RING(chan, rect->dx);
OUT_RING(chan, rect->dy);
OUT_RING(chan, rect->dx + rect->width);
OUT_RING(chan, rect->dy + rect->height);
if (rect->rop != ROP_COPY) {
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 3);
}
FIRE_RING(chan);
@@ -78,14 +78,14 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_RING(chan, NvSub2D, 0x0110, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0110, 1);
OUT_RING(chan, 0);
- BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08b0, 4);
OUT_RING(chan, region->dx);
OUT_RING(chan, region->dy);
OUT_RING(chan, region->width);
OUT_RING(chan, region->height);
- BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08d0, 4);
OUT_RING(chan, 0);
OUT_RING(chan, region->sx);
OUT_RING(chan, 0);
@@ -116,7 +116,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
- BEGIN_RING(chan, NvSub2D, 0x0814, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING(chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, image->bg_color);
OUT_RING(chan, image->fg_color);
}
- BEGIN_RING(chan, NvSub2D, 0x0838, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0838, 2);
OUT_RING(chan, image->width);
OUT_RING(chan, image->height);
- BEGIN_RING(chan, NvSub2D, 0x0850, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0850, 4);
OUT_RING(chan, 0);
OUT_RING(chan, image->dx);
OUT_RING(chan, 0);
@@ -143,7 +143,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
dwords -= push;
- BEGIN_RING(chan, NvSub2D, 0x40000860, push);
+ BEGIN_NI04(chan, NvSub2D, 0x0860, push);
OUT_RINGp(chan, data, push);
data += push;
}
@@ -199,60 +199,59 @@ nv50_fbcon_accel_init(struct fb_info *info)
return ret;
}
- BEGIN_RING(chan, NvSub2D, 0x0000, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
OUT_RING(chan, Nv2D);
- BEGIN_RING(chan, NvSub2D, 0x0180, 4);
- OUT_RING(chan, NvNotify0);
+ BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
OUT_RING(chan, chan->vram_handle);
OUT_RING(chan, chan->vram_handle);
OUT_RING(chan, chan->vram_handle);
- BEGIN_RING(chan, NvSub2D, 0x0290, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
OUT_RING(chan, 0);
- BEGIN_RING(chan, NvSub2D, 0x0888, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
OUT_RING(chan, 3);
- BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x02a0, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x08c0, 4);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0580, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0580, 2);
OUT_RING(chan, 4);
OUT_RING(chan, format);
- BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x02e8, 2);
OUT_RING(chan, 2);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0804, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0804, 1);
OUT_RING(chan, format);
- BEGIN_RING(chan, NvSub2D, 0x0800, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x0800, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0808, 3);
+ BEGIN_NV04(chan, NvSub2D, 0x0808, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x081c, 1);
+ BEGIN_NV04(chan, NvSub2D, 0x081c, 1);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0840, 4);
+ BEGIN_NV04(chan, NvSub2D, 0x0840, 4);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0200, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0200, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0214, 5);
+ BEGIN_NV04(chan, NvSub2D, 0x0214, 5);
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
OUT_RING(chan, upper_32_bits(fb->vma.offset));
OUT_RING(chan, lower_32_bits(fb->vma.offset));
- BEGIN_RING(chan, NvSub2D, 0x0230, 2);
+ BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
- BEGIN_RING(chan, NvSub2D, 0x0244, 5);
+ BEGIN_NV04(chan, NvSub2D, 0x0244, 5);
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
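[Illustrative note, not part of the patch] The BEGIN_RING() to BEGIN_NV04()/BEGIN_NI04() conversions in the display and fbcon hunks above only change how the pushbuffer header word is spelled; note the old raw 0x40000860 method becoming BEGIN_NI04 at 0x0860, the 0x40000000 non-increment flag now expressed by the macro name. A toy model of that header emission follows, assuming the usual NV04 command layout (word count in bits 28:18, subchannel in 15:13, method address in the low bits, bit 30 marking a non-incrementing burst); the struct and helper names here are made up for illustration.

	/* illustrative sketch only -- NV04-style method header emission */
	#include <stdint.h>
	#include <stddef.h>

	struct pushbuf {
		uint32_t word[1024];
		size_t   cur;
	};

	static void out_ring(struct pushbuf *pb, uint32_t data)
	{
		pb->word[pb->cur++] = data;
	}

	/* header, then 'count' data words with auto-incrementing method */
	void begin_nv04(struct pushbuf *pb, int subc, int mthd, int count)
	{
		out_ring(pb, ((uint32_t)count << 18) | (subc << 13) | mthd);
	}

	/* same, but every data word goes to the same (non-incrementing) method */
	void begin_ni04(struct pushbuf *pb, int subc, int mthd, int count)
	{
		out_ring(pb, 0x40000000 | ((uint32_t)count << 18) | (subc << 13) | mthd);
	}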
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 3bc2a565c20b..55383b85db0b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
+ * Copyright (C) 2012 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
@@ -27,480 +27,268 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
-static void
+struct nv50_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv50_fifo_chan {
+ struct nouveau_fifo_chan base;
+};
+
+void
nv50_fifo_playlist_update(struct drm_device *dev)
{
+ struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_gpuobj *cur;
- int i, nr;
-
- NV_DEBUG(dev, "\n");
+ int i, p;
- cur = pfifo->playlist[pfifo->cur_playlist];
- pfifo->cur_playlist = !pfifo->cur_playlist;
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
- /* We never schedule channel 0 or 127 */
- for (i = 1, nr = 0; i < 127; i++) {
- if (dev_priv->channels.ptr[i] &&
- dev_priv->channels.ptr[i]->ramfc) {
- nv_wo32(cur, (nr * 4), i);
- nr++;
- }
+ for (i = 0, p = 0; i < priv->base.channels; i++) {
+ if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
+ nv_wo32(cur, p++ * 4, i);
}
- dev_priv->engine.instmem.flush(dev);
-
- nv_wr32(dev, 0x32f4, cur->vinst >> 12);
- nv_wr32(dev, 0x32ec, nr);
- nv_wr32(dev, 0x2500, 0x101);
-}
-static void
-nv50_fifo_channel_enable(struct drm_device *dev, int channel)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
- uint32_t inst;
-
- NV_DEBUG(dev, "ch%d\n", channel);
-
- if (dev_priv->chipset == 0x50)
- inst = chan->ramfc->vinst >> 12;
- else
- inst = chan->ramfc->vinst >> 8;
+ dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
- NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+ nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
+ nv_wr32(dev, 0x0032ec, p);
+ nv_wr32(dev, 0x002500, 0x00000101);
}
-static void
-nv50_fifo_channel_disable(struct drm_device *dev, int channel)
+static int
+nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
{
+ struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv50_fifo_chan *fctx;
+ struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
-
- NV_DEBUG(dev, "ch%d\n", channel);
+ u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ u64 instance = chan->ramin->vinst >> 12;
+ unsigned long flags;
+ int ret = 0, i;
- if (dev_priv->chipset == 0x50)
- inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
- else
- inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
-}
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ atomic_inc(&chan->vm->engref[engine]);
-static void
-nv50_fifo_init_reset(struct drm_device *dev)
-{
- uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
- NV_DEBUG(dev, "\n");
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x3c, 0x403f6078);
+ nv_wo32(chan->ramin, 0x40, 0x00000000);
+ nv_wo32(chan->ramin, 0x44, 0x01003fff);
+ nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(chan->ramin, 0x60, 0x7fffffff);
+ nv_wo32(chan->ramin, 0x78, 0x00000000);
+ nv_wo32(chan->ramin, 0x7c, 0x30000001);
+ nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
-}
+ dev_priv->engine.instmem.flush(dev);
-static void
-nv50_fifo_init_intr(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+ nv50_fifo_playlist_update(dev);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
}
-static void
-nv50_fifo_init_context_table(struct drm_device *dev)
+static bool
+nv50_fifo_kickoff(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- NV_DEBUG(dev, "\n");
-
- for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
- if (dev_priv->channels.ptr[i])
- nv50_fifo_channel_enable(dev, i);
- else
- nv50_fifo_channel_disable(dev, i);
+ struct drm_device *dev = chan->dev;
+ bool done = true;
+ u32 me;
+
+ /* HW bug workaround:
+ *
+ * PFIFO will hang forever if the connected engines don't report
+ * that they've processed the context switch request.
+ *
+ * In order for the kickoff to work, we need to ensure all the
+ * connected engines are in a state where they can answer.
+ *
+	 * Newer chipsets don't seem to suffer from this issue, and they
+	 * also have an "ignore these engines" bitmask register we could
+	 * use if the issue ever showed up there.
+ */
+
+ /* PME: make sure engine is enabled */
+ me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
+
+ /* do the kickoff... */
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+ done = false;
}
- nv50_fifo_playlist_update(dev);
+ /* restore any engine states we changed, and exit */
+ nv_wr32(dev, 0x00b860, me);
+ return done;
}
static void
-nv50_fifo_init_regs__nv(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, 0x250c, 0x6f3cfc34);
-}
-
-static void
-nv50_fifo_init_regs(struct drm_device *dev)
-{
- NV_DEBUG(dev, "\n");
-
- nv_wr32(dev, 0x2500, 0);
- nv_wr32(dev, 0x3250, 0);
- nv_wr32(dev, 0x3220, 0);
- nv_wr32(dev, 0x3204, 0);
- nv_wr32(dev, 0x3210, 0);
- nv_wr32(dev, 0x3270, 0);
- nv_wr32(dev, 0x2044, 0x01003fff);
-
- /* Enable dummy channels setup by nv50_instmem.c */
- nv50_fifo_channel_enable(dev, 0);
- nv50_fifo_channel_enable(dev, 127);
-}
-
-int
-nv50_fifo_init(struct drm_device *dev)
+nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
{
+ struct nv50_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- int ret;
+ unsigned long flags;
- NV_DEBUG(dev, "\n");
+ /* remove channel from playlist, will context switch if active */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(dev);
- if (pfifo->playlist[0]) {
- pfifo->cur_playlist = !pfifo->cur_playlist;
- goto just_reset;
- }
+ /* tell any engines on this channel to unload their contexts */
+ nv50_fifo_kickoff(chan);
- ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[0]);
- if (ret) {
- NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
- return ret;
- }
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC,
- &pfifo->playlist[1]);
- if (ret) {
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
- NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
- return ret;
+ /* clean up */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
}
-just_reset:
- nv50_fifo_init_reset(dev);
- nv50_fifo_init_intr(dev);
- nv50_fifo_init_context_table(dev);
- nv50_fifo_init_regs__nv(dev);
- nv50_fifo_init_regs(dev);
- dev_priv->engine.fifo.enable(dev);
- dev_priv->engine.fifo.reassign(dev, true);
-
- return 0;
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
}
-void
-nv50_fifo_takedown(struct drm_device *dev)
+static int
+nv50_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ u32 instance;
+ int i;
- NV_DEBUG(dev, "\n");
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+ nv_wr32(dev, 0x002044, 0x01003fff);
- if (!pfifo->playlist[0])
- return;
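+	/* ack and unmask all PFIFO interrupts (0x2100/0x2140, as the
+	 * removed nv50_fifo_init_intr() did via the named constants) */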
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
- nv_wr32(dev, 0x2140, 0x00000000);
- nouveau_irq_unregister(dev, 8);
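+	/* rebuild the per-channel context table at 0x2600 + (id * 4): bit 31
+	 * marks the entry valid, the low bits hold the channel's instance
+	 * address (replaces the old nv50_fifo_init_context_table()) */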
+ for (i = 0; i < 128; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && chan->engctx[engine])
+ instance = 0x80000000 | chan->ramin->vinst >> 12;
+ else
+ instance = 0x00000000;
+ nv_wr32(dev, 0x002600 + (i * 4), instance);
+ }
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
- nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
-}
+ nv50_fifo_playlist_update(dev);
-int
-nv50_fifo_channel_id(struct drm_device *dev)
-{
- return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
+ nv_wr32(dev, 0x003200, 1);
+ nv_wr32(dev, 0x003250, 1);
+ nv_wr32(dev, 0x002500, 1);
+ return 0;
}
-int
-nv50_fifo_create_context(struct nouveau_channel *chan)
+static int
+nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
- struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramfc = NULL;
- uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
- unsigned long flags;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (dev_priv->chipset == 0x50) {
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
- chan->ramin->vinst, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE,
- &chan->ramfc);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
- chan->ramin->vinst + 0x0400,
- 4096, 0, &chan->cache);
- if (ret)
- return ret;
- } else {
- ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
- 0, &chan->cache);
- if (ret)
- return ret;
- }
- ramfc = chan->ramfc;
+ struct nv50_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_USER(chan->id), PAGE_SIZE);
- if (!chan->user)
- return -ENOMEM;
+ /* set playlist length to zero, fifo will unload context */
+ nv_wr32(dev, 0x0032ec, 0);
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
- nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
- nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->cinst >> 4));
- nv_wo32(ramfc, 0x44, 0x01003fff);
- nv_wo32(ramfc, 0x60, 0x7fffffff);
- nv_wo32(ramfc, 0x40, 0x00000000);
- nv_wo32(ramfc, 0x7c, 0x30000001);
- nv_wo32(ramfc, 0x78, 0x00000000);
- nv_wo32(ramfc, 0x3c, 0x403f6078);
- nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
- nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
- drm_order(chan->dma.ib_max + 1) << 16);
-
- if (dev_priv->chipset != 0x50) {
- nv_wo32(chan->ramin, 0, chan->id);
- nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);
-
- nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
- nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
+ /* tell all connected engines to unload their contexts */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && !nv50_fifo_kickoff(chan))
+ return -EBUSY;
}
- dev_priv->engine.instmem.flush(dev);
-
- nv50_fifo_channel_enable(dev, chan->id);
- nv50_fifo_playlist_update(dev);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002140, 0);
return 0;
}
void
-nv50_fifo_destroy_context(struct nouveau_channel *chan)
+nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_gpuobj *ramfc = NULL;
- unsigned long flags;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
-
- /* Unload the context if it's the currently active one */
- if (pfifo->channel_id(dev) == chan->id) {
- pfifo->disable(dev);
- pfifo->unload_context(dev);
- pfifo->enable(dev);
- }
-
- /* This will ensure the channel is seen as disabled. */
- nouveau_gpuobj_ref(chan->ramfc, &ramfc);
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
- nv50_fifo_channel_disable(dev, chan->id);
-
- /* Dummy channel, also used on ch 127 */
- if (chan->id == 0)
- nv50_fifo_channel_disable(dev, 127);
- nv50_fifo_playlist_update(dev);
-
- pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the channel resources */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
- nouveau_gpuobj_ref(NULL, &ramfc);
- nouveau_gpuobj_ref(NULL, &chan->cache);
+ nv50_vm_flush_engine(dev, 5);
}
-int
-nv50_fifo_load_context(struct nouveau_channel *chan)
+void
+nv50_fifo_destroy(struct drm_device *dev, int engine)
{
- struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramfc = chan->ramfc;
- struct nouveau_gpuobj *cache = chan->cache;
- int ptr, cnt;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
- nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
- nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
- nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
- nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
- nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
- nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
- nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
- nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
- nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
- nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
- nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
- nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
- nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
- nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
- nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
- nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
- nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
- nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
- nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
- nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
- nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
- nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
- nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
- nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
- nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
- nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
- nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
- nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
- nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
- nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
- nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
- nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));
-
- cnt = nv_ro32(ramfc, 0x84);
- for (ptr = 0; ptr < cnt; ptr++) {
- nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
- nv_ro32(cache, (ptr * 8) + 0));
- nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
- nv_ro32(cache, (ptr * 8) + 4));
- }
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-
- /* guessing that all the 0x34xx regs aren't on NV50 */
- if (dev_priv->chipset != 0x50) {
- nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
- nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
- nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
- nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
- nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
- }
+ struct nv50_fifo_priv *priv = nv_engine(dev, engine);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
- return 0;
+ nouveau_irq_unregister(dev, 8);
+
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
}
int
-nv50_fifo_unload_context(struct drm_device *dev)
+nv50_fifo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nouveau_gpuobj *ramfc, *cache;
- struct nouveau_channel *chan = NULL;
- int chid, get, put, ptr;
-
- NV_DEBUG(dev, "\n");
-
- chid = pfifo->channel_id(dev);
- if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
- return 0;
-
- chan = dev_priv->channels.ptr[chid];
- if (!chan) {
- NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
- return -EINVAL;
- }
- NV_DEBUG(dev, "ch%d\n", chan->id);
- ramfc = chan->ramfc;
- cache = chan->cache;
-
- nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
- nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
- nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
- nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
- nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
- nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
- nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
- nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
- nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
- nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
- nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
- nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
- nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
- nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
- nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
- nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
- nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
- nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
- nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
- nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
- nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
- nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
- nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
- nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
- nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
- nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
- nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
- nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
- nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
- nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
- nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
- nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
- nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));
-
- put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
- get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
- ptr = 0;
- while (put != get) {
- nv_wo32(cache, ptr + 0,
- nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
- nv_wo32(cache, ptr + 4,
- nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
- get = (get + 1) & 0x1ff;
- ptr += 8;
- }
-
- /* guessing that all the 0x34xx regs aren't on NV50 */
- if (dev_priv->chipset != 0x50) {
- nv_wo32(ramfc, 0x84, ptr >> 3);
- nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
- nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
- nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
- nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
- nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
- }
+ struct nv50_fifo_priv *priv;
+ int ret;
- dev_priv->engine.instmem.flush(dev);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- /*XXX: probably reload ch127 (NULL) state back too */
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
- return 0;
-}
+ priv->base.base.destroy = nv50_fifo_destroy;
+ priv->base.base.init = nv50_fifo_init;
+ priv->base.base.fini = nv50_fifo_fini;
+ priv->base.base.context_new = nv50_fifo_context_new;
+ priv->base.base.context_del = nv50_fifo_context_del;
+ priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
+ priv->base.channels = 127;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ if (ret)
+ goto error;
-void
-nv50_fifo_tlb_flush(struct drm_device *dev)
-{
- nv50_vm_flush_engine(dev, 5);
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 33d5711a918d..d9cc2f2638d6 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -27,8 +27,8 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
-#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
#include "nv50_evo.h"
@@ -40,86 +40,6 @@ struct nv50_graph_engine {
u32 grctx_size;
};
-static void
-nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
- const uint32_t mask = 0x00010001;
-
- if (enabled)
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
- else
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
-}
-
-static struct nouveau_channel *
-nv50_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t inst;
- int i;
-
- /* Be sure we're not in the middle of a context switch or bad things
- * will happen, such as unloading the wrong pgraph context.
- */
- if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
- NV_ERROR(dev, "Ctxprog is still running\n");
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
- return NULL;
- inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
-
- if (chan && chan->ramin && chan->ramin->vinst == inst)
- return chan;
- }
-
- return NULL;
-}
-
-static int
-nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
-{
- uint32_t fifo = nv_rd32(dev, 0x400500);
-
- nv_wr32(dev, 0x400500, fifo & ~1);
- nv_wr32(dev, 0x400784, inst);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
- nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
- nv_wr32(dev, 0x400040, 0xffffffff);
- (void)nv_rd32(dev, 0x400040);
- nv_wr32(dev, 0x400040, 0x00000000);
- nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
-
- if (nouveau_wait_for_idle(dev))
- nv_wr32(dev, 0x40032c, inst | (1<<31));
- nv_wr32(dev, 0x400500, fifo);
-
- return 0;
-}
-
-static int
-nv50_graph_unload_context(struct drm_device *dev)
-{
- uint32_t inst;
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
- return 0;
- inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
-
- nouveau_wait_for_idle(dev);
- nv_wr32(dev, 0x400784, inst);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
- nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
- nouveau_wait_for_idle(dev);
-
- nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
- return 0;
-}
-
static int
nv50_graph_init(struct drm_device *dev, int engine)
{
@@ -211,12 +131,6 @@ nv50_graph_init(struct drm_device *dev, int engine)
static int
nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
- nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
- if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
- nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
- return -EBUSY;
- }
- nv50_graph_unload_context(dev);
nv_wr32(dev, 0x40013c, 0x00000000);
return 0;
}
@@ -229,7 +143,6 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *ramin = chan->ramin;
struct nouveau_gpuobj *grctx = NULL;
struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
- struct nouveau_grctx ctx = {};
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -248,11 +161,7 @@ nv50_graph_context_new(struct nouveau_channel *chan, int engine)
nv_wo32(ramin, hdr + 0x10, 0);
nv_wo32(ramin, hdr + 0x14, 0x00010000);
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = grctx;
- nv50_grctx_init(&ctx);
-
+ nv50_grctx_fill(dev, grctx);
nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
dev_priv->engine.instmem.flush(dev);
@@ -268,33 +177,14 @@ nv50_graph_context_del(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
- unsigned long flags;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- if (!chan->ramin)
- return;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- pfifo->reassign(dev, false);
- nv50_graph_fifo_access(dev, false);
-
- if (nv50_graph_channel(dev) == chan)
- nv50_graph_unload_context(dev);
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(chan->ramin, i, 0);
dev_priv->engine.instmem.flush(dev);
- nv50_graph_fifo_access(dev, true);
- pfifo->reassign(dev, true);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- nouveau_gpuobj_ref(NULL, &grctx);
-
atomic_dec(&chan->vm->engref[engine]);
+ nouveau_gpuobj_ref(NULL, &grctx);
chan->engctx[engine] = NULL;
}
@@ -325,85 +215,6 @@ nv50_graph_object_new(struct nouveau_channel *chan, int engine,
}
static void
-nv50_graph_context_switch(struct drm_device *dev)
-{
- uint32_t inst;
-
- nv50_graph_unload_context(dev);
-
- inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
- inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
- nv50_graph_do_load_context(dev, inst);
-
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
-}
-
-static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct nouveau_gpuobj *gpuobj;
-
- gpuobj = nouveau_ramht_find(chan, data);
- if (!gpuobj)
- return -ENOENT;
-
- if (nouveau_notifier_offset(gpuobj, NULL))
- return -EINVAL;
-
- chan->nvsw.vblsem = gpuobj;
- chan->nvsw.vblsem_offset = ~0;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
- return -ERANGE;
-
- chan->nvsw.vblsem_offset = data >> 2;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- chan->nvsw.vblsem_rval = data;
- return 0;
-}
-
-static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
- return -EINVAL;
-
- drm_vblank_get(dev, data);
-
- chan->nvsw.vblsem_head = data;
- list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
-
- return 0;
-}
-
-static int
-nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- nouveau_finish_page_flip(chan, NULL);
- return 0;
-}
-
-
-static void
nv50_graph_tlb_flush(struct drm_device *dev, int engine)
{
nv50_vm_flush_engine(dev, 0);
@@ -514,6 +325,7 @@ struct nouveau_enum nv50_data_error_names[] = {
{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+ { 0x00000024, "VP_ZERO_INPUTS", NULL },
{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
@@ -900,13 +712,14 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
int
nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->ramin)
continue;
@@ -939,15 +752,6 @@ nv50_graph_isr(struct drm_device *dev)
show &= ~0x00000010;
}
- if (stat & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, 0x400100, 0x00001000);
- nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
- nv50_graph_context_switch(dev);
- stat &= ~0x00001000;
- show &= ~0x00001000;
- }
-
show = (show && nouveau_ratelimit()) ? show : 0;
if (show & 0x00100000) {
@@ -996,28 +800,21 @@ nv50_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_graph_engine *pgraph;
- struct nouveau_grctx ctx = {};
int ret;
pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
if (!pgraph)
return -ENOMEM;
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = pgraph->ctxprog;
- ctx.ctxprog_max = ARRAY_SIZE(pgraph->ctxprog);
-
- ret = nv50_grctx_init(&ctx);
+ ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
+ &pgraph->ctxprog_size,
+ &pgraph->grctx_size);
if (ret) {
NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
kfree(pgraph);
return 0;
}
- pgraph->grctx_size = ctx.ctxvals_pos * 4;
- pgraph->ctxprog_size = ctx.ctxprog_len;
-
pgraph->base.destroy = nv50_graph_destroy;
pgraph->base.init = nv50_graph_init;
pgraph->base.fini = nv50_graph_fini;
@@ -1031,14 +828,6 @@ nv50_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 12, nv50_graph_isr);
- /* NVSW really doesn't live here... */
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
- NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
- NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
- NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
- NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
-
NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
NVOBJ_CLASS(dev, 0x0030, GR); /* null */
NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index 4b46d6968566..881e22b249fc 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -172,8 +172,8 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
/* Main function: construct the ctxprog skeleton, call the other functions. */
-int
-nv50_grctx_init(struct nouveau_grctx *ctx)
+static int
+nv50_grctx_generate(struct nouveau_grctx *ctx)
{
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
@@ -210,7 +210,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
cp_name(ctx, cp_check_load);
cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
- cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+ cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit);
/* setup for context load */
cp_name(ctx, cp_setup_auto_load);
@@ -277,6 +277,33 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
return 0;
}
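+/* One-shot helper: run the generator in NOUVEAU_GRCTX_VALS mode to fill a
+ * freshly allocated context object with its default register values. */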
+void
+nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+ nv50_grctx_generate(&(struct nouveau_grctx) {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_VALS,
+ .data = mem,
+ });
+}
+
+int
+nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+{
+ struct nouveau_grctx ctx = {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_PROG,
+ .data = data,
+ .ctxprog_max = max
+ };
+ int ret;
+
+ ret = nv50_grctx_generate(&ctx);
+ *cnt = ctx.ctxvals_pos * 4;
+ *len = ctx.ctxprog_len;
+ return ret;
+}
+
/*
* Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
* registers to save/restore and the default values for them.
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a7c12c94a5a6..0bba54f11800 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -83,7 +83,7 @@ nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size);
+ ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
if (ret) {
nv50_channel_del(&chan);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index b57a2d180ad2..90e8ed22cfcb 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -77,27 +77,13 @@ nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_gpuobj *ctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
- unsigned long flags;
- u32 inst, i;
-
- if (!chan->ramin)
- return;
-
- inst = chan->ramin->vinst >> 12;
- inst |= 0x80000000;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- if (nv_rd32(dev, 0x00b318) == inst)
- nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ int i;
for (i = 0x00; i <= 0x14; i += 4)
nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
+
nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = NULL;
}
@@ -162,7 +148,6 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
static int
nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
- /*XXX: context save for s/r */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
nv_wr32(dev, 0x00b140, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
new file mode 100644
index 000000000000..114d2517d4a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_software.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+#include "nv50_display.h"
+
+struct nv50_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nv50_software_chan {
+ struct nouveau_software_chan base;
+ struct {
+ struct nouveau_gpuobj *object;
+ } vblank;
+};
+
+static int
+mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ struct nouveau_gpuobj *gpuobj;
+
+ gpuobj = nouveau_ramht_find(chan, data);
+ if (!gpuobj)
+ return -ENOENT;
+
+ if (nouveau_notifier_offset(gpuobj, NULL))
+ return -EINVAL;
+
+ pch->vblank.object = gpuobj;
+ pch->base.vblank.offset = ~0;
+ return 0;
+}
+
+static int
+mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+
+ if (nouveau_notifier_offset(pch->vblank.object, &data))
+ return -ERANGE;
+
+ pch->base.vblank.offset = data >> 2;
+ return 0;
+}
+
+static int
+mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ pch->base.vblank.value = data;
+ return 0;
+}
+
+static int
+mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
+ struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ struct drm_device *dev = chan->dev;
+
+ if (!pch->vblank.object || pch->base.vblank.offset == ~0 || data > 1)
+ return -EINVAL;
+
+ drm_vblank_get(dev, data);
+
+ pch->base.vblank.head = data;
+ list_add(&pch->base.vblank.list, &psw->base.vblank);
+ return 0;
+}
+
+static int
+mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ nouveau_finish_page_flip(chan, NULL);
+ return 0;
+}
+
+static int
+nv50_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
+ struct nv50_display *pdisp = nv50_display(chan->dev);
+ struct nv50_software_chan *pch;
+ int ret = 0, i;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ pch->base.vblank.bo = chan->notifier_bo;
+ chan->engctx[engine] = pch;
+
+ /* dma objects for display sync channel semaphore blocks */
+ for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
+ struct nv50_display_crtc *dispc = &pdisp->crtc[i];
+ struct nouveau_gpuobj *obj = NULL;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ dispc->sem.bo->bo.offset, 0x1000,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (ret)
+ break;
+
+ ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ }
+
+ if (ret)
+ psw->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv50_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv50_software_chan *pch = chan->engctx[engine];
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nv50_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 0;
+ obj->class = class;
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static int
+nv50_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nv50_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nv50_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nv50_software_create(struct drm_device *dev)
+{
+ struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nv50_software_destroy;
+ psw->base.base.init = nv50_software_init;
+ psw->base.base.fini = nv50_software_fini;
+ psw->base.base.context_new = nv50_software_context_new;
+ psw->base.base.context_del = nv50_software_context_del;
+ psw->base.base.object_new = nv50_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+ NVOBJ_CLASS(dev, 0x506e, SW);
+ NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
+ NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
+ NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
+ NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
+ NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 274640212475..a9514eaa74c1 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -242,9 +242,9 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
NV_ERROR(dev, "no space while disconnecting SOR\n");
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
nouveau_hdmi_mode_set(encoder, NULL);
@@ -430,7 +430,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nv_encoder->crtc = NULL;
return;
}
- BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 44fbac9c7d93..179bb42a635c 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -147,7 +147,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
{
struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i;
pinstmem->flush(vm->dev);
@@ -158,7 +157,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
return;
}
- pfifo->tlb_flush(vm->dev);
for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
if (atomic_read(&vm->engref[i]))
dev_priv->eng[i]->tlb_flush(vm->dev, i);
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
new file mode 100644
index 000000000000..c2f889b0d340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv84_fence_chan {
+ struct nouveau_fence_chan base;
+};
+
+struct nv84_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_gpuobj *mem;
+};
+
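+/* Each channel owns a 16-byte slot in priv->mem at (chan->id * 16): emit
+ * releases a semaphore there with the fence sequence, sync acquires on the
+ * emitting channel's slot, and read returns the last value written. */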
+static int
+nv84_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, NvSema);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(chan->id * 16));
+ OUT_RING (chan, lower_32_bits(chan->id * 16));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+
+static int
+nv84_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ int ret = RING_SPACE(chan, 7);
+ if (ret == 0) {
+ BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, NvSema);
+ BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(prev->id * 16));
+ OUT_RING (chan, lower_32_bits(prev->id * 16));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
+ FIRE_RING (chan);
+ }
+ return ret;
+}
+
+static u32
+nv84_fence_read(struct nouveau_channel *chan)
+{
+ struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ return nv_ro32(priv->mem, chan->id * 16);
+}
+
+static void
+nv84_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fence_chan *fctx = chan->engctx[engine];
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv84_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nv84_fence_chan *fctx;
+ struct nouveau_gpuobj *obj;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+ priv->mem->vinst, priv->mem->size,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &obj);
+ if (ret == 0) {
+ ret = nouveau_ramht_insert(chan, NvSema, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ nv_wo32(priv->mem, chan->id * 16, 0x00000000);
+ }
+
+ if (ret)
+ nv84_fence_context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nv84_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nv84_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_gpuobj_ref(NULL, &priv->mem);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv84_fence_create(struct drm_device *dev)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fence_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nv84_fence_destroy;
+ priv->base.engine.init = nv84_fence_init;
+ priv->base.engine.fini = nv84_fence_fini;
+ priv->base.engine.context_new = nv84_fence_context_new;
+ priv->base.engine.context_del = nv84_fence_context_del;
+ priv->base.emit = nv84_fence_emit;
+ priv->base.sync = nv84_fence_sync;
+ priv->base.read = nv84_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
+ 0x1000, 0, &priv->mem);
+ if (ret)
+ goto out;
+
+out:
+ if (ret)
+ nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
new file mode 100644
index 000000000000..cc82d799fc3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nv84_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv84_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *cache;
+};
+
+static int
+nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv84_fifo_chan *fctx;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ u64 instance;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ atomic_inc(&chan->vm->engref[engine]);
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ instance = fctx->ramfc->vinst >> 8;
+
+ ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
+ if (ret)
+ goto error;
+
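+	/* same RAMFC layout as the nv50 path, but held in its own gpuobj
+	 * here and referenced from the channel header (0x00/0x04) below */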
+ nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
+ nv_wo32(fctx->ramfc, 0x40, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
+ nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(fctx->ramfc, 0x78, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
+ nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
+ nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
+ nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
+
+ nv_wo32(chan->ramin, 0x00, chan->id);
+ nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
+
+ dev_priv->engine.instmem.flush(dev);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+ nv50_fifo_playlist_update(dev);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ /* remove channel from playlist, will context switch if active */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(dev);
+
+ /* tell any engines on this channel to unload their contexts */
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* clean up */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ nouveau_gpuobj_ref(NULL, &fctx->ramfc);
+ nouveau_gpuobj_ref(NULL, &fctx->cache);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv84_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_chan *fctx;
+ u32 instance;
+ int i;
+
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+ nv_wr32(dev, 0x002044, 0x01003fff);
+
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+
+ for (i = 0; i < 128; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && (fctx = chan->engctx[engine]))
+ instance = 0x80000000 | fctx->ramfc->vinst >> 8;
+ else
+ instance = 0x00000000;
+ nv_wr32(dev, 0x002600 + (i * 4), instance);
+ }
+
+ nv50_fifo_playlist_update(dev);
+
+ nv_wr32(dev, 0x003200, 1);
+ nv_wr32(dev, 0x003250, 1);
+ nv_wr32(dev, 0x002500, 1);
+ return 0;
+}
+
+static int
+nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ /* set playlist length to zero, fifo will unload context */
+ nv_wr32(dev, 0x0032ec, 0);
+
+ /* tell all connected engines to unload their contexts */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan)
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0);
+ return 0;
+}
+
+int
+nv84_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv50_fifo_destroy;
+ priv->base.base.init = nv84_fifo_init;
+ priv->base.base.fini = nv84_fifo_fini;
+ priv->base.base.context_new = nv84_fifo_context_new;
+ priv->base.base.context_del = nv84_fifo_context_del;
+ priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
+ priv->base.channels = 127;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
index db94ff0a9fab..e25e13fb894e 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -23,21 +23,93 @@
*/
#include "drmP.h"
+
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"
-struct nv98_crypt_engine {
+#include "nv98_crypt.fuc.h"
+
+struct nv98_crypt_priv {
struct nouveau_exec_engine base;
};
+struct nv98_crypt_chan {
+ struct nouveau_gpuobj *mem;
+};
+
static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+ struct nv98_crypt_chan *cctx;
+ int ret;
+
+ cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
+ if (!cctx)
+ return -ENOMEM;
+
+ atomic_inc(&chan->vm->engref[engine]);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
+ if (ret)
+ goto error;
+
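+	/* describe cctx->mem to the engine through the channel header at
+	 * 0xa0-0xb4 (looks like a DMA descriptor: flags, limit, base);
+	 * context_del() clears these words again */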
+ nv_wo32(chan->ramin, 0xa0, 0x00190000);
+ nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
+ nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
+ nv_wo32(chan->ramin, 0xac, 0x00000000);
+ nv_wo32(chan->ramin, 0xb0, 0x00000000);
+ nv_wo32(chan->ramin, 0xb4, 0x00000000);
+ dev_priv->engine.instmem.flush(dev);
+
+error:
+ if (ret)
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv98_crypt_chan *cctx = chan->engctx[engine];
+ int i;
+
+ for (i = 0xa0; i < 0xb4; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+
+ nouveau_gpuobj_ref(NULL, &cctx->mem);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(cctx);
+}
+
+static int
+nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
{
- if (!(nv_rd32(dev, 0x000200) & 0x00004000))
- return 0;
+ struct nv98_crypt_chan *cctx = chan->engctx[engine];
+
+ /* fuc engine doesn't need an object, our ramht code does.. */
+ cctx->mem->engine = 5;
+ cctx->mem->class = class;
+ return nouveau_ramht_insert(chan, handle, cctx->mem);
+}
+static void
+nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 0x0a);
+}
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
return 0;
}
@@ -45,34 +117,100 @@ nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
static int
nv98_crypt_init(struct drm_device *dev, int engine)
{
+ int i;
+
+ /* reset! */
nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+ /* wait for exit interrupt to signal */
+ nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
+ nv_wr32(dev, 0x087004, 0x00000010);
+
+ /* upload microcode code and data segments */
+ nv_wr32(dev, 0x087ff8, 0x00100000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+ nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
+
+ nv_wr32(dev, 0x087ff8, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+ nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
+
+ /* start it running */
+ nv_wr32(dev, 0x08710c, 0x00000000);
+ nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
+ nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
return 0;
}
+static struct nouveau_enum nv98_crypt_isr_error_name[] = {
+ { 0x0000, "ILLEGAL_MTHD" },
+ { 0x0001, "INVALID_BITFIELD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "QUERY" },
+ {}
+};
+
+static void
+nv98_crypt_isr(struct drm_device *dev)
+{
+ u32 disp = nv_rd32(dev, 0x08701c);
+ u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
+ u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
+ u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
+ u32 addr = nv_rd32(dev, 0x087040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(dev, 0x087044);
+ int chid = nv50_graph_isr_chid(dev, inst);
+
+ if (stat & 0x00000040) {
+ NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
+ nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+ printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, mthd, data);
+ nv_wr32(dev, 0x087004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
+ nv_wr32(dev, 0x087004, stat);
+ }
+
+ nv50_fb_vm_trap(dev, 1);
+}
+
static void
nv98_crypt_destroy(struct drm_device *dev, int engine)
{
- struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+ struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+ nouveau_irq_unregister(dev, 14);
NVOBJ_ENGINE_DEL(dev, CRYPT);
-
- kfree(pcrypt);
+ kfree(priv);
}
int
nv98_crypt_create(struct drm_device *dev)
{
- struct nv98_crypt_engine *pcrypt;
+ struct nv98_crypt_priv *priv;
- pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
- if (!pcrypt)
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- pcrypt->base.destroy = nv98_crypt_destroy;
- pcrypt->base.init = nv98_crypt_init;
- pcrypt->base.fini = nv98_crypt_fini;
+ priv->base.destroy = nv98_crypt_destroy;
+ priv->base.init = nv98_crypt_init;
+ priv->base.fini = nv98_crypt_fini;
+ priv->base.context_new = nv98_crypt_context_new;
+ priv->base.context_del = nv98_crypt_context_del;
+ priv->base.object_new = nv98_crypt_object_new;
+ priv->base.tlb_flush = nv98_crypt_tlb_flush;
+
+ nouveau_irq_register(dev, 14, nv98_crypt_isr);
- NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+ NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
+ NVOBJ_CLASS(dev, 0x88b4, CRYPT);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
new file mode 100644
index 000000000000..7393813044de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc
@@ -0,0 +1,698 @@
+/*
+ * fuc microcode for nv98 pcrypt engine
+ * Copyright (C) 2010 Marcin Kościelnicki
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+.section #nv98_pcrypt_data
+
+ctx_dma:
+ctx_dma_query: .b32 0
+ctx_dma_src: .b32 0
+ctx_dma_dst: .b32 0
+.equ #dma_count 3
+ctx_query_address_high: .b32 0
+ctx_query_address_low: .b32 0
+ctx_query_counter: .b32 0
+ctx_cond_address_high: .b32 0
+ctx_cond_address_low: .b32 0
+ctx_cond_off: .b32 0
+ctx_src_address_high: .b32 0
+ctx_src_address_low: .b32 0
+ctx_dst_address_high: .b32 0
+ctx_dst_address_low: .b32 0
+ctx_mode: .b32 0
+.align 16
+ctx_key: .skip 16
+ctx_iv: .skip 16
+
+.align 0x80
+swap:
+.skip 32
+
+.align 8
+common_cmd_dtable:
+.b32 #ctx_query_address_high + 0x20000 ~0xff
+.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_query_counter + 0x20000 ~0xffffffff
+.b32 #cmd_query_get + 0x00000 ~1
+.b32 #ctx_cond_address_high + 0x20000 ~0xff
+.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
+.b32 #cmd_cond_mode + 0x00000 ~7
+.b32 #cmd_wrcache_flush + 0x00000 ~0
+.equ #common_cmd_max 0x88
+
+
+.align 8
+engine_cmd_dtable:
+.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_src_address_high + 0x20000 ~0xff
+.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_dst_address_high + 0x20000 ~0xff
+.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
+.b32 #crypt_cmd_mode + 0x00000 ~0xf
+.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
+.equ #engine_cmd_max 0xce
+
+.align 4
+crypt_dtable:
+.b16 #crypt_copy_prep #crypt_do_inout
+.b16 #crypt_store_prep #crypt_do_out
+.b16 #crypt_ecb_e_prep #crypt_do_inout
+.b16 #crypt_ecb_d_prep #crypt_do_inout
+.b16 #crypt_cbc_e_prep #crypt_do_inout
+.b16 #crypt_cbc_d_prep #crypt_do_inout
+.b16 #crypt_pcbc_e_prep #crypt_do_inout
+.b16 #crypt_pcbc_d_prep #crypt_do_inout
+.b16 #crypt_cfb_e_prep #crypt_do_inout
+.b16 #crypt_cfb_d_prep #crypt_do_inout
+.b16 #crypt_ofb_prep #crypt_do_inout
+.b16 #crypt_ctr_prep #crypt_do_inout
+.b16 #crypt_cbc_mac_prep #crypt_do_in
+.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
+.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
+
+.align 0x100
+
+.section #nv98_pcrypt_code
+
+ // $r0 is always set to 0 in our code - this allows some space savings.
+ clear b32 $r0
+
+ // set up the interrupt handler
+ mov $r1 #ih
+ mov $iv0 $r1
+
+ // init stack pointer
+ mov $sp $r0
+
+ // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
+ movw $r1 0xfff0
+ sethi $r1 0
+ mov $r2 0x400
+ iowr I[$r2 + 0x300] $r1
+
+ // enable the interrupts
+ or $r1 0xc
+ iowr I[$r2] $r1
+
+ // enable fifo access and context switching
+ mov $r1 3
+ mov $r2 0x1200
+ iowr I[$r2] $r1
+
+ // enable i0 delivery
+ bset $flags ie0
+
+ // sleep forever, waking only for interrupts.
+ bset $flags $p0
+ spin:
+ sleep $p0
+ bra #spin
+
+// i0 handler
+ih:
+ // see which interrupts we got
+ iord $r1 I[$r0 + 0x200]
+
+ and $r2 $r1 0x8
+ cmpu b32 $r2 0
+ bra e #noctx
+
+ // context switch... prepare the regs for xfer
+ mov $r2 0x7700
+ mov $xtargets $r2
+ mov $xdbase $r0
+ // 128-byte context.
+ mov $r2 0
+ sethi $r2 0x50000
+
+ // read current channel
+ mov $r3 0x1400
+ iord $r4 I[$r3]
+ // if bit 30 set, it's active, so we have to unload it first.
+ shl b32 $r5 $r4 1
+ cmps b32 $r5 0
+ bra nc #ctxload
+
+ // unload the current channel - save the context
+ xdst $r0 $r2
+ xdwait
+ // and clear bit 30, then write back
+ bclr $r4 0x1e
+ iowr I[$r3] $r4
+ // tell PFIFO we unloaded
+ mov $r4 1
+ iowr I[$r3 + 0x200] $r4
+
+ bra #noctx
+
+ ctxload:
+ // no channel loaded - perhaps we're requested to load one
+ iord $r4 I[$r3 + 0x100]
+ shl b32 $r15 $r4 1
+ cmps b32 $r15 0
+ // if bit 30 of next channel not set, probably PFIFO is just
+ // killing a context. do a faux load, without the active bit.
+ bra nc #dummyload
+
+ // ok, do a real context load.
+ xdld $r0 $r2
+ xdwait
+ mov $r5 #ctx_dma
+ mov $r6 #dma_count - 1
+ ctxload_dma_loop:
+ ld b32 $r7 D[$r5 + $r6 * 4]
+ add b32 $r8 $r6 0x180
+ shl b32 $r8 8
+ iowr I[$r8] $r7
+ sub b32 $r6 1
+ bra nc #ctxload_dma_loop
+
+ dummyload:
+ // tell PFIFO we're done
+ mov $r5 2
+ iowr I[$r3 + 0x200] $r5
+
+ noctx:
+ and $r2 $r1 0x4
+ cmpu b32 $r2 0
+ bra e #nocmd
+
+ // incoming fifo command.
+ mov $r3 0x1900
+ iord $r2 I[$r3 + 0x100]
+ iord $r3 I[$r3]
+ // extract the method
+ and $r4 $r2 0x7ff
+ // shift the addr to proper position if we need to interrupt later
+ shl b32 $r2 0x10
+
+ // mthd 0 and 0x100 [NAME, NOP]: ignore
+ and $r5 $r4 0x7bf
+ cmpu b32 $r5 0
+ bra e #cmddone
+
+ mov $r5 #engine_cmd_dtable - 0xc0 * 8
+ mov $r6 #engine_cmd_max
+ cmpu b32 $r4 0xc0
+ bra nc #dtable_cmd
+ mov $r5 #common_cmd_dtable - 0x80 * 8
+ mov $r6 #common_cmd_max
+ cmpu b32 $r4 0x80
+ bra nc #dtable_cmd
+ cmpu b32 $r4 0x60
+ bra nc #dma_cmd
+ cmpu b32 $r4 0x50
+ bra ne #illegal_mthd
+
+ // mthd 0x140: PM_TRIGGER
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x20000
+ iowr I[$r2] $r3
+ bra #cmddone
+
+ dma_cmd:
+ // mthd 0x180...: DMA_*
+ cmpu b32 $r4 0x60+#dma_count
+ bra nc #illegal_mthd
+ shl b32 $r5 $r4 2
+ add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
+ bset $r3 0x1e
+ st b32 D[$r5] $r3
+ add b32 $r4 0x180 - 0x60
+ shl b32 $r4 8
+ iowr I[$r4] $r3
+ bra #cmddone
+
+ dtable_cmd:
+ cmpu b32 $r4 $r6
+ bra nc #illegal_mthd
+ shl b32 $r4 3
+ add b32 $r4 $r5
+ ld b32 $r5 D[$r4 + 4]
+ and $r5 $r3
+ cmpu b32 $r5 0
+ bra ne #invalid_bitfield
+ ld b16 $r5 D[$r4]
+ ld b16 $r6 D[$r4 + 2]
+ cmpu b32 $r6 2
+ bra e #cmd_setctx
+ ld b32 $r7 D[$r0 + #ctx_cond_off]
+ and $r6 $r7
+ cmpu b32 $r6 1
+ bra e #cmddone
+ call $r5
+ bra $p1 #dispatch_error
+ bra #cmddone
+
+ cmd_setctx:
+ st b32 D[$r5] $r3
+ bra #cmddone
+
+
+ invalid_bitfield:
+ or $r2 1
+ dispatch_error:
+ illegal_mthd:
+ mov $r4 0x1000
+ iowr I[$r4] $r2
+ iowr I[$r4 + 0x100] $r3
+ mov $r4 0x40
+ iowr I[$r0] $r4
+
+ im_loop:
+ iord $r4 I[$r0 + 0x200]
+ and $r4 0x40
+ cmpu b32 $r4 0
+ bra ne #im_loop
+
+ cmddone:
+ // remove the command from FIFO
+ mov $r3 0x1d00
+ mov $r4 1
+ iowr I[$r3] $r4
+
+ nocmd:
+ // ack the processed interrupts
+ and $r1 $r1 0xc
+ iowr I[$r0 + 0x100] $r1
+iret
+
+cmd_query_get:
+ // if bit 0 of param set, trigger interrupt afterwards.
+ setp $p1 $r3
+ or $r2 3
+
+ // read PTIMER, beware of races...
+ mov $r4 0xb00
+ ptimer_retry:
+ iord $r6 I[$r4 + 0x100]
+ iord $r5 I[$r4]
+ iord $r7 I[$r4 + 0x100]
+ cmpu b32 $r6 $r7
+ bra ne #ptimer_retry
+
+ // prepare the query structure
+ ld b32 $r4 D[$r0 + #ctx_query_counter]
+ st b32 D[$r0 + #swap + 0x0] $r4
+ st b32 D[$r0 + #swap + 0x4] $r0
+ st b32 D[$r0 + #swap + 0x8] $r5
+ st b32 D[$r0 + #swap + 0xc] $r6
+
+ // will use target 0, DMA_QUERY.
+ mov $xtargets $r0
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_high]
+ shl b32 $r4 0x18
+ mov $xdbase $r4
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_low]
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdst $r4 $r5
+ xdwait
+
+ ret
+
+cmd_cond_mode:
+ // if >= 5, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 5
+ bra nc #return
+
+ // otherwise, no error.
+ bclr $flags $p1
+
+ // if < 2, no QUERY object is involved
+ cmpu b32 $r3 2
+ bra nc #cmd_cond_mode_queryful
+
+ xor $r3 1
+ st b32 D[$r0 + #ctx_cond_off] $r3
+ return:
+ ret
+
+ cmd_cond_mode_queryful:
+ // ok, will need to pull a QUERY object, prepare offsets
+ ld b32 $r4 D[$r0 + #ctx_cond_address_high]
+ ld b32 $r5 D[$r0 + #ctx_cond_address_low]
+ and $r6 $r5 0xff
+ shr b32 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r5
+ mov $xdbase $r4
+ mov $xtargets $r0
+
+ // pull the first one
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdld $r6 $r5
+
+ // if == 2, only a single QUERY is involved...
+ cmpu b32 $r3 2
+ bra ne #cmd_cond_mode_double
+
+ xdwait
+ ld b32 $r4 D[$r0 + #swap + 4]
+ cmpu b32 $r4 0
+ xbit $r4 $flags z
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+ // ok, we'll need to pull second one too
+ cmd_cond_mode_double:
+ add b32 $r6 0x10
+ add b32 $r5 0x10
+ xdld $r6 $r5
+ xdwait
+
+ // compare COUNTERs
+ ld b32 $r5 D[$r0 + #swap + 0x00]
+ ld b32 $r6 D[$r0 + #swap + 0x10]
+ cmpu b32 $r5 $r6
+ xbit $r4 $flags z
+
+ // compare RESen
+ ld b32 $r5 D[$r0 + #swap + 0x04]
+ ld b32 $r6 D[$r0 + #swap + 0x14]
+ cmpu b32 $r5 $r6
+ xbit $r5 $flags z
+ and $r4 $r5
+
+ // and negate or not, depending on mode
+ cmpu b32 $r3 3
+ xbit $r5 $flags z
+ xor $r4 $r5
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+cmd_wrcache_flush:
+ bclr $flags $p1
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x10000
+ iowr I[$r2] $r3
+ ret
+
+crypt_cmd_mode:
+ // if >= 0xf, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 0xf
+ bra nc #crypt_cmd_mode_return
+
+ bclr $flags $p1
+ st b32 D[$r0 + #ctx_mode] $r3
+
+ crypt_cmd_mode_return:
+ ret
+
+crypt_cmd_length:
+ // nop if length == 0
+ cmpu b32 $r3 0
+ bra e #crypt_cmd_mode_return
+
+ // init key, IV
+ cxset 3
+ mov $r4 #ctx_key
+ sethi $r4 0x70000
+ xdst $r0 $r4
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdst $r0 $r4
+ xdwait
+ ckeyreg $c7
+
+ // prepare the targets
+ mov $r4 0x2100
+ mov $xtargets $r4
+
+ // prepare src address
+ ld b32 $r4 D[$r0 + #ctx_src_address_high]
+ ld b32 $r5 D[$r0 + #ctx_src_address_low]
+ shr b32 $r8 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r8
+ and $r5 $r5 0xff
+
+ // prepare dst address
+ ld b32 $r6 D[$r0 + #ctx_dst_address_high]
+ ld b32 $r7 D[$r0 + #ctx_dst_address_low]
+ shr b32 $r8 $r7 8
+ shl b32 $r6 0x18
+ or $r6 $r8
+ and $r7 $r7 0xff
+
+ // find the proper prep & do functions
+ ld b32 $r8 D[$r0 + #ctx_mode]
+ shl b32 $r8 2
+
+ // run prep
+ ld b16 $r9 D[$r8 + #crypt_dtable]
+ call $r9
+
+ // do it
+ ld b16 $r9 D[$r8 + #crypt_dtable + 2]
+ call $r9
+ cxset 1
+ xdwait
+ cxset 0x61
+ xdwait
+ xdwait
+
+ // update src address
+ shr b32 $r8 $r4 0x18
+ shl b32 $r9 $r4 8
+ add b32 $r9 $r5
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_src_address_high] $r8
+ st b32 D[$r0 + #ctx_src_address_low] $r9
+
+ // update dst address
+ shr b32 $r8 $r6 0x18
+ shl b32 $r9 $r6 8
+ add b32 $r9 $r7
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_dst_address_high] $r8
+ st b32 D[$r0 + #ctx_dst_address_low] $r9
+
+ // pull updated IV
+ cxset 2
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdld $r0 $r4
+ xdwait
+
+ ret
+
+
+crypt_copy_prep:
+ cs0begin 2
+ cxsin $c0
+ cxsout $c0
+ ret
+
+crypt_store_prep:
+ cs0begin 1
+ cxsout $c6
+ ret
+
+crypt_ecb_e_prep:
+ cs0begin 3
+ cxsin $c0
+ cenc $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_ecb_d_prep:
+ ckexp $c7 $c7
+ cs0begin 3
+ cxsin $c0
+ cdec $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_cbc_e_prep:
+ cs0begin 4
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ ret
+
+crypt_cbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cmov $c2 $c6
+ cxsin $c6
+ cdec $c0 $c6
+ cxor $c0 $c2
+ cxsout $c0
+ ret
+
+crypt_pcbc_e_prep:
+ cs0begin 5
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_pcbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cxsin $c0
+ cdec $c1 $c0
+ cxor $c6 $c1
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_cfb_e_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c6 $c0
+ cxsout $c6
+ ret
+
+crypt_cfb_d_prep:
+ cs0begin 4
+ cenc $c0 $c6
+ cxsin $c6
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ofb_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ctr_prep:
+ cs0begin 5
+ cenc $c1 $c6
+ cadd $c6 1
+ cxsin $c0
+ cxor $c0 $c1
+ cxsout $c0
+ ret
+
+crypt_cbc_mac_prep:
+ cs0begin 3
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_complete_prep:
+ cs0begin 7
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_partial_prep:
+ cs0begin 8
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+// TODO
+crypt_do_in:
+ add b32 $r3 $r5
+ mov $xdbase $r4
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_in_loop:
+ xdld $r5 $r9
+ xdwait
+ cxset 0x22
+ xdst $r0 $r9
+ cs0exec 1
+ xdwait
+ add b32 $r5 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_in_loop
+ cxset 1
+ xdwait
+ ret
+
+crypt_do_out:
+ add b32 $r3 $r7
+ mov $xdbase $r6
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_out_loop:
+ cs0exec 1
+ cxset 0x61
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r7 0x10
+ cmpu b32 $r7 $r3
+ bra ne #crypt_do_out_loop
+ ret
+
+crypt_do_inout:
+ add b32 $r3 $r5
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_inout_loop:
+ mov $xdbase $r4
+ xdld $r5 $r9
+ xdwait
+ cxset 0x21
+ xdst $r0 $r9
+ cs0exec 1
+ cxset 0x61
+ mov $xdbase $r6
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r5 0x10
+ add b32 $r7 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_inout_loop
+ ret
+
+.align 0x100
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
new file mode 100644
index 000000000000..38676c74e6e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
@@ -0,0 +1,584 @@
+uint32_t nv98_pcrypt_data[] = {
+/* 0x0000: ctx_dma */
+/* 0x0000: ctx_dma_query */
+ 0x00000000,
+/* 0x0004: ctx_dma_src */
+ 0x00000000,
+/* 0x0008: ctx_dma_dst */
+ 0x00000000,
+/* 0x000c: ctx_query_address_high */
+ 0x00000000,
+/* 0x0010: ctx_query_address_low */
+ 0x00000000,
+/* 0x0014: ctx_query_counter */
+ 0x00000000,
+/* 0x0018: ctx_cond_address_high */
+ 0x00000000,
+/* 0x001c: ctx_cond_address_low */
+ 0x00000000,
+/* 0x0020: ctx_cond_off */
+ 0x00000000,
+/* 0x0024: ctx_src_address_high */
+ 0x00000000,
+/* 0x0028: ctx_src_address_low */
+ 0x00000000,
+/* 0x002c: ctx_dst_address_high */
+ 0x00000000,
+/* 0x0030: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0034: ctx_mode */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0040: ctx_key */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0050: ctx_iv */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0080: swap */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x00a0: common_cmd_dtable */
+ 0x0002000c,
+ 0xffffff00,
+ 0x00020010,
+ 0x0000000f,
+ 0x00020014,
+ 0x00000000,
+ 0x00000192,
+ 0xfffffffe,
+ 0x00020018,
+ 0xffffff00,
+ 0x0002001c,
+ 0x0000000f,
+ 0x000001d7,
+ 0xfffffff8,
+ 0x00000260,
+ 0xffffffff,
+/* 0x00e0: engine_cmd_dtable */
+ 0x00020040,
+ 0x00000000,
+ 0x00020044,
+ 0x00000000,
+ 0x00020048,
+ 0x00000000,
+ 0x0002004c,
+ 0x00000000,
+ 0x00020050,
+ 0x00000000,
+ 0x00020054,
+ 0x00000000,
+ 0x00020058,
+ 0x00000000,
+ 0x0002005c,
+ 0x00000000,
+ 0x00020024,
+ 0xffffff00,
+ 0x00020028,
+ 0x0000000f,
+ 0x0002002c,
+ 0xffffff00,
+ 0x00020030,
+ 0x0000000f,
+ 0x00000271,
+ 0xfffffff0,
+ 0x00010285,
+ 0xf000000f,
+/* 0x0150: crypt_dtable */
+ 0x04db0321,
+ 0x04b1032f,
+ 0x04db0339,
+ 0x04db034b,
+ 0x04db0361,
+ 0x04db0377,
+ 0x04db0395,
+ 0x04db03af,
+ 0x04db03cd,
+ 0x04db03e3,
+ 0x04db03f9,
+ 0x04db040f,
+ 0x04830429,
+ 0x0483043b,
+ 0x0483045d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nv98_pcrypt_code[] = {
+ 0x17f004bd,
+ 0x0010fe35,
+ 0xf10004fe,
+ 0xf0fff017,
+ 0x27f10013,
+ 0x21d00400,
+ 0x0c15f0c0,
+ 0xf00021d0,
+ 0x27f10317,
+ 0x21d01200,
+ 0x1031f400,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xb00812c4,
+ 0x0bf40024,
+ 0x0027f167,
+ 0x002bfe77,
+ 0xf00007fe,
+ 0x23f00027,
+ 0x0037f105,
+ 0x0034cf14,
+ 0xb0014594,
+ 0x18f40055,
+ 0x0602fa17,
+ 0x4af003f8,
+ 0x0034d01e,
+ 0xd00147f0,
+ 0x0ef48034,
+/* 0x0075: ctxload */
+ 0x4034cf33,
+ 0xb0014f94,
+ 0x18f400f5,
+ 0x0502fa21,
+ 0x57f003f8,
+ 0x0267f000,
+/* 0x008c: ctxload_dma_loop */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
+/* 0x009f: dummyload */
+ 0xf0f018f4,
+ 0x35d00257,
+/* 0x00a5: noctx */
+ 0x0412c480,
+ 0xf50024b0,
+ 0xf100df0b,
+ 0xcf190037,
+ 0x33cf4032,
+ 0xff24e400,
+ 0x1024b607,
+ 0x07bf45e4,
+ 0xf50054b0,
+ 0xf100b90b,
+ 0xf1fae057,
+ 0xb000ce67,
+ 0x18f4c044,
+ 0xa057f14d,
+ 0x8867f1fc,
+ 0x8044b000,
+ 0xb03f18f4,
+ 0x18f46044,
+ 0x5044b019,
+ 0xf1741bf4,
+ 0xbd220027,
+ 0x0233f034,
+ 0xf50023d0,
+/* 0x0103: dma_cmd */
+ 0xb000810e,
+ 0x18f46344,
+ 0x0245945e,
+ 0xfe8050b7,
+ 0x801e39f0,
+ 0x40b70053,
+ 0x44b60120,
+ 0x0043d008,
+/* 0x0123: dtable_cmd */
+ 0xb8600ef4,
+ 0x18f40446,
+ 0x0344b63e,
+ 0x980045bb,
+ 0x53fd0145,
+ 0x0054b004,
+ 0x58291bf4,
+ 0x46580045,
+ 0x0264b001,
+ 0x98170bf4,
+ 0x67fd0807,
+ 0x0164b004,
+ 0xf9300bf4,
+ 0x0f01f455,
+/* 0x015b: cmd_setctx */
+ 0x80280ef4,
+ 0x0ef40053,
+/* 0x0161: invalid_bitfield */
+ 0x0125f022,
+/* 0x0164: dispatch_error */
+/* 0x0164: illegal_mthd */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x47f04043,
+ 0x0004d040,
+/* 0x0174: im_loop */
+ 0xf08004cf,
+ 0x44b04044,
+ 0xf71bf400,
+/* 0x0180: cmddone */
+ 0x1d0037f1,
+ 0xd00147f0,
+/* 0x018a: nocmd */
+ 0x11c40034,
+ 0x4001d00c,
+/* 0x0192: cmd_query_get */
+ 0x38f201f8,
+ 0x0325f001,
+ 0x0b0047f1,
+/* 0x019c: ptimer_retry */
+ 0xcf4046cf,
+ 0x47cf0045,
+ 0x0467b840,
+ 0x98f41bf4,
+ 0x04800504,
+ 0x21008020,
+ 0x80220580,
+ 0x0bfe2306,
+ 0x03049800,
+ 0xfe1844b6,
+ 0x04980047,
+ 0x8057f104,
+ 0x0253f000,
+ 0xf80645fa,
+/* 0x01d7: cmd_cond_mode */
+ 0xf400f803,
+ 0x25f00131,
+ 0x0534b002,
+ 0xf41218f4,
+ 0x34b00132,
+ 0x0b18f402,
+ 0x800136f0,
+/* 0x01f2: return */
+ 0x00f80803,
+/* 0x01f4: cmd_cond_mode_queryful */
+ 0x98060498,
+ 0x56c40705,
+ 0x0855b6ff,
+ 0xfd1844b6,
+ 0x47fe0545,
+ 0x000bfe00,
+ 0x008057f1,
+ 0xfa0253f0,
+ 0x34b00565,
+ 0x131bf402,
+ 0x049803f8,
+ 0x0044b021,
+ 0x800b4cf0,
+ 0x00f80804,
+/* 0x022c: cmd_cond_mode_double */
+ 0xb61060b6,
+ 0x65fa1050,
+ 0x9803f805,
+ 0x06982005,
+ 0x0456b824,
+ 0x980b4cf0,
+ 0x06982105,
+ 0x0456b825,
+ 0xfd0b5cf0,
+ 0x34b00445,
+ 0x0b5cf003,
+ 0x800645fd,
+ 0x00f80804,
+/* 0x0260: cmd_wrcache_flush */
+ 0xf10132f4,
+ 0xbd220027,
+ 0x0133f034,
+ 0xf80023d0,
+/* 0x0271: crypt_cmd_mode */
+ 0x0131f400,
+ 0xb00225f0,
+ 0x18f40f34,
+ 0x0132f409,
+/* 0x0283: crypt_cmd_mode_return */
+ 0xf80d0380,
+/* 0x0285: crypt_cmd_length */
+ 0x0034b000,
+ 0xf4fb0bf4,
+ 0x47f0033c,
+ 0x0743f040,
+ 0xf00604fa,
+ 0x43f05047,
+ 0x0604fa06,
+ 0x3cf503f8,
+ 0x47f1c407,
+ 0x4bfe2100,
+ 0x09049800,
+ 0x950a0598,
+ 0x44b60858,
+ 0x0548fd18,
+ 0x98ff55c4,
+ 0x07980b06,
+ 0x0878950c,
+ 0xfd1864b6,
+ 0x77c40568,
+ 0x0d0898ff,
+ 0x580284b6,
+ 0x95f9a889,
+ 0xf9a98958,
+ 0x013cf495,
+ 0x3cf403f8,
+ 0xf803f861,
+ 0x18489503,
+ 0xbb084994,
+ 0x81b60095,
+ 0x09088000,
+ 0x950a0980,
+ 0x69941868,
+ 0x0097bb08,
+ 0x800081b6,
+ 0x09800b08,
+ 0x023cf40c,
+ 0xf05047f0,
+ 0x04fa0643,
+ 0xf803f805,
+/* 0x0321: crypt_copy_prep */
+ 0x203cf500,
+ 0x003cf594,
+ 0x003cf588,
+/* 0x032f: crypt_store_prep */
+ 0xf500f88c,
+ 0xf594103c,
+ 0xf88c063c,
+/* 0x0339: crypt_ecb_e_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x003cf588,
+ 0x003cf5d0,
+/* 0x034b: crypt_ecb_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594303c,
+ 0xf588003c,
+ 0xf5d4003c,
+ 0xf88c003c,
+/* 0x0361: crypt_cbc_e_prep */
+ 0x403cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+/* 0x0377: crypt_cbc_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf584623c,
+ 0xf588063c,
+ 0xf5d4603c,
+ 0xf5ac203c,
+ 0xf88c003c,
+/* 0x0395: crypt_pcbc_e_prep */
+ 0x503cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+ 0x063cf58c,
+/* 0x03af: crypt_pcbc_d_prep */
+ 0xf500f8ac,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf588003c,
+ 0xf5d4013c,
+ 0xf5ac163c,
+ 0xf58c063c,
+ 0xf8ac063c,
+/* 0x03cd: crypt_cfb_e_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x063cf588,
+ 0x063cf5ac,
+/* 0x03e3: crypt_cfb_d_prep */
+ 0xf500f88c,
+ 0xf594403c,
+ 0xf5d0603c,
+ 0xf588063c,
+ 0xf5ac603c,
+ 0xf88c003c,
+/* 0x03f9: crypt_ofb_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x603cf588,
+ 0x003cf5ac,
+/* 0x040f: crypt_ctr_prep */
+ 0xf500f88c,
+ 0xf594503c,
+ 0xf5d0613c,
+ 0xf5b0163c,
+ 0xf588003c,
+ 0xf5ac103c,
+ 0xf88c003c,
+/* 0x0429: crypt_cbc_mac_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+/* 0x043b: crypt_cmac_finish_complete_prep */
+ 0xf500f8d0,
+ 0xf594703c,
+ 0xf588003c,
+ 0xf5ac063c,
+ 0xf5ac003c,
+ 0xf5d0003c,
+ 0xf5bc003c,
+ 0xf5ac063c,
+ 0xf8d0663c,
+/* 0x045d: crypt_cmac_finish_partial_prep */
+ 0x803cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x003cf5ac,
+ 0x003cf5ac,
+ 0x003cf5d0,
+ 0x003cf5bc,
+ 0x063cf5bc,
+ 0x663cf5ac,
+/* 0x0483: crypt_do_in */
+ 0xbb00f8d0,
+ 0x47fe0035,
+ 0x8097f100,
+ 0x0293f000,
+/* 0x0490: crypt_do_in_loop */
+ 0xf80559fa,
+ 0x223cf403,
+ 0xf50609fa,
+ 0xf898103c,
+ 0x1050b603,
+ 0xf40453b8,
+ 0x3cf4e91b,
+ 0xf803f801,
+/* 0x04b1: crypt_do_out */
+ 0x0037bb00,
+ 0xf10067fe,
+ 0xf0008097,
+/* 0x04be: crypt_do_out_loop */
+ 0x3cf50293,
+ 0x3cf49810,
+ 0x0579fa61,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb81070b6,
+ 0x1bf40473,
+/* 0x04db: crypt_do_inout */
+ 0xbb00f8e8,
+ 0x97f10035,
+ 0x93f00080,
+/* 0x04e5: crypt_do_inout_loop */
+ 0x0047fe02,
+ 0xf80559fa,
+ 0x213cf403,
+ 0xf50609fa,
+ 0xf498103c,
+ 0x67fe613c,
+ 0x0579fa00,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb61050b6,
+ 0x53b81070,
+ 0xd41bf404,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index 8f356d58e409..0387dc7f4f42 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -79,29 +79,13 @@ static void
nva3_copy_context_del(struct nouveau_channel *chan, int engine)
{
struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- u32 inst;
-
- inst = (chan->ramin->vinst >> 12);
- inst |= 0x40000000;
-
- /* disable fifo access */
- nv_wr32(dev, 0x104048, 0x00000000);
- /* mark channel as unloaded if it's currently active */
- if (nv_rd32(dev, 0x104050) == inst)
- nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
- /* mark next channel as invalid if it's about to be loaded */
- if (nv_rd32(dev, 0x104054) == inst)
- nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
- /* restore fifo access */
- nv_wr32(dev, 0x104048, 0x00000003);
+ int i;
- for (inst = 0xc0; inst <= 0xd4; inst += 4)
- nv_wo32(chan->ramin, inst, 0x00000000);
-
- nouveau_gpuobj_ref(NULL, &ctx);
+ for (i = 0xc0; i <= 0xd4; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
atomic_dec(&chan->vm->engref[engine]);
+ nouveau_gpuobj_ref(NULL, &ctx);
chan->engctx[engine] = ctx;
}
@@ -143,13 +127,6 @@ static int
nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
-
- /* trigger fuc context unload */
- nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
- nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
- nv_wr32(dev, 0x104000, 0x00000008);
- nv_wait(dev, 0x104008, 0x00000008, 0x00000000);
-
nv_wr32(dev, 0x104014, 0xffffffff);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 9e636e6ef6d7..798829353fb6 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -98,7 +98,9 @@ read_pll(struct drm_device *dev, int clk, u32 pll)
sclk = read_clk(dev, 0x10 + clk, false);
}
- return sclk * N / (M * P);
+ if (M * P)
+ return sclk * N / (M * P);
+ return 0;
}
struct creg {
@@ -182,23 +184,26 @@ prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
const u32 src1 = 0x004160 + (clk * 4);
const u32 ctrl = pll + 0;
const u32 coef = pll + 4;
- u32 cntl;
if (!reg->clk && !reg->pll) {
NV_DEBUG(dev, "no clock for %02x\n", clk);
return;
}
- cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
if (reg->pll) {
nv_mask(dev, src0, 0x00000101, 0x00000101);
nv_wr32(dev, coef, reg->pll);
- nv_wr32(dev, ctrl, cntl | 0x00000015);
+ nv_mask(dev, ctrl, 0x00000015, 0x00000015);
+ nv_mask(dev, ctrl, 0x00000010, 0x00000000);
+ nv_wait(dev, ctrl, 0x00020000, 0x00020000);
+ nv_mask(dev, ctrl, 0x00000010, 0x00000010);
+ nv_mask(dev, ctrl, 0x00000008, 0x00000000);
nv_mask(dev, src1, 0x00000100, 0x00000000);
nv_mask(dev, src1, 0x00000001, 0x00000000);
} else {
nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
- nv_wr32(dev, ctrl, cntl | 0x0000001d);
+ nv_mask(dev, ctrl, 0x00000018, 0x00000018);
+ udelay(20);
nv_mask(dev, ctrl, 0x00000001, 0x00000000);
nv_mask(dev, src0, 0x00000100, 0x00000000);
nv_mask(dev, src0, 0x00000001, 0x00000000);
@@ -230,17 +235,28 @@ nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
}
struct nva3_pm_state {
+ struct nouveau_pm_level *perflvl;
+
struct creg nclk;
struct creg sclk;
- struct creg mclk;
struct creg vdec;
struct creg unka0;
+
+ struct creg mclk;
+ u8 *rammap;
+ u8 rammap_ver;
+ u8 rammap_len;
+ u8 *ramcfg;
+ u8 ramcfg_len;
+ u32 r004018;
+ u32 r100760;
};
void *
nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct nva3_pm_state *info;
+ u8 ramcfg_cnt;
int ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -267,6 +283,20 @@ nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
if (ret < 0)
goto out;
+ info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
+ &info->rammap_ver,
+ &info->rammap_len,
+ &ramcfg_cnt, &info->ramcfg_len);
+ if (info->rammap_ver != 0x10 || info->rammap_len < 5)
+ info->rammap = NULL;
+
+ info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
+ &info->rammap_ver,
+ &info->ramcfg_len);
+ if (info->rammap_ver != 0x10)
+ info->ramcfg = NULL;
+
+ info->perflvl = perflvl;
out:
if (ret < 0) {
kfree(info);
@@ -287,6 +317,240 @@ nva3_pm_grcp_idle(void *data)
return false;
}
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+ nv_wr32(exec->dev, 0x1002d4, 0x00000001);
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+ nv_wr32(exec->dev, 0x1002d0, 0x00000001);
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000);
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
+ volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
+ udelay((nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+ if (mr <= 1)
+ return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
+ if (mr <= 3)
+ return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
+ return 0;
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+ struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+
+ if (mr <= 1) {
+ if (dev_priv->vram_rank_B)
+ nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data);
+ nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data);
+ } else
+ if (mr <= 3) {
+ if (dev_priv->vram_rank_B)
+ nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data);
+ nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data);
+ }
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+ struct drm_device *dev = exec->dev;
+ struct nva3_pm_state *info = exec->priv;
+ u32 ctrl;
+
+ ctrl = nv_rd32(dev, 0x004000);
+ if (!(ctrl & 0x00000008) && info->mclk.pll) {
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
+ nv_wr32(dev, 0x004018, 0x00001000);
+ nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
+ nv_wr32(dev, 0x004004, info->mclk.pll);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
+ udelay(64);
+ nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
+ udelay(20);
+ } else
+ if (!info->mclk.pll) {
+ nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
+ nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
+ }
+
+ if (info->rammap) {
+ if (info->ramcfg && (info->rammap[4] & 0x08)) {
+ u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
+ info->ramcfg[5];
+ u32 unk5a4 = ROM16(info->ramcfg[7]);
+ u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
+ (info->ramcfg[3] & 0x0f) << 16 |
+ (info->ramcfg[9] & 0x0f) |
+ 0x80000000;
+ nv_wr32(dev, 0x1005a0, unk5a0);
+ nv_wr32(dev, 0x1005a4, unk5a4);
+ nv_wr32(dev, 0x10f804, unk804);
+ nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
+ } else {
+ nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
+ nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
+ nv_mask(dev, 0x100760, 0x22222222, info->r100760);
+ nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
+ nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
+ }
+ }
+
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
+ nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
+ }
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+ struct drm_device *dev = exec->dev;
+ struct nva3_pm_state *info = exec->priv;
+ struct nouveau_pm_level *perflvl = info->perflvl;
+ int i;
+
+ for (i = 0; i < 9; i++)
+ nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);
+
+ if (info->ramcfg) {
+ u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
+ nv_mask(dev, 0x100200, 0x00001000, data);
+ }
+
+ if (info->ramcfg) {
+ u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
+ u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
+ u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
+ if ( (info->ramcfg[2] & 0x20))
+ unk714 |= 0xf0000000;
+ if (!(info->ramcfg[2] & 0x04))
+ unk714 |= 0x00000010;
+ nv_wr32(dev, 0x100714, unk714);
+
+ if (info->ramcfg[2] & 0x01)
+ unk71c |= 0x00000100;
+ nv_wr32(dev, 0x10071c, unk71c);
+
+ if (info->ramcfg[2] & 0x02)
+ unk718 |= 0x00000100;
+ nv_wr32(dev, 0x100718, unk718);
+
+ if (info->ramcfg[2] & 0x10)
+ nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
+ }
+}
+
+static void
+prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
+{
+ struct nouveau_mem_exec_func exec = {
+ .dev = dev,
+ .precharge = mclk_precharge,
+ .refresh = mclk_refresh,
+ .refresh_auto = mclk_refresh_auto,
+ .refresh_self = mclk_refresh_self,
+ .wait = mclk_wait,
+ .mrg = mclk_mrg,
+ .mrs = mclk_mrs,
+ .clock_set = mclk_clock_set,
+ .timing_set = mclk_timing_set,
+ .priv = info
+ };
+ u32 ctrl;
+
+ /* XXX: unclear where the 750MHz threshold comes from */
+ if (info->perflvl->memory <= 750000) {
+ info->r004018 = 0x10000000;
+ info->r100760 = 0x22222222;
+ }
+
+ ctrl = nv_rd32(dev, 0x004000);
+ if (ctrl & 0x00000008) {
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x004128, 0x00000101, 0x00000101);
+ nv_wr32(dev, 0x004004, info->mclk.pll);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
+ nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef));
+ nv_wait(dev, 0x004000, 0x00020000, 0x00020000);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000010));
+ nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
+ nv_wr32(dev, 0x004000, (ctrl |= 0x00000004));
+ }
+ } else {
+ u32 ssel = 0x00000101;
+ if (info->mclk.clk)
+ ssel |= info->mclk.clk;
+ else
+ ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
+ nv_mask(dev, 0x004168, 0x003f3141, ctrl);
+ }
+
+ if (info->ramcfg) {
+ if (info->ramcfg[2] & 0x10) {
+ nv_mask(dev, 0x111104, 0x00000600, 0x00000000);
+ } else {
+ nv_mask(dev, 0x111100, 0x40000000, 0x40000000);
+ nv_mask(dev, 0x111104, 0x00000180, 0x00000000);
+ }
+ }
+ if (info->rammap && !(info->rammap[4] & 0x02))
+ nv_mask(dev, 0x100200, 0x00000800, 0x00000000);
+ nv_wr32(dev, 0x611200, 0x00003300);
+ if (!(info->ramcfg[2] & 0x10))
+ nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/
+
+ nouveau_mem_exec(&exec, info->perflvl);
+
+ nv_wr32(dev, 0x611200, 0x00003330);
+ if (info->rammap && (info->rammap[4] & 0x02))
+ nv_mask(dev, 0x100200, 0x00000800, 0x00000800);
+ if (info->ramcfg) {
+ if (info->ramcfg[2] & 0x10) {
+ nv_mask(dev, 0x111104, 0x00000180, 0x00000180);
+ nv_mask(dev, 0x111100, 0x40000000, 0x00000000);
+ } else {
+ nv_mask(dev, 0x111104, 0x00000600, 0x00000600);
+ }
+ }
+
+ if (info->mclk.pll) {
+ nv_mask(dev, 0x004168, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004168, 0x00000100, 0x00000000);
+ } else {
+ nv_mask(dev, 0x004000, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004128, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x004128, 0x00000100, 0x00000000);
+ }
+}
+
int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
@@ -316,18 +580,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
prog_clk(dev, 0x20, &info->unka0);
prog_clk(dev, 0x21, &info->vdec);
- if (info->mclk.clk || info->mclk.pll) {
- nv_wr32(dev, 0x100210, 0);
- nv_wr32(dev, 0x1002dc, 1);
- nv_wr32(dev, 0x004018, 0x00001000);
- prog_pll(dev, 0x02, 0x004000, &info->mclk);
- if (nv_rd32(dev, 0x4000) & 0x00000008)
- nv_wr32(dev, 0x004018, 0x1000d000);
- else
- nv_wr32(dev, 0x004018, 0x10005000);
- nv_wr32(dev, 0x1002dc, 0);
- nv_wr32(dev, 0x100210, 0x80000000);
- }
+ if (info->mclk.clk || info->mclk.pll)
+ prog_mem(dev, info);
ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index a495e48197ca..797159e7b7a6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -43,22 +43,22 @@ nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
return ret;
if (rect->rop != ROP_COPY) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 1);
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0588, 1);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
else
OUT_RING (chan, rect->color);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0600, 4);
OUT_RING (chan, rect->dx);
OUT_RING (chan, rect->dy);
OUT_RING (chan, rect->dx + rect->width);
OUT_RING (chan, rect->dy + rect->height);
if (rect->rop != ROP_COPY) {
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 3);
}
FIRE_RING(chan);
@@ -78,14 +78,14 @@ nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
if (ret)
return ret;
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0110, 1);
OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4);
OUT_RING (chan, region->dx);
OUT_RING (chan, region->dy);
OUT_RING (chan, region->width);
OUT_RING (chan, region->height);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4);
OUT_RING (chan, 0);
OUT_RING (chan, region->sx);
OUT_RING (chan, 0);
@@ -116,7 +116,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
OUT_RING (chan, palette[image->bg_color] | mask);
@@ -125,10 +125,10 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, image->bg_color);
OUT_RING (chan, image->fg_color);
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0838, 2);
OUT_RING (chan, image->width);
OUT_RING (chan, image->height);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0850, 4);
OUT_RING (chan, 0);
OUT_RING (chan, image->dx);
OUT_RING (chan, 0);
@@ -143,7 +143,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
dwords -= push;
- BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+ BEGIN_NIC0(chan, NvSub2D, 0x0860, push);
OUT_RINGp(chan, data, push);
data += push;
}
@@ -200,47 +200,47 @@ nvc0_fbcon_accel_init(struct fb_info *info)
return ret;
}
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
OUT_RING (chan, 0x0000902d);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
OUT_RING (chan, 3);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1);
OUT_RING (chan, 0x55);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x0580, 2);
OUT_RING (chan, 4);
OUT_RING (chan, format);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+ BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2);
OUT_RING (chan, 2);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0804, 1);
OUT_RING (chan, format);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x0800, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+ BEGIN_NVC0(chan, NvSub2D, 0x0808, 3);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+ BEGIN_NVC0(chan, NvSub2D, 0x081c, 1);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+ BEGIN_NVC0(chan, NvSub2D, 0x0840, 4);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
OUT_RING (chan, 1);
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+ BEGIN_NVC0(chan, NvSub2D, 0x0200, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
@@ -251,7 +251,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->var.yres_virtual);
OUT_RING (chan, upper_32_bits(fb->vma.offset));
OUT_RING (chan, lower_32_bits(fb->vma.offset));
- BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+ BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
OUT_RING (chan, 0);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
new file mode 100644
index 000000000000..47ab388a606e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fifo.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nvc0_fence_priv {
+ struct nouveau_fence_priv base;
+ struct nouveau_bo *bo;
+};
+
+struct nvc0_fence_chan {
+ struct nouveau_fence_chan base;
+ struct nouveau_vma vma;
+};
+
+static int
+nvc0_fence_emit(struct nouveau_fence *fence)
+{
+ struct nouveau_channel *chan = fence->channel;
+ struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ u64 addr = fctx->vma.offset + chan->id * 16;
+ int ret;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+ FIRE_RING (chan);
+ }
+
+ return ret;
+}
+
+static int
+nvc0_fence_sync(struct nouveau_fence *fence,
+ struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+ struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+ u64 addr = fctx->vma.offset + prev->id * 16;
+ int ret;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
+ OUT_RING (chan, fence->sequence);
+ OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
+ NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+ FIRE_RING (chan);
+ }
+
+ return ret;
+}
+
+static u32
+nvc0_fence_read(struct nouveau_channel *chan)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+ return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
+}
+
+static void
+nvc0_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nvc0_fence_chan *fctx = chan->engctx[engine];
+
+ nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ nouveau_fence_context_del(&fctx->base);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+ struct nvc0_fence_chan *fctx;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ nouveau_fence_context_new(&fctx->base);
+
+ ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
+ if (ret)
+ nvc0_fence_context_del(chan, engine);
+
+ nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
+ return ret;
+}
+
+static int
+nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static int
+nvc0_fence_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static void
+nvc0_fence_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fence_priv *priv = nv_engine(dev, engine);
+
+ nouveau_bo_unmap(priv->bo);
+ nouveau_bo_ref(NULL, &priv->bo);
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nvc0_fence_create(struct drm_device *dev)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fence_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.engine.destroy = nvc0_fence_destroy;
+ priv->base.engine.init = nvc0_fence_init;
+ priv->base.engine.fini = nvc0_fence_fini;
+ priv->base.engine.context_new = nvc0_fence_context_new;
+ priv->base.engine.context_del = nvc0_fence_context_del;
+ priv->base.emit = nvc0_fence_emit;
+ priv->base.sync = nvc0_fence_sync;
+ priv->base.read = nvc0_fence_read;
+ dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+
+ ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, NULL, &priv->bo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (ret == 0)
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
+ if (ret)
+ nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 50d68a7a1379..7d85553d518c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -26,10 +26,12 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
static void nvc0_fifo_isr(struct drm_device *);
struct nvc0_fifo_priv {
+ struct nouveau_fifo_priv base;
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
struct nouveau_vma user_vma;
@@ -37,8 +39,8 @@ struct nvc0_fifo_priv {
};
struct nvc0_fifo_chan {
+ struct nouveau_fifo_chan base;
struct nouveau_gpuobj *user;
- struct nouveau_gpuobj *ramfc;
};
static void
@@ -46,8 +48,7 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv = pfifo->priv;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct nouveau_gpuobj *cur;
int i, p;
@@ -69,59 +70,20 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
NV_ERROR(dev, "PFIFO - playlist update failed\n");
}
-void
-nvc0_fifo_disable(struct drm_device *dev)
-{
-}
-
-void
-nvc0_fifo_enable(struct drm_device *dev)
-{
-}
-
-bool
-nvc0_fifo_reassign(struct drm_device *dev, bool enable)
-{
- return false;
-}
-
-bool
-nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
-{
- return false;
-}
-
-int
-nvc0_fifo_channel_id(struct drm_device *dev)
-{
- return 127;
-}
-
-int
-nvc0_fifo_create_context(struct nouveau_channel *chan)
+static int
+nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv = pfifo->priv;
- struct nvc0_fifo_chan *fifoch;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nvc0_fifo_chan *fctx;
u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
- int ret;
+ int ret, i;
- chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
- if (!chan->fifo_priv)
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
return -ENOMEM;
- fifoch = chan->fifo_priv;
-
- /* allocate vram for control regs, map into polling area */
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &fifoch->user);
- if (ret)
- goto error;
-
- nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
- *(struct nouveau_mem **)fifoch->user->node);
chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
priv->user_vma.offset + (chan->id * 0x1000),
@@ -131,176 +93,77 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
goto error;
}
- /* ramfc */
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
- chan->ramin->vinst, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+ /* allocate vram for control regs, map into polling area */
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
if (ret)
goto error;
- nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(fifoch->user->vinst));
- nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(fifoch->user->vinst));
- nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
- nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
- nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
- nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+ *(struct nouveau_mem **)fctx->user->node);
+
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
+ nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
+ nv_wo32(chan->ramin, 0x10, 0x0000face);
+ nv_wo32(chan->ramin, 0x30, 0xfffff902);
+ nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
upper_32_bits(ib_virt));
- nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
- nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
- nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
- nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
- nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
- nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
- nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
- nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
- nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+ nv_wo32(chan->ramin, 0x54, 0x00000002);
+ nv_wo32(chan->ramin, 0x84, 0x20400000);
+ nv_wo32(chan->ramin, 0x94, 0x30000001);
+ nv_wo32(chan->ramin, 0x9c, 0x00000100);
+ nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
+ nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
+ nv_wo32(chan->ramin, 0xac, 0x0000001f);
+ nv_wo32(chan->ramin, 0xb8, 0xf8000000);
+ nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
pinstmem->flush(dev);
nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
(chan->ramin->vinst >> 12));
nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
nvc0_fifo_playlist_update(dev);
- return 0;
error:
- pfifo->destroy_context(chan);
+ if (ret)
+ priv->base.base.context_del(chan, engine);
return ret;
}
-void
-nvc0_fifo_destroy_context(struct nouveau_channel *chan)
+static void
+nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
{
+ struct nvc0_fifo_chan *fctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
- struct nvc0_fifo_chan *fifoch;
nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, chan->id);
if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
-
nvc0_fifo_playlist_update(dev);
-
nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+ nouveau_gpuobj_ref(NULL, &fctx->user);
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
- fifoch = chan->fifo_priv;
- chan->fifo_priv = NULL;
- if (!fifoch)
- return;
-
- nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
- nouveau_gpuobj_ref(NULL, &fifoch->user);
- kfree(fifoch);
-}
-
-int
-nvc0_fifo_load_context(struct nouveau_channel *chan)
-{
- return 0;
-}
-
-int
-nvc0_fifo_unload_context(struct drm_device *dev)
-{
- int i;
-
- for (i = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
- continue;
-
- nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
- nv_wr32(dev, 0x002634, i);
- if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
- NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
- i, nv_rd32(dev, 0x002634));
- return -EBUSY;
- }
- }
-
- return 0;
-}
-
-static void
-nvc0_fifo_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv;
-
- priv = pfifo->priv;
- if (!priv)
- return;
-
- nouveau_vm_put(&priv->user_vma);
- nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
- kfree(priv);
-}
-
-void
-nvc0_fifo_takedown(struct drm_device *dev)
-{
- nv_wr32(dev, 0x002140, 0x00000000);
- nvc0_fifo_destroy(dev);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
}
static int
-nvc0_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
- struct nvc0_fifo_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- pfifo->priv = priv;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
- &priv->playlist[0]);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
- &priv->playlist[1]);
- if (ret)
- goto error;
-
- ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
- 12, NV_MEM_ACCESS_RW, &priv->user_vma);
- if (ret)
- goto error;
-
- nouveau_irq_register(dev, 8, nvc0_fifo_isr);
- NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
- return 0;
-
-error:
- nvc0_fifo_destroy(dev);
- return ret;
-}
-
-int
-nvc0_fifo_init(struct drm_device *dev)
+nvc0_fifo_init(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
struct nouveau_channel *chan;
- struct nvc0_fifo_priv *priv;
- int ret, i;
-
- if (!pfifo->priv) {
- ret = nvc0_fifo_create(dev);
- if (ret)
- return ret;
- }
- priv = pfifo->priv;
+ int i;
/* reset PFIFO, enable all available PSUBFIFO areas */
nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
@@ -338,7 +201,7 @@ nvc0_fifo_init(struct drm_device *dev)
/* restore PFIFO context table */
for (i = 0; i < 128; i++) {
chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->fifo_priv)
+ if (!chan || !chan->engctx[engine])
continue;
nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
@@ -350,6 +213,29 @@ nvc0_fifo_init(struct drm_device *dev)
return 0;
}
+static int
+nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ int i;
+
+ for (i = 0; i < 128; i++) {
+ if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
+ continue;
+
+ nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x002634, i);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+ NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+ i, nv_rd32(dev, 0x002634));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0x00000000);
+ return 0;
+}
+
+
struct nouveau_enum nvc0_fifo_fault_unit[] = {
{ 0x00, "PGRAPH" },
{ 0x03, "PEEPHOLE" },
@@ -439,13 +325,14 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
static int
nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
{
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) {
+ if (likely(chid >= 0 && chid < priv->base.channels)) {
chan = dev_priv->channels.ptr[chid];
if (likely(chan))
ret = nouveau_finish_page_flip(chan, NULL);
@@ -534,3 +421,56 @@ nvc0_fifo_isr(struct drm_device *dev)
nv_wr32(dev, 0x002140, 0);
}
}
+
+static void
+nvc0_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_vm_put(&priv->user_vma);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nvc0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nvc0_fifo_destroy;
+ priv->base.base.init = nvc0_fifo_init;
+ priv->base.base.fini = nvc0_fifo_fini;
+ priv->base.base.context_new = nvc0_fifo_context_new;
+ priv->base.base.context_del = nvc0_fifo_context_del;
+ priv->base.channels = 128;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
+ 12, NV_MEM_ACCESS_RW, &priv->user_vma);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 9066102d1159..2a01e6e47724 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -29,6 +29,7 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
#include "nvc0_graph.h"
#include "nvc0_grhub.fuc.h"
@@ -620,13 +621,14 @@ nvc0_graph_init(struct drm_device *dev, int engine)
int
nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
unsigned long flags;
int i;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ for (i = 0; i < pfifo->channels; i++) {
chan = dev_priv->channels.ptr[i];
if (!chan || !chan->ramin)
continue;
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index ce65f81bb871..7c95c44e2887 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -164,7 +164,9 @@ struct nvc0_pm_clock {
};
struct nvc0_pm_state {
+ struct nouveau_pm_level *perflvl;
struct nvc0_pm_clock eng[16];
+ struct nvc0_pm_clock mem;
};
static u32
@@ -303,6 +305,48 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
return 0;
}
+static int
+calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
+{
+ struct pll_lims pll;
+ int N, M, P, ret;
+ u32 ctrl;
+
+ /* mclk pll input freq comes from another pll, make sure it's on */
+ ctrl = nv_rd32(dev, 0x132020);
+ if (!(ctrl & 0x00000001)) {
+ /* if not, program it to 567MHz. it's not clear where this value
+ * comes from - it looks like it's in the pll limits table for
+ * 132000 but the binary driver ignores all my attempts to
+ * change this value.
+ */
+ nv_wr32(dev, 0x137320, 0x00000103);
+ nv_wr32(dev, 0x137330, 0x81200606);
+ nv_wait(dev, 0x132020, 0x00010000, 0x00010000);
+ nv_wr32(dev, 0x132024, 0x0001150f);
+ nv_mask(dev, 0x132020, 0x00000001, 0x00000001);
+ nv_wait(dev, 0x137390, 0x00020000, 0x00020000);
+ nv_mask(dev, 0x132020, 0x00000004, 0x00000004);
+ }
+
+ /* for the moment, until the clock tree is better understood, use
+ * pll mode for all clock frequencies
+ */
+ ret = get_pll_limits(dev, 0x132000, &pll);
+ if (ret == 0) {
+ pll.refclk = read_pll(dev, 0x132020);
+ if (pll.refclk) {
+ ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
+ if (ret > 0) {
+ info->coef = (P << 16) | (N << 8) | M;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
void *
nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
@@ -335,6 +379,15 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
return ERR_PTR(ret);
}
+ if (perflvl->memory) {
+ ret = calc_mem(dev, &info->mem, perflvl->memory);
+ if (ret) {
+ kfree(info);
+ return ERR_PTR(ret);
+ }
+ }
+
+ info->perflvl = perflvl;
return info;
}
@@ -375,12 +428,148 @@ prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
}
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+ nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
+ udelay((nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+ struct drm_device *dev = exec->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+ if (mr <= 1)
+ return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4));
+ return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4));
+ } else {
+ if (mr == 0)
+ return nv_rd32(dev, 0x10f300 + (mr * 4));
+ else
+ if (mr <= 7)
+ return nv_rd32(dev, 0x10f32c + (mr * 4));
+ return nv_rd32(dev, 0x10f34c);
+ }
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+ struct drm_device *dev = exec->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+ if (mr <= 1) {
+ nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data);
+ if (dev_priv->vram_rank_B)
+ nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data);
+ } else
+ if (mr <= 3) {
+ nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data);
+ if (dev_priv->vram_rank_B)
+ nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data);
+ }
+ } else {
+ if (mr == 0)
+ nv_wr32(dev, 0x10f300 + (mr * 4), data);
+ else if (mr <= 7)
+ nv_wr32(dev, 0x10f32c + (mr * 4), data);
+ else if (mr == 15)
+ nv_wr32(dev, 0x10f34c, data);
+ }
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+ struct nvc0_pm_state *info = exec->priv;
+ struct drm_device *dev = exec->dev;
+ u32 ctrl = nv_rd32(dev, 0x132000);
+
+ nv_wr32(dev, 0x137360, 0x00000001);
+ nv_wr32(dev, 0x137370, 0x00000000);
+ nv_wr32(dev, 0x137380, 0x00000000);
+ if (ctrl & 0x00000001)
+ nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001));
+
+ nv_wr32(dev, 0x132004, info->mem.coef);
+ nv_wr32(dev, 0x132000, (ctrl |= 0x00000001));
+ nv_wait(dev, 0x137390, 0x00000002, 0x00000002);
+ nv_wr32(dev, 0x132018, 0x00005000);
+
+ nv_wr32(dev, 0x137370, 0x00000001);
+ nv_wr32(dev, 0x137380, 0x00000001);
+ nv_wr32(dev, 0x137360, 0x00000000);
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+ struct nvc0_pm_state *info = exec->priv;
+ struct nouveau_pm_level *perflvl = info->perflvl;
+ int i;
+
+ for (i = 0; i < 5; i++)
+ nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
+}
+
+static void
+prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_mem_exec_func exec = {
+ .dev = dev,
+ .precharge = mclk_precharge,
+ .refresh = mclk_refresh,
+ .refresh_auto = mclk_refresh_auto,
+ .refresh_self = mclk_refresh_self,
+ .wait = mclk_wait,
+ .mrg = mclk_mrg,
+ .mrs = mclk_mrs,
+ .clock_set = mclk_clock_set,
+ .timing_set = mclk_timing_set,
+ .priv = info
+ };
+
+ if (dev_priv->chipset < 0xd0)
+ nv_wr32(dev, 0x611200, 0x00003300);
+ else
+ nv_wr32(dev, 0x62c000, 0x03030000);
+
+ nouveau_mem_exec(&exec, info->perflvl);
+
+ if (dev_priv->chipset < 0xd0)
+ nv_wr32(dev, 0x611200, 0x00003300);
+ else
+ nv_wr32(dev, 0x62c000, 0x03030300);
+}
int
nvc0_pm_clocks_set(struct drm_device *dev, void *data)
{
struct nvc0_pm_state *info = data;
int i;
+ if (info->mem.coef)
+ prog_mem(dev, info);
+
for (i = 0; i < 16; i++) {
if (!info->eng[i].freq)
continue;
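
The prog_mem() path added above drives a memory reclock by filling a struct nouveau_mem_exec_func with chipset-specific callbacks (refresh control, mode-register access, clock and timing programming) and handing it to nouveau_mem_exec(), which sequences the actual transition. A rough standalone sketch of that callback-table pattern, with hypothetical mem_exec_ops/demo_* names rather than the driver's real API:

/*
 * Callback-table sketch: a generic sequencer calls back into
 * chipset-specific hooks, as nouveau_mem_exec() does with the
 * struct filled in by prog_mem().  Names here are illustrative only.
 */
#include <stdio.h>

struct mem_exec_ops {
	void *priv;
	void (*refresh_auto)(void *priv, int enable);
	void (*clock_set)(void *priv);
	void (*timing_set)(void *priv);
};

/* generic sequencer: disable auto-refresh, switch the clock, reprogram timings */
static void demo_mem_exec(const struct mem_exec_ops *ops)
{
	ops->refresh_auto(ops->priv, 0);
	ops->clock_set(ops->priv);
	ops->timing_set(ops->priv);
	ops->refresh_auto(ops->priv, 1);
}

static void demo_refresh_auto(void *priv, int enable)
{
	printf("auto-refresh %s\n", enable ? "on" : "off");
}

static void demo_clock_set(void *priv)  { printf("program MPLL coefficients\n"); }
static void demo_timing_set(void *priv) { printf("write timing registers\n"); }

int main(void)
{
	struct mem_exec_ops ops = {
		.refresh_auto = demo_refresh_auto,
		.clock_set    = demo_clock_set,
		.timing_set   = demo_timing_set,
	};

	demo_mem_exec(&ops);
	return 0;
}
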
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
new file mode 100644
index 000000000000..93e8c164fec6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_software.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+#include "nv50_display.h"
+
+struct nvc0_software_priv {
+ struct nouveau_software_priv base;
+};
+
+struct nvc0_software_chan {
+ struct nouveau_software_chan base;
+ struct nouveau_vma dispc_vma[4];
+};
+
+u64
+nvc0_software_crtc(struct nouveau_channel *chan, int crtc)
+{
+ struct nvc0_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
+ return pch->dispc_vma[crtc].offset;
+}
+
+static int
+nvc0_software_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+ struct nvc0_software_chan *pch;
+ int ret = 0, i;
+
+ pch = kzalloc(sizeof(*pch), GFP_KERNEL);
+ if (!pch)
+ return -ENOMEM;
+
+ nouveau_software_context_new(&pch->base);
+ chan->engctx[engine] = pch;
+
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo;
+ if (dev_priv->card_type >= NV_D0)
+ bo = nvd0_display_crtc_sema(dev, i);
+ else
+ bo = nv50_display(dev)->crtc[i].sem.bo;
+
+ ret = nouveau_bo_vma_add(bo, chan->vm, &pch->dispc_vma[i]);
+ }
+
+ if (ret)
+ psw->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nvc0_software_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_software_chan *pch = chan->engctx[engine];
+ int i;
+
+ if (dev_priv->card_type >= NV_D0) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &pch->dispc_vma[i]);
+ }
+ } else
+ if (dev_priv->card_type >= NV_50) {
+ struct nv50_display *disp = nv50_display(dev);
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ nouveau_bo_vma_del(dispc->sem.bo, &pch->dispc_vma[i]);
+ }
+ }
+
+ chan->engctx[engine] = NULL;
+ kfree(pch);
+}
+
+static int
+nvc0_software_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static int
+nvc0_software_init(struct drm_device *dev, int engine)
+{
+ return 0;
+}
+
+static int
+nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nvc0_software_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_software_priv *psw = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, SW);
+ kfree(psw);
+}
+
+int
+nvc0_software_create(struct drm_device *dev)
+{
+ struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
+ if (!psw)
+ return -ENOMEM;
+
+ psw->base.base.destroy = nvc0_software_destroy;
+ psw->base.base.init = nvc0_software_init;
+ psw->base.base.fini = nvc0_software_fini;
+ psw->base.base.context_new = nvc0_software_context_new;
+ psw->base.base.context_del = nvc0_software_context_del;
+ psw->base.base.object_new = nvc0_software_object_new;
+ nouveau_software_create(&psw->base);
+
+ NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
+ NVOBJ_CLASS(dev, 0x906e, SW);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 0247250939e8..c486d3ce3c2c 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -33,6 +33,7 @@
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nouveau_fb.h"
+#include "nouveau_software.h"
#include "nv50_display.h"
#define EVO_DMA_NR 9
@@ -284,8 +285,6 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
u32 *push;
int ret;
- evo_sync(crtc->dev, EVO_MASTER);
-
swap_interval <<= 4;
if (swap_interval == 0)
swap_interval |= 0x100;
@@ -300,15 +299,16 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (ret)
return ret;
- offset = chan->dispc_vma[nv_crtc->index].offset;
+
+ offset = nvc0_software_crtc(chan, nv_crtc->index);
offset += evo->sem.offset;
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 0xf00d0000 | evo->sem.value);
OUT_RING (chan, 0x1002);
- BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset ^ 0x10));
OUT_RING (chan, 0x74b1e000);
@@ -882,7 +882,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(crtc, 256);
ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -895,7 +895,7 @@ nvd0_crtc_create(struct drm_device *dev, int index)
goto out;
ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &nv_crtc->lut.nvbo);
+ 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -2030,7 +2030,7 @@ nvd0_display_create(struct drm_device *dev)
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
- 0, 0x0000, &disp->sync);
+ 0, 0x0000, NULL, &disp->sync);
if (!ret) {
ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
new file mode 100644
index 000000000000..1855ecbd843b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
+
+#define NVE0_FIFO_ENGINE_NUM 32
+
+static void nve0_fifo_isr(struct drm_device *);
+
+struct nve0_fifo_engine {
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
+ struct {
+ struct nouveau_gpuobj *mem;
+ struct nouveau_vma bar;
+ } user;
+ int spoon_nr;
+};
+
+struct nve0_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 engine;
+};
+
+static void
+nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nve0_fifo_engine *peng = &priv->engine[engine];
+ struct nouveau_gpuobj *cur;
+ u32 match = (engine << 16) | 0x00000001;
+ int ret, i, p;
+
+ cur = peng->playlist[peng->cur_playlist];
+ if (unlikely(cur == NULL)) {
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
+ if (ret) {
+ NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
+ return;
+ }
+
+ peng->playlist[peng->cur_playlist] = cur;
+ }
+
+ peng->cur_playlist = !peng->cur_playlist;
+
+ for (i = 0, p = 0; i < priv->base.channels; i++) {
+ u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
+ if (ctrl != match)
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000000);
+ p += 8;
+ }
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x002270, cur->vinst >> 12);
+ nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
+ if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
+}
+
+static int
+nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_chan *fctx;
+ u64 usermem = priv->user.mem->vinst + chan->id * 512;
+ u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+ int ret = 0, i;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ fctx->engine = 0; /* PGRAPH */
+
+ /* allocate vram for control regs, map into polling area */
+ chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+ priv->user.bar.offset + (chan->id * 512), 512);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
+ nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
+ nv_wo32(chan->ramin, 0x10, 0x0000face);
+ nv_wo32(chan->ramin, 0x30, 0xfffff902);
+ nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ upper_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x84, 0x20400000);
+ nv_wo32(chan->ramin, 0x94, 0x30000001);
+ nv_wo32(chan->ramin, 0x9c, 0x00000100);
+ nv_wo32(chan->ramin, 0xac, 0x0000001f);
+ nv_wo32(chan->ramin, 0xe4, 0x00000000);
+ nv_wo32(chan->ramin, 0xe8, chan->id);
+ nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
+ (chan->ramin->vinst >> 12));
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nve0_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
+ nv_wr32(dev, 0x002634, chan->id);
+ if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
+ NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
+
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
+ kfree(fctx);
+}
+
+static int
+nve0_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_chan *fctx;
+ int i;
+
+ /* reset PFIFO, enable all available PSUBFIFO areas */
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x000204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
+ NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+ }
+
+ nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+ nv_wr32(dev, 0x002a00, 0xffffffff);
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xbfffffff);
+
+ /* restore PFIFO context table */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (!chan || !(fctx = chan->engctx[engine]))
+ continue;
+
+ nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
+ (chan->ramin->vinst >> 12));
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+ }
+
+ return 0;
+}
+
+static int
+nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
+ continue;
+
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
+ nv_wr32(dev, 0x002634, i);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+ NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+ i, nv_rd32(dev, 0x002634));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0x00000000);
+ return 0;
+}
+
+struct nouveau_enum nve0_fifo_fault_unit[] = {
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+ {}
+};
+
+struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+ u32 client = (stat & 0x00001f00) >> 8;
+
+ NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+ (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ if (stat & 0x00000040) {
+ printk("/");
+ nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+ } else {
+ printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+ }
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+ u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000);
+ u32 mthd = (addr & 0x00003ffc);
+
+ NV_INFO(dev, "PSUBFIFO %d:", unit);
+ nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
+ NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+
+ nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x002100);
+
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(dev, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_vm_fault(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(dev, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_subfifo_intr(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+ nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
+ }
+}
+
+static void
+nve0_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nouveau_vm_put(&priv->user.bar);
+ nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+ for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+ }
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nve0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nve0_fifo_destroy;
+ priv->base.base.init = nve0_fifo_init;
+ priv->base.base.fini = nve0_fifo_fini;
+ priv->base.base.context_new = nve0_fifo_context_new;
+ priv->base.base.context_del = nve0_fifo_context_del;
+ priv->base.channels = 4096;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
+ 12, NV_MEM_ACCESS_RW, &priv->user.bar);
+ if (ret)
+ goto error;
+
+ nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
+
+ nouveau_irq_register(dev, 8, nve0_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
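
nve0_fifo_playlist_update() above rebuilds a per-engine runlist: it scans each channel's control register, appends every active channel as an 8-byte entry (channel id plus a zero word) into one of two alternating playlist buffers, then points PFIFO at the buffer and waits for the update to complete. A small userspace sketch of the list-building step, where MAX_CHAN and demo_chan_active() are stand-ins for the real channel count and register read:

/*
 * Playlist build sketch: collect active channels into {id, 0} pairs,
 * then hand the list (base + entry count) to the scheduler.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_CHAN 16

static bool demo_chan_active(int chid)
{
	return (chid % 3) == 0;	/* stand-in for reading 0x800004 + (chid * 8) */
}

int main(void)
{
	uint32_t playlist[MAX_CHAN * 2];
	int chid, entries = 0;

	for (chid = 0; chid < MAX_CHAN; chid++) {
		if (!demo_chan_active(chid))
			continue;
		playlist[entries * 2 + 0] = chid;	/* channel id */
		playlist[entries * 2 + 1] = 0;		/* reserved word */
		entries++;
	}

	/* the driver then writes the list address and entry count to PFIFO */
	printf("playlist with %d entries ready for submission\n", entries);
	return 0;
}
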
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
new file mode 100644
index 000000000000..8a8051b68f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_fifo.h"
+
+#include "nve0_graph.h"
+
+static void
+nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
+{
+ NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
+ nv_rd32(dev, base + 0x400));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
+ nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
+ nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
+}
+
+static void
+nve0_graph_ctxctl_debug(struct drm_device *dev)
+{
+ u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
+ u32 gpc;
+
+ nve0_graph_ctxctl_debug_unit(dev, 0x409000);
+ for (gpc = 0; gpc < gpcnr; gpc++)
+ nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
+}
+
+static int
+nve0_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, 0x409840, 0x00000030);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000003);
+ if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+ NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+ return 0;
+}
+
+static int
+nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+ nv_wr32(dev, 0x409840, 0x00000003);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+ nv_wr32(dev, 0x409504, 0x00000009);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nve0_graph_construct_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ int ret, i;
+ u32 *ctx;
+
+ ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nve0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nve0_grctx_generate(chan);
+ if (ret)
+ goto err;
+
+ ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+ priv->grctx_vals = ctx;
+ return 0;
+
+err:
+ kfree(ctx);
+ return ret;
+}
+
+static int
+nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ u32 magic[GPC_MAX][2];
+ u16 offset = 0x0000;
+ int gpc;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
+ &grch->unk408004);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
+ &grch->unk40800c);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
+ &grch->unk418810);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
+ &grch->mmio);
+ if (ret)
+ return ret;
+
+#define mmio(r,v) do { \
+ nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r)); \
+ nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v)); \
+ grch->mmio_nr++; \
+} while (0)
+ mmio(0x40800c, grch->unk40800c->linst >> 8);
+ mmio(0x408010, 0x80000000);
+ mmio(0x419004, grch->unk40800c->linst >> 8);
+ mmio(0x419008, 0x00000000);
+ mmio(0x4064cc, 0x80000000);
+ mmio(0x408004, grch->unk408004->linst >> 8);
+ mmio(0x408008, 0x80000030);
+ mmio(0x418808, grch->unk408004->linst >> 8);
+ mmio(0x41880c, 0x80000030);
+ mmio(0x4064c8, 0x01800600);
+ mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
+ mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
+ mmio(0x405830, 0x02180648);
+ mmio(0x4064c4, 0x0192ffff);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+ u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+ magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
+ magic[gpc][1] = 0x00000000 | (magic1 << 16);
+ offset += 0x0324 * priv->tpc_nr[gpc];
+ }
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
+ mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
+ offset += 0x07ff * priv->tpc_nr[gpc];
+ }
+
+ mmio(0x17e91c, 0x06060609);
+ mmio(0x17e920, 0x00090a05);
+#undef mmio
+ return 0;
+}
+
+static int
+nve0_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_graph_priv *priv = nv_engine(dev, engine);
+ struct nve0_graph_chan *grch;
+ struct nouveau_gpuobj *grctx;
+ int ret, i;
+
+ grch = kzalloc(sizeof(*grch), GFP_KERNEL);
+ if (!grch)
+ return -ENOMEM;
+ chan->engctx[NVOBJ_ENGINE_GR] = grch;
+
+ ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+ &grch->grctx);
+ if (ret)
+ goto error;
+ grctx = grch->grctx;
+
+ ret = nve0_graph_create_context_mmio_list(chan);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
+ pinstmem->flush(dev);
+
+ if (!priv->grctx_vals) {
+ ret = nve0_graph_construct_context(chan);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+
+ pinstmem->flush(dev);
+ return 0;
+
+error:
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nve0_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nve0_graph_chan *grch = chan->engctx[engine];
+
+ nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_ref(NULL, &grch->unk418810);
+ nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+ nouveau_gpuobj_ref(NULL, &grch->unk408004);
+ nouveau_gpuobj_ref(NULL, &grch->grctx);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nve0_graph_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static int
+nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nve0_graph_init_obj418880(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int i;
+
+ nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+ nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nve0_graph_init_regs(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400080, 0x003083c2);
+ nv_wr32(dev, 0x400088, 0x0001ffe7);
+ nv_wr32(dev, 0x40008c, 0x00000000);
+ nv_wr32(dev, 0x400090, 0x00000030);
+ nv_wr32(dev, 0x40013c, 0x003901f7);
+ nv_wr32(dev, 0x400140, 0x00000100);
+ nv_wr32(dev, 0x400144, 0x00000000);
+ nv_wr32(dev, 0x400148, 0x00000110);
+ nv_wr32(dev, 0x400138, 0x00000000);
+ nv_wr32(dev, 0x400130, 0x00000000);
+ nv_wr32(dev, 0x400134, 0x00000000);
+ nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nve0_graph_init_units(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x409ffc, 0x00000000);
+ nv_wr32(dev, 0x409c14, 0x00003e3e);
+ nv_wr32(dev, 0x409c24, 0x000f0000);
+
+ nv_wr32(dev, 0x404000, 0xc0000000);
+ nv_wr32(dev, 0x404600, 0xc0000000);
+ nv_wr32(dev, 0x408030, 0xc0000000);
+ nv_wr32(dev, 0x404490, 0xc0000000);
+ nv_wr32(dev, 0x406018, 0xc0000000);
+ nv_wr32(dev, 0x407020, 0xc0000000);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x405844, 0x00ffffff);
+
+ nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nve0_graph_init_gpc_0(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ u32 data[TPC_MAX / 8];
+ u8 tpcnr[GPC_MAX];
+ int i, gpc, tpc;
+
+ nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ priv->tpc_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
+}
+
+static void
+nve0_graph_init_gpc_1(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int gpc, tpc;
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+ }
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+}
+
+static void
+nve0_graph_init_rop(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int rop;
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+}
+
+static void
+nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
+ struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
+{
+ int i;
+
+ nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ for (i = 0; i < data->size / 4; i++)
+ nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
+
+ nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ for (i = 0; i < code->size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+ nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
+ }
+}
+
+static int
+nve0_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 r000260;
+
+ /* load fuc microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
+ nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
+ nv_wr32(dev, 0x000260, r000260);
+
+ /* start both of them running */
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x41a10c, 0x00000000);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x41a100, 0x00000002);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+ NV_INFO(dev, "0x409800 wait failed\n");
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x7fffffff);
+ nv_wr32(dev, 0x409504, 0x00000021);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000010);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ return -EBUSY;
+ }
+ priv->grctx_size = nv_rd32(dev, 0x409800);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000016);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000025);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000030);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409810, 0xb00095c8);
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000031);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409810, 0x00080420);
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000032);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409614, 0x00000070);
+ nv_wr32(dev, 0x409614, 0x00000770);
+ nv_wr32(dev, 0x40802c, 0x00000001);
+ return 0;
+}
+
+static int
+nve0_graph_init(struct drm_device *dev, int engine)
+{
+ int ret;
+
+ nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+ nve0_graph_init_obj418880(dev);
+ nve0_graph_init_regs(dev);
+ nve0_graph_init_gpc_0(dev);
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nve0_graph_init_units(dev);
+ nve0_graph_init_gpc_1(dev);
+ nve0_graph_init_rop(dev);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400118, 0xffffffff);
+ nv_wr32(dev, 0x400130, 0xffffffff);
+ nv_wr32(dev, 0x40011c, 0xffffffff);
+ nv_wr32(dev, 0x400134, 0xffffffff);
+ nv_wr32(dev, 0x400054, 0x34ce3464);
+
+ ret = nve0_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < pfifo->channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nve0_graph_ctxctl_isr(struct drm_device *dev)
+{
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ if (ustat & 0x00000001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
+
+ nve0_graph_ctxctl_debug(dev);
+ nv_wr32(dev, 0x409c20, ustat);
+}
+
+static void
+nve0_graph_trap_isr(struct drm_device *dev, int chid)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 trap = nv_rd32(dev, 0x400108);
+ int rop;
+
+ if (trap & 0x00000001) {
+ u32 stat = nv_rd32(dev, 0x404000);
+ NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
+ nv_wr32(dev, 0x404000, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x00000001);
+ trap &= ~0x00000001;
+ }
+
+ if (trap & 0x00000010) {
+ u32 stat = nv_rd32(dev, 0x405840);
+ NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x00000010);
+ trap &= ~0x00000010;
+ }
+
+ if (trap & 0x02000000) {
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
+ u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
+ NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
+ rop, chid, statz, statc);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ }
+ nv_wr32(dev, 0x400108, 0x02000000);
+ trap &= ~0x02000000;
+ }
+
+ if (trap) {
+ NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
+ nv_wr32(dev, 0x400108, trap);
+ }
+}
+
+static void
+nve0_graph_isr(struct drm_device *dev)
+{
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ u32 chid = nve0_graph_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x400100);
+ u32 addr = nv_rd32(dev, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(dev, 0x400708);
+ u32 code = nv_rd32(dev, 0x400110);
+ u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ nv_wr32(dev, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ nve0_graph_trap_isr(dev, chid);
+ nv_wr32(dev, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ nve0_graph_ctxctl_isr(dev);
+ nv_wr32(dev, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+ nv_wr32(dev, 0x400100, stat);
+ }
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static int
+nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
+ struct nve0_graph_fuc *fuc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
+ ret = request_firmware(&fw, f, &dev->pdev->dev);
+ if (ret)
+ return ret;
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+static void
+nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
+{
+ if (fuc->data) {
+ kfree(fuc->data);
+ fuc->data = NULL;
+ }
+}
+
+static void
+nve0_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, engine);
+
+ nve0_graph_destroy_fw(&priv->fuc409c);
+ nve0_graph_destroy_fw(&priv->fuc409d);
+ nve0_graph_destroy_fw(&priv->fuc41ac);
+ nve0_graph_destroy_fw(&priv->fuc41ad);
+
+ nouveau_irq_unregister(dev, 12);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ kfree(priv->grctx_vals);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+ kfree(priv);
+}
+
+int
+nve0_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_graph_priv *priv;
+ int ret, gpc, i;
+ u32 kepler;
+
+ kepler = nve0_graph_class(dev);
+ if (!kepler) {
+ NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.destroy = nve0_graph_destroy;
+ priv->base.init = nve0_graph_init;
+ priv->base.fini = nve0_graph_fini;
+ priv->base.context_new = nve0_graph_context_new;
+ priv->base.context_del = nve0_graph_context_del;
+ priv->base.object_new = nve0_graph_object_new;
+
+ NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
+ nouveau_irq_register(dev, 12, nve0_graph_isr);
+
+ NV_INFO(dev, "PGRAPH: using external firmware\n");
+ if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
+ nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
+ nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
+ nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
+ ret = 0;
+ goto error;
+ }
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+ priv->tpc_total += priv->tpc_nr[gpc];
+ }
+
+ switch (dev_priv->chipset) {
+ case 0xe4:
+ if (priv->tpc_total == 8)
+ priv->magic_not_rop_nr = 3;
+ else
+ if (priv->tpc_total == 7)
+ priv->magic_not_rop_nr = 1;
+ break;
+ case 0xe7:
+ priv->magic_not_rop_nr = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (!priv->magic_not_rop_nr) {
+ NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
+ priv->tpc_nr[3], priv->rop_nr);
+ priv->magic_not_rop_nr = 0x00;
+ }
+
+ NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
+ NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
+ NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
+ NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
+ NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
+ return 0;
+
+error:
+ nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
+ return ret;
+}
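
nve0_graph_create() above depends entirely on external microcode: nve0_graph_create_fw() requests each fuc image with request_firmware() and keeps a private copy via kmemdup(), and nve0_graph_destroy_fw() releases it later. A userspace analogue of that load-into-buffer step, assuming hypothetical demo_fuc/demo_load_fw names and a local file path:

/*
 * Userspace analogue of nve0_graph_create_fw(): read a firmware image into
 * a heap buffer and record its size, much as request_firmware() + kmemdup()
 * do in the kernel.  Names and the path are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_fuc {
	unsigned char *data;
	long size;
};

static int demo_load_fw(const char *path, struct demo_fuc *fuc)
{
	FILE *f = fopen(path, "rb");
	if (!f)
		return -1;

	fseek(f, 0, SEEK_END);
	fuc->size = ftell(f);
	rewind(f);

	fuc->data = malloc(fuc->size);
	if (!fuc->data || fread(fuc->data, 1, fuc->size, f) != (size_t)fuc->size) {
		free(fuc->data);
		fuc->data = NULL;
		fclose(f);
		return -1;
	}

	fclose(f);
	return 0;
}

int main(void)
{
	struct demo_fuc fuc = { 0 };

	if (demo_load_fw("nouveau/nve4_fuc409c", &fuc) == 0) {
		printf("loaded %ld bytes of microcode\n", fuc.size);
		free(fuc.data);
	}
	return 0;
}
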
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.h b/drivers/gpu/drm/nouveau/nve0_graph.h
new file mode 100644
index 000000000000..2ba70449ba01
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_graph.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVE0_GRAPH_H__
+#define __NVE0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TPC_MAX 32
+
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
+#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nve0_graph_fuc {
+ u32 *data;
+ u32 size;
+};
+
+struct nve0_graph_priv {
+ struct nouveau_exec_engine base;
+
+ struct nve0_graph_fuc fuc409c;
+ struct nve0_graph_fuc fuc409d;
+ struct nve0_graph_fuc fuc41ac;
+ struct nve0_graph_fuc fuc41ad;
+
+ u8 gpc_nr;
+ u8 rop_nr;
+ u8 tpc_nr[GPC_MAX];
+ u8 tpc_total;
+
+ u32 grctx_size;
+ u32 *grctx_vals;
+ struct nouveau_gpuobj *unk4188b4;
+ struct nouveau_gpuobj *unk4188b8;
+
+ u8 magic_not_rop_nr;
+};
+
+struct nve0_graph_chan {
+ struct nouveau_gpuobj *grctx;
+ struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+ struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+ struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+ struct nouveau_gpuobj *mmio;
+ int mmio_nr;
+};
+
+int nve0_grctx_generate(struct nouveau_channel *);
+
+/* nve0_graph.c uses this also to determine supported chipsets */
+static inline u32
+nve0_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0xe4:
+ case 0xe7:
+ return 0xa097;
+ default:
+ return 0;
+ }
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nve0_grctx.c b/drivers/gpu/drm/nouveau/nve0_grctx.c
new file mode 100644
index 000000000000..d8cb360e92c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nve0_grctx.c
@@ -0,0 +1,2777 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nve0_graph.h"
+
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+ nv_wr32(dev, 0x400204, data);
+ nv_wr32(dev, 0x400200, icmd);
+ while (nv_rd32(dev, 0x400700) & 0x00000002) {}
+}
+
+static void
+nve0_grctx_generate_icmd(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400208, 0x80000000);
+ nv_icmd(dev, 0x001000, 0x00000004);
+ nv_icmd(dev, 0x000039, 0x00000000);
+ nv_icmd(dev, 0x00003a, 0x00000000);
+ nv_icmd(dev, 0x00003b, 0x00000000);
+ nv_icmd(dev, 0x0000a9, 0x0000ffff);
+ nv_icmd(dev, 0x000038, 0x0fac6881);
+ nv_icmd(dev, 0x00003d, 0x00000001);
+ nv_icmd(dev, 0x0000e8, 0x00000400);
+ nv_icmd(dev, 0x0000e9, 0x00000400);
+ nv_icmd(dev, 0x0000ea, 0x00000400);
+ nv_icmd(dev, 0x0000eb, 0x00000400);
+ nv_icmd(dev, 0x0000ec, 0x00000400);
+ nv_icmd(dev, 0x0000ed, 0x00000400);
+ nv_icmd(dev, 0x0000ee, 0x00000400);
+ nv_icmd(dev, 0x0000ef, 0x00000400);
+ nv_icmd(dev, 0x000078, 0x00000300);
+ nv_icmd(dev, 0x000079, 0x00000300);
+ nv_icmd(dev, 0x00007a, 0x00000300);
+ nv_icmd(dev, 0x00007b, 0x00000300);
+ nv_icmd(dev, 0x00007c, 0x00000300);
+ nv_icmd(dev, 0x00007d, 0x00000300);
+ nv_icmd(dev, 0x00007e, 0x00000300);
+ nv_icmd(dev, 0x00007f, 0x00000300);
+ nv_icmd(dev, 0x000050, 0x00000011);
+ nv_icmd(dev, 0x000058, 0x00000008);
+ nv_icmd(dev, 0x000059, 0x00000008);
+ nv_icmd(dev, 0x00005a, 0x00000008);
+ nv_icmd(dev, 0x00005b, 0x00000008);
+ nv_icmd(dev, 0x00005c, 0x00000008);
+ nv_icmd(dev, 0x00005d, 0x00000008);
+ nv_icmd(dev, 0x00005e, 0x00000008);
+ nv_icmd(dev, 0x00005f, 0x00000008);
+ nv_icmd(dev, 0x000208, 0x00000001);
+ nv_icmd(dev, 0x000209, 0x00000001);
+ nv_icmd(dev, 0x00020a, 0x00000001);
+ nv_icmd(dev, 0x00020b, 0x00000001);
+ nv_icmd(dev, 0x00020c, 0x00000001);
+ nv_icmd(dev, 0x00020d, 0x00000001);
+ nv_icmd(dev, 0x00020e, 0x00000001);
+ nv_icmd(dev, 0x00020f, 0x00000001);
+ nv_icmd(dev, 0x000081, 0x00000001);
+ nv_icmd(dev, 0x000085, 0x00000004);
+ nv_icmd(dev, 0x000088, 0x00000400);
+ nv_icmd(dev, 0x000090, 0x00000300);
+ nv_icmd(dev, 0x000098, 0x00001001);
+ nv_icmd(dev, 0x0000e3, 0x00000001);
+ nv_icmd(dev, 0x0000da, 0x00000001);
+ nv_icmd(dev, 0x0000f8, 0x00000003);
+ nv_icmd(dev, 0x0000fa, 0x00000001);
+ nv_icmd(dev, 0x00009f, 0x0000ffff);
+ nv_icmd(dev, 0x0000a0, 0x0000ffff);
+ nv_icmd(dev, 0x0000a1, 0x0000ffff);
+ nv_icmd(dev, 0x0000a2, 0x0000ffff);
+ nv_icmd(dev, 0x0000b1, 0x00000001);
+ nv_icmd(dev, 0x0000ad, 0x0000013e);
+ nv_icmd(dev, 0x0000e1, 0x00000010);
+ nv_icmd(dev, 0x000290, 0x00000000);
+ nv_icmd(dev, 0x000291, 0x00000000);
+ nv_icmd(dev, 0x000292, 0x00000000);
+ nv_icmd(dev, 0x000293, 0x00000000);
+ nv_icmd(dev, 0x000294, 0x00000000);
+ nv_icmd(dev, 0x000295, 0x00000000);
+ nv_icmd(dev, 0x000296, 0x00000000);
+ nv_icmd(dev, 0x000297, 0x00000000);
+ nv_icmd(dev, 0x000298, 0x00000000);
+ nv_icmd(dev, 0x000299, 0x00000000);
+ nv_icmd(dev, 0x00029a, 0x00000000);
+ nv_icmd(dev, 0x00029b, 0x00000000);
+ nv_icmd(dev, 0x00029c, 0x00000000);
+ nv_icmd(dev, 0x00029d, 0x00000000);
+ nv_icmd(dev, 0x00029e, 0x00000000);
+ nv_icmd(dev, 0x00029f, 0x00000000);
+ nv_icmd(dev, 0x0003b0, 0x00000000);
+ nv_icmd(dev, 0x0003b1, 0x00000000);
+ nv_icmd(dev, 0x0003b2, 0x00000000);
+ nv_icmd(dev, 0x0003b3, 0x00000000);
+ nv_icmd(dev, 0x0003b4, 0x00000000);
+ nv_icmd(dev, 0x0003b5, 0x00000000);
+ nv_icmd(dev, 0x0003b6, 0x00000000);
+ nv_icmd(dev, 0x0003b7, 0x00000000);
+ nv_icmd(dev, 0x0003b8, 0x00000000);
+ nv_icmd(dev, 0x0003b9, 0x00000000);
+ nv_icmd(dev, 0x0003ba, 0x00000000);
+ nv_icmd(dev, 0x0003bb, 0x00000000);
+ nv_icmd(dev, 0x0003bc, 0x00000000);
+ nv_icmd(dev, 0x0003bd, 0x00000000);
+ nv_icmd(dev, 0x0003be, 0x00000000);
+ nv_icmd(dev, 0x0003bf, 0x00000000);
+ nv_icmd(dev, 0x0002a0, 0x00000000);
+ nv_icmd(dev, 0x0002a1, 0x00000000);
+ nv_icmd(dev, 0x0002a2, 0x00000000);
+ nv_icmd(dev, 0x0002a3, 0x00000000);
+ nv_icmd(dev, 0x0002a4, 0x00000000);
+ nv_icmd(dev, 0x0002a5, 0x00000000);
+ nv_icmd(dev, 0x0002a6, 0x00000000);
+ nv_icmd(dev, 0x0002a7, 0x00000000);
+ nv_icmd(dev, 0x0002a8, 0x00000000);
+ nv_icmd(dev, 0x0002a9, 0x00000000);
+ nv_icmd(dev, 0x0002aa, 0x00000000);
+ nv_icmd(dev, 0x0002ab, 0x00000000);
+ nv_icmd(dev, 0x0002ac, 0x00000000);
+ nv_icmd(dev, 0x0002ad, 0x00000000);
+ nv_icmd(dev, 0x0002ae, 0x00000000);
+ nv_icmd(dev, 0x0002af, 0x00000000);
+ nv_icmd(dev, 0x000420, 0x00000000);
+ nv_icmd(dev, 0x000421, 0x00000000);
+ nv_icmd(dev, 0x000422, 0x00000000);
+ nv_icmd(dev, 0x000423, 0x00000000);
+ nv_icmd(dev, 0x000424, 0x00000000);
+ nv_icmd(dev, 0x000425, 0x00000000);
+ nv_icmd(dev, 0x000426, 0x00000000);
+ nv_icmd(dev, 0x000427, 0x00000000);
+ nv_icmd(dev, 0x000428, 0x00000000);
+ nv_icmd(dev, 0x000429, 0x00000000);
+ nv_icmd(dev, 0x00042a, 0x00000000);
+ nv_icmd(dev, 0x00042b, 0x00000000);
+ nv_icmd(dev, 0x00042c, 0x00000000);
+ nv_icmd(dev, 0x00042d, 0x00000000);
+ nv_icmd(dev, 0x00042e, 0x00000000);
+ nv_icmd(dev, 0x00042f, 0x00000000);
+ nv_icmd(dev, 0x0002b0, 0x00000000);
+ nv_icmd(dev, 0x0002b1, 0x00000000);
+ nv_icmd(dev, 0x0002b2, 0x00000000);
+ nv_icmd(dev, 0x0002b3, 0x00000000);
+ nv_icmd(dev, 0x0002b4, 0x00000000);
+ nv_icmd(dev, 0x0002b5, 0x00000000);
+ nv_icmd(dev, 0x0002b6, 0x00000000);
+ nv_icmd(dev, 0x0002b7, 0x00000000);
+ nv_icmd(dev, 0x0002b8, 0x00000000);
+ nv_icmd(dev, 0x0002b9, 0x00000000);
+ nv_icmd(dev, 0x0002ba, 0x00000000);
+ nv_icmd(dev, 0x0002bb, 0x00000000);
+ nv_icmd(dev, 0x0002bc, 0x00000000);
+ nv_icmd(dev, 0x0002bd, 0x00000000);
+ nv_icmd(dev, 0x0002be, 0x00000000);
+ nv_icmd(dev, 0x0002bf, 0x00000000);
+ nv_icmd(dev, 0x000430, 0x00000000);
+ nv_icmd(dev, 0x000431, 0x00000000);
+ nv_icmd(dev, 0x000432, 0x00000000);
+ nv_icmd(dev, 0x000433, 0x00000000);
+ nv_icmd(dev, 0x000434, 0x00000000);
+ nv_icmd(dev, 0x000435, 0x00000000);
+ nv_icmd(dev, 0x000436, 0x00000000);
+ nv_icmd(dev, 0x000437, 0x00000000);
+ nv_icmd(dev, 0x000438, 0x00000000);
+ nv_icmd(dev, 0x000439, 0x00000000);
+ nv_icmd(dev, 0x00043a, 0x00000000);
+ nv_icmd(dev, 0x00043b, 0x00000000);
+ nv_icmd(dev, 0x00043c, 0x00000000);
+ nv_icmd(dev, 0x00043d, 0x00000000);
+ nv_icmd(dev, 0x00043e, 0x00000000);
+ nv_icmd(dev, 0x00043f, 0x00000000);
+ nv_icmd(dev, 0x0002c0, 0x00000000);
+ nv_icmd(dev, 0x0002c1, 0x00000000);
+ nv_icmd(dev, 0x0002c2, 0x00000000);
+ nv_icmd(dev, 0x0002c3, 0x00000000);
+ nv_icmd(dev, 0x0002c4, 0x00000000);
+ nv_icmd(dev, 0x0002c5, 0x00000000);
+ nv_icmd(dev, 0x0002c6, 0x00000000);
+ nv_icmd(dev, 0x0002c7, 0x00000000);
+ nv_icmd(dev, 0x0002c8, 0x00000000);
+ nv_icmd(dev, 0x0002c9, 0x00000000);
+ nv_icmd(dev, 0x0002ca, 0x00000000);
+ nv_icmd(dev, 0x0002cb, 0x00000000);
+ nv_icmd(dev, 0x0002cc, 0x00000000);
+ nv_icmd(dev, 0x0002cd, 0x00000000);
+ nv_icmd(dev, 0x0002ce, 0x00000000);
+ nv_icmd(dev, 0x0002cf, 0x00000000);
+ nv_icmd(dev, 0x0004d0, 0x00000000);
+ nv_icmd(dev, 0x0004d1, 0x00000000);
+ nv_icmd(dev, 0x0004d2, 0x00000000);
+ nv_icmd(dev, 0x0004d3, 0x00000000);
+ nv_icmd(dev, 0x0004d4, 0x00000000);
+ nv_icmd(dev, 0x0004d5, 0x00000000);
+ nv_icmd(dev, 0x0004d6, 0x00000000);
+ nv_icmd(dev, 0x0004d7, 0x00000000);
+ nv_icmd(dev, 0x0004d8, 0x00000000);
+ nv_icmd(dev, 0x0004d9, 0x00000000);
+ nv_icmd(dev, 0x0004da, 0x00000000);
+ nv_icmd(dev, 0x0004db, 0x00000000);
+ nv_icmd(dev, 0x0004dc, 0x00000000);
+ nv_icmd(dev, 0x0004dd, 0x00000000);
+ nv_icmd(dev, 0x0004de, 0x00000000);
+ nv_icmd(dev, 0x0004df, 0x00000000);
+ nv_icmd(dev, 0x000720, 0x00000000);
+ nv_icmd(dev, 0x000721, 0x00000000);
+ nv_icmd(dev, 0x000722, 0x00000000);
+ nv_icmd(dev, 0x000723, 0x00000000);
+ nv_icmd(dev, 0x000724, 0x00000000);
+ nv_icmd(dev, 0x000725, 0x00000000);
+ nv_icmd(dev, 0x000726, 0x00000000);
+ nv_icmd(dev, 0x000727, 0x00000000);
+ nv_icmd(dev, 0x000728, 0x00000000);
+ nv_icmd(dev, 0x000729, 0x00000000);
+ nv_icmd(dev, 0x00072a, 0x00000000);
+ nv_icmd(dev, 0x00072b, 0x00000000);
+ nv_icmd(dev, 0x00072c, 0x00000000);
+ nv_icmd(dev, 0x00072d, 0x00000000);
+ nv_icmd(dev, 0x00072e, 0x00000000);
+ nv_icmd(dev, 0x00072f, 0x00000000);
+ nv_icmd(dev, 0x0008c0, 0x00000000);
+ nv_icmd(dev, 0x0008c1, 0x00000000);
+ nv_icmd(dev, 0x0008c2, 0x00000000);
+ nv_icmd(dev, 0x0008c3, 0x00000000);
+ nv_icmd(dev, 0x0008c4, 0x00000000);
+ nv_icmd(dev, 0x0008c5, 0x00000000);
+ nv_icmd(dev, 0x0008c6, 0x00000000);
+ nv_icmd(dev, 0x0008c7, 0x00000000);
+ nv_icmd(dev, 0x0008c8, 0x00000000);
+ nv_icmd(dev, 0x0008c9, 0x00000000);
+ nv_icmd(dev, 0x0008ca, 0x00000000);
+ nv_icmd(dev, 0x0008cb, 0x00000000);
+ nv_icmd(dev, 0x0008cc, 0x00000000);
+ nv_icmd(dev, 0x0008cd, 0x00000000);
+ nv_icmd(dev, 0x0008ce, 0x00000000);
+ nv_icmd(dev, 0x0008cf, 0x00000000);
+ nv_icmd(dev, 0x000890, 0x00000000);
+ nv_icmd(dev, 0x000891, 0x00000000);
+ nv_icmd(dev, 0x000892, 0x00000000);
+ nv_icmd(dev, 0x000893, 0x00000000);
+ nv_icmd(dev, 0x000894, 0x00000000);
+ nv_icmd(dev, 0x000895, 0x00000000);
+ nv_icmd(dev, 0x000896, 0x00000000);
+ nv_icmd(dev, 0x000897, 0x00000000);
+ nv_icmd(dev, 0x000898, 0x00000000);
+ nv_icmd(dev, 0x000899, 0x00000000);
+ nv_icmd(dev, 0x00089a, 0x00000000);
+ nv_icmd(dev, 0x00089b, 0x00000000);
+ nv_icmd(dev, 0x00089c, 0x00000000);
+ nv_icmd(dev, 0x00089d, 0x00000000);
+ nv_icmd(dev, 0x00089e, 0x00000000);
+ nv_icmd(dev, 0x00089f, 0x00000000);
+ nv_icmd(dev, 0x0008e0, 0x00000000);
+ nv_icmd(dev, 0x0008e1, 0x00000000);
+ nv_icmd(dev, 0x0008e2, 0x00000000);
+ nv_icmd(dev, 0x0008e3, 0x00000000);
+ nv_icmd(dev, 0x0008e4, 0x00000000);
+ nv_icmd(dev, 0x0008e5, 0x00000000);
+ nv_icmd(dev, 0x0008e6, 0x00000000);
+ nv_icmd(dev, 0x0008e7, 0x00000000);
+ nv_icmd(dev, 0x0008e8, 0x00000000);
+ nv_icmd(dev, 0x0008e9, 0x00000000);
+ nv_icmd(dev, 0x0008ea, 0x00000000);
+ nv_icmd(dev, 0x0008eb, 0x00000000);
+ nv_icmd(dev, 0x0008ec, 0x00000000);
+ nv_icmd(dev, 0x0008ed, 0x00000000);
+ nv_icmd(dev, 0x0008ee, 0x00000000);
+ nv_icmd(dev, 0x0008ef, 0x00000000);
+ nv_icmd(dev, 0x0008a0, 0x00000000);
+ nv_icmd(dev, 0x0008a1, 0x00000000);
+ nv_icmd(dev, 0x0008a2, 0x00000000);
+ nv_icmd(dev, 0x0008a3, 0x00000000);
+ nv_icmd(dev, 0x0008a4, 0x00000000);
+ nv_icmd(dev, 0x0008a5, 0x00000000);
+ nv_icmd(dev, 0x0008a6, 0x00000000);
+ nv_icmd(dev, 0x0008a7, 0x00000000);
+ nv_icmd(dev, 0x0008a8, 0x00000000);
+ nv_icmd(dev, 0x0008a9, 0x00000000);
+ nv_icmd(dev, 0x0008aa, 0x00000000);
+ nv_icmd(dev, 0x0008ab, 0x00000000);
+ nv_icmd(dev, 0x0008ac, 0x00000000);
+ nv_icmd(dev, 0x0008ad, 0x00000000);
+ nv_icmd(dev, 0x0008ae, 0x00000000);
+ nv_icmd(dev, 0x0008af, 0x00000000);
+ nv_icmd(dev, 0x0008f0, 0x00000000);
+ nv_icmd(dev, 0x0008f1, 0x00000000);
+ nv_icmd(dev, 0x0008f2, 0x00000000);
+ nv_icmd(dev, 0x0008f3, 0x00000000);
+ nv_icmd(dev, 0x0008f4, 0x00000000);
+ nv_icmd(dev, 0x0008f5, 0x00000000);
+ nv_icmd(dev, 0x0008f6, 0x00000000);
+ nv_icmd(dev, 0x0008f7, 0x00000000);
+ nv_icmd(dev, 0x0008f8, 0x00000000);
+ nv_icmd(dev, 0x0008f9, 0x00000000);
+ nv_icmd(dev, 0x0008fa, 0x00000000);
+ nv_icmd(dev, 0x0008fb, 0x00000000);
+ nv_icmd(dev, 0x0008fc, 0x00000000);
+ nv_icmd(dev, 0x0008fd, 0x00000000);
+ nv_icmd(dev, 0x0008fe, 0x00000000);
+ nv_icmd(dev, 0x0008ff, 0x00000000);
+ nv_icmd(dev, 0x00094c, 0x000000ff);
+ nv_icmd(dev, 0x00094d, 0xffffffff);
+ nv_icmd(dev, 0x00094e, 0x00000002);
+ nv_icmd(dev, 0x0002ec, 0x00000001);
+ nv_icmd(dev, 0x000303, 0x00000001);
+ nv_icmd(dev, 0x0002e6, 0x00000001);
+ nv_icmd(dev, 0x000466, 0x00000052);
+ nv_icmd(dev, 0x000301, 0x3f800000);
+ nv_icmd(dev, 0x000304, 0x30201000);
+ nv_icmd(dev, 0x000305, 0x70605040);
+ nv_icmd(dev, 0x000306, 0xb8a89888);
+ nv_icmd(dev, 0x000307, 0xf8e8d8c8);
+ nv_icmd(dev, 0x00030a, 0x00ffff00);
+ nv_icmd(dev, 0x00030b, 0x0000001a);
+ nv_icmd(dev, 0x00030c, 0x00000001);
+ nv_icmd(dev, 0x000318, 0x00000001);
+ nv_icmd(dev, 0x000340, 0x00000000);
+ nv_icmd(dev, 0x000375, 0x00000001);
+ nv_icmd(dev, 0x00037d, 0x00000006);
+ nv_icmd(dev, 0x0003a0, 0x00000002);
+ nv_icmd(dev, 0x0003aa, 0x00000001);
+ nv_icmd(dev, 0x0003a9, 0x00000001);
+ nv_icmd(dev, 0x000380, 0x00000001);
+ nv_icmd(dev, 0x000383, 0x00000011);
+ nv_icmd(dev, 0x000360, 0x00000040);
+ nv_icmd(dev, 0x000366, 0x00000000);
+ nv_icmd(dev, 0x000367, 0x00000000);
+ nv_icmd(dev, 0x000368, 0x00000fff);
+ nv_icmd(dev, 0x000370, 0x00000000);
+ nv_icmd(dev, 0x000371, 0x00000000);
+ nv_icmd(dev, 0x000372, 0x000fffff);
+ nv_icmd(dev, 0x00037a, 0x00000012);
+ nv_icmd(dev, 0x000619, 0x00000003);
+ nv_icmd(dev, 0x000811, 0x00000003);
+ nv_icmd(dev, 0x000812, 0x00000004);
+ nv_icmd(dev, 0x000813, 0x00000006);
+ nv_icmd(dev, 0x000814, 0x00000008);
+ nv_icmd(dev, 0x000815, 0x0000000b);
+ nv_icmd(dev, 0x000800, 0x00000001);
+ nv_icmd(dev, 0x000801, 0x00000001);
+ nv_icmd(dev, 0x000802, 0x00000001);
+ nv_icmd(dev, 0x000803, 0x00000001);
+ nv_icmd(dev, 0x000804, 0x00000001);
+ nv_icmd(dev, 0x000805, 0x00000001);
+ nv_icmd(dev, 0x000632, 0x00000001);
+ nv_icmd(dev, 0x000633, 0x00000002);
+ nv_icmd(dev, 0x000634, 0x00000003);
+ nv_icmd(dev, 0x000635, 0x00000004);
+ nv_icmd(dev, 0x000654, 0x3f800000);
+ nv_icmd(dev, 0x000657, 0x3f800000);
+ nv_icmd(dev, 0x000655, 0x3f800000);
+ nv_icmd(dev, 0x000656, 0x3f800000);
+ nv_icmd(dev, 0x0006cd, 0x3f800000);
+ nv_icmd(dev, 0x0007f5, 0x3f800000);
+ nv_icmd(dev, 0x0007dc, 0x39291909);
+ nv_icmd(dev, 0x0007dd, 0x79695949);
+ nv_icmd(dev, 0x0007de, 0xb9a99989);
+ nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
+ nv_icmd(dev, 0x0007e8, 0x00003210);
+ nv_icmd(dev, 0x0007e9, 0x00007654);
+ nv_icmd(dev, 0x0007ea, 0x00000098);
+ nv_icmd(dev, 0x0007ec, 0x39291909);
+ nv_icmd(dev, 0x0007ed, 0x79695949);
+ nv_icmd(dev, 0x0007ee, 0xb9a99989);
+ nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
+ nv_icmd(dev, 0x0007f0, 0x00003210);
+ nv_icmd(dev, 0x0007f1, 0x00007654);
+ nv_icmd(dev, 0x0007f2, 0x00000098);
+ nv_icmd(dev, 0x0005a5, 0x00000001);
+ nv_icmd(dev, 0x000980, 0x00000000);
+ nv_icmd(dev, 0x000981, 0x00000000);
+ nv_icmd(dev, 0x000982, 0x00000000);
+ nv_icmd(dev, 0x000983, 0x00000000);
+ nv_icmd(dev, 0x000984, 0x00000000);
+ nv_icmd(dev, 0x000985, 0x00000000);
+ nv_icmd(dev, 0x000986, 0x00000000);
+ nv_icmd(dev, 0x000987, 0x00000000);
+ nv_icmd(dev, 0x000988, 0x00000000);
+ nv_icmd(dev, 0x000989, 0x00000000);
+ nv_icmd(dev, 0x00098a, 0x00000000);
+ nv_icmd(dev, 0x00098b, 0x00000000);
+ nv_icmd(dev, 0x00098c, 0x00000000);
+ nv_icmd(dev, 0x00098d, 0x00000000);
+ nv_icmd(dev, 0x00098e, 0x00000000);
+ nv_icmd(dev, 0x00098f, 0x00000000);
+ nv_icmd(dev, 0x000990, 0x00000000);
+ nv_icmd(dev, 0x000991, 0x00000000);
+ nv_icmd(dev, 0x000992, 0x00000000);
+ nv_icmd(dev, 0x000993, 0x00000000);
+ nv_icmd(dev, 0x000994, 0x00000000);
+ nv_icmd(dev, 0x000995, 0x00000000);
+ nv_icmd(dev, 0x000996, 0x00000000);
+ nv_icmd(dev, 0x000997, 0x00000000);
+ nv_icmd(dev, 0x000998, 0x00000000);
+ nv_icmd(dev, 0x000999, 0x00000000);
+ nv_icmd(dev, 0x00099a, 0x00000000);
+ nv_icmd(dev, 0x00099b, 0x00000000);
+ nv_icmd(dev, 0x00099c, 0x00000000);
+ nv_icmd(dev, 0x00099d, 0x00000000);
+ nv_icmd(dev, 0x00099e, 0x00000000);
+ nv_icmd(dev, 0x00099f, 0x00000000);
+ nv_icmd(dev, 0x0009a0, 0x00000000);
+ nv_icmd(dev, 0x0009a1, 0x00000000);
+ nv_icmd(dev, 0x0009a2, 0x00000000);
+ nv_icmd(dev, 0x0009a3, 0x00000000);
+ nv_icmd(dev, 0x0009a4, 0x00000000);
+ nv_icmd(dev, 0x0009a5, 0x00000000);
+ nv_icmd(dev, 0x0009a6, 0x00000000);
+ nv_icmd(dev, 0x0009a7, 0x00000000);
+ nv_icmd(dev, 0x0009a8, 0x00000000);
+ nv_icmd(dev, 0x0009a9, 0x00000000);
+ nv_icmd(dev, 0x0009aa, 0x00000000);
+ nv_icmd(dev, 0x0009ab, 0x00000000);
+ nv_icmd(dev, 0x0009ac, 0x00000000);
+ nv_icmd(dev, 0x0009ad, 0x00000000);
+ nv_icmd(dev, 0x0009ae, 0x00000000);
+ nv_icmd(dev, 0x0009af, 0x00000000);
+ nv_icmd(dev, 0x0009b0, 0x00000000);
+ nv_icmd(dev, 0x0009b1, 0x00000000);
+ nv_icmd(dev, 0x0009b2, 0x00000000);
+ nv_icmd(dev, 0x0009b3, 0x00000000);
+ nv_icmd(dev, 0x0009b4, 0x00000000);
+ nv_icmd(dev, 0x0009b5, 0x00000000);
+ nv_icmd(dev, 0x0009b6, 0x00000000);
+ nv_icmd(dev, 0x0009b7, 0x00000000);
+ nv_icmd(dev, 0x0009b8, 0x00000000);
+ nv_icmd(dev, 0x0009b9, 0x00000000);
+ nv_icmd(dev, 0x0009ba, 0x00000000);
+ nv_icmd(dev, 0x0009bb, 0x00000000);
+ nv_icmd(dev, 0x0009bc, 0x00000000);
+ nv_icmd(dev, 0x0009bd, 0x00000000);
+ nv_icmd(dev, 0x0009be, 0x00000000);
+ nv_icmd(dev, 0x0009bf, 0x00000000);
+ nv_icmd(dev, 0x0009c0, 0x00000000);
+ nv_icmd(dev, 0x0009c1, 0x00000000);
+ nv_icmd(dev, 0x0009c2, 0x00000000);
+ nv_icmd(dev, 0x0009c3, 0x00000000);
+ nv_icmd(dev, 0x0009c4, 0x00000000);
+ nv_icmd(dev, 0x0009c5, 0x00000000);
+ nv_icmd(dev, 0x0009c6, 0x00000000);
+ nv_icmd(dev, 0x0009c7, 0x00000000);
+ nv_icmd(dev, 0x0009c8, 0x00000000);
+ nv_icmd(dev, 0x0009c9, 0x00000000);
+ nv_icmd(dev, 0x0009ca, 0x00000000);
+ nv_icmd(dev, 0x0009cb, 0x00000000);
+ nv_icmd(dev, 0x0009cc, 0x00000000);
+ nv_icmd(dev, 0x0009cd, 0x00000000);
+ nv_icmd(dev, 0x0009ce, 0x00000000);
+ nv_icmd(dev, 0x0009cf, 0x00000000);
+ nv_icmd(dev, 0x0009d0, 0x00000000);
+ nv_icmd(dev, 0x0009d1, 0x00000000);
+ nv_icmd(dev, 0x0009d2, 0x00000000);
+ nv_icmd(dev, 0x0009d3, 0x00000000);
+ nv_icmd(dev, 0x0009d4, 0x00000000);
+ nv_icmd(dev, 0x0009d5, 0x00000000);
+ nv_icmd(dev, 0x0009d6, 0x00000000);
+ nv_icmd(dev, 0x0009d7, 0x00000000);
+ nv_icmd(dev, 0x0009d8, 0x00000000);
+ nv_icmd(dev, 0x0009d9, 0x00000000);
+ nv_icmd(dev, 0x0009da, 0x00000000);
+ nv_icmd(dev, 0x0009db, 0x00000000);
+ nv_icmd(dev, 0x0009dc, 0x00000000);
+ nv_icmd(dev, 0x0009dd, 0x00000000);
+ nv_icmd(dev, 0x0009de, 0x00000000);
+ nv_icmd(dev, 0x0009df, 0x00000000);
+ nv_icmd(dev, 0x0009e0, 0x00000000);
+ nv_icmd(dev, 0x0009e1, 0x00000000);
+ nv_icmd(dev, 0x0009e2, 0x00000000);
+ nv_icmd(dev, 0x0009e3, 0x00000000);
+ nv_icmd(dev, 0x0009e4, 0x00000000);
+ nv_icmd(dev, 0x0009e5, 0x00000000);
+ nv_icmd(dev, 0x0009e6, 0x00000000);
+ nv_icmd(dev, 0x0009e7, 0x00000000);
+ nv_icmd(dev, 0x0009e8, 0x00000000);
+ nv_icmd(dev, 0x0009e9, 0x00000000);
+ nv_icmd(dev, 0x0009ea, 0x00000000);
+ nv_icmd(dev, 0x0009eb, 0x00000000);
+ nv_icmd(dev, 0x0009ec, 0x00000000);
+ nv_icmd(dev, 0x0009ed, 0x00000000);
+ nv_icmd(dev, 0x0009ee, 0x00000000);
+ nv_icmd(dev, 0x0009ef, 0x00000000);
+ nv_icmd(dev, 0x0009f0, 0x00000000);
+ nv_icmd(dev, 0x0009f1, 0x00000000);
+ nv_icmd(dev, 0x0009f2, 0x00000000);
+ nv_icmd(dev, 0x0009f3, 0x00000000);
+ nv_icmd(dev, 0x0009f4, 0x00000000);
+ nv_icmd(dev, 0x0009f5, 0x00000000);
+ nv_icmd(dev, 0x0009f6, 0x00000000);
+ nv_icmd(dev, 0x0009f7, 0x00000000);
+ nv_icmd(dev, 0x0009f8, 0x00000000);
+ nv_icmd(dev, 0x0009f9, 0x00000000);
+ nv_icmd(dev, 0x0009fa, 0x00000000);
+ nv_icmd(dev, 0x0009fb, 0x00000000);
+ nv_icmd(dev, 0x0009fc, 0x00000000);
+ nv_icmd(dev, 0x0009fd, 0x00000000);
+ nv_icmd(dev, 0x0009fe, 0x00000000);
+ nv_icmd(dev, 0x0009ff, 0x00000000);
+ nv_icmd(dev, 0x000468, 0x00000004);
+ nv_icmd(dev, 0x00046c, 0x00000001);
+ nv_icmd(dev, 0x000470, 0x00000000);
+ nv_icmd(dev, 0x000471, 0x00000000);
+ nv_icmd(dev, 0x000472, 0x00000000);
+ nv_icmd(dev, 0x000473, 0x00000000);
+ nv_icmd(dev, 0x000474, 0x00000000);
+ nv_icmd(dev, 0x000475, 0x00000000);
+ nv_icmd(dev, 0x000476, 0x00000000);
+ nv_icmd(dev, 0x000477, 0x00000000);
+ nv_icmd(dev, 0x000478, 0x00000000);
+ nv_icmd(dev, 0x000479, 0x00000000);
+ nv_icmd(dev, 0x00047a, 0x00000000);
+ nv_icmd(dev, 0x00047b, 0x00000000);
+ nv_icmd(dev, 0x00047c, 0x00000000);
+ nv_icmd(dev, 0x00047d, 0x00000000);
+ nv_icmd(dev, 0x00047e, 0x00000000);
+ nv_icmd(dev, 0x00047f, 0x00000000);
+ nv_icmd(dev, 0x000480, 0x00000000);
+ nv_icmd(dev, 0x000481, 0x00000000);
+ nv_icmd(dev, 0x000482, 0x00000000);
+ nv_icmd(dev, 0x000483, 0x00000000);
+ nv_icmd(dev, 0x000484, 0x00000000);
+ nv_icmd(dev, 0x000485, 0x00000000);
+ nv_icmd(dev, 0x000486, 0x00000000);
+ nv_icmd(dev, 0x000487, 0x00000000);
+ nv_icmd(dev, 0x000488, 0x00000000);
+ nv_icmd(dev, 0x000489, 0x00000000);
+ nv_icmd(dev, 0x00048a, 0x00000000);
+ nv_icmd(dev, 0x00048b, 0x00000000);
+ nv_icmd(dev, 0x00048c, 0x00000000);
+ nv_icmd(dev, 0x00048d, 0x00000000);
+ nv_icmd(dev, 0x00048e, 0x00000000);
+ nv_icmd(dev, 0x00048f, 0x00000000);
+ nv_icmd(dev, 0x000490, 0x00000000);
+ nv_icmd(dev, 0x000491, 0x00000000);
+ nv_icmd(dev, 0x000492, 0x00000000);
+ nv_icmd(dev, 0x000493, 0x00000000);
+ nv_icmd(dev, 0x000494, 0x00000000);
+ nv_icmd(dev, 0x000495, 0x00000000);
+ nv_icmd(dev, 0x000496, 0x00000000);
+ nv_icmd(dev, 0x000497, 0x00000000);
+ nv_icmd(dev, 0x000498, 0x00000000);
+ nv_icmd(dev, 0x000499, 0x00000000);
+ nv_icmd(dev, 0x00049a, 0x00000000);
+ nv_icmd(dev, 0x00049b, 0x00000000);
+ nv_icmd(dev, 0x00049c, 0x00000000);
+ nv_icmd(dev, 0x00049d, 0x00000000);
+ nv_icmd(dev, 0x00049e, 0x00000000);
+ nv_icmd(dev, 0x00049f, 0x00000000);
+ nv_icmd(dev, 0x0004a0, 0x00000000);
+ nv_icmd(dev, 0x0004a1, 0x00000000);
+ nv_icmd(dev, 0x0004a2, 0x00000000);
+ nv_icmd(dev, 0x0004a3, 0x00000000);
+ nv_icmd(dev, 0x0004a4, 0x00000000);
+ nv_icmd(dev, 0x0004a5, 0x00000000);
+ nv_icmd(dev, 0x0004a6, 0x00000000);
+ nv_icmd(dev, 0x0004a7, 0x00000000);
+ nv_icmd(dev, 0x0004a8, 0x00000000);
+ nv_icmd(dev, 0x0004a9, 0x00000000);
+ nv_icmd(dev, 0x0004aa, 0x00000000);
+ nv_icmd(dev, 0x0004ab, 0x00000000);
+ nv_icmd(dev, 0x0004ac, 0x00000000);
+ nv_icmd(dev, 0x0004ad, 0x00000000);
+ nv_icmd(dev, 0x0004ae, 0x00000000);
+ nv_icmd(dev, 0x0004af, 0x00000000);
+ nv_icmd(dev, 0x0004b0, 0x00000000);
+ nv_icmd(dev, 0x0004b1, 0x00000000);
+ nv_icmd(dev, 0x0004b2, 0x00000000);
+ nv_icmd(dev, 0x0004b3, 0x00000000);
+ nv_icmd(dev, 0x0004b4, 0x00000000);
+ nv_icmd(dev, 0x0004b5, 0x00000000);
+ nv_icmd(dev, 0x0004b6, 0x00000000);
+ nv_icmd(dev, 0x0004b7, 0x00000000);
+ nv_icmd(dev, 0x0004b8, 0x00000000);
+ nv_icmd(dev, 0x0004b9, 0x00000000);
+ nv_icmd(dev, 0x0004ba, 0x00000000);
+ nv_icmd(dev, 0x0004bb, 0x00000000);
+ nv_icmd(dev, 0x0004bc, 0x00000000);
+ nv_icmd(dev, 0x0004bd, 0x00000000);
+ nv_icmd(dev, 0x0004be, 0x00000000);
+ nv_icmd(dev, 0x0004bf, 0x00000000);
+ nv_icmd(dev, 0x0004c0, 0x00000000);
+ nv_icmd(dev, 0x0004c1, 0x00000000);
+ nv_icmd(dev, 0x0004c2, 0x00000000);
+ nv_icmd(dev, 0x0004c3, 0x00000000);
+ nv_icmd(dev, 0x0004c4, 0x00000000);
+ nv_icmd(dev, 0x0004c5, 0x00000000);
+ nv_icmd(dev, 0x0004c6, 0x00000000);
+ nv_icmd(dev, 0x0004c7, 0x00000000);
+ nv_icmd(dev, 0x0004c8, 0x00000000);
+ nv_icmd(dev, 0x0004c9, 0x00000000);
+ nv_icmd(dev, 0x0004ca, 0x00000000);
+ nv_icmd(dev, 0x0004cb, 0x00000000);
+ nv_icmd(dev, 0x0004cc, 0x00000000);
+ nv_icmd(dev, 0x0004cd, 0x00000000);
+ nv_icmd(dev, 0x0004ce, 0x00000000);
+ nv_icmd(dev, 0x0004cf, 0x00000000);
+ nv_icmd(dev, 0x000510, 0x3f800000);
+ nv_icmd(dev, 0x000511, 0x3f800000);
+ nv_icmd(dev, 0x000512, 0x3f800000);
+ nv_icmd(dev, 0x000513, 0x3f800000);
+ nv_icmd(dev, 0x000514, 0x3f800000);
+ nv_icmd(dev, 0x000515, 0x3f800000);
+ nv_icmd(dev, 0x000516, 0x3f800000);
+ nv_icmd(dev, 0x000517, 0x3f800000);
+ nv_icmd(dev, 0x000518, 0x3f800000);
+ nv_icmd(dev, 0x000519, 0x3f800000);
+ nv_icmd(dev, 0x00051a, 0x3f800000);
+ nv_icmd(dev, 0x00051b, 0x3f800000);
+ nv_icmd(dev, 0x00051c, 0x3f800000);
+ nv_icmd(dev, 0x00051d, 0x3f800000);
+ nv_icmd(dev, 0x00051e, 0x3f800000);
+ nv_icmd(dev, 0x00051f, 0x3f800000);
+ nv_icmd(dev, 0x000520, 0x000002b6);
+ nv_icmd(dev, 0x000529, 0x00000001);
+ nv_icmd(dev, 0x000530, 0xffff0000);
+ nv_icmd(dev, 0x000531, 0xffff0000);
+ nv_icmd(dev, 0x000532, 0xffff0000);
+ nv_icmd(dev, 0x000533, 0xffff0000);
+ nv_icmd(dev, 0x000534, 0xffff0000);
+ nv_icmd(dev, 0x000535, 0xffff0000);
+ nv_icmd(dev, 0x000536, 0xffff0000);
+ nv_icmd(dev, 0x000537, 0xffff0000);
+ nv_icmd(dev, 0x000538, 0xffff0000);
+ nv_icmd(dev, 0x000539, 0xffff0000);
+ nv_icmd(dev, 0x00053a, 0xffff0000);
+ nv_icmd(dev, 0x00053b, 0xffff0000);
+ nv_icmd(dev, 0x00053c, 0xffff0000);
+ nv_icmd(dev, 0x00053d, 0xffff0000);
+ nv_icmd(dev, 0x00053e, 0xffff0000);
+ nv_icmd(dev, 0x00053f, 0xffff0000);
+ nv_icmd(dev, 0x000585, 0x0000003f);
+ nv_icmd(dev, 0x000576, 0x00000003);
+ nv_icmd(dev, 0x00057b, 0x00000059);
+ nv_icmd(dev, 0x000586, 0x00000040);
+ nv_icmd(dev, 0x000582, 0x00000080);
+ nv_icmd(dev, 0x000583, 0x00000080);
+ nv_icmd(dev, 0x0005c2, 0x00000001);
+ nv_icmd(dev, 0x000638, 0x00000001);
+ nv_icmd(dev, 0x000639, 0x00000001);
+ nv_icmd(dev, 0x00063a, 0x00000002);
+ nv_icmd(dev, 0x00063b, 0x00000001);
+ nv_icmd(dev, 0x00063c, 0x00000001);
+ nv_icmd(dev, 0x00063d, 0x00000002);
+ nv_icmd(dev, 0x00063e, 0x00000001);
+ nv_icmd(dev, 0x0008b8, 0x00000001);
+ nv_icmd(dev, 0x0008b9, 0x00000001);
+ nv_icmd(dev, 0x0008ba, 0x00000001);
+ nv_icmd(dev, 0x0008bb, 0x00000001);
+ nv_icmd(dev, 0x0008bc, 0x00000001);
+ nv_icmd(dev, 0x0008bd, 0x00000001);
+ nv_icmd(dev, 0x0008be, 0x00000001);
+ nv_icmd(dev, 0x0008bf, 0x00000001);
+ nv_icmd(dev, 0x000900, 0x00000001);
+ nv_icmd(dev, 0x000901, 0x00000001);
+ nv_icmd(dev, 0x000902, 0x00000001);
+ nv_icmd(dev, 0x000903, 0x00000001);
+ nv_icmd(dev, 0x000904, 0x00000001);
+ nv_icmd(dev, 0x000905, 0x00000001);
+ nv_icmd(dev, 0x000906, 0x00000001);
+ nv_icmd(dev, 0x000907, 0x00000001);
+ nv_icmd(dev, 0x000908, 0x00000002);
+ nv_icmd(dev, 0x000909, 0x00000002);
+ nv_icmd(dev, 0x00090a, 0x00000002);
+ nv_icmd(dev, 0x00090b, 0x00000002);
+ nv_icmd(dev, 0x00090c, 0x00000002);
+ nv_icmd(dev, 0x00090d, 0x00000002);
+ nv_icmd(dev, 0x00090e, 0x00000002);
+ nv_icmd(dev, 0x00090f, 0x00000002);
+ nv_icmd(dev, 0x000910, 0x00000001);
+ nv_icmd(dev, 0x000911, 0x00000001);
+ nv_icmd(dev, 0x000912, 0x00000001);
+ nv_icmd(dev, 0x000913, 0x00000001);
+ nv_icmd(dev, 0x000914, 0x00000001);
+ nv_icmd(dev, 0x000915, 0x00000001);
+ nv_icmd(dev, 0x000916, 0x00000001);
+ nv_icmd(dev, 0x000917, 0x00000001);
+ nv_icmd(dev, 0x000918, 0x00000001);
+ nv_icmd(dev, 0x000919, 0x00000001);
+ nv_icmd(dev, 0x00091a, 0x00000001);
+ nv_icmd(dev, 0x00091b, 0x00000001);
+ nv_icmd(dev, 0x00091c, 0x00000001);
+ nv_icmd(dev, 0x00091d, 0x00000001);
+ nv_icmd(dev, 0x00091e, 0x00000001);
+ nv_icmd(dev, 0x00091f, 0x00000001);
+ nv_icmd(dev, 0x000920, 0x00000002);
+ nv_icmd(dev, 0x000921, 0x00000002);
+ nv_icmd(dev, 0x000922, 0x00000002);
+ nv_icmd(dev, 0x000923, 0x00000002);
+ nv_icmd(dev, 0x000924, 0x00000002);
+ nv_icmd(dev, 0x000925, 0x00000002);
+ nv_icmd(dev, 0x000926, 0x00000002);
+ nv_icmd(dev, 0x000927, 0x00000002);
+ nv_icmd(dev, 0x000928, 0x00000001);
+ nv_icmd(dev, 0x000929, 0x00000001);
+ nv_icmd(dev, 0x00092a, 0x00000001);
+ nv_icmd(dev, 0x00092b, 0x00000001);
+ nv_icmd(dev, 0x00092c, 0x00000001);
+ nv_icmd(dev, 0x00092d, 0x00000001);
+ nv_icmd(dev, 0x00092e, 0x00000001);
+ nv_icmd(dev, 0x00092f, 0x00000001);
+ nv_icmd(dev, 0x000648, 0x00000001);
+ nv_icmd(dev, 0x000649, 0x00000001);
+ nv_icmd(dev, 0x00064a, 0x00000001);
+ nv_icmd(dev, 0x00064b, 0x00000001);
+ nv_icmd(dev, 0x00064c, 0x00000001);
+ nv_icmd(dev, 0x00064d, 0x00000001);
+ nv_icmd(dev, 0x00064e, 0x00000001);
+ nv_icmd(dev, 0x00064f, 0x00000001);
+ nv_icmd(dev, 0x000650, 0x00000001);
+ nv_icmd(dev, 0x000658, 0x0000000f);
+ nv_icmd(dev, 0x0007ff, 0x0000000a);
+ nv_icmd(dev, 0x00066a, 0x40000000);
+ nv_icmd(dev, 0x00066b, 0x10000000);
+ nv_icmd(dev, 0x00066c, 0xffff0000);
+ nv_icmd(dev, 0x00066d, 0xffff0000);
+ nv_icmd(dev, 0x0007af, 0x00000008);
+ nv_icmd(dev, 0x0007b0, 0x00000008);
+ nv_icmd(dev, 0x0007f6, 0x00000001);
+ nv_icmd(dev, 0x0006b2, 0x00000055);
+ nv_icmd(dev, 0x0007ad, 0x00000003);
+ nv_icmd(dev, 0x000937, 0x00000001);
+ nv_icmd(dev, 0x000971, 0x00000008);
+ nv_icmd(dev, 0x000972, 0x00000040);
+ nv_icmd(dev, 0x000973, 0x0000012c);
+ nv_icmd(dev, 0x00097c, 0x00000040);
+ nv_icmd(dev, 0x000979, 0x00000003);
+ nv_icmd(dev, 0x000975, 0x00000020);
+ nv_icmd(dev, 0x000976, 0x00000001);
+ nv_icmd(dev, 0x000977, 0x00000020);
+ nv_icmd(dev, 0x000978, 0x00000001);
+ nv_icmd(dev, 0x000957, 0x00000003);
+ nv_icmd(dev, 0x00095e, 0x20164010);
+ nv_icmd(dev, 0x00095f, 0x00000020);
+ nv_icmd(dev, 0x00097d, 0x00000020);
+ nv_icmd(dev, 0x000683, 0x00000006);
+ nv_icmd(dev, 0x000685, 0x003fffff);
+ nv_icmd(dev, 0x000687, 0x003fffff);
+ nv_icmd(dev, 0x0006a0, 0x00000005);
+ nv_icmd(dev, 0x000840, 0x00400008);
+ nv_icmd(dev, 0x000841, 0x08000080);
+ nv_icmd(dev, 0x000842, 0x00400008);
+ nv_icmd(dev, 0x000843, 0x08000080);
+ nv_icmd(dev, 0x000818, 0x00000000);
+ nv_icmd(dev, 0x000819, 0x00000000);
+ nv_icmd(dev, 0x00081a, 0x00000000);
+ nv_icmd(dev, 0x00081b, 0x00000000);
+ nv_icmd(dev, 0x00081c, 0x00000000);
+ nv_icmd(dev, 0x00081d, 0x00000000);
+ nv_icmd(dev, 0x00081e, 0x00000000);
+ nv_icmd(dev, 0x00081f, 0x00000000);
+ nv_icmd(dev, 0x000848, 0x00000000);
+ nv_icmd(dev, 0x000849, 0x00000000);
+ nv_icmd(dev, 0x00084a, 0x00000000);
+ nv_icmd(dev, 0x00084b, 0x00000000);
+ nv_icmd(dev, 0x00084c, 0x00000000);
+ nv_icmd(dev, 0x00084d, 0x00000000);
+ nv_icmd(dev, 0x00084e, 0x00000000);
+ nv_icmd(dev, 0x00084f, 0x00000000);
+ nv_icmd(dev, 0x000850, 0x00000000);
+ nv_icmd(dev, 0x000851, 0x00000000);
+ nv_icmd(dev, 0x000852, 0x00000000);
+ nv_icmd(dev, 0x000853, 0x00000000);
+ nv_icmd(dev, 0x000854, 0x00000000);
+ nv_icmd(dev, 0x000855, 0x00000000);
+ nv_icmd(dev, 0x000856, 0x00000000);
+ nv_icmd(dev, 0x000857, 0x00000000);
+ nv_icmd(dev, 0x000738, 0x00000000);
+ nv_icmd(dev, 0x0006aa, 0x00000001);
+ nv_icmd(dev, 0x0006ab, 0x00000002);
+ nv_icmd(dev, 0x0006ac, 0x00000080);
+ nv_icmd(dev, 0x0006ad, 0x00000100);
+ nv_icmd(dev, 0x0006ae, 0x00000100);
+ nv_icmd(dev, 0x0006b1, 0x00000011);
+ nv_icmd(dev, 0x0006bb, 0x000000cf);
+ nv_icmd(dev, 0x0006ce, 0x2a712488);
+ nv_icmd(dev, 0x000739, 0x4085c000);
+ nv_icmd(dev, 0x00073a, 0x00000080);
+ nv_icmd(dev, 0x000786, 0x80000100);
+ nv_icmd(dev, 0x00073c, 0x00010100);
+ nv_icmd(dev, 0x00073d, 0x02800000);
+ nv_icmd(dev, 0x000787, 0x000000cf);
+ nv_icmd(dev, 0x00078c, 0x00000008);
+ nv_icmd(dev, 0x000792, 0x00000001);
+ nv_icmd(dev, 0x000794, 0x00000001);
+ nv_icmd(dev, 0x000795, 0x00000001);
+ nv_icmd(dev, 0x000796, 0x00000001);
+ nv_icmd(dev, 0x000797, 0x000000cf);
+ nv_icmd(dev, 0x000836, 0x00000001);
+ nv_icmd(dev, 0x00079a, 0x00000002);
+ nv_icmd(dev, 0x000833, 0x04444480);
+ nv_icmd(dev, 0x0007a1, 0x00000001);
+ nv_icmd(dev, 0x0007a3, 0x00000001);
+ nv_icmd(dev, 0x0007a4, 0x00000001);
+ nv_icmd(dev, 0x0007a5, 0x00000001);
+ nv_icmd(dev, 0x000831, 0x00000004);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x000a04, 0x000000ff);
+ nv_icmd(dev, 0x000a0b, 0x00000040);
+ nv_icmd(dev, 0x00097f, 0x00000100);
+ nv_icmd(dev, 0x000a02, 0x00000001);
+ nv_icmd(dev, 0x000809, 0x00000007);
+ nv_icmd(dev, 0x00c221, 0x00000040);
+ nv_icmd(dev, 0x00c1b0, 0x0000000f);
+ nv_icmd(dev, 0x00c1b1, 0x0000000f);
+ nv_icmd(dev, 0x00c1b2, 0x0000000f);
+ nv_icmd(dev, 0x00c1b3, 0x0000000f);
+ nv_icmd(dev, 0x00c1b4, 0x0000000f);
+ nv_icmd(dev, 0x00c1b5, 0x0000000f);
+ nv_icmd(dev, 0x00c1b6, 0x0000000f);
+ nv_icmd(dev, 0x00c1b7, 0x0000000f);
+ nv_icmd(dev, 0x00c1b8, 0x0fac6881);
+ nv_icmd(dev, 0x00c1b9, 0x00fac688);
+ nv_icmd(dev, 0x00c401, 0x00000001);
+ nv_icmd(dev, 0x00c402, 0x00010001);
+ nv_icmd(dev, 0x00c403, 0x00000001);
+ nv_icmd(dev, 0x00c404, 0x00000001);
+ nv_icmd(dev, 0x00c40e, 0x00000020);
+ nv_icmd(dev, 0x00c500, 0x00000003);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000002);
+ nv_icmd(dev, 0x0006aa, 0x00000001);
+ nv_icmd(dev, 0x0006ad, 0x00000100);
+ nv_icmd(dev, 0x0006ae, 0x00000100);
+ nv_icmd(dev, 0x0006b1, 0x00000011);
+ nv_icmd(dev, 0x00078c, 0x00000008);
+ nv_icmd(dev, 0x000792, 0x00000001);
+ nv_icmd(dev, 0x000794, 0x00000001);
+ nv_icmd(dev, 0x000795, 0x00000001);
+ nv_icmd(dev, 0x000796, 0x00000001);
+ nv_icmd(dev, 0x000797, 0x000000cf);
+ nv_icmd(dev, 0x00079a, 0x00000002);
+ nv_icmd(dev, 0x000833, 0x04444480);
+ nv_icmd(dev, 0x0007a1, 0x00000001);
+ nv_icmd(dev, 0x0007a3, 0x00000001);
+ nv_icmd(dev, 0x0007a4, 0x00000001);
+ nv_icmd(dev, 0x0007a5, 0x00000001);
+ nv_icmd(dev, 0x000831, 0x00000004);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000008);
+ nv_icmd(dev, 0x000039, 0x00000000);
+ nv_icmd(dev, 0x00003a, 0x00000000);
+ nv_icmd(dev, 0x00003b, 0x00000000);
+ nv_icmd(dev, 0x000380, 0x00000001);
+ nv_icmd(dev, 0x000366, 0x00000000);
+ nv_icmd(dev, 0x000367, 0x00000000);
+ nv_icmd(dev, 0x000368, 0x00000fff);
+ nv_icmd(dev, 0x000370, 0x00000000);
+ nv_icmd(dev, 0x000371, 0x00000000);
+ nv_icmd(dev, 0x000372, 0x000fffff);
+ nv_icmd(dev, 0x000813, 0x00000006);
+ nv_icmd(dev, 0x000814, 0x00000008);
+ nv_icmd(dev, 0x000957, 0x00000003);
+ nv_icmd(dev, 0x000818, 0x00000000);
+ nv_icmd(dev, 0x000819, 0x00000000);
+ nv_icmd(dev, 0x00081a, 0x00000000);
+ nv_icmd(dev, 0x00081b, 0x00000000);
+ nv_icmd(dev, 0x00081c, 0x00000000);
+ nv_icmd(dev, 0x00081d, 0x00000000);
+ nv_icmd(dev, 0x00081e, 0x00000000);
+ nv_icmd(dev, 0x00081f, 0x00000000);
+ nv_icmd(dev, 0x000848, 0x00000000);
+ nv_icmd(dev, 0x000849, 0x00000000);
+ nv_icmd(dev, 0x00084a, 0x00000000);
+ nv_icmd(dev, 0x00084b, 0x00000000);
+ nv_icmd(dev, 0x00084c, 0x00000000);
+ nv_icmd(dev, 0x00084d, 0x00000000);
+ nv_icmd(dev, 0x00084e, 0x00000000);
+ nv_icmd(dev, 0x00084f, 0x00000000);
+ nv_icmd(dev, 0x000850, 0x00000000);
+ nv_icmd(dev, 0x000851, 0x00000000);
+ nv_icmd(dev, 0x000852, 0x00000000);
+ nv_icmd(dev, 0x000853, 0x00000000);
+ nv_icmd(dev, 0x000854, 0x00000000);
+ nv_icmd(dev, 0x000855, 0x00000000);
+ nv_icmd(dev, 0x000856, 0x00000000);
+ nv_icmd(dev, 0x000857, 0x00000000);
+ nv_icmd(dev, 0x000738, 0x00000000);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x000a04, 0x000000ff);
+ nv_icmd(dev, 0x00097f, 0x00000100);
+ nv_icmd(dev, 0x000a02, 0x00000001);
+ nv_icmd(dev, 0x000809, 0x00000007);
+ nv_icmd(dev, 0x00c221, 0x00000040);
+ nv_icmd(dev, 0x00c401, 0x00000001);
+ nv_icmd(dev, 0x00c402, 0x00010001);
+ nv_icmd(dev, 0x00c403, 0x00000001);
+ nv_icmd(dev, 0x00c404, 0x00000001);
+ nv_icmd(dev, 0x00c40e, 0x00000020);
+ nv_icmd(dev, 0x00c500, 0x00000003);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000001);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_wr32(dev, 0x400208, 0x00000000);
+}
+
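+/*
+ * nv_mthd() - helper used below to push one method call into the context
+ * state: the data goes to 0x40448c, then 0x404488 receives a trigger bit
+ * (bit 31) together with the method offset (shifted by 14) and the object
+ * class.
+ */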
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+ nv_wr32(dev, 0x40448c, data);
+ nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
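+/*
+ * Sets the default method state for object class 0xa097, which appears to
+ * be the Kepler (NVE0) 3D class.  Like the ICMD table above, this is an
+ * opaque list of method/value pairs.
+ */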
+static void
+nve0_grctx_generate_a097(struct drm_device *dev)
+{
+ nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
+ nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
+ nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
+ nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
+ nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
+ nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
+ nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
+ nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
+ nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
+ nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
+ nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
+ nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
+ nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
+ nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
+ nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
+ nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
+ nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
+ nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
+ nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
+ nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
+ nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
+ nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
+ nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
+ nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
+ nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
+ nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
+ nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
+ nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
+ nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
+ nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
+ nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
+ nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
+ nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
+ nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
+ nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
+ nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
+ nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
+ nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
+ nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
+ nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
+ nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
+ nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
+ nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
+ nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
+ nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
+ nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
+ nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
+ nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
+ nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
+ nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
+ nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
+ nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
+}
+
+static void
+nve0_grctx_generate_902d(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
+}
+
+static void
+nve0_graph_generate_unk40xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404010, 0x0);
+ nv_wr32(dev, 0x404014, 0x0);
+ nv_wr32(dev, 0x404018, 0x0);
+ nv_wr32(dev, 0x40401c, 0x0);
+ nv_wr32(dev, 0x404020, 0x0);
+ nv_wr32(dev, 0x404024, 0xe000);
+ nv_wr32(dev, 0x404028, 0x0);
+ nv_wr32(dev, 0x4040a8, 0x0);
+ nv_wr32(dev, 0x4040ac, 0x0);
+ nv_wr32(dev, 0x4040b0, 0x0);
+ nv_wr32(dev, 0x4040b4, 0x0);
+ nv_wr32(dev, 0x4040b8, 0x0);
+ nv_wr32(dev, 0x4040bc, 0x0);
+ nv_wr32(dev, 0x4040c0, 0x0);
+ nv_wr32(dev, 0x4040c4, 0x0);
+ nv_wr32(dev, 0x4040c8, 0xf800008f);
+ nv_wr32(dev, 0x4040d0, 0x0);
+ nv_wr32(dev, 0x4040d4, 0x0);
+ nv_wr32(dev, 0x4040d8, 0x0);
+ nv_wr32(dev, 0x4040dc, 0x0);
+ nv_wr32(dev, 0x4040e0, 0x0);
+ nv_wr32(dev, 0x4040e4, 0x0);
+ nv_wr32(dev, 0x4040e8, 0x1000);
+ nv_wr32(dev, 0x4040f8, 0x0);
+ nv_wr32(dev, 0x404130, 0x0);
+ nv_wr32(dev, 0x404134, 0x0);
+ nv_wr32(dev, 0x404138, 0x20000040);
+ nv_wr32(dev, 0x404150, 0x2e);
+ nv_wr32(dev, 0x404154, 0x400);
+ nv_wr32(dev, 0x404158, 0x200);
+ nv_wr32(dev, 0x404164, 0x55);
+ nv_wr32(dev, 0x4041a0, 0x0);
+ nv_wr32(dev, 0x4041a4, 0x0);
+ nv_wr32(dev, 0x4041a8, 0x0);
+ nv_wr32(dev, 0x4041ac, 0x0);
+ nv_wr32(dev, 0x404200, 0x0);
+ nv_wr32(dev, 0x404204, 0x0);
+ nv_wr32(dev, 0x404208, 0x0);
+ nv_wr32(dev, 0x40420c, 0x0);
+}
+
+static void
+nve0_graph_generate_unk44xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404404, 0x0);
+ nv_wr32(dev, 0x404408, 0x0);
+ nv_wr32(dev, 0x40440c, 0x0);
+ nv_wr32(dev, 0x404410, 0x0);
+ nv_wr32(dev, 0x404414, 0x0);
+ nv_wr32(dev, 0x404418, 0x0);
+ nv_wr32(dev, 0x40441c, 0x0);
+ nv_wr32(dev, 0x404420, 0x0);
+ nv_wr32(dev, 0x404424, 0x0);
+ nv_wr32(dev, 0x404428, 0x0);
+ nv_wr32(dev, 0x40442c, 0x0);
+ nv_wr32(dev, 0x404430, 0x0);
+ nv_wr32(dev, 0x404434, 0x0);
+ nv_wr32(dev, 0x404438, 0x0);
+ nv_wr32(dev, 0x404460, 0x0);
+ nv_wr32(dev, 0x404464, 0x0);
+ nv_wr32(dev, 0x404468, 0xffffff);
+ nv_wr32(dev, 0x40446c, 0x0);
+ nv_wr32(dev, 0x404480, 0x1);
+ nv_wr32(dev, 0x404498, 0x1);
+}
+
+static void
+nve0_graph_generate_unk46xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404604, 0x14);
+ nv_wr32(dev, 0x404608, 0x0);
+ nv_wr32(dev, 0x40460c, 0x3fff);
+ nv_wr32(dev, 0x404610, 0x100);
+ nv_wr32(dev, 0x404618, 0x0);
+ nv_wr32(dev, 0x40461c, 0x0);
+ nv_wr32(dev, 0x404620, 0x0);
+ nv_wr32(dev, 0x404624, 0x0);
+ nv_wr32(dev, 0x40462c, 0x0);
+ nv_wr32(dev, 0x404630, 0x0);
+ nv_wr32(dev, 0x404640, 0x0);
+ nv_wr32(dev, 0x404654, 0x0);
+ nv_wr32(dev, 0x404660, 0x0);
+ nv_wr32(dev, 0x404678, 0x0);
+ nv_wr32(dev, 0x40467c, 0x2);
+ nv_wr32(dev, 0x404680, 0x0);
+ nv_wr32(dev, 0x404684, 0x0);
+ nv_wr32(dev, 0x404688, 0x0);
+ nv_wr32(dev, 0x40468c, 0x0);
+ nv_wr32(dev, 0x404690, 0x0);
+ nv_wr32(dev, 0x404694, 0x0);
+ nv_wr32(dev, 0x404698, 0x0);
+ nv_wr32(dev, 0x40469c, 0x0);
+ nv_wr32(dev, 0x4046a0, 0x7f0080);
+ nv_wr32(dev, 0x4046a4, 0x0);
+ nv_wr32(dev, 0x4046a8, 0x0);
+ nv_wr32(dev, 0x4046ac, 0x0);
+ nv_wr32(dev, 0x4046b0, 0x0);
+ nv_wr32(dev, 0x4046b4, 0x0);
+ nv_wr32(dev, 0x4046b8, 0x0);
+ nv_wr32(dev, 0x4046bc, 0x0);
+ nv_wr32(dev, 0x4046c0, 0x0);
+ nv_wr32(dev, 0x4046c8, 0x0);
+ nv_wr32(dev, 0x4046cc, 0x0);
+ nv_wr32(dev, 0x4046d0, 0x0);
+}
+
+static void
+nve0_graph_generate_unk47xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404700, 0x0);
+ nv_wr32(dev, 0x404704, 0x0);
+ nv_wr32(dev, 0x404708, 0x0);
+ nv_wr32(dev, 0x404718, 0x0);
+ nv_wr32(dev, 0x40471c, 0x0);
+ nv_wr32(dev, 0x404720, 0x0);
+ nv_wr32(dev, 0x404724, 0x0);
+ nv_wr32(dev, 0x404728, 0x0);
+ nv_wr32(dev, 0x40472c, 0x0);
+ nv_wr32(dev, 0x404730, 0x0);
+ nv_wr32(dev, 0x404734, 0x100);
+ nv_wr32(dev, 0x404738, 0x0);
+ nv_wr32(dev, 0x40473c, 0x0);
+ nv_wr32(dev, 0x404744, 0x0);
+ nv_wr32(dev, 0x404748, 0x0);
+ nv_wr32(dev, 0x404754, 0x0);
+}
+
+static void
+nve0_graph_generate_unk58xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x405800, 0xf8000bf);
+ nv_wr32(dev, 0x405830, 0x2180648);
+ nv_wr32(dev, 0x405834, 0x8000000);
+ nv_wr32(dev, 0x405838, 0x0);
+ nv_wr32(dev, 0x405854, 0x0);
+ nv_wr32(dev, 0x405870, 0x1);
+ nv_wr32(dev, 0x405874, 0x1);
+ nv_wr32(dev, 0x405878, 0x1);
+ nv_wr32(dev, 0x40587c, 0x1);
+ nv_wr32(dev, 0x405a00, 0x0);
+ nv_wr32(dev, 0x405a04, 0x0);
+ nv_wr32(dev, 0x405a18, 0x0);
+ nv_wr32(dev, 0x405b00, 0x0);
+ nv_wr32(dev, 0x405b10, 0x1000);
+}
+
+static void
+nve0_graph_generate_unk60xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x406020, 0x4103c1);
+ nv_wr32(dev, 0x406028, 0x1);
+ nv_wr32(dev, 0x40602c, 0x1);
+ nv_wr32(dev, 0x406030, 0x1);
+ nv_wr32(dev, 0x406034, 0x1);
+}
+
+static void
+nve0_graph_generate_unk64xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x4064a8, 0x0);
+ nv_wr32(dev, 0x4064ac, 0x3fff);
+ nv_wr32(dev, 0x4064b4, 0x0);
+ nv_wr32(dev, 0x4064b8, 0x0);
+ nv_wr32(dev, 0x4064c0, 0x801a00f0);
+ nv_wr32(dev, 0x4064c4, 0x192ffff);
+ nv_wr32(dev, 0x4064c8, 0x1800600);
+ nv_wr32(dev, 0x4064cc, 0x0);
+ nv_wr32(dev, 0x4064d0, 0x0);
+ nv_wr32(dev, 0x4064d4, 0x0);
+ nv_wr32(dev, 0x4064d8, 0x0);
+ nv_wr32(dev, 0x4064dc, 0x0);
+ nv_wr32(dev, 0x4064e0, 0x0);
+ nv_wr32(dev, 0x4064e4, 0x0);
+ nv_wr32(dev, 0x4064e8, 0x0);
+ nv_wr32(dev, 0x4064ec, 0x0);
+ nv_wr32(dev, 0x4064fc, 0x22a);
+}
+
+static void
+nve0_graph_generate_unk70xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407040, 0x0);
+}
+
+static void
+nve0_graph_generate_unk78xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407804, 0x23);
+ nv_wr32(dev, 0x40780c, 0xa418820);
+ nv_wr32(dev, 0x407810, 0x62080e6);
+ nv_wr32(dev, 0x407814, 0x20398a4);
+ nv_wr32(dev, 0x407818, 0xe629062);
+ nv_wr32(dev, 0x40781c, 0xa418820);
+ nv_wr32(dev, 0x407820, 0xe6);
+ nv_wr32(dev, 0x4078bc, 0x103);
+}
+
+static void
+nve0_graph_generate_unk80xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408000, 0x0);
+ nv_wr32(dev, 0x408004, 0x0);
+ nv_wr32(dev, 0x408008, 0x30);
+ nv_wr32(dev, 0x40800c, 0x0);
+ nv_wr32(dev, 0x408010, 0x0);
+ nv_wr32(dev, 0x408014, 0x69);
+ nv_wr32(dev, 0x408018, 0xe100e100);
+ nv_wr32(dev, 0x408064, 0x0);
+}
+
+static void
+nve0_graph_generate_unk88xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408800, 0x2802a3c);
+ nv_wr32(dev, 0x408804, 0x40);
+ nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408840, 0xb);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x62000001);
+ nv_wr32(dev, 0x408908, 0xc8102f);
+ nv_wr32(dev, 0x408980, 0x11d);
+}
+
+static void
+nve0_graph_generate_gpc(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x418380, 0x16);
+ nv_wr32(dev, 0x418400, 0x38004e00);
+ nv_wr32(dev, 0x418404, 0x71e0ffff);
+ nv_wr32(dev, 0x41840c, 0x1008);
+ nv_wr32(dev, 0x418410, 0xfff0fff);
+ nv_wr32(dev, 0x418414, 0x2200fff);
+ nv_wr32(dev, 0x418450, 0x0);
+ nv_wr32(dev, 0x418454, 0x0);
+ nv_wr32(dev, 0x418458, 0x0);
+ nv_wr32(dev, 0x41845c, 0x0);
+ nv_wr32(dev, 0x418460, 0x0);
+ nv_wr32(dev, 0x418464, 0x0);
+ nv_wr32(dev, 0x418468, 0x1);
+ nv_wr32(dev, 0x41846c, 0x0);
+ nv_wr32(dev, 0x418470, 0x0);
+ nv_wr32(dev, 0x418600, 0x1f);
+ nv_wr32(dev, 0x418684, 0xf);
+ nv_wr32(dev, 0x418700, 0x2);
+ nv_wr32(dev, 0x418704, 0x80);
+ nv_wr32(dev, 0x418708, 0x0);
+ nv_wr32(dev, 0x41870c, 0x0);
+ nv_wr32(dev, 0x418710, 0x0);
+ nv_wr32(dev, 0x418800, 0x7006860a);
+ nv_wr32(dev, 0x418808, 0x0);
+ nv_wr32(dev, 0x41880c, 0x0);
+ nv_wr32(dev, 0x418810, 0x0);
+ nv_wr32(dev, 0x418828, 0x44);
+ nv_wr32(dev, 0x418830, 0x10000001);
+ nv_wr32(dev, 0x4188d8, 0x8);
+ nv_wr32(dev, 0x4188e0, 0x1000000);
+ nv_wr32(dev, 0x4188e8, 0x0);
+ nv_wr32(dev, 0x4188ec, 0x0);
+ nv_wr32(dev, 0x4188f0, 0x0);
+ nv_wr32(dev, 0x4188f4, 0x0);
+ nv_wr32(dev, 0x4188f8, 0x0);
+ nv_wr32(dev, 0x4188fc, 0x20100018);
+ nv_wr32(dev, 0x41891c, 0xff00ff);
+ nv_wr32(dev, 0x418924, 0x0);
+ nv_wr32(dev, 0x418928, 0xffff00);
+ nv_wr32(dev, 0x41892c, 0xff00);
+ nv_wr32(dev, 0x418a00, 0x0);
+ nv_wr32(dev, 0x418a04, 0x0);
+ nv_wr32(dev, 0x418a08, 0x0);
+ nv_wr32(dev, 0x418a0c, 0x10000);
+ nv_wr32(dev, 0x418a10, 0x0);
+ nv_wr32(dev, 0x418a14, 0x0);
+ nv_wr32(dev, 0x418a18, 0x0);
+ nv_wr32(dev, 0x418a20, 0x0);
+ nv_wr32(dev, 0x418a24, 0x0);
+ nv_wr32(dev, 0x418a28, 0x0);
+ nv_wr32(dev, 0x418a2c, 0x10000);
+ nv_wr32(dev, 0x418a30, 0x0);
+ nv_wr32(dev, 0x418a34, 0x0);
+ nv_wr32(dev, 0x418a38, 0x0);
+ nv_wr32(dev, 0x418a40, 0x0);
+ nv_wr32(dev, 0x418a44, 0x0);
+ nv_wr32(dev, 0x418a48, 0x0);
+ nv_wr32(dev, 0x418a4c, 0x10000);
+ nv_wr32(dev, 0x418a50, 0x0);
+ nv_wr32(dev, 0x418a54, 0x0);
+ nv_wr32(dev, 0x418a58, 0x0);
+ nv_wr32(dev, 0x418a60, 0x0);
+ nv_wr32(dev, 0x418a64, 0x0);
+ nv_wr32(dev, 0x418a68, 0x0);
+ nv_wr32(dev, 0x418a6c, 0x10000);
+ nv_wr32(dev, 0x418a70, 0x0);
+ nv_wr32(dev, 0x418a74, 0x0);
+ nv_wr32(dev, 0x418a78, 0x0);
+ nv_wr32(dev, 0x418a80, 0x0);
+ nv_wr32(dev, 0x418a84, 0x0);
+ nv_wr32(dev, 0x418a88, 0x0);
+ nv_wr32(dev, 0x418a8c, 0x10000);
+ nv_wr32(dev, 0x418a90, 0x0);
+ nv_wr32(dev, 0x418a94, 0x0);
+ nv_wr32(dev, 0x418a98, 0x0);
+ nv_wr32(dev, 0x418aa0, 0x0);
+ nv_wr32(dev, 0x418aa4, 0x0);
+ nv_wr32(dev, 0x418aa8, 0x0);
+ nv_wr32(dev, 0x418aac, 0x10000);
+ nv_wr32(dev, 0x418ab0, 0x0);
+ nv_wr32(dev, 0x418ab4, 0x0);
+ nv_wr32(dev, 0x418ab8, 0x0);
+ nv_wr32(dev, 0x418ac0, 0x0);
+ nv_wr32(dev, 0x418ac4, 0x0);
+ nv_wr32(dev, 0x418ac8, 0x0);
+ nv_wr32(dev, 0x418acc, 0x10000);
+ nv_wr32(dev, 0x418ad0, 0x0);
+ nv_wr32(dev, 0x418ad4, 0x0);
+ nv_wr32(dev, 0x418ad8, 0x0);
+ nv_wr32(dev, 0x418ae0, 0x0);
+ nv_wr32(dev, 0x418ae4, 0x0);
+ nv_wr32(dev, 0x418ae8, 0x0);
+ nv_wr32(dev, 0x418aec, 0x10000);
+ nv_wr32(dev, 0x418af0, 0x0);
+ nv_wr32(dev, 0x418af4, 0x0);
+ nv_wr32(dev, 0x418af8, 0x0);
+ nv_wr32(dev, 0x418b00, 0x6);
+ nv_wr32(dev, 0x418b08, 0xa418820);
+ nv_wr32(dev, 0x418b0c, 0x62080e6);
+ nv_wr32(dev, 0x418b10, 0x20398a4);
+ nv_wr32(dev, 0x418b14, 0xe629062);
+ nv_wr32(dev, 0x418b18, 0xa418820);
+ nv_wr32(dev, 0x418b1c, 0xe6);
+ nv_wr32(dev, 0x418bb8, 0x103);
+ nv_wr32(dev, 0x418c08, 0x1);
+ nv_wr32(dev, 0x418c10, 0x0);
+ nv_wr32(dev, 0x418c14, 0x0);
+ nv_wr32(dev, 0x418c18, 0x0);
+ nv_wr32(dev, 0x418c1c, 0x0);
+ nv_wr32(dev, 0x418c20, 0x0);
+ nv_wr32(dev, 0x418c24, 0x0);
+ nv_wr32(dev, 0x418c28, 0x0);
+ nv_wr32(dev, 0x418c2c, 0x0);
+ nv_wr32(dev, 0x418c40, 0xffffffff);
+ nv_wr32(dev, 0x418c6c, 0x1);
+ nv_wr32(dev, 0x418c80, 0x20200004);
+ nv_wr32(dev, 0x418c8c, 0x1);
+ nv_wr32(dev, 0x419000, 0x780);
+ nv_wr32(dev, 0x419004, 0x0);
+ nv_wr32(dev, 0x419008, 0x0);
+ nv_wr32(dev, 0x419014, 0x4);
+}
+
+static void
+nve0_graph_generate_tpc(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x419848, 0x0);
+ nv_wr32(dev, 0x419864, 0x129);
+ nv_wr32(dev, 0x419888, 0x0);
+ nv_wr32(dev, 0x419a00, 0xf0);
+ nv_wr32(dev, 0x419a04, 0x1);
+ nv_wr32(dev, 0x419a08, 0x21);
+ nv_wr32(dev, 0x419a0c, 0x20000);
+ nv_wr32(dev, 0x419a10, 0x0);
+ nv_wr32(dev, 0x419a14, 0x200);
+ nv_wr32(dev, 0x419a1c, 0xc000);
+ nv_wr32(dev, 0x419a20, 0x800);
+ nv_wr32(dev, 0x419a30, 0x1);
+ nv_wr32(dev, 0x419ac4, 0x37f440);
+ nv_wr32(dev, 0x419c00, 0xa);
+ nv_wr32(dev, 0x419c04, 0x80000006);
+ nv_wr32(dev, 0x419c08, 0x2);
+ nv_wr32(dev, 0x419c20, 0x0);
+ nv_wr32(dev, 0x419c24, 0x84210);
+ nv_wr32(dev, 0x419c28, 0x3efbefbe);
+ nv_wr32(dev, 0x419ce8, 0x0);
+ nv_wr32(dev, 0x419cf4, 0x3203);
+ nv_wr32(dev, 0x419e04, 0x0);
+ nv_wr32(dev, 0x419e08, 0x0);
+ nv_wr32(dev, 0x419e0c, 0x0);
+ nv_wr32(dev, 0x419e10, 0x402);
+ nv_wr32(dev, 0x419e44, 0x13eff2);
+ nv_wr32(dev, 0x419e48, 0x0);
+ nv_wr32(dev, 0x419e4c, 0x7f);
+ nv_wr32(dev, 0x419e50, 0x0);
+ nv_wr32(dev, 0x419e54, 0x0);
+ nv_wr32(dev, 0x419e58, 0x0);
+ nv_wr32(dev, 0x419e5c, 0x0);
+ nv_wr32(dev, 0x419e60, 0x0);
+ nv_wr32(dev, 0x419e64, 0x0);
+ nv_wr32(dev, 0x419e68, 0x0);
+ nv_wr32(dev, 0x419e6c, 0x0);
+ nv_wr32(dev, 0x419e70, 0x0);
+ nv_wr32(dev, 0x419e74, 0x0);
+ nv_wr32(dev, 0x419e78, 0x0);
+ nv_wr32(dev, 0x419e7c, 0x0);
+ nv_wr32(dev, 0x419e80, 0x0);
+ nv_wr32(dev, 0x419e84, 0x0);
+ nv_wr32(dev, 0x419e88, 0x0);
+ nv_wr32(dev, 0x419e8c, 0x0);
+ nv_wr32(dev, 0x419e90, 0x0);
+ nv_wr32(dev, 0x419e94, 0x0);
+ nv_wr32(dev, 0x419e98, 0x0);
+ nv_wr32(dev, 0x419eac, 0x1fcf);
+ nv_wr32(dev, 0x419eb0, 0xd3f);
+ nv_wr32(dev, 0x419ec8, 0x1304f);
+ nv_wr32(dev, 0x419f30, 0x0);
+ nv_wr32(dev, 0x419f34, 0x0);
+ nv_wr32(dev, 0x419f38, 0x0);
+ nv_wr32(dev, 0x419f3c, 0x0);
+ nv_wr32(dev, 0x419f40, 0x0);
+ nv_wr32(dev, 0x419f44, 0x0);
+ nv_wr32(dev, 0x419f48, 0x0);
+ nv_wr32(dev, 0x419f4c, 0x0);
+ nv_wr32(dev, 0x419f58, 0x0);
+ nv_wr32(dev, 0x419f78, 0xb);
+}
+
+static void
+nve0_graph_generate_tpcunk(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x41be24, 0x6);
+ nv_wr32(dev, 0x41bec0, 0x12180000);
+ nv_wr32(dev, 0x41bec4, 0x37f7f);
+ nv_wr32(dev, 0x41bee4, 0x6480430);
+ nv_wr32(dev, 0x41bf00, 0xa418820);
+ nv_wr32(dev, 0x41bf04, 0x62080e6);
+ nv_wr32(dev, 0x41bf08, 0x20398a4);
+ nv_wr32(dev, 0x41bf0c, 0xe629062);
+ nv_wr32(dev, 0x41bf10, 0xa418820);
+ nv_wr32(dev, 0x41bf14, 0xe6);
+ nv_wr32(dev, 0x41bfd0, 0x900103);
+ nv_wr32(dev, 0x41bfe0, 0x400001);
+ nv_wr32(dev, 0x41bfe4, 0x0);
+}
+
+int
+nve0_grctx_generate(struct nouveau_channel *chan)
+{
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ u32 data[6] = {}, data2[2] = {}, tmp;
+ u32 tpc_set = 0, tpc_mask = 0;
+ u8 tpcnr[GPC_MAX], a, b;
+ u8 shift, ntpcv;
+ int i, gpc, tpc, id;
+
+ nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x400204, 0x00000000);
+ nv_wr32(dev, 0x400208, 0x00000000);
+
+ nve0_graph_generate_unk40xx(dev);
+ nve0_graph_generate_unk44xx(dev);
+ nve0_graph_generate_unk46xx(dev);
+ nve0_graph_generate_unk47xx(dev);
+ nve0_graph_generate_unk58xx(dev);
+ nve0_graph_generate_unk60xx(dev);
+ nve0_graph_generate_unk64xx(dev);
+ nve0_graph_generate_unk70xx(dev);
+ nve0_graph_generate_unk78xx(dev);
+ nve0_graph_generate_unk80xx(dev);
+ nve0_graph_generate_unk88xx(dev);
+ nve0_graph_generate_gpc(dev);
+ nve0_graph_generate_tpc(dev);
+ nve0_graph_generate_tpcunk(dev);
+
+ nv_wr32(dev, 0x404154, 0x0);
+
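+	/* write out the (register, value) pairs stored in this channel's mmio object */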
+ for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+ u32 reg = nv_ro32(grch->mmio, i + 0);
+ u32 val = nv_ro32(grch->mmio, i + 4);
+ nv_wr32(dev, reg, val);
+ }
+
+ nv_wr32(dev, 0x418c6c, 0x1);
+ nv_wr32(dev, 0x41980c, 0x10);
+ nv_wr32(dev, 0x41be08, 0x4);
+ nv_wr32(dev, 0x4064c0, 0x801a00f0);
+ nv_wr32(dev, 0x405800, 0xf8000bf);
+ nv_wr32(dev, 0x419c00, 0xa);
+
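+	/* assign a sequential id to each present TPC (TPC-major, interleaved across GPCs)
+	 * and write the per-GPC TPC counts
+	 */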
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
+ }
+
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ }
+ }
+
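+	/* pack the per-GPC TPC counts into a single word, 4 bits per GPC */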
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tpc_nr[i] << (i * 4);
+ nv_wr32(dev, 0x406028, tmp);
+ nv_wr32(dev, 0x405870, tmp);
+
+ nv_wr32(dev, 0x40602c, 0x0);
+ nv_wr32(dev, 0x405874, 0x0);
+ nv_wr32(dev, 0x406030, 0x0);
+ nv_wr32(dev, 0x405878, 0x0);
+ nv_wr32(dev, 0x406034, 0x0);
+ nv_wr32(dev, 0x40587c, 0x0);
+
+ /* calculate first set of magics */
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+ gpc = -1;
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpcnr[gpc]--;
+
+ data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+ }
+
+ for (; tpc < 32; tpc++)
+ data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tpc_total;
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = ntpcv << 16;
+ data2[0] |= shift << 21;
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ data2[0] |= priv->tpc_total << 8;
+ data2[0] |= priv->magic_not_rop_nr;
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+	/* and write it to all the various parts of PGRAPH */
+ nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+ nv_wr32(dev, 0x41bfd0, data2[0]);
+ nv_wr32(dev, 0x41bfe4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);
+
+ nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
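+	/* spread the TPCs over 32 mask slots, round-robin across GPCs, writing each
+	 * cumulative enable mask and its complement
+	 */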
+ for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+ a = (i * (priv->tpc_total - 1)) / 32;
+ if (a != b) {
+ b = a;
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ tpc_set |= 1 << ((gpc * 8) + tpc);
+ }
+
+ nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+ }
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);
+
+ nv_wr32(dev, 0x405b00, 0x201);
+ nv_wr32(dev, 0x408850, 0x2);
+ nv_wr32(dev, 0x408958, 0x2);
+ nv_wr32(dev, 0x419f78, 0xa);
+
+ nve0_grctx_generate_icmd(dev);
+ nve0_grctx_generate_a097(dev);
+ nve0_grctx_generate_902d(dev);
+
+ nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
+	nv_wr32(dev, 0x418800, 0x7026860a); /* XXX */
+	nv_wr32(dev, 0x41be10, 0x00bb8bc7); /* XXX */
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 9d83729956ff..a6598fd66423 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,8 +70,9 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
- radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o si_blit_shaders.o
+ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+ si_blit_shaders.o radeon_prime.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af1054f8202a..3904d7964a4b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -591,8 +591,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- /* if (connector && connector->display_info.bpc)
- bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
encoder_mode = atombios_get_encoder_mode(encoder);
is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
@@ -968,9 +967,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
-
- /* if (connector->display_info.bpc)
- bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
@@ -1152,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
}
if (tiling_flags & RADEON_TILING_MACRO) {
- if (rdev->family >= CHIP_CAYMAN)
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
@@ -1180,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+ if ((rdev->family == CHIP_TAHITI) ||
+ (rdev->family == CHIP_PITCAIRN))
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+ else if (rdev->family == CHIP_VERDE)
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(AVIVO_D1VGA_CONTROL, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index c57d85664e77..5131b3b0f7d2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -405,13 +405,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
/* get bpc from the EDID */
static int convert_bpc_to_bpp(int bpc)
{
-#if 0
if (bpc == 0)
return 24;
else
return bpc * 3;
-#endif
- return 24;
}
/* get the max pix clock supported by the link rate and lane num */
@@ -463,7 +460,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
- int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = dp_get_max_link_rate(dpcd);
int max_lane_num = dp_get_max_lane_number(dpcd);
int lane_num;
@@ -482,7 +479,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
- int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
+ int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int lane_num, max_pix_clock;
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
@@ -533,6 +530,23 @@ u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
+static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 buf[3];
+
+ if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+ return;
+
+ if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
+ DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+
+ if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
+ DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+ buf[0], buf[1], buf[2]);
+}
+
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
@@ -546,6 +560,9 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
for (i = 0; i < 8; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
+
+ radeon_dp_probe_oui(radeon_connector);
+
return true;
}
dig_connector->dpcd[0] = 0;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2d39f9977e00..486ccdf4aacd 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -545,7 +545,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
- /* bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
}
/* no dig encoder assigned */
@@ -1163,7 +1163,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- /* bpc = connector->display_info.bpc; */
+ bpc = radeon_get_monitor_bpc(connector);
}
memset(&args, 0, sizeof(args));
@@ -1926,7 +1926,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
- r600_hdmi_setmode(encoder, adjusted_mode);
+ if (ASIC_IS_DCE6(rdev))
+ ; /* TODO (use pointers instead of ifs?) */
+ else if (ASIC_IS_DCE4(rdev))
+ evergreen_hdmi_setmode(encoder, adjusted_mode);
+ else
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
}
@@ -2081,6 +2086,7 @@ radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
+ struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -2089,8 +2095,16 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
(radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (dig)
+ if (dig) {
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+ if (rdev->family >= CHIP_R600)
+ dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+ else
+ /* RS600/690/740 have only 1 afmt block */
+ dig->afmt = rdev->mode_info.afmt[0];
+ }
+ }
}
radeon_atom_output_lock(encoder, true);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index cfa372cb1cb3..7fb3d2e0434c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if ((rdev->family == CHIP_JUNIPER) ||
+ (rdev->family == CHIP_CYPRESS) ||
+ (rdev->family == CHIP_HEMLOCK) ||
+ (rdev->family == CHIP_BARTS))
+ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 cur_pipe;
- u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- if (num_tile_pipes > EVERGREEN_MAX_PIPES)
- num_tile_pipes = EVERGREEN_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > EVERGREEN_MAX_BACKENDS)
- num_backends = EVERGREEN_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
-
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
-
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_CEDAR:
- case CHIP_REDWOOD:
- case CHIP_PALM:
- case CHIP_SUMO:
- case CHIP_SUMO2:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- force_no_swizzle = false;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- case CHIP_JUNIPER:
- case CHIP_BARTS:
- default:
- force_no_swizzle = true;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
static void evergreen_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config;
- u32 gb_addr_config = 0;
+ u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
- u32 grbm_gfx_index;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 sq_config;
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
u32 sq_stack_resource_mgmt_3;
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
+ u32 disabled_rb_mask;
int i, j, num_shader_engines, ps_thread_count;
switch (rdev->family) {
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_JUNIPER:
rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_REDWOOD:
rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CEDAR:
default:
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PALM:
rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TURKS:
rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
evergreen_fix_pci_max_read_req_size(rdev);
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
-
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
- & EVERGREEN_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
- & EVERGREEN_MAX_SIMDS_MASK);
-
- cc_rb_backend_disable =
- BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
- & EVERGREEN_MAX_BACKENDS_MASK);
-
-
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- switch (rdev->config.evergreen.max_tile_pipes) {
- case 1:
- default:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
- gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-
- if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
- gb_addr_config |= ROW_SIZE(2);
- else
- gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
-
- if (rdev->ddev->pdev->device == 0x689e) {
- u32 efuse_straps_4;
- u32 efuse_straps_3;
- u8 efuse_box_bit_131_124;
-
- WREG32(RCU_IND_INDEX, 0x204);
- efuse_straps_4 = RREG32(RCU_IND_DATA);
- WREG32(RCU_IND_INDEX, 0x203);
- efuse_straps_3 = RREG32(RCU_IND_DATA);
- efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
-
- switch(efuse_box_bit_131_124) {
- case 0x00:
- gb_backend_map = 0x76543210;
- break;
- case 0x55:
- gb_backend_map = 0x77553311;
- break;
- case 0x56:
- gb_backend_map = 0x77553300;
- break;
- case 0x59:
- gb_backend_map = 0x77552211;
- break;
- case 0x66:
- gb_backend_map = 0x77443300;
- break;
- case 0x99:
- gb_backend_map = 0x66552211;
- break;
- case 0x5a:
- gb_backend_map = 0x77552200;
- break;
- case 0xaa:
- gb_backend_map = 0x66442200;
- break;
- case 0x95:
- gb_backend_map = 0x66553311;
- break;
- default:
- DRM_ERROR("bad backend map, using default\n");
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- break;
- }
- } else if (rdev->ddev->pdev->device == 0x68b9) {
- u32 efuse_straps_3;
- u8 efuse_box_bit_127_124;
-
- WREG32(RCU_IND_INDEX, 0x203);
- efuse_straps_3 = RREG32(RCU_IND_DATA);
- efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
-
- switch(efuse_box_bit_127_124) {
- case 0x0:
- gb_backend_map = 0x00003210;
- break;
- case 0x5:
- case 0x6:
- case 0x9:
- case 0xa:
- gb_backend_map = 0x00003311;
- break;
- default:
- DRM_ERROR("bad backend map, using default\n");
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- break;
- }
- } else {
- switch (rdev->family) {
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- case CHIP_BARTS:
- gb_backend_map = 0x66442200;
- break;
- case CHIP_JUNIPER:
- gb_backend_map = 0x00002200;
- break;
- default:
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- }
- }
-
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 1 << 4;
- else
- rdev->config.evergreen.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
- rdev->config.evergreen.tile_config |=
- ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ else
+ rdev->config.evergreen.tile_config |= 0 << 4;
+ }
+ rdev->config.evergreen.tile_config |= 0 << 8;
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
- rdev->config.evergreen.backend_map = gb_backend_map;
- WREG32(GB_BACKEND_MAP, gb_backend_map);
- WREG32(GB_ADDR_CONFIG, gb_addr_config);
- WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
- WREG32(HDP_ADDR_CONFIG, gb_addr_config);
-
- num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
- grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
+	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
- for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
- u32 rb = cc_rb_backend_disable | (0xf0 << 16);
- u32 sp = cc_gc_shader_pipe_config;
- u32 gfx = grbm_gfx_index | SE_INDEX(i);
+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
- if (i == num_shader_engines) {
- rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
- sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ tmp = (((efuse_straps_4 & 0xf) << 4) |
+ ((efuse_straps_3 & 0xf0000000) >> 28));
+ } else {
+ tmp = 0;
+ for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+ u32 rb_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+ tmp <<= 4;
+ tmp |= rb_disable_bitmap;
}
+ }
+	/* enabled rbs are just the ones that are not disabled :) */
+ disabled_rb_mask = tmp;
- WREG32(GRBM_GFX_INDEX, gfx);
- WREG32(RLC_GFX_INDEX, gfx);
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
- WREG32(CC_RB_BACKEND_DISABLE, rb);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
- WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
- }
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- grbm_gfx_index |= SE_BROADCAST_WRITES;
- WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
- WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -2202,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+ if (rdev->family <= CHIP_SUMO2)
+ WREG32(SMX_SAR_CTL0, 0x00010000);
+
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
@@ -2424,27 +2157,18 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
- int r;
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2594,6 +2318,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+ u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2614,6 +2339,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
@@ -2690,6 +2422,30 @@ int evergreen_irq_set(struct radeon_device *rdev)
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.afmt[0]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
+ afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[1]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
+ afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[2]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
+ afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[3]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
+ afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[4]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
+ afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
+ if (rdev->irq.afmt[5]) {
+ DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
+ afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+ }
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
@@ -2732,6 +2488,13 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+
return 0;
}
@@ -2756,6 +2519,13 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
+ rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
@@ -2829,6 +2599,36 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
+ }
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
+ }
}
void evergreen_irq_disable(struct radeon_device *rdev)
@@ -2878,6 +2678,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -3111,6 +2912,55 @@ restart_ih:
break;
}
break;
+ case 44: /* hdmi */
+ switch (src_data) {
+ case 0:
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
+ break;
+ case 1:
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
+ break;
+ case 2:
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
+ }
+ break;
+ case 3:
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
+ }
+ break;
+ case 4:
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -3154,6 +3004,8 @@ restart_ih:
goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3248,12 +3100,9 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
r = r600_audio_init(rdev);
if (r) {
@@ -3319,10 +3168,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -3434,7 +3279,6 @@ void evergreen_fini(struct radeon_device *rdev)
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
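
The evergreen.c hunks above replace the hand-rolled backend-map table with a fold of the per-SE CC_RB_BACKEND_DISABLE bitmaps into a single disabled_rb_mask, which r6xx_remap_render_backend() then turns into GB_BACKEND_MAP. The standalone sketch below only illustrates that fold-and-remap idea; read_rb_disable(), the field widths and the slot assignment are made-up stand-ins, not the driver's actual code.

#include <stdint.h>
#include <stdio.h>

#define MAX_BACKENDS 8

/* stand-in for reading CC_RB_BACKEND_DISABLE with GRBM_GFX_INDEX set to SE i */
static uint32_t read_rb_disable(unsigned int se)
{
	static const uint32_t example[2] = { 0x0, 0x2 };	/* say RB1 of SE1 is fused off */

	return example[se & 1];
}

int main(void)
{
	unsigned int num_ses = 2, rbs_per_se = 4;
	uint32_t disabled_rb_mask = 0, backend_map = 0;
	unsigned int i, rb, slot = 0;

	/* fold the per-SE bitmaps into one mask, SE0 ending up in the low bits,
	 * mirroring the shift-and-or loop in evergreen_gpu_init() */
	for (i = num_ses; i-- > 0;) {
		disabled_rb_mask <<= rbs_per_se;
		disabled_rb_mask |= read_rb_disable(i) & ((1u << rbs_per_se) - 1);
	}

	/* pack the enabled backends into successive 4-bit slots of the map */
	for (rb = 0; rb < num_ses * rbs_per_se && slot < MAX_BACKENDS; rb++) {
		if (disabled_rb_mask & (1u << rb))
			continue;
		backend_map |= (uint32_t)(rb & 0xf) << (slot++ * 4);
	}

	printf("disabled_rb_mask=0x%x backend_map=0x%08x\n",
	       (unsigned int)disabled_rb_mask, (unsigned int)backend_map);
	return 0;
}
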
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 222acd2d33df..1e96bd458cfd 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -637,7 +637,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
- mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family < CHIP_CAYMAN)
@@ -669,7 +668,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size = ALIGN(obj_size, 256);
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_obj);
+ NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 70089d32b80f..c16554122ccd 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -52,6 +52,7 @@ struct evergreen_cs_track {
u32 cb_color_view[12];
u32 cb_color_pitch[12];
u32 cb_color_slice[12];
+ u32 cb_color_slice_idx[12];
u32 cb_color_attrib[12];
u32 cb_color_cmask_slice[8];/* unused */
u32 cb_color_fmask_slice[8];/* unused */
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
track->cb_color_info[i] = 0;
track->cb_color_view[i] = 0xFFFFFFFF;
track->cb_color_pitch[i] = 0;
- track->cb_color_slice[i] = 0;
+ track->cb_color_slice[i] = 0xfffffff;
+ track->cb_color_slice_idx[i] = 0;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
track->cb_dirty = true;
+ track->db_depth_slice = 0xffffffff;
track->db_depth_view = 0xFFFFC000;
track->db_depth_size = 0xFFFFFFFF;
track->db_depth_control = 0xFFFFFFFF;
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
{
struct evergreen_cs_track *track = p->track;
unsigned palign, halign, tileb, slice_pt;
+ unsigned mtile_pr, mtile_ps, mtileb;
tileb = 64 * surf->bpe * surf->nsamples;
- palign = track->group_size / (8 * surf->bpe * surf->nsamples);
- palign = MAX(8, palign);
slice_pt = 1;
if (tileb > surf->tsplit) {
slice_pt = tileb / surf->tsplit;
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
/* macro tile width & height */
palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
- surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
+	mtileb = (palign / 8) * (halign / 8) * tileb;
+ mtile_pr = surf->nbx / palign;
+ mtile_ps = (mtile_pr * surf->nby) / halign;
+ surf->layer_size = mtile_ps * mtileb * slice_pt;
surf->base_align = (palign / 8) * (halign / 8) * tileb;
surf->palign = palign;
surf->halign = halign;
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
offset += surf.layer_size * mslice;
if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+		/* old ddx are broken: they allocate the bo with w*h*bpp but
+		 * program the slice with ALIGN(h, 8); catch this and patch
+		 * the command stream.
+		 */
+ if (!surf.mode) {
+ volatile u32 *ib = p->ib.ptr;
+ unsigned long tmp, nby, bsize, size, min = 0;
+
+ /* find the height the ddx wants */
+ if (surf.nby > 8) {
+ min = surf.nby - 8;
+ }
+ bsize = radeon_bo_size(track->cb_color_bo[id]);
+ tmp = track->cb_color_bo_offset[id] << 8;
+ for (nby = surf.nby; nby > min; nby--) {
+ size = nby * surf.nbx * surf.bpe * surf.nsamples;
+ if ((tmp + size * mslice) <= bsize) {
+ break;
+ }
+ }
+ if (nby > min) {
+ surf.nby = nby;
+ slice = ((nby * surf.nbx) / 64) - 1;
+ if (!evergreen_surface_check(p, &surf, "cb")) {
+ /* check if this one works */
+ tmp += surf.layer_size * mslice;
+ if (tmp <= bsize) {
+ ib[track->cb_color_slice_idx[id]] = slice;
+ goto old_ddx_ok;
+ }
+ }
+ }
+ }
dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
"offset %d, max layer %d, bo size %ld, slice %d)\n",
__func__, __LINE__, id, surf.layer_size,
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
surf.tsplit, surf.mtilea);
return -EINVAL;
}
+old_ddx_ok:
return 0;
}
@@ -1057,7 +1096,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg, wait_reg_mem_info;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1215,7 +1254,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(evergreen_reg_safe_bm[i] & m))
return 0;
}
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_SLICE:
tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
track->cb_dirty = true;
break;
case CB_COLOR8_SLICE:
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_SLICE:
tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
track->cb_dirty = true;
break;
case CB_COLOR0_ATTRIB:
@@ -1896,7 +1937,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u32 idx_value;
track = (struct evergreen_cs_track *)p->track;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -2610,8 +2651,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
- for (r = 0; r < p->ib->length_dw; r++) {
- printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
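
The evergreen_cs.c hunk above reworks the 2D-tiled layer size to count whole macro tiles per row and per slice rather than nbx*nby*bpe. The standalone sketch below walks through that arithmetic; the surface parameters (bpe, bank/pipe counts, pitch and height) are made-up example values, not taken from any real ASIC.

#include <stdio.h>

int main(void)
{
	unsigned int bpe = 4, nsamples = 1;		/* 32bpp, no MSAA */
	unsigned int bankw = 1, bankh = 1, nbanks = 8, mtilea = 1;
	unsigned int npipes = 4, tsplit = 1024;
	unsigned int nbx = 256, nby = 256;		/* pitch/height in elements */
	unsigned int tileb, slice_pt, palign, halign, mtileb, mtile_pr, mtile_ps;
	unsigned long layer_size;

	tileb = 64 * bpe * nsamples;			/* bytes in one 8x8 tile */
	slice_pt = 1;
	if (tileb > tsplit)
		slice_pt = tileb / tsplit;		/* tile-split factor */

	/* macro tile width & height in elements */
	palign = (8 * bankw * npipes) * mtilea;
	halign = (8 * bankh * nbanks) / mtilea;

	mtileb = (palign / 8) * (halign / 8) * tileb;	/* bytes per macro tile */
	mtile_pr = nbx / palign;			/* macro tiles per row */
	mtile_ps = (mtile_pr * nby) / halign;		/* macro tiles per slice */
	layer_size = (unsigned long)mtile_ps * mtileb * slice_pt;

	/* with these aligned dimensions this equals nbx*nby*bpe = 262144 */
	printf("palign=%u halign=%u mtileb=%u layer_size=%lu bytes\n",
	       palign, halign, mtileb, layer_size);
	return 0;
}
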
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
new file mode 100644
index 000000000000..65c54160028b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ * Rafał Miłecki
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * calculate the checksum for a given info frame
+ */
+static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
+{
+ int i;
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build an HDMI Video Info Frame
+ */
+static void evergreen_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+	/* Our header values (type, version, length) should be alright; Intel
+	 * uses the same ones. The checksum function also seems to be OK, as it
+	 * works fine for the audio infoframe. However, the calculated value is
+	 * always lower by 2 compared to fglrx, which breaks displaying anything
+	 * on TVs that strictly check the checksum. Hack it manually here to
+	 * work around this issue. */
+ frame[0x0] += 2;
+
+ WREG32(AFMT_AVI_INFO0 + offset,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(AFMT_AVI_INFO1 + offset,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(AFMT_AVI_INFO2 + offset,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(AFMT_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
+
+	/* Stay silent; r600_hdmi_enable will raise the WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ r600_audio_set_clock(encoder, mode->clock);
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND); /* send null packets when required */
+
+ WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+	       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+	       HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI_ACR_SOURCE); /* select SW CTS value */
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND | /* send null packets when required */
+ HDMI_GC_SEND | /* send general control packets */
+ HDMI_GC_CONT); /* send general control packets every frame */
+
+ WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+ WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+ AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+ WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+ evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0);
+
+ evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+ WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}
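
The checksum helper in the new file above follows the usual infoframe rule: the checksum byte is chosen so that the header bytes (type, version, length) plus the payload plus the checksum itself sum to 0 modulo 256; the driver then bumps the stored value by 2 to match fglrx, per the comment in the hunk. A self-contained sketch of that rule, with made-up payload bytes:

#include <stdint.h>
#include <stdio.h>

static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
				  uint8_t length, const uint8_t *payload)
{
	unsigned int sum = type + version + length;
	unsigned int i;

	for (i = 0; i < length; i++)
		sum += payload[i];
	return (uint8_t)(0x100 - (sum & 0xff));	/* all bytes then sum to 0 mod 256 */
}

int main(void)
{
	uint8_t payload[13] = { 0x12, 0x28, 0x00, 0x04 };	/* made-up AVI payload, rest zero */
	uint8_t csum = infoframe_checksum(0x82, 0x02, 0x0D, payload);
	unsigned int verify = 0x82 + 0x02 + 0x0D + csum;
	unsigned int i;

	for (i = 0; i < sizeof(payload); i++)
		verify += payload[i];
	printf("checksum = 0x%02x, header+payload+checksum mod 256 = %u\n",
	       csum, verify & 0xff);
	return 0;
}
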
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 96c10b3991aa..8beac1065025 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -232,6 +232,4 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
-#define EVERGREEN_HDMI_CONFIG_OFFSET 0xf0
-
#endif
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b4eefc355f16..b50b15c70498 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -37,6 +37,15 @@
#define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
/* Registers */
#define RCU_IND_INDEX 0x100
@@ -54,6 +63,7 @@
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x0000000f
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12)
@@ -112,6 +122,226 @@
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_DEBUG 0xC1FC
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE 0x05ac
+# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE 0x05b0
+#define DCCG_AUDIO_DTO0_MODULE 0x05b4
+#define DCCG_AUDIO_DTO0_LOAD 0x05b8
+#define DCCG_AUDIO_DTO0_CNTL 0x05bc
+
+#define DCCG_AUDIO_DTO1_PHASE 0x05c0
+#define DCCG_AUDIO_DTO1_MODULE 0x05c4
+#define DCCG_AUDIO_DTO1_LOAD 0x05c8
+#define DCCG_AUDIO_DTO1_CNTL 0x05cc
+
+/* DCE 4.0 AFMT */
+#define HDMI_CONTROL 0x7030
+# define HDMI_KEEPOUT_MODE (1 << 0)
+# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK (1 << 8)
+# define HDMI_ERROR_MASK (1 << 9)
+# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
+# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28)
+# define HDMI_24BIT_DEEP_COLOR 0
+# define HDMI_30BIT_DEEP_COLOR 1
+# define HDMI_36BIT_DEEP_COLOR 2
+#define HDMI_STATUS 0x7034
+# define HDMI_ACTIVE_AVMUTE (1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+# define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7038
+# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x703c
+# define HDMI_ACR_SEND (1 << 0)
+# define HDMI_ACR_CONT (1 << 1)
+# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI_ACR_HW 0
+# define HDMI_ACR_32 1
+# define HDMI_ACR_44 2
+# define HDMI_ACR_48 3
+# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND (1 << 12)
+# define HDMI_ACR_N_MULTIPLE(x) (((x) & 7) << 16)
+# define HDMI_ACR_X1 1
+# define HDMI_ACR_X2 2
+# define HDMI_ACR_X4 4
+# define HDMI_ACR_AUDIO_PRIORITY (1 << 31)
+#define HDMI_VBI_PACKET_CONTROL 0x7040
+# define HDMI_NULL_SEND (1 << 0)
+# define HDMI_GC_SEND (1 << 4)
+# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7044
+# define HDMI_AVI_INFO_SEND (1 << 0)
+# define HDMI_AVI_INFO_CONT (1 << 1)
+# define HDMI_AUDIO_INFO_SEND (1 << 4)
+# define HDMI_AUDIO_INFO_CONT (1 << 5)
+# define HDMI_MPEG_INFO_SEND (1 << 8)
+# define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7048
+# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x704c
+# define HDMI_GENERIC0_SEND (1 << 0)
+# define HDMI_GENERIC0_CONT (1 << 1)
+# define HDMI_GENERIC1_SEND (1 << 4)
+# define HDMI_GENERIC1_CONT (1 << 5)
+# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7058
+# define HDMI_GC_AVMUTE (1 << 0)
+# define HDMI_GC_AVMUTE_CONT (1 << 2)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x705c
+# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+# define AFMT_60958_CS_SOURCE (1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7084
+# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB 0
+# define AFMT_AVI_INFO_Y_YCBCR422 1
+# define AFMT_AVI_INFO_Y_YCBCR444 2
+# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7088
+# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_CN(x) (((x) & 0x3) << 12)
+# define AFMT_AVI_INFO_YQ(x) (((x) & 0x3) << 14)
+# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x708c
+# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7090
+# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7094
+# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7098
+# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x709c
+#define AFMT_GENERIC0_0 0x70a0
+#define AFMT_GENERIC0_1 0x70a4
+#define AFMT_GENERIC0_2 0x70a8
+#define AFMT_GENERIC0_3 0x70ac
+#define AFMT_GENERIC0_4 0x70b0
+#define AFMT_GENERIC0_5 0x70b4
+#define AFMT_GENERIC0_6 0x70b8
+#define AFMT_GENERIC1_HDR 0x70bc
+#define AFMT_GENERIC1_0 0x70c0
+#define AFMT_GENERIC1_1 0x70c4
+#define AFMT_GENERIC1_2 0x70c8
+#define AFMT_GENERIC1_3 0x70cc
+#define AFMT_GENERIC1_4 0x70d0
+#define AFMT_GENERIC1_5 0x70d4
+#define AFMT_GENERIC1_6 0x70d8
+#define HDMI_ACR_32_0 0x70dc
+# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x70e0
+# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x70e4
+# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x70e8
+# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x70ec
+# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x70f0
+# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x70f4
+#define HDMI_ACR_STATUS_1 0x70f8
+#define AFMT_AUDIO_INFO0 0x70fc
+# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CT(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+# define AFMT_AUDIO_INFO_CXT(x) (((x) & 0x1f) << 24)
+#define AFMT_AUDIO_INFO1 0x7100
+# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+# define AFMT_AUDIO_INFO_LFEBPL(x) (((x) & 3) << 16)
+#define AFMT_60958_0 0x7104
+# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x7108
+# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x710c
+# define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x7110
+# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x7114
+# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x7118
+# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x711c
+# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x7120
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7128
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AUDIO_HBR_ENABLE (1 << 8)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x712c
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7130
+# define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x7134
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - afmt regs */
+# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+# define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7138
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
@@ -232,6 +462,7 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
@@ -272,6 +503,7 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
+#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define NUMBER_OF_SETS(x) ((x) << 1)
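
The golden GB_ADDR_CONFIG values added above pack several fields; going by the register macros in this header (NUM_PIPES at bits 3:0, NUM_SHADER_ENGINES at bits 13:12) and the way the driver decodes them (1 << field for pipes, field + 1 for shader engines), a small standalone sketch can recover the pipe and shader-engine counts. The mask names below are local to the sketch, and only these two fields are decoded.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_NUM_PIPES_MASK		0x0000000f
#define SKETCH_NUM_SHADER_ENGINES_MASK	0x00003000

static void decode(const char *name, uint32_t gb_addr_config)
{
	unsigned int pipes = 1u << (gb_addr_config & SKETCH_NUM_PIPES_MASK);
	unsigned int ses = ((gb_addr_config & SKETCH_NUM_SHADER_ENGINES_MASK) >> 12) + 1;

	printf("%-8s 0x%08x: %u pipes, %u shader engine(s)\n",
	       name, gb_addr_config, pipes, ses);
}

int main(void)
{
	decode("CYPRESS", 0x02011003);	/* also BARTS, CAYMAN */
	decode("JUNIPER", 0x02010002);	/* also REDWOOD, TURKS */
	decode("CEDAR",   0x02010001);	/* also CAICOS */
	return 0;
}
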
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index a48ca53fcd6a..b7bf18e40215 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,215 +417,17 @@ out:
/*
* Core functions
*/
-static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends_per_asic,
- u32 *backend_disable_mask_per_asic,
- u32 num_shader_engines)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 num_backends_per_se;
- u32 cur_pipe;
- u32 swizzle_pipe[CAYMAN_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- /* force legal values */
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
- num_tile_pipes = rdev->config.cayman.max_tile_pipes;
- if (num_shader_engines < 1)
- num_shader_engines = 1;
- if (num_shader_engines > rdev->config.cayman.max_shader_engines)
- num_shader_engines = rdev->config.cayman.max_shader_engines;
- if (num_backends_per_asic < num_shader_engines)
- num_backends_per_asic = num_shader_engines;
- if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
- num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
-
- /* make sure we have the same number of backends per se */
- num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
- /* set up the number of backends per se */
- num_backends_per_se = num_backends_per_asic / num_shader_engines;
- if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
- num_backends_per_se = rdev->config.cayman.max_backends_per_se;
- num_backends_per_asic = num_backends_per_se * num_shader_engines;
- }
-
- /* create enable mask and count for enabled backends */
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends_per_asic)
- break;
- }
-
- /* force the backends mask to match the current number of backends */
- if (enabled_backends_count != num_backends_per_asic) {
- u32 this_backend_enabled;
- u32 shader_engine;
- u32 backend_per_se;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- /* calc the current se */
- shader_engine = i / rdev->config.cayman.max_backends_per_se;
- /* calc the backend per se */
- backend_per_se = i % rdev->config.cayman.max_backends_per_se;
- /* default to not enabled */
- this_backend_enabled = 0;
- if ((shader_engine < num_shader_engines) &&
- (backend_per_se < num_backends_per_se))
- this_backend_enabled = 1;
- if (this_backend_enabled) {
- enabled_backends_mask |= (1 << i);
- *backend_disable_mask_per_asic &= ~(1 << i);
- ++enabled_backends_count;
- }
- }
- }
-
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_CAYMAN:
- case CHIP_ARUBA:
- force_no_swizzle = true;
- break;
- default:
- force_no_swizzle = false;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
-static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
- u32 disable_mask_per_se,
- u32 max_disable_mask_per_se,
- u32 num_shader_engines)
-{
- u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
- u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
- if (num_shader_engines == 1)
- return disable_mask_per_asic;
- else if (num_shader_engines == 2)
- return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
- else
- return 0xffffffff;
-}
-
static void cayman_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
u32 cgts_tcc_disable;
u32 sx_debug_1;
u32 smx_dc_ctl0;
- u32 gc_user_shader_pipe_config;
- u32 gc_user_rb_backend_disable;
- u32 cgts_user_tcc_disable;
u32 cgts_sm_ctrl_reg;
u32 hdp_host_path_cntl;
u32 tmp;
+ u32 disabled_rb_mask;
int i, j;
switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.sc_prim_fifo_size = 0x100;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARUBA:
default:
@@ -657,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_pipes_per_simd = 4;
rdev->config.cayman.max_tile_pipes = 2;
if ((rdev->pdev->device == 0x9900) ||
- (rdev->pdev->device == 0x9901)) {
+ (rdev->pdev->device == 0x9901) ||
+ (rdev->pdev->device == 0x9905) ||
+ (rdev->pdev->device == 0x9906) ||
+ (rdev->pdev->device == 0x9907) ||
+ (rdev->pdev->device == 0x9908) ||
+ (rdev->pdev->device == 0x9909) ||
+ (rdev->pdev->device == 0x9910) ||
+ (rdev->pdev->device == 0x9917)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
- (rdev->pdev->device == 0x9904)) {
+ (rdev->pdev->device == 0x9904) ||
+ (rdev->pdev->device == 0x990A) ||
+ (rdev->pdev->device == 0x9913) ||
+ (rdev->pdev->device == 0x9918)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x9990) ||
- (rdev->pdev->device == 0x9991)) {
+ } else if ((rdev->pdev->device == 0x9919) ||
+ (rdev->pdev->device == 0x9990) ||
+ (rdev->pdev->device == 0x9991) ||
+ (rdev->pdev->device == 0x9994) ||
+ (rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
} else {
@@ -687,6 +503,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.sc_prim_fifo_size = 0x40;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -706,39 +523,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
- cgts_tcc_disable = 0xffff0000;
- for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
- cgts_tcc_disable &= ~(1 << (16 + i));
- gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
- gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
- cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
- rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
- tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
- rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
- rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
- tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
- rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
- tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
- tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.cayman.backend_disable_mask_per_asic =
- cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
- rdev->config.cayman.num_shader_engines);
- rdev->config.cayman.backend_map =
- cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
- rdev->config.cayman.num_backends_per_se *
- rdev->config.cayman.num_shader_engines,
- &rdev->config.cayman.backend_disable_mask_per_asic,
- rdev->config.cayman.num_shader_engines);
- tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
- rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
- tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
- rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
- if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
- rdev->config.cayman.mem_max_burst_length_bytes = 512;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +532,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.num_gpus = 1;
rdev->config.cayman.multi_gpu_tile_size = 64;
- //gb_addr_config = 0x02011003
-#if 0
- gb_addr_config = RREG32(GB_ADDR_CONFIG);
-#else
- gb_addr_config = 0;
- switch (rdev->config.cayman.num_tile_pipes) {
- case 1:
- default:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
- gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
- tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
- switch (rdev->config.cayman.num_gpus) {
- case 1:
- default:
- gb_addr_config |= NUM_GPUS(0);
- break;
- case 2:
- gb_addr_config |= NUM_GPUS(1);
- break;
- case 4:
- gb_addr_config |= NUM_GPUS(2);
- break;
- }
- switch (rdev->config.cayman.multi_gpu_tile_size) {
- case 16:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
- break;
- case 32:
- default:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
- break;
- case 64:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
- break;
- case 128:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
- break;
- }
- switch (rdev->config.cayman.mem_row_size_in_kb) {
- case 1:
- default:
- gb_addr_config |= ROW_SIZE(0);
- break;
- case 2:
- gb_addr_config |= ROW_SIZE(1);
- break;
- case 4:
- gb_addr_config |= ROW_SIZE(2);
- break;
- }
-#endif
-
tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
rdev->config.cayman.num_tile_pipes = (1 << tmp);
tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +545,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
- //gb_backend_map = 0x76541032;
-#if 0
- gb_backend_map = RREG32(GB_BACKEND_MAP);
-#else
- gb_backend_map =
- cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
- rdev->config.cayman.num_backends_per_se *
- rdev->config.cayman.num_shader_engines,
- &rdev->config.cayman.backend_disable_mask_per_asic,
- rdev->config.cayman.num_shader_engines);
-#endif
+
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -865,34 +572,50 @@ static void cayman_gpu_init(struct radeon_device *rdev)
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
- rdev->config.evergreen.tile_config |= 1 << 4;
- else
- rdev->config.cayman.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ rdev->config.cayman.tile_config |= 1 << 4;
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.cayman.tile_config |= 1 << 4;
+ else
+ rdev->config.cayman.tile_config |= 0 << 4;
+ }
rdev->config.cayman.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
- rdev->config.cayman.backend_map = gb_backend_map;
- WREG32(GB_BACKEND_MAP, gb_backend_map);
+ tmp = 0;
+ for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+ u32 rb_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+ tmp <<= 4;
+ tmp |= rb_disable_bitmap;
+ }
+ /* enabled RBs are just the ones not disabled :) */
+ disabled_rb_mask = tmp;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- /* primary versions */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp,
+ rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines,
+ CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+ WREG32(GB_BACKEND_MAP, tmp);
+ cgts_tcc_disable = 0xffff0000;
+ for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+ cgts_tcc_disable &= ~(1 << (16 + i));
WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
-
- /* user versions */
- WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
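
For reference (not part of the patch): the per-SE loop above selects one shader engine at a time via GRBM_GFX_INDEX/RLC_GFX_INDEX, reads CC_RB_BACKEND_DISABLE, and packs the disable fields into a single mask, 4 bits per engine. A minimal user-space sketch of that packing, with the register reads replaced by a hypothetical array of sampled values:

    /* not part of the patch: user-space sketch of the mask packing above */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_disabled_rb_mask(const uint32_t *cc_rb_backend_disable,
                                          int max_shader_engines)
    {
        uint32_t tmp = 0;
        int i;

        /* highest shader engine first, 4 bits per engine */
        for (i = max_shader_engines - 1; i >= 0; i--) {
            uint32_t rb_disable_bitmap =
                (cc_rb_backend_disable[i] & 0x00ff0000) >> 16;

            tmp <<= 4;
            tmp |= rb_disable_bitmap;
        }
        return tmp;
    }

    int main(void)
    {
        /* hypothetical sampled registers: SE0 has RB1 fused off, SE1 has none */
        uint32_t regs[2] = { 0x00020000, 0x00000000 };

        printf("disabled_rb_mask = 0x%x\n", pack_disabled_rb_mask(regs, 2)); /* 0x2 */
        return 0;
    }
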
@@ -1392,35 +1115,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 srbm_status;
- u32 grbm_status;
- u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
- int r;
-
- srbm_status = RREG32(SRBM_STATUS);
- grbm_status = RREG32(GRBM_STATUS);
- grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
- grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
- if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
- return false;
- }
- /* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- /* XXX deal with CP0,1,2 */
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
-}
-
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
@@ -1601,17 +1295,18 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
r = radeon_vm_manager_start(rdev);
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1638,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
radeon_ib_pool_suspend(rdev);
radeon_vm_manager_suspend(rdev);
@@ -1661,10 +1357,6 @@ int cayman_init(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -1776,7 +1468,6 @@ void cayman_fini(struct radeon_device *rdev)
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046ada56..a0b98066e207 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -41,6 +41,9 @@
#define CAYMAN_MAX_TCC 16
#define CAYMAN_MAX_TCC_MASK 0xFF
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
+
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
@@ -148,6 +151,8 @@
#define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define RLC_GFX_INDEX 0x3FC4
+
#define CONFIG_MEMSIZE 0x5428
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
@@ -212,6 +217,12 @@
#define SOFT_RESET_VGT (1 << 14)
#define SOFT_RESET_IA (1 << 15)
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
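
For reference: the new GRBM_GFX_INDEX defines are combined exactly as in the cayman_gpu_init() loop, either to target one shader engine or to broadcast to all of them. A standalone sketch showing the resulting register values (the defines are copied from above; the printed constants are simply the OR of those fields):

    #include <stdint.h>
    #include <stdio.h>

    #define INSTANCE_INDEX(x)         ((x) << 0)
    #define SE_INDEX(x)               ((x) << 16)
    #define INSTANCE_BROADCAST_WRITES (1u << 30)
    #define SE_BROADCAST_WRITES       (1u << 31)

    int main(void)
    {
        uint32_t select_se1 = INSTANCE_BROADCAST_WRITES | SE_INDEX(1);
        uint32_t select_all = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES;

        printf("per-SE select: 0x%08x\n", select_se1); /* 0x40010000 */
        printf("broadcast all: 0x%08x\n", select_all); /* 0xc0000000 */
        return 0;
    }
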
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index fe33d35dae8c..fb44e7e49083 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -139,9 +139,9 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
}
tmp |= tile_flags;
- p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+ p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
} else
- p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
+ p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
return 0;
}
@@ -156,7 +156,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
volatile uint32_t *ib;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
@@ -660,7 +660,7 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
WREG32(RADEON_AIC_CNTL, tmp);
r100_pci_gart_tlb_flush(rdev);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
@@ -1180,6 +1180,10 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+
+ /* at this point everything should be set up correctly to enable master */
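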
+ pci_set_master(rdev->pdev);
+
radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
@@ -1271,7 +1275,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p,
unsigned i;
unsigned idx;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -1350,7 +1354,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -1529,7 +1533,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
@@ -1885,7 +1889,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
volatile uint32_t *ib;
int r;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch (pkt->opcode) {
@@ -2004,6 +2008,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
int r;
track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (!track)
+ return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
@@ -2155,79 +2161,18 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
-{
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
-}
-
-/**
- * r100_gpu_cp_is_lockup() - check if CP is lockup by recording information
- * @rdev: radeon device structure
- * @lockup: r100_gpu_lockup structure holding CP lockup tracking informations
- * @cp: radeon_cp structure holding CP information
- *
- * We don't need to initialize the lockup tracking information as we will either
- * have CP rptr to a different value of jiffies wrap around which will force
- * initialization of the lockup tracking informations.
- *
- * A possible false positivie is if we get call after while and last_cp_rptr ==
- * the current CP rptr, even if it's unlikely it might happen. To avoid this
- * if the elapsed time since last call is bigger than 2 second than we return
- * false and update the tracking information. Due to this the caller must call
- * r100_gpu_cp_is_lockup several time in less than 2sec for lockup to be reported
- * the fencing code should be cautious about that.
- *
- * Caller should write to the ring to force CP to do something so we don't get
- * false positive when CP is just gived nothing to do.
- *
- **/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
-{
- unsigned long cjiffies, elapsed;
-
- cjiffies = jiffies;
- if (!time_after(cjiffies, lockup->last_jiffies)) {
- /* likely a wrap around */
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- if (ring->rptr != lockup->last_cp_rptr) {
- /* CP is still working no lockup */
- lockup->last_cp_rptr = ring->rptr;
- lockup->last_jiffies = jiffies;
- return false;
- }
- elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
- if (elapsed >= 10000) {
- dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
- return true;
- }
- /* give a chance to the GPU ... */
- return false;
-}
-
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
- int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
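
Editorial note: the per-ASIC lockup tracking removed here is replaced by the shared radeon_ring_lockup_update()/radeon_ring_force_activity()/radeon_ring_test_lockup() helpers. The underlying idea, as seen in the deleted r100_gpu_cp_is_lockup() above, is "read pointer unchanged for too long means lockup". A user-space sketch of that check (the structure and timings are illustrative, not the shared helpers' actual implementation):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct lockup_state {
        uint32_t last_rptr;
        uint64_t last_ms;
    };

    static bool ring_is_locked_up(struct lockup_state *s, uint32_t rptr, uint64_t now_ms)
    {
        if (rptr != s->last_rptr) {
            /* ring still making progress */
            s->last_rptr = rptr;
            s->last_ms = now_ms;
            return false;
        }
        /* same read pointer: report a lockup once the stall exceeds 10 seconds */
        return (now_ms - s->last_ms) >= 10000;
    }

    int main(void)
    {
        struct lockup_state s = { .last_rptr = 0, .last_ms = 0 };

        printf("%d\n", ring_is_locked_up(&s, 16, 1000));  /* 0: progressing */
        printf("%d\n", ring_is_locked_up(&s, 16, 5000));  /* 0: stalled < 10s */
        printf("%d\n", ring_is_locked_up(&s, 16, 12000)); /* 1: stalled >= 10s */
        return 0;
    }
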
void r100_bm_disable(struct radeon_device *rdev)
@@ -2296,7 +2241,6 @@ int r100_asic_reset(struct radeon_device *rdev)
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -3742,7 +3686,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ib *ib;
+ struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -3758,22 +3702,22 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
if (r) {
return r;
}
- ib->ptr[0] = PACKET0(scratch, 0);
- ib->ptr[1] = 0xDEADBEEF;
- ib->ptr[2] = PACKET2(0);
- ib->ptr[3] = PACKET2(0);
- ib->ptr[4] = PACKET2(0);
- ib->ptr[5] = PACKET2(0);
- ib->ptr[6] = PACKET2(0);
- ib->ptr[7] = PACKET2(0);
- ib->length_dw = 8;
- r = radeon_ib_schedule(rdev, ib);
+ ib.ptr[0] = PACKET0(scratch, 0);
+ ib.ptr[1] = 0xDEADBEEF;
+ ib.ptr[2] = PACKET2(0);
+ ib.ptr[3] = PACKET2(0);
+ ib.ptr[4] = PACKET2(0);
+ ib.ptr[5] = PACKET2(0);
+ ib.ptr[6] = PACKET2(0);
+ ib.ptr[7] = PACKET2(0);
+ ib.length_dw = 8;
+ r = radeon_ib_schedule(rdev, &ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib.fence, false);
if (r) {
return r;
}
@@ -3965,12 +3909,9 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a59cc474d537..a26144d01207 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -154,7 +154,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index fa14383f9ca0..97722a33e513 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -377,28 +377,6 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 rbbm_status;
- int r;
-
- rbbm_status = RREG32(R_000E40_RBBM_STATUS);
- if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
- return false;
- }
- /* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
-}
-
int r300_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
@@ -449,7 +427,6 @@ int r300_asic_reset(struct radeon_device *rdev)
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -627,7 +604,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
int r;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
@@ -1169,7 +1146,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
unsigned idx;
int r;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch(pkt->opcode) {
@@ -1418,12 +1395,9 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index f3fcaacfea01..99137be7a300 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -279,12 +279,9 @@ static int r420_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ebcc15b03c9f..b5cf8375cd25 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -207,12 +207,10 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c8187c4b6ae8..bff627293812 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -713,6 +713,14 @@ void r600_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS; it can break the
+ * aux dp channel on imac and this helps (but does not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ */
+ continue;
+ }
if (ASIC_IS_DCE3(rdev)) {
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
if (ASIC_IS_DCE32(rdev))
@@ -1223,7 +1231,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->vram_scratch.robj);
+ NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -1350,31 +1358,17 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
- struct r100_gpu_lockup *lockup;
- int r;
-
- if (rdev->family >= CHIP_RV770)
- lockup = &rdev->config.rv770.lockup;
- else
- lockup = &rdev->config.r600.lockup;
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
@@ -1382,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
return r600_gpu_soft_reset(rdev);
}
-static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+ u32 tiling_pipe_num,
+ u32 max_rb_num,
+ u32 total_max_rb_num,
+ u32 disabled_rb_mask)
{
- u32 backend_map = 0;
- u32 enabled_backends_mask;
- u32 enabled_backends_count;
- u32 cur_pipe;
- u32 swizzle_pipe[R6XX_MAX_PIPES];
- u32 cur_backend;
- u32 i;
+ u32 rendering_pipe_num, rb_num_width, req_rb_num;
+ u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 data = 0, mask = 1 << (max_rb_num - 1);
+ unsigned i, j;
- if (num_tile_pipes > R6XX_MAX_PIPES)
- num_tile_pipes = R6XX_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > R6XX_MAX_BACKENDS)
- num_backends = R6XX_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
+ /* mask out the RBs that don't exist on that asic */
+ disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
+ rendering_pipe_num = 1 << tiling_pipe_num;
+ req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+ BUG_ON(rendering_pipe_num < req_rb_num);
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
+ pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+ pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
- switch (num_tile_pipes) {
- case 1:
- swizzle_pipe[0] = 0;
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- break;
- case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- break;
- case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- break;
- case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- break;
- case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- break;
- case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- break;
+ if (rdev->family <= CHIP_RV740) {
+ /* r6xx/r7xx */
+ rb_num_width = 2;
+ } else {
+ /* eg+ */
+ rb_num_width = 4;
}
- cur_backend = 0;
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-
- backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
- cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+ for (i = 0; i < max_rb_num; i++) {
+ if (!(mask & disabled_rb_mask)) {
+ for (j = 0; j < pipe_rb_ratio; j++) {
+ data <<= rb_num_width;
+ data |= max_rb_num - i - 1;
+ }
+ if (pipe_rb_remain) {
+ data <<= rb_num_width;
+ data |= max_rb_num - i - 1;
+ pipe_rb_remain--;
+ }
+ }
+ mask >>= 1;
}
- return backend_map;
+ return data;
}
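
For reference: a standalone copy of the remap logic above, with the rdev plumbing, BUG_ON() and family check stripped (rb_num_width is passed explicitly). The example in main() assumes a Cayman-like configuration: NUM_PIPES field 3 (8 pipes), 4 render backends present, none disabled, 4-bit map entries.

    #include <stdint.h>
    #include <stdio.h>

    static unsigned count_bits(uint32_t val)
    {
        unsigned ret = 0;

        for (; val; val >>= 1)
            ret += val & 1;
        return ret;
    }

    static uint32_t remap_render_backend(uint32_t tiling_pipe_num, uint32_t max_rb_num,
                                         uint32_t total_max_rb_num,
                                         uint32_t disabled_rb_mask,
                                         uint32_t rb_num_width)
    {
        uint32_t rendering_pipe_num, req_rb_num;
        uint32_t pipe_rb_ratio, pipe_rb_remain;
        uint32_t data = 0, mask = 1u << (max_rb_num - 1);
        unsigned i, j;

        /* mask out the RBs that don't exist on that asic */
        disabled_rb_mask |= (0xff << max_rb_num) & 0xff;

        rendering_pipe_num = 1u << tiling_pipe_num;
        req_rb_num = total_max_rb_num - count_bits(disabled_rb_mask);

        pipe_rb_ratio = rendering_pipe_num / req_rb_num;
        pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

        for (i = 0; i < max_rb_num; i++) {
            if (!(mask & disabled_rb_mask)) {
                for (j = 0; j < pipe_rb_ratio; j++) {
                    data <<= rb_num_width;
                    data |= max_rb_num - i - 1;
                }
                if (pipe_rb_remain) {
                    data <<= rb_num_width;
                    data |= max_rb_num - i - 1;
                    pipe_rb_remain--;
                }
            }
            mask >>= 1;
        }
        return data;
    }

    int main(void)
    {
        /* 8 pipes, 4 RBs, none disabled, eg+ entry width of 4 bits */
        printf("backend map = 0x%08x\n",
               remap_render_backend(3, 4, 8, 0, 4)); /* 0x33221100 */
        return 0;
    }
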
int r600_count_pipe_bits(uint32_t val)
@@ -1506,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
- u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
@@ -1517,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt = 0;
u32 sq_stack_resource_mgmt_1 = 0;
u32 sq_stack_resource_mgmt_2 = 0;
+ u32 disabled_rb_mask;
- /* FIXME: implement */
+ rdev->config.r600.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_R600:
rdev->config.r600.max_pipes = 4;
@@ -1622,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
- rdev->config.r600.tiling_group_size = 512;
- else
- rdev->config.r600.tiling_group_size = 256;
+
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1637,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= BANK_SWAPS(1);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
- cc_rb_backend_disable |=
- BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
-
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
-
- backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
- (R6XX_MAX_BACKENDS -
- r600_count_pipe_bits((cc_rb_backend_disable &
- R6XX_MAX_BACKENDS_MASK) >> 16)),
- (cc_rb_backend_disable >> 16));
+ tmp = R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+ if (tmp < rdev->config.r600.max_backends) {
+ rdev->config.r600.max_backends = tmp;
+ }
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+ tmp = R6XX_MAX_PIPES -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.r600.max_pipes) {
+ rdev->config.r600.max_pipes = tmp;
+ }
+ tmp = R6XX_MAX_SIMDS -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.r600.max_simds) {
+ rdev->config.r600.max_simds = tmp;
+ }
+
+ disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+ tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+ R6XX_MAX_BACKENDS, disabled_rb_mask);
+ tiling_config |= tmp << 16;
+ rdev->config.r600.backend_map = tmp;
+
rdev->config.r600.tile_config = tiling_config;
- rdev->config.r600.backend_map = backend_map;
- tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
- /* Setup pipes */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
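
For reference: the clamping added above reduces the per-family max_backends/max_pipes/max_simds to whatever the CC_* disable registers report as actually present. A trivial sketch with a hypothetical fuse value:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned count_bits(uint32_t v)
    {
        unsigned n = 0;

        for (; v; v >>= 1)
            n += v & 1;
        return n;
    }

    int main(void)
    {
        uint32_t max_backends = 4;                        /* from the family table */
        uint32_t disable_field = 0x1f;                    /* hypothetical: 5 RBs fused off */
        uint32_t present = 8 - count_bits(disable_field); /* R6XX_MAX_BACKENDS = 8 */

        if (present < max_backends)
            max_backends = present;

        printf("max_backends = %u\n", max_backends); /* 3 */
        return 0;
    }
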
@@ -1906,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+ WREG32(VC_ENHANCE, 0);
}
@@ -2377,20 +2311,15 @@ int r600_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_sa_bo *vb = NULL;
int r;
- mutex_lock(&rdev->r600_blit.mutex);
- rdev->r600_blit.vb_ib = NULL;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
if (r) {
- if (rdev->r600_blit.vb_ib)
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
- mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
- r600_blit_done_copy(rdev, fence);
- mutex_unlock(&rdev->r600_blit.mutex);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+ r600_blit_done_copy(rdev, fence, vb);
return 0;
}
@@ -2494,10 +2423,13 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ return r;
+
+ r = r600_audio_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ DRM_ERROR("radeon: audio init failed\n");
return r;
}
@@ -2537,12 +2469,6 @@ int r600_resume(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- DRM_ERROR("radeon: audio resume failed\n");
- return r;
- }
-
return r;
}
@@ -2574,10 +2500,6 @@ int r600_init(struct radeon_device *rdev)
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -2656,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
- r = r600_audio_init(rdev);
- if (r)
- return r; /* TODO error handling */
return 0;
}
@@ -2675,7 +2594,6 @@ void r600_fini(struct radeon_device *rdev)
r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -2704,7 +2622,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ib *ib;
+ struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -2722,18 +2640,18 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
}
- ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
- ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- ib->ptr[2] = 0xDEADBEEF;
- ib->length_dw = 3;
- r = radeon_ib_schedule(rdev, ib);
+ ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+ r = radeon_ib_schedule(rdev, &ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib.fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
@@ -2745,7 +2663,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -2788,7 +2706,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
- &rdev->ih.ring_obj);
+ NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
return r;
@@ -2968,6 +2886,15 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+ } else {
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
@@ -2978,6 +2905,10 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
}
@@ -3047,6 +2978,9 @@ int r600_irq_init(struct radeon_device *rdev)
else
r600_disable_interrupt_state(rdev);
+ /* at this point everything should be set up correctly to enable master */
+ pci_set_master(rdev->pdev);
+
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -3071,7 +3005,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
- u32 hdmi1, hdmi2;
+ u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
if (!rdev->irq.installed) {
@@ -3086,9 +3020,7 @@ int r600_irq_set(struct radeon_device *rdev)
return 0;
}
- hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
- hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -3096,12 +3028,18 @@ int r600_irq_set(struct radeon_device *rdev)
if (ASIC_IS_DCE32(rdev)) {
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+ } else {
+ hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
} else {
- hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+ hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
@@ -3143,13 +3081,13 @@ int r600_irq_set(struct radeon_device *rdev)
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
- if (rdev->irq.hdmi[0]) {
- DRM_DEBUG("r600_irq_set: hdmi 1\n");
- hdmi1 |= R600_HDMI_INT_EN;
+ if (rdev->irq.afmt[0]) {
+ DRM_DEBUG("r600_irq_set: hdmi 0\n");
+ hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
- if (rdev->irq.hdmi[1]) {
- DRM_DEBUG("r600_irq_set: hdmi 2\n");
- hdmi2 |= R600_HDMI_INT_EN;
+ if (rdev->irq.afmt[1]) {
+ DRM_DEBUG("r600_irq_set: hdmi 0\n");
+ hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
@@ -3161,9 +3099,7 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
- WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
- WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -3171,12 +3107,18 @@ int r600_irq_set(struct radeon_device *rdev)
if (ASIC_IS_DCE32(rdev)) {
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
} else {
- WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
return 0;
@@ -3190,10 +3132,19 @@ static void r600_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ if (ASIC_IS_DCE32(rdev)) {
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
+ } else {
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
+ }
} else {
rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+ rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+ rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
}
rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
@@ -3259,17 +3210,32 @@ static void r600_irq_ack(struct radeon_device *rdev)
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
- }
- if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
- }
- if (ASIC_IS_DCE3(rdev)) {
- if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
+ tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
}
} else {
- if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
- WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ } else {
+ tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
+ tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+ WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+ }
}
}
}
@@ -3345,6 +3311,7 @@ int r600_irq_process(struct radeon_device *rdev)
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -3480,9 +3447,26 @@ restart_ih:
break;
}
break;
- case 21: /* HDMI */
- DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
- r600_audio_schedule_polling(rdev);
+ case 21: /* hdmi */
+ switch (src_data) {
+ case 4:
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
+ break;
+ case 5:
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
@@ -3514,6 +3498,8 @@ restart_ih:
goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index ba66f3093d46..79b55916cf90 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -29,131 +29,119 @@
#include "radeon_asic.h"
#include "atom.h"
-#define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */
+/*
+ * check if enc_priv stores radeon_encoder_atom_dig
+ */
+static bool radeon_dig_encoder(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ }
+ return false;
+}
/*
* check if the chipset is supported
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+ return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
}
-/*
- * current number of channels
- */
-int r600_audio_channels(struct radeon_device *rdev)
+struct r600_audio r600_audio_status(struct radeon_device *rdev)
{
- return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
-}
+ struct r600_audio status;
+ uint32_t value;
-/*
- * current bits per sample
- */
-int r600_audio_bits_per_sample(struct radeon_device *rdev)
-{
- uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
- switch (value) {
- case 0x0: return 8;
- case 0x1: return 16;
- case 0x2: return 20;
- case 0x3: return 24;
- case 0x4: return 32;
- }
+ value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
- dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n",
- (int)value);
+ /* number of channels */
+ status.channels = (value & 0x7) + 1;
- return 16;
-}
-
-/*
- * current sampling rate in HZ
- */
-int r600_audio_rate(struct radeon_device *rdev)
-{
- uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
- uint32_t result;
+ /* bits per sample */
+ switch ((value & 0xF0) >> 4) {
+ case 0x0:
+ status.bits_per_sample = 8;
+ break;
+ case 0x1:
+ status.bits_per_sample = 16;
+ break;
+ case 0x2:
+ status.bits_per_sample = 20;
+ break;
+ case 0x3:
+ status.bits_per_sample = 24;
+ break;
+ case 0x4:
+ status.bits_per_sample = 32;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
+ (int)value);
+ status.bits_per_sample = 16;
+ }
+ /* current sampling rate in HZ */
if (value & 0x4000)
- result = 44100;
+ status.rate = 44100;
else
- result = 48000;
+ status.rate = 48000;
+ status.rate *= ((value >> 11) & 0x7) + 1;
+ status.rate /= ((value >> 8) & 0x7) + 1;
- result *= ((value >> 11) & 0x7) + 1;
- result /= ((value >> 8) & 0x7) + 1;
+ value = RREG32(R600_AUDIO_STATUS_BITS);
- return result;
-}
+ /* iec 60958 status bits */
+ status.status_bits = value & 0xff;
-/*
- * iec 60958 status bits
- */
-uint8_t r600_audio_status_bits(struct radeon_device *rdev)
-{
- return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
-}
+ /* iec 60958 category code */
+ status.category_code = (value >> 8) & 0xff;
-/*
- * iec 60958 category code
- */
-uint8_t r600_audio_category_code(struct radeon_device *rdev)
-{
- return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
-}
-
-/*
- * schedule next audio update event
- */
-void r600_audio_schedule_polling(struct radeon_device *rdev)
-{
- mod_timer(&rdev->audio_timer,
- jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+ return status;
}
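
For reference: a user-space sketch of the register decode performed by r600_audio_status() above, applied to one hypothetical R600_AUDIO_RATE_BPS_CHANNEL value (0x11: stereo, 16-bit, 48 kHz):

    #include <stdint.h>
    #include <stdio.h>

    struct audio_status {
        int channels;
        int bits_per_sample;
        int rate;
    };

    static struct audio_status decode_rate_bps_channel(uint32_t value)
    {
        struct audio_status s;

        /* number of channels */
        s.channels = (value & 0x7) + 1;

        /* bits per sample */
        switch ((value & 0xF0) >> 4) {
        case 0x0: s.bits_per_sample = 8;  break;
        case 0x1: s.bits_per_sample = 16; break;
        case 0x2: s.bits_per_sample = 20; break;
        case 0x3: s.bits_per_sample = 24; break;
        case 0x4: s.bits_per_sample = 32; break;
        default:  s.bits_per_sample = 16; break; /* unknown encoding */
        }

        /* current sampling rate in HZ */
        s.rate = (value & 0x4000) ? 44100 : 48000;
        s.rate *= ((value >> 11) & 0x7) + 1;
        s.rate /= ((value >> 8) & 0x7) + 1;

        return s;
    }

    int main(void)
    {
        struct audio_status s = decode_rate_bps_channel(0x11); /* hypothetical value */

        printf("%d channels, %d bits, %d Hz\n", s.channels, s.bits_per_sample, s.rate);
        return 0;
    }
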
/*
* update all hdmi interfaces with current audio parameters
*/
-static void r600_audio_update_hdmi(unsigned long param)
+void r600_audio_update_hdmi(struct work_struct *work)
{
- struct radeon_device *rdev = (struct radeon_device *)param;
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ audio_work);
struct drm_device *dev = rdev->ddev;
-
- int channels = r600_audio_channels(rdev);
- int rate = r600_audio_rate(rdev);
- int bps = r600_audio_bits_per_sample(rdev);
- uint8_t status_bits = r600_audio_status_bits(rdev);
- uint8_t category_code = r600_audio_category_code(rdev);
-
+ struct r600_audio audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
- int changes = 0, still_going = 0;
-
- changes |= channels != rdev->audio_channels;
- changes |= rate != rdev->audio_rate;
- changes |= bps != rdev->audio_bits_per_sample;
- changes |= status_bits != rdev->audio_status_bits;
- changes |= category_code != rdev->audio_category_code;
-
- if (changes) {
- rdev->audio_channels = channels;
- rdev->audio_rate = rate;
- rdev->audio_bits_per_sample = bps;
- rdev->audio_status_bits = status_bits;
- rdev->audio_category_code = category_code;
+ bool changed = false;
+
+ if (rdev->audio_status.channels != audio_status.channels ||
+ rdev->audio_status.rate != audio_status.rate ||
+ rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
+ rdev->audio_status.status_bits != audio_status.status_bits ||
+ rdev->audio_status.category_code != audio_status.category_code) {
+ rdev->audio_status = audio_status;
+ changed = true;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- still_going |= radeon_encoder->audio_polling_active;
- if (changes || r600_hdmi_buffer_status_changed(encoder))
+ if (!radeon_dig_encoder(encoder))
+ continue;
+ if (changed || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(encoder);
}
-
- if (still_going)
- r600_audio_schedule_polling(rdev);
}
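
For reference: the handler above recovers its radeon_device from the embedded work_struct with container_of(); the work itself is queued from r600_irq_process() via schedule_work(&rdev->audio_work) elsewhere in this patch. A standalone illustration of the container_of() pattern (the struct names here are stand-ins, not the radeon types):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };

    struct device {
        int id;
        struct work audio_work;
    };

    static void audio_work_handler(struct work *w)
    {
        /* recover the owning device from the embedded member */
        struct device *dev = container_of(w, struct device, audio_work);

        printf("audio work for device %d\n", dev->id);
    }

    int main(void)
    {
        struct device dev = { .id = 7 };

        audio_work_handler(&dev.audio_work);
        return 0;
    }
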
/*
@@ -177,7 +165,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
}
/*
- * initialize the audio vars and register the update timer
+ * initialize the audio vars
*/
int r600_audio_init(struct radeon_device *rdev)
{
@@ -186,51 +174,16 @@ int r600_audio_init(struct radeon_device *rdev)
r600_audio_engine_enable(rdev, true);
- rdev->audio_channels = -1;
- rdev->audio_rate = -1;
- rdev->audio_bits_per_sample = -1;
- rdev->audio_status_bits = 0;
- rdev->audio_category_code = 0;
-
- setup_timer(
- &rdev->audio_timer,
- r600_audio_update_hdmi,
- (unsigned long)rdev);
+ rdev->audio_status.channels = -1;
+ rdev->audio_status.rate = -1;
+ rdev->audio_status.bits_per_sample = -1;
+ rdev->audio_status.status_bits = 0;
+ rdev->audio_status.category_code = 0;
return 0;
}
/*
- * enable the polling timer, to check for status changes
- */
-void r600_audio_enable_polling(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-
- DRM_DEBUG("r600_audio_enable_polling: %d\n",
- radeon_encoder->audio_polling_active);
- if (radeon_encoder->audio_polling_active)
- return;
-
- radeon_encoder->audio_polling_active = 1;
- if (rdev->audio_enabled)
- mod_timer(&rdev->audio_timer, jiffies + 1);
-}
-
-/*
- * disable the polling timer, so we get no more status updates
- */
-void r600_audio_disable_polling(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DRM_DEBUG("r600_audio_disable_polling: %d\n",
- radeon_encoder->audio_polling_active);
- radeon_encoder->audio_polling_active = 0;
-}
-
-/*
* attach the audio codec to the clock source of the encoder
*/
void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
@@ -239,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
int base_rate = 48000;
switch (radeon_encoder->encoder_id) {
@@ -264,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
- /* Some magic trigger or src sel? */
- WREG32_P(0x5ac, 0x01, ~0x77);
+ /* Select DTO source */
+ WREG32(0x5ac, radeon_crtc->crtc_id);
} else {
switch (dig->dig_encoder) {
case 0:
@@ -297,7 +251,5 @@ void r600_audio_fini(struct radeon_device *rdev)
if (!rdev->audio_enabled)
return;
- del_timer(&rdev->audio_timer);
-
r600_audio_engine_enable(rdev, false);
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index db38f587f27a..03b6e0d3d503 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -513,7 +513,6 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.set_default_state = set_default_state;
rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -528,7 +527,6 @@ int r600_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
- mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -554,7 +552,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size = ALIGN(obj_size, 256);
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_obj);
+ NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
@@ -621,27 +619,6 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
-{
- int r;
- r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
- &rdev->r600_blit.vb_ib, size);
- if (r) {
- DRM_ERROR("failed to get IB for vertex buffer\n");
- return r;
- }
-
- rdev->r600_blit.vb_total = size;
- rdev->r600_blit.vb_used = 0;
- return 0;
-}
-
-static void r600_vb_ib_put(struct radeon_device *rdev)
-{
- radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
-}
-
static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int *width, int *height, int max_dim)
{
@@ -688,7 +665,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
}
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_sa_bo **vb)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -705,46 +683,54 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
}
/* 48 bytes for vertex per loop */
- r = r600_vb_ib_get(rdev, (num_loops*48)+256);
- if (r)
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
+ (num_loops*48)+256, 256, true);
+ if (r) {
return r;
+ }
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
- if (r)
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
return r;
+ }
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+ struct radeon_sa_bo *vb)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- if (rdev->r600_blit.vb_ib)
- r600_vb_ib_put(rdev);
-
- if (fence)
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return;
+ }
- radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_sa_bo_free(rdev, &vb, fence);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages)
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb)
{
u64 vb_gpu_addr;
- u32 *vb;
+ u32 *vb_cpu_addr;
- DRM_DEBUG("emitting copy %16llx %16llx %d %d\n",
- src_gpu_addr, dst_gpu_addr,
- num_gpu_pages, rdev->r600_blit.vb_used);
- vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+ DRM_DEBUG("emitting copy %16llx %16llx %d\n",
+ src_gpu_addr, dst_gpu_addr, num_gpu_pages);
+ vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
+ vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
while (num_gpu_pages) {
int w, h;
@@ -756,39 +742,34 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
- }
-
- vb[0] = 0;
- vb[1] = 0;
- vb[2] = 0;
- vb[3] = 0;
+ vb_cpu_addr[0] = 0;
+ vb_cpu_addr[1] = 0;
+ vb_cpu_addr[2] = 0;
+ vb_cpu_addr[3] = 0;
- vb[4] = 0;
- vb[5] = i2f(h);
- vb[6] = 0;
- vb[7] = i2f(h);
+ vb_cpu_addr[4] = 0;
+ vb_cpu_addr[5] = i2f(h);
+ vb_cpu_addr[6] = 0;
+ vb_cpu_addr[7] = i2f(h);
- vb[8] = i2f(w);
- vb[9] = i2f(h);
- vb[10] = i2f(w);
- vb[11] = i2f(h);
+ vb_cpu_addr[8] = i2f(w);
+ vb_cpu_addr[9] = i2f(h);
+ vb_cpu_addr[10] = i2f(w);
+ vb_cpu_addr[11] = i2f(h);
rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
w, h, w, src_gpu_addr, size_in_bytes);
rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
w, h, dst_gpu_addr);
rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
rdev->r600_blit.primitives.draw_auto(rdev);
rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
size_in_bytes, dst_gpu_addr);
- vb += 12;
- rdev->r600_blit.vb_used += 4*12;
+ vb_cpu_addr += 12;
+ vb_gpu_addr += 4*12;
src_gpu_addr += size_in_bytes;
dst_gpu_addr += size_in_bytes;
num_gpu_pages -= pages_per_loop;
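The blit path above now threads a radeon_sa_bo handle through the prepare/copy/done calls instead of keeping a vertex-buffer IB in rdev->r600_blit. A minimal caller sketch of the new convention follows; it is illustrative only (the helper name is made up, fence creation is elided, and it assumes the driver-internal declarations from radeon.h):

/* Hedged sketch of the reworked calling sequence; not part of this commit. */
static int example_blit_copy(struct radeon_device *rdev,
			     u64 src_gpu_addr, u64 dst_gpu_addr,
			     unsigned num_gpu_pages,
			     struct radeon_fence *fence)
{
	struct radeon_sa_bo *vb;	/* vertex buffer from the ring_tmp_bo sub-allocator */
	int r;

	r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
	if (r)
		return r;	/* prepare cleans up after itself on failure */

	r600_kms_blit_copy(rdev, src_gpu_addr, dst_gpu_addr, num_gpu_pages, vb);

	/* emits the fence, commits the ring and frees vb against that fence */
	r600_blit_done_copy(rdev, fence, vb);
	return 0;
}
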
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b8e12af304a9..ca87f7afaf23 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -345,7 +345,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
- volatile u32 *ib = p->ib->ptr;
+ volatile u32 *ib = p->ib.ptr;
unsigned array_mode;
u32 format;
@@ -471,7 +471,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
u64 base_offset, base_align;
struct array_mode_checker array_check;
int array_mode;
- volatile u32 *ib = p->ib->ptr;
+ volatile u32 *ib = p->ib.ptr;
if (track->db_bo == NULL) {
@@ -961,7 +961,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg, wait_reg_mem_info;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1110,7 +1110,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
m = 1 << ((reg >> 2) & 31);
if (!(r600_reg_safe_bm[i] & m))
return 0;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
@@ -1714,7 +1714,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u32 idx_value;
track = (struct r600_cs_track *)p->track;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
+ case PACKET3_STRMOUT_BASE_UPDATE:
+ if (p->family < CHIP_RV770) {
+ DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+ return -EINVAL;
+ }
+ if (pkt->count != 1) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+ return -EINVAL;
+ }
+ if (idx_value > 3) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+ return -EINVAL;
+ }
+ {
+ u64 offset;
+
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+ return -EINVAL;
+ }
+
+ if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+ return -EINVAL;
+ }
+
+ offset = radeon_get_ib_value(p, idx+1) << 8;
+ if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+ offset, track->vgt_strmout_bo_offset[idx_value]);
+ return -EINVAL;
+ }
+
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
case PACKET3_SURFACE_BASE_UPDATE:
if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
@@ -2249,8 +2291,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
- for (r = 0; r < p->ib->length_dw; r++) {
- printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
@@ -2298,7 +2340,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
{
struct radeon_cs_parser parser;
struct radeon_cs_chunk *ib_chunk;
- struct radeon_ib fake_ib;
struct r600_cs_track *track;
int r;
@@ -2314,9 +2355,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
- parser.ib = &fake_ib;
parser.track = track;
- fake_ib.ptr = ib;
+ parser.ib.ptr = ib;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
@@ -2333,8 +2373,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
* input memory (cached) and write to the IB (which can be
* uncached). */
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
- parser.ib->length_dw = ib_chunk->length_dw;
- *l = parser.ib->length_dw;
+ parser.ib.length_dw = ib_chunk->length_dw;
+ *l = parser.ib.length_dw;
r = r600_cs_parse(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 0b5920671450..82a0a4c919c0 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -27,6 +27,7 @@
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
+#include "r600d.h"
#include "atom.h"
/*
@@ -52,19 +53,7 @@ enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_LEVEL = 0x80
};
-struct {
- uint32_t Clock;
-
- int N_32kHz;
- int CTS_32kHz;
-
- int N_44_1kHz;
- int CTS_44_1kHz;
-
- int N_48kHz;
- int CTS_48kHz;
-
-} r600_hdmi_ACR[] = {
+struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
@@ -83,7 +72,7 @@ struct {
/*
* calculate CTS value if it's not found in the table
*/
-static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
*CTS = clock * N / (128 * freq) * 1000;
@@ -91,6 +80,24 @@ static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
N, *CTS, freq);
}
+struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+{
+ struct radeon_hdmi_acr res;
+ u8 i;
+
+ for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+ r600_hdmi_predefined_acr[i].clock != 0; i++)
+ ;
+ res = r600_hdmi_predefined_acr[i];
+
+ /* In case some CTS are missing */
+ r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+ r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+
+ return res;
+}
+
/*
* update the N and CTS parameters for a given pixel clock rate
*/
@@ -98,30 +105,19 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- int CTS;
- int N;
- int i;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
- for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
-
- CTS = r600_hdmi_ACR[i].CTS_32kHz;
- N = r600_hdmi_ACR[i].N_32kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
- WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_32kHz_N, N);
-
- CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
- N = r600_hdmi_ACR[i].N_44_1kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
- WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_44_1kHz_N, N);
-
- CTS = r600_hdmi_ACR[i].CTS_48kHz;
- N = r600_hdmi_ACR[i].N_48kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
- WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
- WREG32(offset+R600_HDMI_48kHz_N, N);
+ WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
}
/*
@@ -165,7 +161,9 @@ static void r600_hdmi_videoinfoframe(
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
uint8_t frame[14];
@@ -204,13 +202,13 @@ static void r600_hdmi_videoinfoframe(
* workaround this issue. */
frame[0x0] += 2;
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ WREG32(HDMI0_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
+ WREG32(HDMI0_AVI_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
+ WREG32(HDMI0_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
+ WREG32(HDMI0_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8));
}
@@ -231,7 +229,9 @@ static void r600_hdmi_audioinfoframe(
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
uint8_t frame[11];
@@ -249,22 +249,24 @@ static void r600_hdmi_audioinfoframe(
r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
- WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
+ WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
+ WREG32(HDMI0_AUDIO_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
}
/*
* test if audio buffer is filled enough to start playing
*/
-static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
- return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
+ return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
}
/*
@@ -273,14 +275,15 @@ static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int status, result;
- if (!radeon_encoder->hdmi_offset)
+ if (!dig->afmt || !dig->afmt->enabled)
return 0;
status = r600_hdmi_is_audio_buffer_filled(encoder);
- result = radeon_encoder->hdmi_buffer_status != status;
- radeon_encoder->hdmi_buffer_status = status;
+ result = dig->afmt->last_buffer_filled_status != status;
+ dig->afmt->last_buffer_filled_status = status;
return result;
}
@@ -288,26 +291,23 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
/*
* write the audio workaround status to the hardware
*/
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t offset = radeon_encoder->hdmi_offset;
-
- if (!offset)
- return;
-
- if (!radeon_encoder->hdmi_audio_workaround ||
- r600_hdmi_is_audio_buffer_filled(encoder)) {
-
- /* disable audio workaround */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
-
- } else {
- /* enable audio workaround */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
- }
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ bool hdmi_audio_workaround = false; /* FIXME */
+ u32 value;
+
+ if (!hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder))
+ value = 0; /* disable workaround */
+ else
+ value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
+ WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ value, ~HDMI0_AUDIO_TEST_EN);
}
@@ -318,39 +318,71 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- if (ASIC_IS_DCE5(rdev))
- return;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
- if (!offset)
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
r600_audio_set_clock(encoder, mode->clock);
- WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
- WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
- WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND); /* send null packets when required */
- r600_hdmi_update_ACR(encoder, mode->clock);
+ WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ }
+
+ WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI0_ACR_SOURCE); /* select SW CTS value */
- WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
- WREG32(offset+R600_HDMI_VERSION, 0x202);
+ /* TODO: HDMI0_AUDIO_INFO_UPDATE */
+ WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
+
+ WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
- WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
-
- /* audio packets per line, does anyone know how to calc this ? */
- WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
}
/*
@@ -360,145 +392,82 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
-
- int channels = r600_audio_channels(rdev);
- int rate = r600_audio_rate(rdev);
- int bps = r600_audio_bits_per_sample(rdev);
- uint8_t status_bits = r600_audio_status_bits(rdev);
- uint8_t category_code = r600_audio_category_code(rdev);
-
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct r600_audio audio = r600_audio_status(rdev);
+ uint32_t offset;
uint32_t iec;
- if (!offset)
+ if (!dig->afmt || !dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
- channels, rate, bps);
+ audio.channels, audio.rate, audio.bits_per_sample);
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
- (int)status_bits, (int)category_code);
+ (int)audio.status_bits, (int)audio.category_code);
iec = 0;
- if (status_bits & AUDIO_STATUS_PROFESSIONAL)
+ if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
iec |= 1 << 0;
- if (status_bits & AUDIO_STATUS_NONAUDIO)
+ if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
iec |= 1 << 1;
- if (status_bits & AUDIO_STATUS_COPYRIGHT)
+ if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
iec |= 1 << 2;
- if (status_bits & AUDIO_STATUS_EMPHASIS)
+ if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
iec |= 1 << 3;
- iec |= category_code << 8;
-
- switch (rate) {
- case 32000: iec |= 0x3 << 24; break;
- case 44100: iec |= 0x0 << 24; break;
- case 88200: iec |= 0x8 << 24; break;
- case 176400: iec |= 0xc << 24; break;
- case 48000: iec |= 0x2 << 24; break;
- case 96000: iec |= 0xa << 24; break;
- case 192000: iec |= 0xe << 24; break;
+ iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
+
+ switch (audio.rate) {
+ case 32000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
+ break;
+ case 44100:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
+ break;
+ case 48000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
+ break;
+ case 88200:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
+ break;
+ case 96000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
+ break;
+ case 176400:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
+ break;
+ case 192000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
+ break;
}
- WREG32(offset+R600_HDMI_IEC60958_1, iec);
+ WREG32(HDMI0_60958_0 + offset, iec);
iec = 0;
- switch (bps) {
- case 16: iec |= 0x2; break;
- case 20: iec |= 0x3; break;
- case 24: iec |= 0xb; break;
+ switch (audio.bits_per_sample) {
+ case 16:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
+ break;
+ case 20:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
+ break;
+ case 24:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
+ break;
}
- if (status_bits & AUDIO_STATUS_V)
+ if (audio.status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
+ WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
- WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
-
- /* 0x021 or 0x031 sets the audio frame length */
- WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
- r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
+ r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
+ 0);
r600_hdmi_audio_workaround(encoder);
}
-static int r600_hdmi_find_free_block(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder;
- struct radeon_encoder *radeon_encoder;
- bool free_blocks[3] = { true, true, true };
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->hdmi_offset) {
- case R600_HDMI_BLOCK1:
- free_blocks[0] = false;
- break;
- case R600_HDMI_BLOCK2:
- free_blocks[1] = false;
- break;
- case R600_HDMI_BLOCK3:
- free_blocks[2] = false;
- break;
- }
- }
-
- if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
- rdev->family == CHIP_RS740) {
- return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
- } else if (rdev->family >= CHIP_R600) {
- if (free_blocks[0])
- return R600_HDMI_BLOCK1;
- else if (free_blocks[1])
- return R600_HDMI_BLOCK2;
- }
- return 0;
-}
-
-static void r600_hdmi_assign_block(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- u16 eg_offsets[] = {
- EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_CRTC5_REGISTER_OFFSET,
- };
-
- if (!dig) {
- dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
- return;
- }
-
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
- dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
- return;
- }
- radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
- eg_offsets[dig->dig_encoder];
- radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
- + EVERGREEN_HDMI_CONFIG_OFFSET;
- } else if (ASIC_IS_DCE3(rdev)) {
- radeon_encoder->hdmi_offset = dig->dig_encoder ?
- R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
- if (ASIC_IS_DCE32(rdev))
- radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
- R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
- } else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
- rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
- radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
- }
-}
-
/*
* enable the HDMI engine
*/
@@ -507,64 +476,57 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
+ u32 hdmi;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
return;
- if (!radeon_encoder->hdmi_offset) {
- r600_hdmi_assign_block(encoder);
- if (!radeon_encoder->hdmi_offset) {
- dev_warn(rdev->dev, "Could not find HDMI block for "
- "0x%x encoder\n", radeon_encoder->encoder_id);
- return;
- }
- }
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
- offset = radeon_encoder->hdmi_offset;
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
- } else if (ASIC_IS_DCE3(rdev)) {
- /* TODO */
- } else if (rdev->family >= CHIP_R600) {
+ /* Older chipsets require setting HDMI and routing manually */
+ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
~AVIVO_TMDSA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0x101);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
~AVIVO_LVTMA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0x105);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
- dev_err(rdev->dev, "Unknown HDMI output type\n");
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
break;
}
+ WREG32(HDMI0_CONTROL + offset, hdmi);
}
- if (rdev->irq.installed
- && rdev->family != CHIP_RS600
- && rdev->family != CHIP_RS690
- && rdev->family != CHIP_RS740
- && !ASIC_IS_DCE4(rdev)) {
+ if (rdev->irq.installed) {
/* if irq is available use it */
- rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ rdev->irq.afmt[dig->afmt->id] = true;
radeon_irq_set(rdev);
-
- r600_audio_disable_polling(encoder);
- } else {
- /* if not fallback to polling */
- r600_audio_enable_polling(encoder);
}
+ dig->afmt->enabled = true;
+
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
}
/*
@@ -575,51 +537,51 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
return;
- offset = radeon_encoder->hdmi_offset;
- if (!offset) {
- dev_err(rdev->dev, "Disabling not enabled HDMI\n");
+ /* Called for ATOM_ENCODER_MODE_HDMI only */
+ if (!dig || !dig->afmt) {
+ WARN_ON(1);
return;
}
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
/* disable irq */
- rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = false;
+ rdev->irq.afmt[dig->afmt->id] = false;
radeon_irq_set(rdev);
- /* disable polling */
- r600_audio_disable_polling(encoder);
-
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
- WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ /* Older chipsets not handled by AtomBIOS */
+ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0,
~AVIVO_TMDSA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, 0,
~AVIVO_LVTMA_CNTL_HDMI_EN);
- WREG32(offset + R600_HDMI_ENABLE, 0);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
break;
default:
- dev_err(rdev->dev, "Unknown HDMI output type\n");
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
break;
}
+ WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
}
- radeon_encoder->hdmi_offset = 0;
- radeon_encoder->hdmi_config_offset = 0;
+ dig->afmt->enabled = false;
}
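With the register rework above, the IEC 60958 channel-status words are now assembled from the HDMI0_60958_CS_* field macros (added in r600d.h below) rather than open-coded shifts. The following standalone, user-space sketch packs the same fields for a 48 kHz, 16-bit stream with category code 0; the local macros are assumptions that simply mirror the kernel definitions below:

#include <stdint.h>
#include <stdio.h>

/* Local copies mirroring the HDMI0_60958_CS_* field encoders from r600d.h. */
#define CS_CATEGORY_CODE(x)       (((x) & 0xff) << 8)
#define CS_SAMPLING_FREQUENCY(x)  (((x) & 0xf) << 24)
#define CS_WORD_LENGTH(x)         (((x) & 0xf) << 0)

int main(void)
{
	uint32_t iec0 = 0, iec1 = 0;

	iec0 |= CS_CATEGORY_CODE(0);
	iec0 |= CS_SAMPLING_FREQUENCY(0x2);	/* 48 kHz per IEC 60958 */
	iec1 |= CS_WORD_LENGTH(0x2);		/* 16 bits per sample */

	/* These are the values the driver would program into
	 * HDMI0_60958_0 and (masked) HDMI0_60958_1. */
	printf("iec0=0x%08x iec1=0x%08x\n", iec0, iec1);
	return 0;
}
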
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index f869897c7456..2b960cb5c18a 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -156,45 +156,10 @@
#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
#define R600_AUDIO_STATUS_BITS 0x73d8
-/* HDMI base register addresses */
-#define R600_HDMI_BLOCK1 0x7400
-#define R600_HDMI_BLOCK2 0x7700
-#define R600_HDMI_BLOCK3 0x7800
-
-/* HDMI registers */
-#define R600_HDMI_ENABLE 0x00
-#define R600_HDMI_STATUS 0x04
-# define R600_HDMI_INT_PENDING (1 << 29)
-#define R600_HDMI_CNTL 0x08
-# define R600_HDMI_INT_EN (1 << 28)
-# define R600_HDMI_INT_ACK (1 << 29)
-#define R600_HDMI_UNKNOWN_0 0x0C
-#define R600_HDMI_AUDIOCNTL 0x10
-#define R600_HDMI_VIDEOCNTL 0x14
-#define R600_HDMI_VERSION 0x18
-#define R600_HDMI_UNKNOWN_1 0x28
-#define R600_HDMI_VIDEOINFOFRAME_0 0x54
-#define R600_HDMI_VIDEOINFOFRAME_1 0x58
-#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
-#define R600_HDMI_VIDEOINFOFRAME_3 0x60
-#define R600_HDMI_32kHz_CTS 0xac
-#define R600_HDMI_32kHz_N 0xb0
-#define R600_HDMI_44_1kHz_CTS 0xb4
-#define R600_HDMI_44_1kHz_N 0xb8
-#define R600_HDMI_48kHz_CTS 0xbc
-#define R600_HDMI_48kHz_N 0xc0
-#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
-#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
-#define R600_HDMI_IEC60958_1 0xd4
-#define R600_HDMI_IEC60958_2 0xd8
-#define R600_HDMI_UNKNOWN_2 0xdc
-#define R600_HDMI_AUDIO_DEBUG_0 0xe0
-#define R600_HDMI_AUDIO_DEBUG_1 0xe4
-#define R600_HDMI_AUDIO_DEBUG_2 0xe8
-#define R600_HDMI_AUDIO_DEBUG_3 0xec
-
-/* HDMI additional config base register addresses */
-#define R600_HDMI_CONFIG1 0x7600
-#define R600_HDMI_CONFIG2 0x7a00
+#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
+/* DCE3.2 second instance starts at 0x7800 */
+#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 59f9c993cc31..025fd5b6c08c 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -219,6 +219,8 @@
#define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0
+#define PIPE_TILING__SHIFT 1
+#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
@@ -483,6 +485,7 @@
#define TC_L2_SIZE(x) ((x)<<5)
#define L2_DISABLE_LATE_HIT (1<<9)
+#define VC_ENHANCE 0x9714
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
@@ -824,6 +827,239 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/* Audio clocks */
+#define DCCG_AUDIO_DTO0_PHASE 0x0514
+#define DCCG_AUDIO_DTO0_MODULE 0x0518
+#define DCCG_AUDIO_DTO0_LOAD 0x051c
+# define DTO_LOAD (1 << 31)
+#define DCCG_AUDIO_DTO0_CNTL 0x0520
+
+#define DCCG_AUDIO_DTO1_PHASE 0x0524
+#define DCCG_AUDIO_DTO1_MODULE 0x0528
+#define DCCG_AUDIO_DTO1_LOAD 0x052c
+#define DCCG_AUDIO_DTO1_CNTL 0x0530
+
+#define DCCG_AUDIO_DTO_SELECT 0x0534
+
+/* digital blocks */
+#define TMDSA_CNTL 0x7880
+# define TMDSA_HDMI_EN (1 << 2)
+#define LVTMA_CNTL 0x7a80
+# define LVTMA_HDMI_EN (1 << 2)
+#define DDIA_CNTL 0x7200
+# define DDIA_HDMI_EN (1 << 2)
+#define DIG0_CNTL 0x75a0
+# define DIG_MODE(x) (((x) & 7) << 8)
+# define DIG_MODE_DP 0
+# define DIG_MODE_LVDS 1
+# define DIG_MODE_TMDS_DVI 2
+# define DIG_MODE_TMDS_HDMI 3
+# define DIG_MODE_SDVO 4
+#define DIG1_CNTL 0x79a0
+
+/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
+ * instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
+ * different due to the new DIG blocks, but also have 2 instances.
+ * DCE 3.0 HDMI blocks are part of each DIG encoder.
+ */
+
+/* rs6xx/rs740/r6xx/dce3 */
+#define HDMI0_CONTROL 0x7400
+/* rs6xx/rs740/r6xx */
+# define HDMI0_ENABLE (1 << 0)
+# define HDMI0_STREAM(x) (((x) & 3) << 2)
+# define HDMI0_STREAM_TMDSA 0
+# define HDMI0_STREAM_LVTMA 1
+# define HDMI0_STREAM_DVOA 2
+# define HDMI0_STREAM_DDIA 3
+/* rs6xx/r6xx/dce3 */
+# define HDMI0_ERROR_ACK (1 << 8)
+# define HDMI0_ERROR_MASK (1 << 9)
+#define HDMI0_STATUS 0x7404
+# define HDMI0_ACTIVE_AVMUTE (1 << 0)
+# define HDMI0_AUDIO_ENABLE (1 << 4)
+# define HDMI0_AZ_FORMAT_WTRIG (1 << 28)
+# define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
+#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
+# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
+# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
+# define HDMI0_AUDIO_TEST_EN (1 << 12)
+# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
+# define HDMI0_60958_CS_UPDATE (1 << 26)
+# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
+#define HDMI0_AUDIO_CRC_CONTROL 0x740c
+# define HDMI0_AUDIO_CRC_EN (1 << 0)
+#define HDMI0_VBI_PACKET_CONTROL 0x7410
+# define HDMI0_NULL_SEND (1 << 0)
+# define HDMI0_GC_SEND (1 << 4)
+# define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI0_INFOFRAME_CONTROL0 0x7414
+# define HDMI0_AVI_INFO_SEND (1 << 0)
+# define HDMI0_AVI_INFO_CONT (1 << 1)
+# define HDMI0_AUDIO_INFO_SEND (1 << 4)
+# define HDMI0_AUDIO_INFO_CONT (1 << 5)
+# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
+# define HDMI0_MPEG_INFO_SEND (1 << 8)
+# define HDMI0_MPEG_INFO_CONT (1 << 9)
+# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
+#define HDMI0_INFOFRAME_CONTROL1 0x7418
+# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
+# define HDMI0_GENERIC0_SEND (1 << 0)
+# define HDMI0_GENERIC0_CONT (1 << 1)
+# define HDMI0_GENERIC0_UPDATE (1 << 2)
+# define HDMI0_GENERIC1_SEND (1 << 4)
+# define HDMI0_GENERIC1_CONT (1 << 5)
+# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI0_GC 0x7428
+# define HDMI0_GC_AVMUTE (1 << 0)
+#define HDMI0_AVI_INFO0 0x7454
+# define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_AVI_INFO_S(x) (((x) & 3) << 8)
+# define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10)
+# define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12)
+# define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define HDMI0_AVI_INFO_Y_RGB 0
+# define HDMI0_AVI_INFO_Y_YCBCR422 1
+# define HDMI0_AVI_INFO_Y_YCBCR444 2
+# define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define HDMI0_AVI_INFO1 0x7458
+# define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO2 0x745c
+# define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO3 0x7460
+# define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define HDMI0_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define HDMI0_MPEG_INFO0 0x7464
+# define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define HDMI0_MPEG_INFO1 0x7468
+# define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define HDMI0_GENERIC0_HDR 0x746c
+#define HDMI0_GENERIC0_0 0x7470
+#define HDMI0_GENERIC0_1 0x7474
+#define HDMI0_GENERIC0_2 0x7478
+#define HDMI0_GENERIC0_3 0x747c
+#define HDMI0_GENERIC0_4 0x7480
+#define HDMI0_GENERIC0_5 0x7484
+#define HDMI0_GENERIC0_6 0x7488
+#define HDMI0_GENERIC1_HDR 0x748c
+#define HDMI0_GENERIC1_0 0x7490
+#define HDMI0_GENERIC1_1 0x7494
+#define HDMI0_GENERIC1_2 0x7498
+#define HDMI0_GENERIC1_3 0x749c
+#define HDMI0_GENERIC1_4 0x74a0
+#define HDMI0_GENERIC1_5 0x74a4
+#define HDMI0_GENERIC1_6 0x74a8
+#define HDMI0_ACR_32_0 0x74ac
+# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_32_1 0x74b0
+# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_44_0 0x74b4
+# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_44_1 0x74b8
+# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_48_0 0x74bc
+# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_48_1 0x74c0
+# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_STATUS_0 0x74c4
+#define HDMI0_ACR_STATUS_1 0x74c8
+#define HDMI0_AUDIO_INFO0 0x74cc
+# define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+#define HDMI0_AUDIO_INFO1 0x74d0
+# define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define HDMI0_60958_0 0x74d4
+# define HDMI0_60958_CS_A(x) (((x) & 1) << 0)
+# define HDMI0_60958_CS_B(x) (((x) & 1) << 1)
+# define HDMI0_60958_CS_C(x) (((x) & 1) << 2)
+# define HDMI0_60958_CS_D(x) (((x) & 3) << 3)
+# define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6)
+# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define HDMI0_60958_1 0x74d8
+# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define HDMI0_ACR_PACKET_CONTROL 0x74dc
+# define HDMI0_ACR_SEND (1 << 0)
+# define HDMI0_ACR_CONT (1 << 1)
+# define HDMI0_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI0_ACR_HW 0
+# define HDMI0_ACR_32 1
+# define HDMI0_ACR_44 2
+# define HDMI0_ACR_48 3
+# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI0_ACR_AUTO_SEND (1 << 12)
+#define HDMI0_RAMP_CONTROL0 0x74e0
+# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL1 0x74e4
+# define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL2 0x74e8
+# define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL3 0x74ec
+# define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+/* HDMI0_60958_2 is r7xx only */
+#define HDMI0_60958_2 0x74f0
+# define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+/* r6xx only; second instance starts at 0x7700 */
+#define HDMI1_CONTROL 0x7700
+#define HDMI1_STATUS 0x7704
+#define HDMI1_AUDIO_PACKET_CONTROL 0x7708
+/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
+#define DCE3_HDMI1_CONTROL 0x7800
+#define DCE3_HDMI1_STATUS 0x7804
+#define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808
+/* DCE3.2 (for interrupts) */
+#define AFMT_STATUS 0x7600
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+
/*
* PM4
*/
@@ -928,6 +1164,7 @@
#define PACKET3_SET_CTL_CONST 0x6F
#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
#define PACKET3_SET_CTL_CONST_END 0x0003e200
+#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 138b95216d8d..fefcca55c1eb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -94,33 +94,38 @@ extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
+extern int radeon_lockup_timeout;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbol;
*/
-#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
-#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
-#define RADEON_IB_POOL_SIZE 16
-#define RADEON_DEBUGFS_MAX_COMPONENTS 32
-#define RADEONFB_CONN_LIMIT 4
-#define RADEON_BIOS_NUM_SCRATCH 8
+#define RADEON_IB_POOL_SIZE 16
+#define RADEON_DEBUGFS_MAX_COMPONENTS 32
+#define RADEONFB_CONN_LIMIT 4
+#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 3
+#define RADEON_NUM_RINGS 3
+
+/* fence seq are set to this number when signaled */
+#define RADEON_FENCE_SIGNALED_SEQ 0LL
+#define RADEON_FENCE_NOTEMITED_SEQ (~0LL)
/* internal ring indices */
/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX 0
+#define RADEON_RING_TYPE_GFX_INDEX 0
/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
/* hardcode those limit for now */
-#define RADEON_VA_RESERVED_SIZE (8 << 20)
-#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
/*
* Errata workarounds.
@@ -253,28 +258,20 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
- atomic_t seq;
- uint32_t last_seq;
- unsigned long last_jiffies;
- unsigned long last_timeout;
- wait_queue_head_t queue;
- struct list_head created;
- struct list_head emitted;
- struct list_head signaled;
+ /* seq is protected by ring emission lock */
+ uint64_t seq;
+ atomic64_t last_seq;
+ unsigned long last_activity;
bool initialized;
};
struct radeon_fence {
struct radeon_device *rdev;
struct kref kref;
- struct list_head list;
/* protected by radeon_fence.lock */
- uint32_t seq;
- bool emitted;
- bool signaled;
+ uint64_t seq;
/* RB, DMA, etc. */
- int ring;
- struct radeon_semaphore *semaphore;
+ unsigned ring;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -285,11 +282,14 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
/*
* Tiling registers
@@ -346,6 +346,9 @@ struct radeon_bo {
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -382,8 +385,11 @@ struct radeon_bo_list {
* alignment).
*/
struct radeon_sa_manager {
+ spinlock_t lock;
struct radeon_bo *bo;
- struct list_head sa_bo;
+ struct list_head *hole;
+ struct list_head flist[RADEON_NUM_RINGS];
+ struct list_head olist;
unsigned size;
uint64_t gpu_addr;
void *cpu_ptr;
@@ -394,10 +400,12 @@ struct radeon_sa_bo;
/* sub-allocation buffer */
struct radeon_sa_bo {
- struct list_head list;
+ struct list_head olist;
+ struct list_head flist;
struct radeon_sa_manager *manager;
- unsigned offset;
- unsigned size;
+ unsigned soffset;
+ unsigned eoffset;
+ struct radeon_fence *fence;
};
/*
@@ -428,42 +436,26 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
/*
* Semaphores.
*/
-struct radeon_ring;
-
-#define RADEON_SEMAPHORE_BO_SIZE 256
-
-struct radeon_semaphore_driver {
- rwlock_t lock;
- struct list_head bo;
-};
-
-struct radeon_semaphore_bo;
-
/* everything here is constant */
struct radeon_semaphore {
- struct list_head list;
+ struct radeon_sa_bo *sa_bo;
+ signed waiters;
uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- struct radeon_semaphore_bo *bo;
};
-struct radeon_semaphore_bo {
- struct list_head list;
- struct radeon_ib *ib;
- struct list_head free;
- struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
- unsigned nused;
-};
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev);
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ bool sync_to[RADEON_NUM_RINGS],
+ int dst_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore);
+ struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
/*
* GART structures, functions & helpers
@@ -560,6 +552,7 @@ struct radeon_unpin_work {
struct r500_irq_stat_regs {
u32 disp_int;
+ u32 hdmi0_status;
};
struct r600_irq_stat_regs {
@@ -568,6 +561,8 @@ struct r600_irq_stat_regs {
u32 disp_int_cont2;
u32 d1grph_int;
u32 d2grph_int;
+ u32 hdmi0_status;
+ u32 hdmi1_status;
};
struct evergreen_irq_stat_regs {
@@ -583,6 +578,12 @@ struct evergreen_irq_stat_regs {
u32 d4grph_int;
u32 d5grph_int;
u32 d6grph_int;
+ u32 afmt_status1;
+ u32 afmt_status2;
+ u32 afmt_status3;
+ u32 afmt_status4;
+ u32 afmt_status5;
+ u32 afmt_status6;
};
union radeon_irq_stat_regs {
@@ -593,7 +594,7 @@ union radeon_irq_stat_regs {
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_HDMI_BLOCKS 2
+#define RADEON_MAX_AFMT_BLOCKS 6
struct radeon_irq {
bool installed;
@@ -605,7 +606,7 @@ struct radeon_irq {
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
- bool hdmi[RADEON_MAX_HDMI_BLOCKS];
+ bool afmt[RADEON_MAX_AFMT_BLOCKS];
spinlock_t sw_lock;
int sw_refcount[RADEON_NUM_RINGS];
union radeon_irq_stat_regs stat_regs;
@@ -625,26 +626,14 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
*/
struct radeon_ib {
- struct radeon_sa_bo sa_bo;
- unsigned idx;
- uint32_t length_dw;
- uint64_t gpu_addr;
- uint32_t *ptr;
- struct radeon_fence *fence;
- unsigned vm_id;
- bool is_const_ib;
-};
-
-/*
- * locking -
- * mutex protects scheduled_ibs, ready, alloc_bm
- */
-struct radeon_ib_pool {
- struct radeon_mutex mutex;
- struct radeon_sa_manager sa_manager;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
+ struct radeon_sa_bo *sa_bo;
+ uint32_t length_dw;
+ uint64_t gpu_addr;
+ uint32_t *ptr;
+ struct radeon_fence *fence;
+ unsigned vm_id;
+ bool is_const_ib;
+ struct radeon_semaphore *semaphore;
};
struct radeon_ring {
@@ -659,10 +648,11 @@ struct radeon_ring {
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
+ unsigned long last_activity;
+ unsigned last_rptr;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
- struct mutex mutex;
bool ready;
u32 ptr_reg_shift;
u32 ptr_reg_mask;
@@ -679,7 +669,7 @@ struct radeon_vm {
unsigned last_pfn;
u64 pt_gpu_addr;
u64 *pt;
- struct radeon_sa_bo sa_bo;
+ struct radeon_sa_bo *sa_bo;
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -756,7 +746,6 @@ struct r600_blit_cp_primitives {
};
struct r600_blit {
- struct mutex mutex;
struct radeon_bo *shader_obj;
struct r600_blit_cp_primitives primitives;
int max_dim;
@@ -766,8 +755,6 @@ struct r600_blit {
u32 vs_offset, ps_offset;
u32 state_offset;
u32 state_len;
- u32 vb_used, vb_total;
- struct radeon_ib *vb_ib;
};
void r600_blit_suspend(struct radeon_device *rdev);
@@ -785,14 +772,14 @@ struct si_rlc {
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size);
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
+ struct radeon_ib *ib, unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_pool_start(struct radeon_device *rdev);
int radeon_ib_pool_suspend(struct radeon_device *rdev);
+int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -800,8 +787,12 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsign
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_ring *ring);
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
@@ -850,8 +841,8 @@ struct radeon_cs_parser {
int chunk_relocs_idx;
int chunk_flags_idx;
int chunk_const_ib_idx;
- struct radeon_ib *ib;
- struct radeon_ib *const_ib;
+ struct radeon_ib ib;
+ struct radeon_ib const_ib;
void *track;
unsigned family;
int parser_error;
@@ -860,7 +851,6 @@ struct radeon_cs_parser {
s32 priority;
};
-extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
@@ -1105,6 +1095,14 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
+struct r600_audio {
+ int channels;
+ int rate;
+ int bits_per_sample;
+ u8 status_bits;
+ u8 category_code;
+};
+
/*
* Benchmarking
*/
@@ -1144,7 +1142,6 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*asic_reset)(struct radeon_device *rdev);
/* ioctl hw specific callback. Some hw might want to perform special
* operation on specific ioctl. For instance on wait idle some hw
@@ -1173,6 +1170,7 @@ struct radeon_asic {
void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
} ring[RADEON_NUM_RINGS];
/* irqs */
struct {
@@ -1251,16 +1249,10 @@ struct radeon_asic {
/*
* Asic structures
*/
-struct r100_gpu_lockup {
- unsigned long last_jiffies;
- u32 last_cp_rptr;
-};
-
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
- struct r100_gpu_lockup lockup;
};
struct r300_asic {
@@ -1268,7 +1260,6 @@ struct r300_asic {
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
- struct r100_gpu_lockup lockup;
};
struct r600_asic {
@@ -1290,7 +1281,6 @@ struct r600_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct rv770_asic {
@@ -1316,7 +1306,6 @@ struct rv770_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct evergreen_asic {
@@ -1343,7 +1332,6 @@ struct evergreen_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- struct r100_gpu_lockup lockup;
};
struct cayman_asic {
@@ -1382,14 +1370,13 @@ struct cayman_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
- struct r100_gpu_lockup lockup;
};
struct si_asic {
unsigned max_shader_engines;
- unsigned max_pipes_per_simd;
unsigned max_tile_pipes;
- unsigned max_simds_per_se;
+ unsigned max_cu_per_sh;
+ unsigned max_sh_per_se;
unsigned max_backends_per_se;
unsigned max_texture_channel_caches;
unsigned max_gprs;
@@ -1400,7 +1387,6 @@ struct si_asic {
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_size;
- unsigned num_shader_engines;
unsigned num_tile_pipes;
unsigned num_backends_per_se;
unsigned backend_disable_mask_per_asic;
@@ -1413,7 +1399,6 @@ struct si_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
- struct r100_gpu_lockup lockup;
};
union radeon_asic_config {
@@ -1516,11 +1501,12 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
- rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
- struct radeon_semaphore_driver semaphore_drv;
+ wait_queue_head_t fence_queue;
+ struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
- struct radeon_ib_pool ib_pool;
+ bool ib_pool_ready;
+ struct radeon_sa_manager ring_tmp_bo;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
@@ -1529,7 +1515,6 @@ struct radeon_device {
struct radeon_mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
- bool gpu_lockup;
bool shutdown;
bool suspend;
bool need_dma32;
@@ -1546,19 +1531,12 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct si_rlc rlc;
struct work_struct hotplug_work;
+ struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
struct mutex vram_mutex;
-
- /* audio stuff */
- bool audio_enabled;
- struct timer_list audio_timer;
- int audio_channels;
- int audio_rate;
- int audio_bits_per_sample;
- uint8_t audio_status_bits;
- uint8_t audio_category_code;
-
+ bool audio_enabled;
+ struct r600_audio audio_status; /* audio stuff */
struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
@@ -1730,7 +1708,6 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
@@ -1739,6 +1716,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -1828,6 +1806,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo);
+/* audio */
+void r600_audio_update_hdmi(struct work_struct *work);
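Editorial note: radeon_device gains an audio_work item (see the struct change above) with r600_audio_update_hdmi() as its handler. A minimal sketch of the usual workqueue wiring, assuming the handler recovers the device with container_of(); the interrupt-side trigger shown here is illustrative, not a hunk from this patch:

/* sketch: defer the HDMI audio update to process context */
void r600_audio_update_hdmi(struct work_struct *work)
{
        struct radeon_device *rdev = container_of(work, struct radeon_device,
                                                  audio_work);
        /* ... re-read the audio status and reprogram the HDMI blocks ... */
}

/* at init time */
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);

/* from the audio/HDMI interrupt handler, where sleeping is not allowed */
schedule_work(&rdev->audio_work);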
/*
* R600 vram scratch functions
@@ -1848,9 +1828,36 @@ int r600_fmt_get_nblocksy(u32 format, u32 h);
/*
* r600 functions used by radeon_encoder.c
*/
+struct radeon_hdmi_acr {
+ u32 clock;
+
+ int n_32khz;
+ int cts_32khz;
+
+ int n_44_1khz;
+ int cts_44_1khz;
+
+ int n_48khz;
+ int cts_48khz;
+
+};
+
+extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
+
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+ u32 tiling_pipe_num,
+ u32 max_rb_num,
+ u32 total_max_rb_num,
+ u32 enabled_rb_mask);
+
+/*
+ * evergreen functions used by radeon_encoder.c
+ */
+
+extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
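Editorial note: the new radeon_hdmi_acr struct above carries the HDMI audio clock regeneration (ACR) N/CTS pairs for the three base sample rates at a given TMDS clock. As a rough illustration of the relationship the sink expects (assuming the standard HDMI formula 128 * fs = f_TMDS * N / CTS; the helper name and units below are illustrative, not part of this patch):

#include <stdint.h>

/* CTS = f_TMDS * N / (128 * fs); clock_hz is the TMDS clock,
 * fs the audio sample rate, n the chosen regeneration value N. */
static uint32_t example_hdmi_cts(uint64_t clock_hz, uint32_t fs, uint32_t n)
{
        return (uint32_t)(clock_hz * n / (128ULL * fs));
}

For example, a 74.25 MHz TMDS clock with 48 kHz audio and N = 6144 yields CTS = 74250, matching the value tabulated in the HDMI specification.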
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index be4dc2ff0e40..f533df5f7d50 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -134,7 +134,6 @@ static struct radeon_asic r100_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -152,6 +151,7 @@ static struct radeon_asic r100_asic = {
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -208,7 +208,6 @@ static struct radeon_asic r200_asic = {
.suspend = &r100_suspend,
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -226,6 +225,7 @@ static struct radeon_asic r200_asic = {
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -282,7 +282,6 @@ static struct radeon_asic r300_asic = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -300,6 +299,7 @@ static struct radeon_asic r300_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -356,7 +356,6 @@ static struct radeon_asic r300_asic_pcie = {
.suspend = &r300_suspend,
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -374,6 +373,7 @@ static struct radeon_asic r300_asic_pcie = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -430,7 +430,6 @@ static struct radeon_asic r420_asic = {
.suspend = &r420_suspend,
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -448,6 +447,7 @@ static struct radeon_asic r420_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -504,7 +504,6 @@ static struct radeon_asic rs400_asic = {
.suspend = &rs400_suspend,
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &r300_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -522,6 +521,7 @@ static struct radeon_asic rs400_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -578,7 +578,6 @@ static struct radeon_asic rs600_asic = {
.suspend = &rs600_suspend,
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -596,6 +595,7 @@ static struct radeon_asic rs600_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -652,7 +652,6 @@ static struct radeon_asic rs690_asic = {
.suspend = &rs690_suspend,
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -670,6 +669,7 @@ static struct radeon_asic rs690_asic = {
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -726,7 +726,6 @@ static struct radeon_asic rv515_asic = {
.suspend = &rv515_suspend,
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -744,6 +743,7 @@ static struct radeon_asic rv515_asic = {
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -800,7 +800,6 @@ static struct radeon_asic r520_asic = {
.suspend = &rv515_suspend,
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
- .gpu_is_lockup = &r300_gpu_is_lockup,
.asic_reset = &rs600_asic_reset,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
@@ -818,6 +817,7 @@ static struct radeon_asic r520_asic = {
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
@@ -874,7 +874,6 @@ static struct radeon_asic r600_asic = {
.suspend = &r600_suspend,
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
@@ -891,6 +890,7 @@ static struct radeon_asic r600_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -946,7 +946,6 @@ static struct radeon_asic rs780_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -964,6 +963,7 @@ static struct radeon_asic rs780_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -1020,7 +1020,6 @@ static struct radeon_asic rv770_asic = {
.suspend = &rv770_suspend,
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
- .gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
@@ -1037,6 +1036,7 @@ static struct radeon_asic rv770_asic = {
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
@@ -1092,7 +1092,6 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1110,6 +1109,7 @@ static struct radeon_asic evergreen_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1165,7 +1165,6 @@ static struct radeon_asic sumo_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1183,6 +1182,7 @@ static struct radeon_asic sumo_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
},
.irq = {
@@ -1238,7 +1238,6 @@ static struct radeon_asic btc_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1256,6 +1255,7 @@ static struct radeon_asic btc_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1321,7 +1321,6 @@ static struct radeon_asic cayman_asic = {
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1340,6 +1339,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1349,6 +1349,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1358,6 +1359,7 @@ static struct radeon_asic cayman_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1413,7 +1415,6 @@ static struct radeon_asic trinity_asic = {
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1432,6 +1433,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1441,6 +1443,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
@@ -1450,6 +1453,7 @@ static struct radeon_asic trinity_asic = {
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
@@ -1515,7 +1519,6 @@ static struct radeon_asic si_asic = {
.fini = &si_fini,
.suspend = &si_suspend,
.resume = &si_resume,
- .gpu_is_lockup = &si_gpu_is_lockup,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
.ioctl_wait_idle = r600_ioctl_wait_idle,
@@ -1534,6 +1537,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1543,6 +1547,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &si_ring_ib_execute,
@@ -1552,6 +1557,7 @@ static struct radeon_asic si_asic = {
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
}
},
.irq = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d9f9f1d8f90..e76a941ef14e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -103,11 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
- struct radeon_ring *cp);
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
- struct r100_gpu_lockup *lockup,
- struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
@@ -159,7 +154,6 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -362,26 +356,20 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-int r600_audio_tmds_index(struct drm_encoder *encoder);
void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-int r600_audio_channels(struct radeon_device *rdev);
-int r600_audio_bits_per_sample(struct radeon_device *rdev);
-int r600_audio_rate(struct radeon_device *rdev);
-uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-uint8_t r600_audio_category_code(struct radeon_device *rdev);
-void r600_audio_schedule_polling(struct radeon_device *rdev);
-void r600_audio_enable_polling(struct drm_encoder *encoder);
-void r600_audio_disable_polling(struct drm_encoder *encoder);
+struct r600_audio r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
-void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_sa_bo **vb);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+ struct radeon_sa_bo *vb);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages);
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
/*
@@ -446,7 +434,6 @@ int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f6e69b8c06c6..b1e3820df363 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -444,7 +444,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*/
if ((dev->pdev->device == 0x9498) &&
(dev->pdev->subsystem_vendor == 0x1682) &&
- (dev->pdev->subsystem_device == 0x2452)) {
+ (dev->pdev->subsystem_device == 0x2452) &&
+ (i2c_bus->valid == false) &&
+ !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
struct radeon_device *rdev = dev->dev_private;
*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index fef7b722b05d..364f5b1a04b9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -103,7 +103,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -115,7 +115,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2cad9fde92fc..576f4f6919f2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1561,6 +1561,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
(rdev->pdev->subsystem_device == 0x4150)) {
/* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
+ } else if ((rdev->pdev->device == 0x4c66) &&
+ (rdev->pdev->subsystem_vendor == 0x1002) &&
+ (rdev->pdev->subsystem_device == 0x4c66)) {
+ /* SAM440ep RV250 embedded board */
+ rdev->mode_info.connector_table = CT_SAM440EP;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
@@ -2134,6 +2139,67 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
+ case CT_SAM440EP:
+ DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+ rdev->mode_info.connector_table);
+ /* LVDS */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_LCD1_SUPPORT,
+ 0),
+ ATOM_DEVICE_LCD1_SUPPORT);
+ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
+ /* DVI-I - secondary dac, int tmds */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_DFP1_SUPPORT,
+ 0),
+ ATOM_DEVICE_DFP1_SUPPORT);
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT2_SUPPORT,
+ 2),
+ ATOM_DEVICE_CRT2_SUPPORT);
+ radeon_add_legacy_connector(dev, 1,
+ ATOM_DEVICE_DFP1_SUPPORT |
+ ATOM_DEVICE_CRT2_SUPPORT,
+ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
+ /* VGA - primary dac */
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ radeon_add_legacy_connector(dev, 2,
+ ATOM_DEVICE_CRT1_SUPPORT,
+ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
+ /* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
+ radeon_add_legacy_encoder(dev,
+ radeon_get_encoder_enum(dev,
+ ATOM_DEVICE_TV1_SUPPORT,
+ 2),
+ ATOM_DEVICE_TV1_SUPPORT);
+ radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+ DRM_MODE_CONNECTOR_SVIDEO,
+ &ddc_i2c,
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3c2e7a000a2a..2914c5761cfc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -84,6 +84,62 @@ static void radeon_property_change_mode(struct drm_encoder *encoder)
crtc->x, crtc->y, crtc->fb);
}
}
+
+int radeon_get_monitor_bpc(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector;
+ int bpc = 8;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (radeon_connector->use_digital) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
+ drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ }
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (connector->display_info.bpc)
+ bpc = connector->display_info.bpc;
+ else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
+ bpc = 6;
+ else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
+ bpc = 8;
+ }
+ break;
+ }
+ return bpc;
+}
+
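Editorial note: radeon_get_monitor_bpc() above returns the sink's bits per color, defaulting to 8 and dropping to 6 for panels that flag 6-bit color. A hedged sketch of a hypothetical caller clamping a requested depth to what the monitor reports (the helper name and clamp policy are illustrative, not from this patch):

/* illustrative: never program a deeper color depth than the sink reports */
static int example_clamp_bpc(struct drm_connector *connector, int requested_bpc)
{
        int monitor_bpc = radeon_get_monitor_bpc(connector);

        return min(requested_bpc, monitor_bpc);
}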
static void
radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
{
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0ebb7d4796fa..ef67e181377b 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1827,14 +1827,10 @@ void radeon_do_release(struct drm_device * dev)
r600_do_cleanup_cp(dev);
else
radeon_do_cleanup_cp(dev);
- if (dev_priv->me_fw) {
- release_firmware(dev_priv->me_fw);
- dev_priv->me_fw = NULL;
- }
- if (dev_priv->pfp_fw) {
- release_firmware(dev_priv->pfp_fw);
- dev_priv->pfp_fw = NULL;
- }
+ release_firmware(dev_priv->me_fw);
+ dev_priv->me_fw = NULL;
+ release_firmware(dev_priv->pfp_fw);
+ dev_priv->pfp_fw = NULL;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5cac83278338..142f89462aa4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -118,46 +118,36 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
bool sync_to_ring[RADEON_NUM_RINGS] = { };
+ bool need_sync = false;
int i, r;
for (i = 0; i < p->nrelocs; i++) {
+ struct radeon_fence *fence;
+
if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
continue;
- if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
- struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
- if (!radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- }
+ fence = p->relocs[i].robj->tbo.sync_obj;
+ if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
+ sync_to_ring[fence->ring] = true;
+ need_sync = true;
}
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready)
- continue;
-
- if (!p->ib->fence->semaphore) {
- r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
- if (r)
- return r;
- }
-
- r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
- if (r)
- return r;
- radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
- radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+ if (!need_sync) {
+ return 0;
+ }
- r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
- if (r)
- return r;
- radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
- radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+ r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
+ if (r) {
+ return r;
}
- return 0;
+
+ return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
+ sync_to_ring, p->ring);
}
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
@@ -172,6 +162,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
/* get chunks */
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
+ p->ib.sa_bo = NULL;
+ p->ib.semaphore = NULL;
+ p->const_ib.sa_bo = NULL;
+ p->const_ib.semaphore = NULL;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
p->chunk_flags_idx = -1;
@@ -252,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
}
}
- if ((p->cs_flags & RADEON_CS_USE_VM) &&
- !p->rdev->vm_manager.enabled) {
- DRM_ERROR("VM not active on asic!\n");
- return -EINVAL;
- }
-
- /* we only support VM on SI+ */
- if ((p->rdev->family >= CHIP_TAHITI) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
- DRM_ERROR("VM required on SI+!\n");
- return -EINVAL;
- }
+ /* these are KMS only */
+ if (p->rdev) {
+ if ((p->cs_flags & RADEON_CS_USE_VM) &&
+ !p->rdev->vm_manager.enabled) {
+ DRM_ERROR("VM not active on asic!\n");
+ return -EINVAL;
+ }
- if (radeon_cs_get_ring(p, ring, priority))
- return -EINVAL;
+ /* we only support VM on SI+ */
+ if ((p->rdev->family >= CHIP_TAHITI) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+ DRM_ERROR("VM required on SI+!\n");
+ return -EINVAL;
+ }
+ if (radeon_cs_get_ring(p, ring, priority))
+ return -EINVAL;
+ }
/* deal with non-vm */
if ((p->chunk_ib_idx != -1) &&
@@ -278,11 +274,16 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
- p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
- p->chunks[p->chunk_ib_idx].kpage[1] == NULL)
- return -ENOMEM;
+ if ((p->rdev->flags & RADEON_IS_AGP)) {
+ p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+ kfree(p->chunks[i].kpage[0]);
+ kfree(p->chunks[i].kpage[1]);
+ return -ENOMEM;
+ }
+ }
p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
p->chunks[p->chunk_ib_idx].last_copied_page = -1;
@@ -305,10 +306,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
-
- if (!error && parser->ib)
+ if (!error)
ttm_eu_fence_buffer_objects(&parser->validated,
- parser->ib->fence);
+ parser->ib.fence);
else
ttm_eu_backoff_reservation(&parser->validated);
@@ -323,12 +323,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->relocs_ptr);
for (i = 0; i < parser->nchunks; i++) {
kfree(parser->chunks[i].kdata);
- kfree(parser->chunks[i].kpage[0]);
- kfree(parser->chunks[i].kpage[1]);
+ if ((parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
}
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
+ radeon_ib_free(parser->rdev, &parser->const_ib);
}
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -354,7 +357,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get ib !\n");
return r;
}
- parser->ib->length_dw = ib_chunk->length_dw;
+ parser->ib.length_dw = ib_chunk->length_dw;
r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
@@ -369,8 +372,8 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
if (r) {
DRM_ERROR("Failed to synchronize rings !\n");
}
- parser->ib->vm_id = 0;
- r = radeon_ib_schedule(rdev, parser->ib);
+ parser->ib.vm_id = 0;
+ r = radeon_ib_schedule(rdev, &parser->ib);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
@@ -421,14 +424,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get const ib !\n");
return r;
}
- parser->const_ib->is_const_ib = true;
- parser->const_ib->length_dw = ib_chunk->length_dw;
+ parser->const_ib.is_const_ib = true;
+ parser->const_ib.length_dw = ib_chunk->length_dw;
/* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
+ if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
ib_chunk->length_dw * 4)) {
return -EFAULT;
}
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
if (r) {
return r;
}
@@ -445,13 +448,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get ib !\n");
return r;
}
- parser->ib->length_dw = ib_chunk->length_dw;
+ parser->ib.length_dw = ib_chunk->length_dw;
/* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+ if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
ib_chunk->length_dw * 4)) {
return -EFAULT;
}
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
if (r) {
return r;
}
@@ -472,34 +475,44 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
- parser->const_ib->vm_id = vm->id;
+ parser->const_ib.vm_id = vm->id;
 /* ib pool is bound at 0 in the virtual address space, so gpu_addr is the
* offset inside the pool bo
*/
- parser->const_ib->gpu_addr = parser->const_ib->sa_bo.offset;
- r = radeon_ib_schedule(rdev, parser->const_ib);
+ parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
+ r = radeon_ib_schedule(rdev, &parser->const_ib);
if (r)
goto out;
}
- parser->ib->vm_id = vm->id;
+ parser->ib.vm_id = vm->id;
 /* ib pool is bound at 0 in the virtual address space, so gpu_addr is the
* offset inside the pool bo
*/
- parser->ib->gpu_addr = parser->ib->sa_bo.offset;
- parser->ib->is_const_ib = false;
- r = radeon_ib_schedule(rdev, parser->ib);
+ parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+ parser->ib.is_const_ib = false;
+ r = radeon_ib_schedule(rdev, &parser->ib);
out:
if (!r) {
if (vm->fence) {
radeon_fence_unref(&vm->fence);
}
- vm->fence = radeon_fence_ref(parser->ib->fence);
+ vm->fence = radeon_fence_ref(parser->ib.fence);
}
mutex_unlock(&fpriv->vm.mutex);
return r;
}
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+ if (r == -EDEADLK) {
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ }
+ return r;
+}
+
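Editorial note: every error path of the CS ioctl now runs through radeon_cs_handle_lockup(): a wait that failed with -EDEADLK triggers radeon_gpu_reset() and is reported to userspace as -EAGAIN so the submission can simply be retried. A hypothetical userspace-side retry loop (libdrm call shown for illustration only; the real submit path in Mesa differs):

/* hypothetical userspace sketch: resubmit the CS when the kernel
 * reset the GPU and asked us to try again (fd and cs set up elsewhere) */
int r;
do {
        r = drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
} while (r == -EAGAIN);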
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
@@ -521,6 +534,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -529,6 +543,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -542,6 +557,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
out:
radeon_cs_parser_fini(&parser, r);
+ r = radeon_cs_handle_lockup(rdev, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
@@ -559,7 +575,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
size = PAGE_SIZE;
}
- if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
ibc->user_ptr + (i * PAGE_SIZE),
size))
return -EFAULT;
@@ -567,15 +583,16 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
return 0;
}
-int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
int new_page;
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
int i;
int size = PAGE_SIZE;
+ bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
- if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
ibc->user_ptr + (i * PAGE_SIZE),
PAGE_SIZE)) {
p->parser_error = -EFAULT;
@@ -583,14 +600,16 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
}
}
- new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
-
if (pg_idx == ibc->last_page_index) {
size = (ibc->length_dw * 4) % PAGE_SIZE;
- if (size == 0)
- size = PAGE_SIZE;
+ if (size == 0)
+ size = PAGE_SIZE;
}
+ new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
+ if (copy1)
+ ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
+
if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
ibc->user_ptr + (pg_idx * PAGE_SIZE),
size)) {
@@ -598,11 +617,37 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
return 0;
}
- /* copy to IB here */
- memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+ /* copy to IB for non single case */
+ if (!copy1)
+ memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
ibc->last_copied_page = pg_idx;
ibc->kpage_idx[new_page] = pg_idx;
return new_page;
}
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ u32 pg_idx, pg_offset;
+ u32 idx_value = 0;
+ int new_page;
+
+ pg_idx = (idx * 4) / PAGE_SIZE;
+ pg_offset = (idx * 4) % PAGE_SIZE;
+
+ if (ibc->kpage_idx[0] == pg_idx)
+ return ibc->kpage[0][pg_offset/4];
+ if (ibc->kpage_idx[1] == pg_idx)
+ return ibc->kpage[1][pg_offset/4];
+
+ new_page = radeon_cs_update_pages(p, pg_idx);
+ if (new_page < 0) {
+ p->parser_error = new_page;
+ return 0;
+ }
+
+ idx_value = ibc->kpage[new_page][pg_offset/4];
+ return idx_value;
+}
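Editorial note: radeon_get_ib_value() above resolves a dword index in the user command stream to one of the two cached kernel pages, faulting a page in through radeon_cs_update_pages() when neither cache slot holds it. The index arithmetic, as a standalone sketch (PAGE_SIZE assumed to be 4096 here purely for illustration):

#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096u

/* dword index -> (page index, dword offset within that page) */
static void example_ib_index(uint32_t idx, uint32_t *pg_idx, uint32_t *pg_dw)
{
        *pg_idx = (idx * 4) / EXAMPLE_PAGE_SIZE;
        *pg_dw  = ((idx * 4) % EXAMPLE_PAGE_SIZE) / 4;
}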
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5992502a3448..066c98b888a5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -193,7 +193,7 @@ int radeon_wb_init(struct radeon_device *rdev)
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+ RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
@@ -225,9 +225,9 @@ int radeon_wb_init(struct radeon_device *rdev)
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
- if (radeon_no_wb == 1)
+ if (radeon_no_wb == 1) {
rdev->wb.enabled = false;
- else {
+ } else {
if (rdev->flags & RADEON_IS_AGP) {
/* often unreliable on AGP */
rdev->wb.enabled = false;
@@ -237,8 +237,9 @@ int radeon_wb_init(struct radeon_device *rdev)
} else {
rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */
- if (rdev->family >= CHIP_R600)
+ if (rdev->family >= CHIP_R600) {
rdev->wb.use_event = true;
+ }
}
}
/* always use writeback/events on NI, APUs */
@@ -696,6 +697,11 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+ .set_gpu_state = radeon_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = radeon_switcheroo_can_switch,
+};
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
@@ -714,7 +720,6 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- rdev->gpu_lockup = false;
rdev->accel_working = false;
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
@@ -724,21 +729,18 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
- radeon_mutex_init(&rdev->ib_pool.mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- mutex_init(&rdev->ring[i].mutex);
+ mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
- rwlock_init(&rdev->fence_lock);
- rwlock_init(&rdev->semaphore_drv.lock);
- INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
/* initialize vm here */
rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20;
@@ -814,10 +816,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- vga_switcheroo_register_client(rdev->pdev,
- radeon_switcheroo_set_state,
- NULL,
- radeon_switcheroo_can_switch);
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
r = radeon_init(rdev);
if (r)
@@ -914,9 +913,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
/* evict vram memory */
radeon_bo_evict_vram(rdev);
+
+ mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_last(rdev, i);
+ radeon_fence_wait_empty_locked(rdev, i);
+ mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
@@ -955,7 +957,6 @@ int radeon_resume_kms(struct drm_device *dev)
console_unlock();
return -1;
}
- pci_set_master(dev->pdev);
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
@@ -988,9 +989,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int r;
int resched;
- /* Prevent CS ioctl from interfering */
- radeon_mutex_lock(&rdev->cs_mutex);
-
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1005,8 +1003,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
- radeon_mutex_unlock(&rdev->cs_mutex);
-
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0a1d4bd65edc..64a008d14493 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -573,24 +573,6 @@ static const char *encoder_names[37] = {
"INTERNAL_VCE"
};
-static const char *connector_names[15] = {
- "Unknown",
- "VGA",
- "DVI-I",
- "DVI-D",
- "DVI-A",
- "Composite",
- "S-video",
- "LVDS",
- "Component",
- "DIN",
- "DisplayPort",
- "HDMI-A",
- "HDMI-B",
- "TV",
- "eDP",
-};
-
static const char *hpd_names[6] = {
"HPD1",
"HPD2",
@@ -613,7 +595,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
- DRM_INFO(" %s\n", connector_names[connector->connector_type]);
+ DRM_INFO(" %s\n", drm_get_connector_name(connector));
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
@@ -1243,6 +1225,93 @@ void radeon_update_display_priority(struct radeon_device *rdev)
}
+/*
+ * Allocate hdmi structs and determine register offsets
+ */
+static void radeon_afmt_init(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
+ rdev->mode_info.afmt[i] = NULL;
+
+ if (ASIC_IS_DCE6(rdev)) {
+ /* todo */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* DCE4/5 has 6 audio blocks tied to DIG encoders */
+ /* DCE4.1 has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ if (!ASIC_IS_DCE41(rdev)) {
+ rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[2]) {
+ rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ rdev->mode_info.afmt[2]->id = 2;
+ }
+ rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[3]) {
+ rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ rdev->mode_info.afmt[3]->id = 3;
+ }
+ rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[4]) {
+ rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ rdev->mode_info.afmt[4]->id = 4;
+ }
+ rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[5]) {
+ rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ rdev->mode_info.afmt[5]->id = 5;
+ }
+ }
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* DCE3.x has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ } else if (ASIC_IS_DCE2(rdev)) {
+ /* DCE2 has at least 1 routable audio block */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ /* r6xx has 2 routable audio blocks */
+ if (rdev->family >= CHIP_R600) {
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ }
+ }
+}
+
+static void radeon_afmt_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
+ kfree(rdev->mode_info.afmt[i]);
+ rdev->mode_info.afmt[i] = NULL;
+ }
+}
+
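Editorial note: the per-block allocations in radeon_afmt_init() above all follow the same pattern (kzalloc, record the register offset, record the id). A compressed sketch of the DCE4/5 branch using a hypothetical offset table (the table name and num_afmt variable are illustrative, not part of this patch):

/* illustrative: the DCE4/5 branch above, rewritten as a table walk */
static const u32 example_dce4_offsets[] = {
        EVERGREEN_CRTC0_REGISTER_OFFSET, EVERGREEN_CRTC1_REGISTER_OFFSET,
        EVERGREEN_CRTC2_REGISTER_OFFSET, EVERGREEN_CRTC3_REGISTER_OFFSET,
        EVERGREEN_CRTC4_REGISTER_OFFSET, EVERGREEN_CRTC5_REGISTER_OFFSET,
};

for (i = 0; i < num_afmt; i++) {        /* num_afmt: 2 on DCE4.1, 6 otherwise */
        rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
        if (rdev->mode_info.afmt[i]) {
                rdev->mode_info.afmt[i]->offset = example_dce4_offsets[i];
                rdev->mode_info.afmt[i]->id = i;
        }
}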
int radeon_modeset_init(struct radeon_device *rdev)
{
int i;
@@ -1251,7 +1320,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
drm_mode_config_init(rdev->ddev);
rdev->mode_info.mode_config_initialized = true;
- rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
+ rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
if (ASIC_IS_DCE5(rdev)) {
rdev->ddev->mode_config.max_width = 16384;
@@ -1303,6 +1372,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* initialize hpd */
radeon_hpd_init(rdev);
+ /* setup afmt */
+ radeon_afmt_init(rdev);
+
/* Initialize power management */
radeon_pm_init(rdev);
@@ -1319,6 +1391,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
+ radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ef7bb3f6ecae..2c4d53fd20c5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -57,9 +57,11 @@
* 2.13.0 - virtual memory support, streamout
 * 2.14.0 - add evergreen tiling information
* 2.15.0 - add max_pipes query
+ * 2.16.0 - fix evergreen 2D tiled surface calculation
+ * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 15
+#define KMS_DRIVER_MINOR 17
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -105,6 +107,11 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
@@ -128,6 +135,7 @@ int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -177,6 +185,9 @@ module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, radeon_msi, int, 0444);
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -329,7 +340,8 @@ static const struct file_operations radeon_driver_kms_fops = {
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_PRIME,
.dev_priv_size = 0,
.load = radeon_driver_load_kms,
.firstopen = radeon_driver_firstopen_kms,
@@ -364,6 +376,12 @@ static struct drm_driver kms_driver = {
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = radeon_mode_dumb_destroy,
.fops = &radeon_driver_kms_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = radeon_gem_prime_export,
+ .gem_prime_import = radeon_gem_prime_import,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4bd36a354fbe..11f5f402d22c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,98 +63,82 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (fence->emitted) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ /* we are protected by the ring emission mutex */
+ if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
- if (!rdev->ring[fence->ring].ready)
- /* FIXME: cp is not running assume everythings is done right
- * away
- */
- radeon_fence_write(rdev, fence->seq, fence->ring);
- else
- radeon_fence_ring_emit(rdev, fence->ring, fence);
-
+ fence->seq = ++rdev->fence_drv[fence->ring].seq;
+ radeon_fence_ring_emit(rdev, fence->ring, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
- fence->emitted = true;
- list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
{
- struct radeon_fence *fence;
- struct list_head *i, *n;
- uint32_t seq;
+ uint64_t seq, last_seq;
+ unsigned count_loop = 0;
bool wake = false;
- unsigned long cjiffies;
- seq = radeon_fence_read(rdev, ring);
- if (seq != rdev->fence_drv[ring].last_seq) {
- rdev->fence_drv[ring].last_seq = seq;
- rdev->fence_drv[ring].last_jiffies = jiffies;
- rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- } else {
- cjiffies = jiffies;
- if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
- cjiffies -= rdev->fence_drv[ring].last_jiffies;
- if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
- /* update the timeout */
- rdev->fence_drv[ring].last_timeout -= cjiffies;
- } else {
- /* the 500ms timeout is elapsed we should test
- * for GPU lockup
- */
- rdev->fence_drv[ring].last_timeout = 1;
- }
- } else {
- /* wrap around update last jiffies, we will just wait
- * a little longer
- */
- rdev->fence_drv[ring].last_jiffies = cjiffies;
+ /* Note there is a scenario here for an infinite loop, but it is
+ * very unlikely to happen. For it to happen, the current polling
+ * process needs to be interrupted by another process, and that
+ * other process needs to update last_seq between the atomic read
+ * and the xchg of the current process.
+ *
+ * Moreover, for this to become an infinite loop, new fences need
+ * to keep being signaled, i.e. radeon_fence_read needs to return
+ * a different value each time for both the currently polling
+ * process and the other process that xchgs last_seq between the
+ * atomic read and xchg of the current process. And the value the
+ * other process sets as last_seq must be higher than the seq
+ * value we just read, which means the current process needs to be
+ * interrupted after radeon_fence_read and before the atomic xchg.
+ *
+ * To be even safer, we count the number of times we loop and bail
+ * out after 10 iterations, accepting the fact that we might have
+ * temporarily set last_seq not to the true last seq but to an
+ * older one.
+ */
+ last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ do {
+ seq = radeon_fence_read(rdev, ring);
+ seq |= last_seq & 0xffffffff00000000LL;
+ if (seq < last_seq) {
+ seq += 0x100000000LL;
}
- return false;
- }
- n = NULL;
- list_for_each(i, &rdev->fence_drv[ring].emitted) {
- fence = list_entry(i, struct radeon_fence, list);
- if (fence->seq == seq) {
- n = i;
+
+ if (seq == last_seq) {
break;
}
- }
- /* all fence previous to this one are considered as signaled */
- if (n) {
- i = n;
- do {
- n = i->prev;
- list_move_tail(i, &rdev->fence_drv[ring].signaled);
- fence = list_entry(i, struct radeon_fence, list);
- fence->signaled = true;
- i = n;
- } while (i != &rdev->fence_drv[ring].emitted);
+ /* If we loop around we don't want to return without
+ * checking if a fence is signaled, as it means that the
+ * seq we just read is different from the previous one.
+ */
wake = true;
+ last_seq = seq;
+ if ((count_loop++) > 10) {
+ /* We looped too many times; bail out, accepting
+ * the fact that we might have set an older fence
+ * seq than the current real last seq signaled
+ * by the hw.
+ */
+ break;
+ }
+ } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+ if (wake) {
+ rdev->fence_drv[ring].last_activity = jiffies;
+ wake_up_all(&rdev->fence_queue);
}
- return wake;
}
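Editorial note: fence bookkeeping moves from 32-bit to 64-bit sequence numbers, but the hardware still writes only 32 bits, so radeon_fence_process() above splices the value read back onto the upper half of the last known 64-bit seq and bumps it by 2^32 when the low word has wrapped. The same arithmetic as a standalone sketch:

#include <stdint.h>

/* extend a 32-bit hw sequence number against the last known 64-bit one */
static uint64_t example_extend_seq(uint64_t last_seq, uint32_t hw_seq)
{
        uint64_t seq = (last_seq & 0xffffffff00000000ULL) | hw_seq;

        if (seq < last_seq)     /* the low 32 bits wrapped around */
                seq += 0x100000000ULL;
        return seq;
}

For example, with last_seq = 0x1fffffff0 and a hardware read of 0x5, the helper returns 0x200000005.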
static void radeon_fence_destroy(struct kref *kref)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
+ struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
- list_del(&fence->list);
- fence->emitted = false;
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- if (fence->semaphore)
- radeon_semaphore_free(fence->rdev, fence->semaphore);
+ fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
kfree(fence);
}
@@ -162,171 +146,342 @@ int radeon_fence_create(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
- unsigned long irq_flags;
-
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->emitted = false;
- (*fence)->signaled = false;
- (*fence)->seq = 0;
+ (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
(*fence)->ring = ring;
- (*fence)->semaphore = NULL;
- INIT_LIST_HEAD(&(*fence)->list);
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-bool radeon_fence_signaled(struct radeon_fence *fence)
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+ u64 seq, unsigned ring)
{
- unsigned long irq_flags;
- bool signaled = false;
-
- if (!fence)
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
-
- if (fence->rdev->gpu_lockup)
+ }
+ /* poll new last sequence at least once */
+ radeon_fence_process(rdev, ring);
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
+ }
+ return false;
+}
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
- signaled = fence->signaled;
- /* if we are shuting down report all fence as signaled */
- if (fence->rdev->shutdown) {
- signaled = true;
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+ if (!fence) {
+ return true;
}
- if (!fence->emitted) {
+ if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
WARN(1, "Querying an unemitted fence : %p !\n", fence);
- signaled = true;
+ return true;
}
- if (!signaled) {
- radeon_fence_poll_locked(fence->rdev, fence->ring);
- signaled = fence->signaled;
+ if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ return true;
+ }
+ if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return true;
}
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- return signaled;
+ return false;
+}
+
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+ unsigned ring, bool intr, bool lock_ring)
+{
+ unsigned long timeout, last_activity;
+ uint64_t seq;
+ unsigned i;
+ bool signaled;
+ int r;
+
+ while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ if (!rdev->ring[ring].ready) {
+ return -EBUSY;
+ }
+
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = rdev->fence_drv[ring].last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last 500ms
+ * anyway we will just wait for the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* Save the current last activity value, used to check for GPU lockups */
+ last_activity = rdev->fence_drv[ring].last_activity;
+
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
+ radeon_irq_kms_sw_irq_get(rdev, ring);
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ }
+ radeon_irq_kms_sw_irq_put(rdev, ring);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ /* check if sequence value has changed since last_activity */
+ if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ continue;
+ }
+
+ if (lock_ring) {
+ mutex_lock(&rdev->ring_lock);
+ }
+
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != rdev->fence_drv[ring].last_activity) {
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+ target_seq, seq);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ return -EDEADLK;
+ }
+
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ }
+ }
+ return 0;
}
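
The timeout juggling above effectively means "sleep until roughly RADEON_FENCE_JIFFIES_TIMEOUT after the ring's last recorded activity, then re-check for a lockup"; a small standalone sketch of the same arithmetic, assuming only the standard jiffies helpers (the helper name and numbers are made up):

/* Illustration only: with HZ=1000 and a 500ms fence timeout, a ring that
 * was last active 200ms ago is only waited on for ~300ms before the
 * lockup check runs again.
 */
static unsigned long wait_chunk(unsigned long now, unsigned long last_activity,
				unsigned long fence_timeout)
{
	unsigned long oldest = now - fence_timeout;

	if (time_after(last_activity, oldest))
		return last_activity - oldest;	/* normal case */
	return 1;	/* jiffies wrapped or the ring has been idle too long */
}
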
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
- struct radeon_device *rdev;
- unsigned long irq_flags, timeout;
- u32 seq;
int r;
if (fence == NULL) {
WARN(1, "Querying an invalid fence : %p !\n", fence);
- return 0;
+ return -EINVAL;
}
- rdev = fence->rdev;
- if (radeon_fence_signaled(fence)) {
- return 0;
+
+ r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+ fence->ring, intr, true);
+ if (r) {
+ return r;
}
- timeout = rdev->fence_drv[fence->ring].last_timeout;
-retry:
- /* save current sequence used to check for GPU lockup */
- seq = rdev->fence_drv[fence->ring].last_seq;
- trace_radeon_fence_wait_begin(rdev->ddev, seq);
- if (intr) {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
- r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
- radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
- if (unlikely(r < 0)) {
- return r;
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
+}
+
+bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+ return true;
}
- } else {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
- r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
- radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
}
- trace_radeon_fence_wait_end(rdev->ddev, seq);
- if (unlikely(!radeon_fence_signaled(fence))) {
- /* we were interrupted for some reason and fence isn't
- * isn't signaled yet, resume wait
- */
- if (r) {
- timeout = r;
- goto retry;
+ return false;
+}
+
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+ u64 *target_seq, bool intr)
+{
+ unsigned long timeout, last_activity, tmp;
+ unsigned i, ring = RADEON_NUM_RINGS;
+ bool signaled;
+ int r;
+
+ for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i]) {
+ continue;
+ }
+
+ /* use the most recent one as indicator */
+ if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+ last_activity = rdev->fence_drv[i].last_activity;
}
- /* don't protect read access to rdev->fence_drv[t].last_seq
- * if we experiencing a lockup the value doesn't change
+
+ /* For lockup detection just pick the lowest ring we are
+ * actively waiting for
*/
- if (seq == rdev->fence_drv[fence->ring].last_seq &&
- radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
- /* good news we believe it's a lockup */
- printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
- fence->seq, seq);
- /* FIXME: what should we do ? marking everyone
- * as signaled for now
+ if (i < ring) {
+ ring = i;
+ }
+ }
+
+ /* nothing to wait for ? */
+ if (ring == RADEON_NUM_RINGS) {
+ return 0;
+ }
+
+ while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+ * either way we just wait for the minimum amount and then check for a lockup
*/
- rdev->gpu_lockup = true;
- r = radeon_gpu_reset(rdev);
- if (r)
- return r;
- radeon_fence_write(rdev, fence->seq, fence->ring);
- rdev->gpu_lockup = false;
+ timeout = 1;
+ }
+
+ trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_get(rdev, i);
+ }
+ }
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ }
+ }
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ mutex_lock(&rdev->ring_lock);
+ for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+ tmp = rdev->fence_drv[i].last_activity;
+ }
+ }
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != tmp) {
+ last_activity = tmp;
+ mutex_unlock(&rdev->ring_lock);
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+ target_seq[ring]);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ mutex_unlock(&rdev->ring_lock);
+ return -EDEADLK;
+ }
+ mutex_unlock(&rdev->ring_lock);
}
- timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- rdev->fence_drv[fence->ring].last_jiffies = jiffies;
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- goto retry;
}
return 0;
}
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
+ uint64_t seq[RADEON_NUM_RINGS];
+ unsigned i;
int r;
- if (rdev->gpu_lockup) {
- return 0;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ seq[i] = 0;
+
+ if (!fences[i]) {
+ continue;
+ }
+
+ if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ /* something was already signaled */
+ return 0;
+ }
+
+ if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ seq[i] = fences[i]->seq;
+ }
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
+
+ r = radeon_fence_wait_any_seq(rdev, seq, intr);
+ if (r) {
+ return r;
}
- fence = list_entry(rdev->fence_drv[ring].emitted.next,
- struct radeon_fence, list);
- radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
- return r;
+ return 0;
}
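
A hedged usage sketch of the new wait-any entry point (the wrapper below is invented): the caller passes at most one outstanding fence per ring, NULL entries are skipped, and -EDEADLK reports a suspected lockup that the caller is expected to escalate to a GPU reset:

/* Illustration only -- not part of the patch. */
static int wait_for_any_ring(struct radeon_device *rdev,
			     struct radeon_fence *oldest[RADEON_NUM_RINGS])
{
	int r = radeon_fence_wait_any(rdev, oldest, true);

	if (r == -EDEADLK) {
		/* a ring looks locked up; the caller triggers a reset */
	}
	return r;
}
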
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
- int r;
-
- if (rdev->gpu_lockup) {
- return 0;
+ uint64_t seq;
+
+ /* We are not protected by the ring lock when reading the current seq,
+ * but that's ok: worst case we return too early when we could still
+ * have waited.
+ */
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+ if (seq >= rdev->fence_drv[ring].seq) {
+ /* nothing to wait for, last_seq is
already the last emitted fence */
+ return -ENOENT;
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
- }
- fence = list_entry(rdev->fence_drv[ring].emitted.prev,
- struct radeon_fence, list);
- radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
- return r;
+ return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+ /* We are not protected by the ring lock when reading the current seq,
+ * but it's ok since wait empty is called from places where no more
+ * activity can be scheduled, so there won't be concurrent access
+ * to the seq value.
+ */
+ return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
+ ring, false, false);
}
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
@@ -345,49 +500,27 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
-void radeon_fence_process(struct radeon_device *rdev, int ring)
-{
- unsigned long irq_flags;
- bool wake;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev, ring);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- if (wake) {
- wake_up_all(&rdev->fence_drv[ring].queue);
- }
-}
-
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
- int not_processed = 0;
-
- read_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (!rdev->fence_drv[ring].initialized) {
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
+ uint64_t emitted;
+
+ /* We are not protected by ring lock when reading the last sequence
+ * but it's ok to report slightly wrong fence count here.
+ */
+ radeon_fence_process(rdev, ring);
+ emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* to avoid a 32-bit wrap around */
+ if (emitted > 0x10000000) {
+ emitted = 0x10000000;
}
-
- if (!list_empty(&rdev->fence_drv[ring].emitted)) {
- struct list_head *ptr;
- list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
- /* count up to 3, that's enought info */
- if (++not_processed >= 3)
- break;
- }
- }
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return not_processed;
+ return (unsigned)emitted;
}
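
The emitted count is now just the software sequence minus the last hardware write-back, clamped; for example fence_drv.seq == 1005 with last_seq == 1002 reports 3 outstanding fences. A self-contained sketch of the clamp (names invented, not part of the patch):

/* Illustration only. */
static unsigned outstanding_fences(uint64_t sw_seq, uint64_t hw_last_seq)
{
	uint64_t emitted = sw_seq - hw_last_seq;

	/* cap the result so a stale hw_last_seq cannot blow it up */
	if (emitted > 0x10000000)
		emitted = 0x10000000;
	return (unsigned)emitted;
}
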
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
uint64_t index;
int r;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event) {
rdev->fence_drv[ring].scratch_reg = 0;
@@ -396,7 +529,6 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
if (r) {
dev_err(rdev->dev, "fence failed to get scratch register\n");
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return r;
}
index = RADEON_WB_SCRATCH_OFFSET +
@@ -405,11 +537,10 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+ radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
rdev->fence_drv[ring].initialized = true;
- DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+ dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
@@ -418,24 +549,20 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
- atomic_set(&rdev->fence_drv[ring].seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
- init_waitqueue_head(&rdev->fence_drv[ring].queue);
+ rdev->fence_drv[ring].seq = 0;
+ atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+ rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
int radeon_fence_driver_init(struct radeon_device *rdev)
{
- unsigned long irq_flags;
int ring;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ init_waitqueue_head(&rdev->fence_queue);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
radeon_fence_driver_init_ring(rdev, ring);
}
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
@@ -444,19 +571,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
- unsigned long irq_flags;
int ring;
+ mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- radeon_fence_wait_last(rdev, ring);
- wake_up_all(&rdev->fence_drv[ring].queue);
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_fence_wait_empty_locked(rdev, ring);
+ wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
rdev->fence_drv[ring].initialized = false;
}
+ mutex_unlock(&rdev->ring_lock);
}
@@ -469,7 +595,6 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_fence *fence;
int i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -477,14 +602,10 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
continue;
seq_printf(m, "--- ring %d ---\n", i);
- seq_printf(m, "Last signaled fence 0x%08X\n",
- radeon_fence_read(rdev, i));
- if (!list_empty(&rdev->fence_drv[i].emitted)) {
- fence = list_entry(rdev->fence_drv[i].emitted.prev,
- struct radeon_fence, list);
- seq_printf(m, "Last emitted fence %p with 0x%08X\n",
- fence, fence->seq);
- }
+ seq_printf(m, "Last signaled fence 0x%016llx\n",
+ (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+ seq_printf(m, "Last emitted 0x%016llx\n",
+ rdev->fence_drv[i].seq);
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c58a036233fb..84b648a7ddd8 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -80,7 +80,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->gart.robj);
+ NULL, &rdev->gart.robj);
if (r) {
return r;
}
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
rdev->vm_manager.enabled = false;
/* mark first vm as always in use, it's the system one */
+ /* allocate enough for 2 full VM pts */
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- rdev->vm_manager.max_pfn * 8,
+ rdev->vm_manager.max_pfn * 8 * 2,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -326,7 +327,7 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
list_del_init(&vm->list);
vm->id = -1;
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
vm->pt = NULL;
list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -395,7 +396,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
- RADEON_GPU_PAGE_SIZE);
+ RADEON_GPU_PAGE_SIZE, false);
if (r) {
if (list_empty(&rdev->vm_manager.lru_vm)) {
return r;
@@ -404,10 +405,8 @@ retry:
radeon_vm_unbind(rdev, vm_evict);
goto retry;
}
- vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
- vm->pt += (vm->sa_bo.offset >> 3);
- vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
- vm->pt_gpu_addr += vm->sa_bo.offset;
+ vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
+ vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
retry_id:
@@ -428,14 +427,14 @@ retry_id:
/* do hw bind */
r = rdev->vm_manager.funcs->bind(rdev, vm, id);
if (r) {
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
return r;
}
rdev->vm_manager.use_bitmap |= 1 << id;
vm->id = id;
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
- &rdev->ib_pool.sa_manager.bo->tbo.mem);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+ &rdev->ring_tmp_bo.bo->tbo.mem);
}
/* object have to be reserved */
@@ -478,12 +477,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
mutex_lock(&vm->mutex);
if (last_pfn > vm->last_pfn) {
- /* grow va space 32M by 32M */
- unsigned align = ((32 << 20) >> 12) - 1;
+ /* release mutex and lock in the right order */
+ mutex_unlock(&vm->mutex);
radeon_mutex_lock(&rdev->cs_mutex);
- radeon_vm_unbind_locked(rdev, vm);
+ mutex_lock(&vm->mutex);
+ /* and check again */
+ if (last_pfn > vm->last_pfn) {
+ /* grow va space 32M by 32M */
+ unsigned align = ((32 << 20) >> 12) - 1;
+ radeon_vm_unbind_locked(rdev, vm);
+ vm->last_pfn = (last_pfn + align) & ~align;
+ }
radeon_mutex_unlock(&rdev->cs_mutex);
- vm->last_pfn = (last_pfn + align) & ~align;
}
head = &vm->va;
last_offset = 0;
@@ -551,7 +556,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
/* nothing to do if vm isn't bound */
if (vm->id == -1)
- return 0;;
+ return 0;
bo_va = radeon_bo_va(bo, vm);
if (bo_va == NULL) {
@@ -597,8 +602,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
if (bo_va == NULL)
return 0;
- mutex_lock(&vm->mutex);
radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&vm->mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
radeon_mutex_unlock(&rdev->cs_mutex);
list_del(&bo_va->vm_list);
@@ -629,11 +634,19 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
- vm->last_pfn = 0;
+ /* SI requires equal sized PTs for all VMs, so always set
+ * last_pfn to max_pfn. Cayman allows variable sized
+ * PTs so we can grow them as needed. Once we switch
+ * to two-level PTs we can unify this again.
+ */
+ if (rdev->family >= CHIP_TAHITI)
+ vm->last_pfn = rdev->vm_manager.max_pfn;
+ else
+ vm->last_pfn = 0;
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
- r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+ r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
return r;
}
@@ -643,19 +656,18 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int r;
- mutex_lock(&vm->mutex);
-
radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&vm->mutex);
radeon_vm_unbind_locked(rdev, vm);
radeon_mutex_unlock(&rdev->cs_mutex);
/* remove all bo */
- r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+ bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
- radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
if (!list_empty(&vm->va)) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c7008b5210f7..21ec9f5653ce 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -42,6 +42,8 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
if (robj) {
+ if (robj->gem_base.import_attach)
+ drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
radeon_bo_unref(&robj);
}
}
@@ -59,7 +61,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
@@ -91,7 +93,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (!domain) {
/* Do nothings */
- printk(KERN_WARNING "Set domain withou domain !\n");
+ printk(KERN_WARNING "Set domain without domain !\n");
return 0;
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
@@ -154,6 +156,17 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
radeon_bo_unreserve(rbo);
}
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
+ if (r == -EDEADLK) {
+ radeon_mutex_lock(&rdev->cs_mutex);
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ r = -EAGAIN;
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ }
+ return r;
+}
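
The helper above gives the GEM ioctls a single place to turn a suspected lockup (-EDEADLK) into a GPU reset followed by -EAGAIN, so userspace simply retries the request; a hedged sketch of the intended caller-side flow (the wrapper is invented, not part of the patch):

/* Illustration only. */
static int example_ioctl_tail(struct radeon_device *rdev, int r)
{
	r = radeon_gem_handle_lockup(rdev, r);
	if (r == -EAGAIN) {
		/* the GPU was reset; userspace is expected to resubmit */
	}
	return r;
}
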
/*
* GEM ioctls.
@@ -210,12 +223,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
args->initial_domain, false,
false, &gobj);
if (r) {
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
@@ -245,6 +260,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
@@ -276,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -301,12 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
break;
}
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -319,9 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
- if (robj->rdev->asic->ioctl_wait_idle)
- robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+ if (rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 65060b77c805..5df58d1aba06 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -73,6 +73,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false;
+ rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
/* Clear bits */
@@ -108,6 +109,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
rdev->irq.pflip[i] = false;
+ rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
}
@@ -170,6 +172,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
int r = 0;
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
spin_lock_init(&rdev->irq.sw_lock);
for (i = 0; i < rdev->num_crtc; i++)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3c2628b14d56..5c58d7d90cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -57,8 +57,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
dev->dev_private = (void *)rdev;
- pci_set_master(dev->pdev);
-
/* update BUS flag */
if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
@@ -275,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_MAX_PIPES:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_pipes_per_simd;
+ value = rdev->config.si.max_cu_per_sh;
else if (rdev->family >= CHIP_CAYMAN)
value = rdev->config.cayman.max_pipes_per_simd;
else if (rdev->family >= CHIP_CEDAR)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 42db254f6bb0..a0c82229e8f0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -369,6 +369,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
goto error;
}
+ memset(&props, 0, sizeof(props));
props.max_brightness = MAX_RADEON_LEVEL;
props.type = BACKLIGHT_RAW;
bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f7eb5d8b9fd3..5b10ffd7bb2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -210,6 +210,7 @@ enum radeon_connector_table {
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
+ CT_SAM440EP
};
enum radeon_dvo_chip {
@@ -219,12 +220,20 @@ enum radeon_dvo_chip {
struct radeon_fbdev;
+struct radeon_afmt {
+ bool enabled;
+ int offset;
+ bool last_buffer_filled_status;
+ int id;
+};
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
+ struct radeon_afmt *afmt[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -363,6 +372,7 @@ struct radeon_encoder_atom_dig {
int dpms_mode;
uint8_t backlight_level;
int panel_mode;
+ struct radeon_afmt *afmt;
};
struct radeon_encoder_atom_dac {
@@ -384,10 +394,6 @@ struct radeon_encoder {
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
- int hdmi_offset;
- int hdmi_config_offset;
- int hdmi_audio_workaround;
- int hdmi_buffer_status;
bool is_ext_encoder;
u16 caps;
};
@@ -476,6 +482,7 @@ extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
+extern int radeon_get_monitor_bpc(struct drm_connector *connector);
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index df6a4dbd93f8..830f1a7b486f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -104,7 +104,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
- struct radeon_bo **bo_ptr)
+ struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@@ -120,6 +120,8 @@ int radeon_bo_create(struct radeon_device *rdev,
}
if (kernel) {
type = ttm_bo_type_kernel;
+ } else if (sg) {
+ type = ttm_bo_type_sg;
} else {
type = ttm_bo_type_device;
}
@@ -155,7 +157,7 @@ retry:
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, 0, !kernel, NULL,
- acc_size, &radeon_ttm_bo_destroy);
+ acc_size, sg, &radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f9104be88d7c..17fb99f177cf 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -111,9 +111,10 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
bool no_wait);
extern int radeon_bo_create(struct radeon_device *rdev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain,
+ struct sg_table *sg,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
@@ -146,6 +147,17 @@ extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
/*
* sub allocation
*/
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
+
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain);
@@ -157,9 +169,15 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align);
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo *sa_bo);
+ struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence);
+#if defined(CONFIG_DEBUG_FS)
+extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index caa55d68f319..5b37e283ec38 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,10 +252,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_lock(&rdev->ring[i].mutex);
- }
+ mutex_lock(&rdev->ring_lock);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -273,13 +270,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
} else {
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
if (ring->ready) {
- struct radeon_fence *fence;
- radeon_ring_alloc(rdev, ring, 64);
- radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
- radeon_fence_emit(rdev, fence);
- radeon_ring_commit(rdev, ring);
- radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
+ radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
}
radeon_unmap_vram_bos(rdev);
@@ -311,10 +302,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_unlock(&rdev->ring[i].mutex);
- }
+ mutex_unlock(&rdev->ring_lock);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
@@ -813,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
int i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- not_processed += radeon_fence_count_emitted(rdev, i);
- if (not_processed >= 3)
- break;
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (ring->ready) {
+ not_processed += radeon_fence_count_emitted(rdev, i);
+ if (not_processed >= 3)
+ break;
+ }
}
if (not_processed >= 3) { /* should upclock */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
new file mode 100644
index 000000000000..6bef46ace831
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+#include "drmP.h"
+#include "drm.h"
+
+#include "radeon.h"
+#include "radeon_drm.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct radeon_bo *bo = attachment->dmabuf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+ int npages = bo->tbo.num_pages;
+ struct sg_table *sg;
+ int nents;
+
+ mutex_lock(&dev->struct_mutex);
+ sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
+
+static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+
+ if (bo->gem_base.export_dma_buf == dma_buf) {
+ DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
+ bo->gem_base.export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
+ }
+}
+
+static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ if (bo->vmapping_count) {
+ bo->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+ &bo->dma_buf_vmap);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ bo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+
+ mutex_lock(&dev->struct_mutex);
+ bo->vmapping_count--;
+ if (bo->vmapping_count == 0) {
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+static const struct dma_buf_ops radeon_dmabuf_ops = {
+ .map_dma_buf = radeon_gem_map_dma_buf,
+ .unmap_dma_buf = radeon_gem_unmap_dma_buf,
+ .release = radeon_gem_dmabuf_release,
+ .kmap = radeon_gem_kmap,
+ .kmap_atomic = radeon_gem_kmap_atomic,
+ .kunmap = radeon_gem_kunmap,
+ .kunmap_atomic = radeon_gem_kunmap_atomic,
+ .mmap = radeon_gem_prime_mmap,
+ .vmap = radeon_gem_prime_vmap,
+ .vunmap = radeon_gem_prime_vunmap,
+};
+
+static int radeon_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct radeon_bo **pbo)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_bo *bo;
+ int ret;
+
+ ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+ RADEON_GEM_DOMAIN_GTT, sg, pbo);
+ if (ret)
+ return ret;
+ bo = *pbo;
+ bo->gem_base.driver_private = bo;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&bo->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
+ return 0;
+}
+
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
+{
+ struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ int ret = 0;
+
+ ret = radeon_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
+ /* pin buffer into GTT */
+ ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+ if (ret) {
+ radeon_bo_unreserve(bo);
+ return ERR_PTR(ret);
+ }
+ radeon_bo_unreserve(bo);
+ return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct radeon_bo *bo;
+ int ret;
+
+ if (dma_buf->ops == &radeon_dmabuf_ops) {
+ bo = dma_buf->priv;
+ if (bo->gem_base.dev == dev) {
+ drm_gem_object_reference(&bo->gem_base);
+ return &bo->gem_base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
+ if (ret)
+ goto fail_unmap;
+
+ bo->gem_base.import_attach = attach;
+
+ return &bo->gem_base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
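
A hedged usage sketch of the new PRIME entry points (the wrapper and both device pointers are invented): export pins the BO into GTT and wraps it in a dma-buf, while import either re-uses the same BO (same device) or attaches to the buffer and builds a GTT BO backed by the imported sg table:

/* Illustration only -- not part of the patch. */
static int share_bo(struct drm_device *src, struct drm_gem_object *obj,
		    struct drm_device *dst, struct drm_gem_object **out)
{
	struct dma_buf *buf = radeon_gem_prime_export(src, obj, 0);

	if (IS_ERR(buf))
		return PTR_ERR(buf);
	*out = radeon_gem_prime_import(dst, buf);
	return IS_ERR(*out) ? PTR_ERR(*out) : 0;
}
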
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cc33b3d7c33b..983658c91358 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -24,6 +24,7 @@
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
+ * Christian König
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -33,151 +34,42 @@
#include "radeon.h"
#include "atom.h"
-int radeon_debugfs_ib_init(struct radeon_device *rdev);
-int radeon_debugfs_ring_init(struct radeon_device *rdev);
-
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- u32 pg_idx, pg_offset;
- u32 idx_value = 0;
- int new_page;
-
- pg_idx = (idx * 4) / PAGE_SIZE;
- pg_offset = (idx * 4) % PAGE_SIZE;
-
- if (ibc->kpage_idx[0] == pg_idx)
- return ibc->kpage[0][pg_offset/4];
- if (ibc->kpage_idx[1] == pg_idx)
- return ibc->kpage[1][pg_offset/4];
-
- new_page = radeon_cs_update_pages(p, pg_idx);
- if (new_page < 0) {
- p->parser_error = new_page;
- return 0;
- }
-
- idx_value = ibc->kpage[new_page][pg_offset/4];
- return idx_value;
-}
-
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
-{
-#if DRM_DEBUG_CODE
- if (ring->count_dw <= 0) {
- DRM_ERROR("radeon: writting more dword to ring than expected !\n");
- }
-#endif
- ring->ring[ring->wptr++] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
- ring->ring_free_dw--;
-}
-
/*
* IB.
*/
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- bool done = false;
-
- /* only free ib which have been emited */
- if (ib->fence && ib->fence->emitted) {
- if (radeon_fence_signaled(ib->fence)) {
- radeon_fence_unref(&ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo);
- done = true;
- }
- }
- return done;
-}
+int radeon_debugfs_sa_init(struct radeon_device *rdev);
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size)
+ struct radeon_ib *ib, unsigned size)
{
- struct radeon_fence *fence;
- unsigned cretry = 0;
- int r = 0, i, idx;
-
- *ib = NULL;
- /* align size on 256 bytes */
- size = ALIGN(size, 256);
+ int r;
- r = radeon_fence_create(rdev, &fence, ring);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
- dev_err(rdev->dev, "failed to create fence for new IB\n");
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- idx = rdev->ib_pool.head_id;
-retry:
- if (cretry > 5) {
- dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return -ENOMEM;
- }
- cretry++;
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
- if (rdev->ib_pool.ibs[idx].fence == NULL) {
- r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
- &rdev->ib_pool.ibs[idx].sa_bo,
- size, 256);
- if (!r) {
- *ib = &rdev->ib_pool.ibs[idx];
- (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
- (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- (*ib)->gpu_addr += (*ib)->sa_bo.offset;
- (*ib)->fence = fence;
- (*ib)->vm_id = 0;
- (*ib)->is_const_ib = false;
- /* ib are most likely to be allocated in a ring fashion
- * thus rdev->ib_pool.head_id should be the id of the
- * oldest ib
- */
- rdev->ib_pool.head_id = (1 + idx);
- rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- return 0;
- }
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- }
- /* this should be rare event, ie all ib scheduled none signaled yet.
- */
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
- r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
- if (!r) {
- goto retry;
- }
- /* an error happened */
- break;
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ r = radeon_fence_create(rdev, &ib->fence, ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
+ return r;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ ib->vm_id = 0;
+ ib->is_const_ib = false;
+ ib->semaphore = NULL;
+
+ return 0;
}
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ib *tmp = *ib;
-
- *ib = NULL;
- if (tmp == NULL) {
- return;
- }
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (tmp->fence && !tmp->fence->emitted) {
- radeon_sa_bo_free(rdev, &tmp->sa_bo);
- radeon_fence_unref(&tmp->fence);
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -187,14 +79,14 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
- DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+ dev_err(rdev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
- DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
@@ -205,74 +97,90 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- struct radeon_sa_manager tmp;
- int i, r;
+ int r;
- r = radeon_sa_bo_manager_init(rdev, &tmp,
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_sa_bo_manager_fini(rdev, &tmp);
- return 0;
- }
-
- rdev->ib_pool.sa_manager = tmp;
- INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- rdev->ib_pool.ibs[i].fence = NULL;
- rdev->ib_pool.ibs[i].idx = i;
- rdev->ib_pool.ibs[i].length_dw = 0;
- INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
- }
- rdev->ib_pool.head_id = 0;
- rdev->ib_pool.ready = true;
- DRM_INFO("radeon: ib pool ready.\n");
-
- if (radeon_debugfs_ib_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for IB !\n");
- }
- if (radeon_debugfs_ring_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- unsigned i;
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
- radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
- }
- radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
- rdev->ib_pool.ready = false;
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_pool_start(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+}
+
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
}
/*
* Ring.
*/
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (ring->count_dw <= 0) {
+ DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ }
+#endif
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
+}
+
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
/* r1xx-r5xx only has CP ring */
@@ -319,7 +227,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
+ r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
if (r)
return r;
}
@@ -332,10 +240,10 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
{
int r;
- mutex_lock(&ring->mutex);
+ mutex_lock(&rdev->ring_lock);
r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
return r;
}
return 0;
@@ -360,13 +268,85 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, ring);
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
}
-void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_undo(struct radeon_ring *ring)
{
ring->wptr = ring->wptr_old;
- mutex_unlock(&ring->mutex);
+}
+
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_undo(ring);
+ mutex_unlock(&rdev->ring_lock);
+}
+
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ int r;
+
+ radeon_ring_free_size(rdev, ring);
+ if (ring->rptr == ring->wptr) {
+ r = radeon_ring_alloc(rdev, ring, 1);
+ if (!r) {
+ radeon_ring_write(ring, ring->nop);
+ radeon_ring_commit(rdev, ring);
+ }
+ }
+}
+
+void radeon_ring_lockup_update(struct radeon_ring *ring)
+{
+ ring->last_rptr = ring->rptr;
+ ring->last_activity = jiffies;
+}
+
+/**
+ * radeon_ring_test_lockup() - check if the ring is locked up by recording information
+ * @rdev: radeon device structure
+ * @ring: radeon_ring structure holding ring information
+ *
+ * We don't need to initialize the lockup tracking information, as we will either
+ * see the CP rptr move to a different value or see a jiffies wrap around, either
+ * of which forces initialization of the lockup tracking information.
+ *
+ * A possible false positive is if we get called after a while with last_cp_rptr ==
+ * the current CP rptr; even if it's unlikely, it might happen. To avoid this, if
+ * the elapsed time since the last call is longer than 2 seconds we return false
+ * and update the tracking information. Because of this the caller must call
+ * radeon_ring_test_lockup several times in less than 2 seconds for a lockup to be
+ * reported; the fencing code should be cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something, so we don't
+ * get a false positive when the CP is simply given nothing to do.
+ *
+ **/
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ unsigned long cjiffies, elapsed;
+ uint32_t rptr;
+
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, ring->last_activity)) {
+ /* likely a wrap around */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+ if (ring->rptr != ring->last_rptr) {
+ /* CP is still working no lockup */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
+ if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+ return true;
+ }
+ /* give a chance to the GPU ... */
+ return false;
}
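
As the comment above notes, a single call cannot report a lockup; the detector only fires when called repeatedly while the read pointer stays put. A hedged sketch of the typical caller, matching how the fence code is expected to poll (the wrapper name is invented, not part of the patch):

/* Illustration only. */
static bool example_ring_is_lockup(struct radeon_device *rdev,
				   struct radeon_ring *ring)
{
	/* nudge the CP so an idle ring is not mistaken for a dead one */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
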
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
@@ -385,8 +365,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
- &ring->ring_obj);
+ RADEON_GEM_DOMAIN_GTT,
+ NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
@@ -411,6 +391,9 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
+ if (radeon_debugfs_ring_init(rdev, ring)) {
+ DRM_ERROR("Failed to register debugfs file for rings !\n");
+ }
return 0;
}
@@ -419,11 +402,12 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
int r;
struct radeon_bo *ring_obj;
- mutex_lock(&ring->mutex);
+ mutex_lock(&rdev->ring_lock);
ring_obj = ring->ring_obj;
+ ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -476,59 +460,48 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
- unsigned i;
- if (ib == NULL) {
- return 0;
- }
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
- }
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
return 0;
+
}
-static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
-static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
#endif
-int radeon_debugfs_ring_init(struct radeon_device *rdev)
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- if (rdev->family >= CHIP_CAYMAN)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
- ARRAY_SIZE(radeon_debugfs_ring_info_list));
- else
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
-#else
- return 0;
+ unsigned i;
+ for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
+ struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
+ int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
+ unsigned r;
+
+ if (&rdev->ring[ridx] != ring)
+ continue;
+
+ r = radeon_debugfs_add_files(rdev, info, 1);
+ if (r)
+ return r;
+ }
#endif
+ return 0;
}
-int radeon_debugfs_ib_init(struct radeon_device *rdev)
+int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
- radeon_debugfs_ib_idx[i] = i;
- radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
- radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
- radeon_debugfs_ib_list[i].driver_features = 0;
- radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
- }
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
- RADEON_IB_POOL_SIZE);
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 4cce47e7dc0d..32059b745728 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -27,23 +27,45 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole", and we always try to allocate
+ * after the last allocated bo. The principle is that in a linear GPU ring
+ * progression, what comes after "last" is the oldest bo we allocated and thus
+ * the first one that should no longer be in use by the GPU.
+ *
+ * If that's not the case we skip over the bo after "last" to the closest
+ * done bo, if such a one exists. If none exists and we are not asked to
+ * block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of each ring.
+ * We just wait for any of those fences to complete.
+ */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
+
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain)
{
- int r;
+ int i, r;
+ spin_lock_init(&sa_manager->lock);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
- INIT_LIST_HEAD(&sa_manager->sa_bo);
+ sa_manager->hole = &sa_manager->olist;
+ INIT_LIST_HEAD(&sa_manager->olist);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ INIT_LIST_HEAD(&sa_manager->flist[i]);
+ }
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+ RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
@@ -57,11 +79,15 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
{
struct radeon_sa_bo *sa_bo, *tmp;
- if (!list_empty(&sa_manager->sa_bo)) {
- dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ if (!list_empty(&sa_manager->olist)) {
+ sa_manager->hole = &sa_manager->olist,
+ radeon_sa_bo_try_free(sa_manager);
+ if (!list_empty(&sa_manager->olist)) {
+ dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ }
}
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
- list_del_init(&sa_bo->list);
+ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+ radeon_sa_bo_remove_locked(sa_bo);
}
radeon_bo_unref(&sa_manager->bo);
sa_manager->size = 0;
@@ -113,77 +139,248 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
return r;
}
-/*
- * Principe is simple, we keep a list of sub allocation in offset
- * order (first entry has offset == 0, last entry has the highest
- * offset).
- *
- * When allocating new object we first check if there is room at
- * the end total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset+object_size >=
- * alloc_size, this object then become the sub object we return.
- *
- * Alignment can't be bigger than page size
- */
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+ struct radeon_sa_manager *sa_manager = sa_bo->manager;
+ if (sa_manager->hole == &sa_bo->olist) {
+ sa_manager->hole = sa_bo->olist.prev;
+ }
+ list_del_init(&sa_bo->olist);
+ list_del_init(&sa_bo->flist);
+ radeon_fence_unref(&sa_bo->fence);
+ kfree(sa_bo);
+}
+
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (sa_manager->hole->next == &sa_manager->olist)
+ return;
+
+ sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
+ list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+ if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
+ return;
+ }
+ radeon_sa_bo_remove_locked(sa_bo);
+ }
+}
+
+static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole != &sa_manager->olist) {
+ return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
+ }
+ return 0;
+}
+
+static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole->next != &sa_manager->olist) {
+ return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
+ }
+ return sa_manager->size;
+}
+
+static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align)
+{
+ unsigned soffset, eoffset, wasted;
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+ wasted = (align - (soffset % align)) % align;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ soffset += wasted;
+
+ sa_bo->manager = sa_manager;
+ sa_bo->soffset = soffset;
+ sa_bo->eoffset = soffset + size;
+ list_add(&sa_bo->olist, sa_manager->hole);
+ INIT_LIST_HEAD(&sa_bo->flist);
+ sa_manager->hole = &sa_bo->olist;
+ return true;
+ }
+ return false;
+}
+
+static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
+ struct radeon_fence **fences,
+ unsigned *tries)
+{
+ struct radeon_sa_bo *best_bo = NULL;
+ unsigned i, soffset, best, tmp;
+
+ /* if hole points to the end of the buffer */
+ if (sa_manager->hole->next == &sa_manager->olist) {
+ /* try again with its beginning */
+ sa_manager->hole = &sa_manager->olist;
+ return true;
+ }
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ /* to handle wrap around we add sa_manager->size */
+ best = sa_manager->size * 2;
+ /* go over all fence lists and try to find the sa_bo
+ * closest to the current last
+ */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_sa_bo *sa_bo;
+
+ if (list_empty(&sa_manager->flist[i])) {
+ continue;
+ }
+
+ sa_bo = list_first_entry(&sa_manager->flist[i],
+ struct radeon_sa_bo, flist);
+
+ if (!radeon_fence_signaled(sa_bo->fence)) {
+ fences[i] = sa_bo->fence;
+ continue;
+ }
+
+ /* limit the number of tries each ring gets */
+ if (tries[i] > 2) {
+ continue;
+ }
+
+ tmp = sa_bo->soffset;
+ if (tmp < soffset) {
+ /* wrap around, pretend it's after */
+ tmp += sa_manager->size;
+ }
+ tmp -= soffset;
+ if (tmp < best) {
+ /* this sa bo is the closest one */
+ best = tmp;
+ best_bo = sa_bo;
+ }
+ }
+
+ if (best_bo) {
+ ++tries[best_bo->fence->ring];
+ sa_manager->hole = best_bo->olist.prev;
+
+ /* we knew that this one is signaled,
+ so it's safe to remove it */
+ radeon_sa_bo_remove_locked(best_bo);
+ return true;
+ }
+ return false;
+}
+
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align)
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block)
{
- struct radeon_sa_bo *tmp;
- struct list_head *head;
- unsigned offset = 0, wasted = 0;
+ struct radeon_fence *fences[RADEON_NUM_RINGS];
+ unsigned tries[RADEON_NUM_RINGS];
+ int i, r = -ENOMEM;
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
- /* no one ? */
- head = sa_manager->sa_bo.prev;
- if (list_empty(&sa_manager->sa_bo)) {
- goto out;
+ *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+ if ((*sa_bo) == NULL) {
+ return -ENOMEM;
}
+ (*sa_bo)->manager = sa_manager;
+ (*sa_bo)->fence = NULL;
+ INIT_LIST_HEAD(&(*sa_bo)->olist);
+ INIT_LIST_HEAD(&(*sa_bo)->flist);
- /* look for a hole big enough */
- offset = 0;
- list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
- /* room before this object ? */
- if ((tmp->offset - offset) >= size) {
- head = tmp->list.prev;
- goto out;
+ spin_lock(&sa_manager->lock);
+ do {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ fences[i] = NULL;
+ tries[i] = 0;
}
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
+
+ do {
+ radeon_sa_bo_try_free(sa_manager);
+
+ if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
+ size, align)) {
+ spin_unlock(&sa_manager->lock);
+ return 0;
+ }
+
+ /* see if we can skip over some allocations */
+ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
+ if (block) {
+ spin_unlock(&sa_manager->lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
+ spin_lock(&sa_manager->lock);
+ if (r) {
+ /* if we have nothing to wait for we
+ are practically out of memory */
+ if (r == -ENOENT) {
+ r = -ENOMEM;
+ }
+ goto out_err;
+ }
}
- offset += wasted;
- }
- /* room at the end ? */
- head = sa_manager->sa_bo.prev;
- tmp = list_entry(head, struct radeon_sa_bo, list);
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
- }
- offset += wasted;
- if ((sa_manager->size - offset) < size) {
- /* failed to find somethings big enough */
- return -ENOMEM;
+ } while (block);
+
+out_err:
+ spin_unlock(&sa_manager->lock);
+ kfree(*sa_bo);
+ *sa_bo = NULL;
+ return r;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence)
+{
+ struct radeon_sa_manager *sa_manager;
+
+ if (sa_bo == NULL || *sa_bo == NULL) {
+ return;
}
-out:
- sa_bo->manager = sa_manager;
- sa_bo->offset = offset;
- sa_bo->size = size;
- list_add(&sa_bo->list, head);
- return 0;
+ sa_manager = (*sa_bo)->manager;
+ spin_lock(&sa_manager->lock);
+ if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ (*sa_bo)->fence = radeon_fence_ref(fence);
+ list_add_tail(&(*sa_bo)->flist,
+ &sa_manager->flist[fence->ring]);
+ } else {
+ radeon_sa_bo_remove_locked(*sa_bo);
+ }
+ spin_unlock(&sa_manager->lock);
+ *sa_bo = NULL;
}
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+#if defined(CONFIG_DEBUG_FS)
+void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m)
{
- list_del_init(&sa_bo->list);
+ struct radeon_sa_bo *i;
+
+ spin_lock(&sa_manager->lock);
+ list_for_each_entry(i, &sa_manager->olist, olist) {
+ if (&i->olist == sa_manager->hole) {
+ seq_printf(m, ">");
+ } else {
+ seq_printf(m, " ");
+ }
+ seq_printf(m, "[0x%08x 0x%08x] size %8d",
+ i->soffset, i->eoffset, i->eoffset - i->soffset);
+ if (i->fence) {
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ i->fence->seq, i->fence->ring);
+ }
+ seq_printf(m, "\n");
+ }
+ spin_unlock(&sa_manager->lock);
}
+#endif
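
For readers following the reworked sub-allocator above, here is a minimal standalone sketch of the soffset/eoffset/alignment arithmetic that radeon_sa_bo_try_alloc() performs. It is not part of the patch: the struct and function names below are made up, and the fence tracking and per-ring free lists are deliberately left out so only the hole arithmetic remains.

/* Minimal userspace model of the hole-based sub-allocator.
 * sa_model/sa_try_alloc are illustrative names only; fences and the
 * per-ring free lists are omitted on purpose.
 */
#include <stdbool.h>
#include <stdio.h>

struct sa_model {
	unsigned size;     /* total managed size */
	unsigned soffset;  /* end of the last allocation (start of the hole) */
	unsigned eoffset;  /* start of the next allocation, or size */
};

static bool sa_try_alloc(struct sa_model *m, unsigned size, unsigned align,
			 unsigned *out)
{
	unsigned wasted = (align - (m->soffset % align)) % align;

	if ((m->eoffset - m->soffset) >= (size + wasted)) {
		*out = m->soffset + wasted;
		m->soffset = *out + size;   /* hole now starts after this bo */
		return true;
	}
	return false;                       /* caller must free or wait */
}

int main(void)
{
	struct sa_model m = { .size = 4096, .soffset = 0, .eoffset = 4096 };
	unsigned off;

	if (sa_try_alloc(&m, 100, 16, &off))
		printf("first  allocation at %u\n", off);   /* 0 */
	if (sa_try_alloc(&m, 100, 16, &off))
		printf("second allocation at %u\n", off);   /* 112, 16-byte aligned */
	return 0;
}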
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 61dd4e3c9209..e2ace5dce117 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -31,148 +31,107 @@
#include "drm.h"
#include "radeon.h"
-static int radeon_semaphore_add_bo(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- int r, i;
-
-
- bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
- if (bo == NULL) {
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&bo->free);
- INIT_LIST_HEAD(&bo->list);
- bo->nused = 0;
-
- r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
- if (r) {
- dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
- kfree(bo);
- return r;
- }
- gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- gpu_addr += bo->ib->sa_bo.offset;
- cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- cpu_ptr += (bo->ib->sa_bo.offset >> 2);
- for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
- bo->semaphores[i].gpu_addr = gpu_addr;
- bo->semaphores[i].cpu_ptr = cpu_ptr;
- bo->semaphores[i].bo = bo;
- list_add_tail(&bo->semaphores[i].list, &bo->free);
- gpu_addr += 8;
- cpu_ptr += 2;
- }
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
- return 0;
-}
-
-static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
- struct radeon_semaphore_bo *bo)
-{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
- radeon_fence_unref(&bo->ib->fence);
- list_del(&bo->list);
- kfree(bo);
-}
-
-void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
-
- if (list_empty(&rdev->semaphore_drv.bo)) {
- return;
- }
- /* only shrink if first bo has free semaphore */
- bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
- if (list_empty(&bo->free)) {
- return;
- }
- list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
- if (bo->nused)
- continue;
- radeon_semaphore_del_bo_locked(rdev, bo);
- }
-}
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- bool do_retry = true;
int r;
-retry:
- *semaphore = NULL;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
- if (list_empty(&bo->free))
- continue;
- *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
- (*semaphore)->cpu_ptr[0] = 0;
- (*semaphore)->cpu_ptr[1] = 0;
- list_del(&(*semaphore)->list);
- bo->nused++;
- break;
- }
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-
+ *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
- if (do_retry) {
- do_retry = false;
- r = radeon_semaphore_add_bo(rdev);
- if (r)
- return r;
- goto retry;
- }
return -ENOMEM;
}
-
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ &(*semaphore)->sa_bo, 8, 8, true);
+ if (r) {
+ kfree(*semaphore);
+ *semaphore = NULL;
+ return r;
+ }
+ (*semaphore)->waiters = 0;
+ (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+ *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ --semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ ++semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
-void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore)
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ bool sync_to[RADEON_NUM_RINGS],
+ int dst_ring)
{
- unsigned long irq_flags;
+ int i = 0, r;
+
+ mutex_lock(&rdev->ring_lock);
+ r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8);
+ if (r) {
+ goto error;
+ }
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (!sync_to[i] || i == dst_ring)
+ continue;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- semaphore->bo->nused--;
- list_add_tail(&semaphore->list, &semaphore->bo->free);
- radeon_semaphore_shrink_locked(rdev);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[i].ready) {
+ dev_err(rdev->dev, "Trying to sync to a disabled ring!");
+ r = -EINVAL;
+ goto error;
+ }
+
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 8);
+ if (r) {
+ goto error;
+ }
+
+ radeon_semaphore_emit_signal(rdev, i, semaphore);
+ radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
+
+ radeon_ring_commit(rdev, &rdev->ring[i]);
+ }
+
+ radeon_ring_commit(rdev, &rdev->ring[dst_ring]);
+ mutex_unlock(&rdev->ring_lock);
+
+ return 0;
+
+error:
+ /* unlock all locks taken so far */
+ for (--i; i >= 0; --i) {
+ if (sync_to[i] || i == dst_ring) {
+ radeon_ring_undo(&rdev->ring[i]);
+ }
+ }
+ radeon_ring_undo(&rdev->ring[dst_ring]);
+ mutex_unlock(&rdev->ring_lock);
+ return r;
}
-void radeon_semaphore_driver_fini(struct radeon_device *rdev)
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
{
- struct radeon_semaphore_bo *bo, *n;
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- /* we force to free everything */
- list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
- if (!list_empty(&bo->free)) {
- dev_err(rdev->dev, "still in use semaphore\n");
- }
- radeon_semaphore_del_bo_locked(rdev, bo);
+ if (semaphore == NULL) {
+ return;
+ }
+ if (semaphore->waiters > 0) {
+ dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+ " hardware lockup imminent!\n", semaphore);
}
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence);
+ kfree(semaphore);
}
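
The waiters counter introduced above exists purely as a sanity check: every wait emitted on one ring must be balanced by a signal emitted on another, otherwise the waiting ring stalls forever. The toy model below illustrates the bookkeeping; it is a sketch, not driver code, and the names are hypothetical.

/* Toy model of the waiters bookkeeping: wait increments, signal
 * decrements, and a positive count at free time means a lockup.
 */
#include <stdio.h>

struct toy_semaphore {
	int waiters;	/* incremented on wait, decremented on signal */
};

static void toy_emit_signal(struct toy_semaphore *s) { --s->waiters; }
static void toy_emit_wait(struct toy_semaphore *s)   { ++s->waiters; }

int main(void)
{
	struct toy_semaphore sem = { 0 };

	/* sync a source ring to a destination ring, as
	 * radeon_semaphore_sync_rings() does for each source ring */
	toy_emit_signal(&sem);	/* on the source ring */
	toy_emit_wait(&sem);	/* on the destination ring */

	if (sem.waiters > 0)
		printf("unbalanced: hardware lockup imminent\n");
	else
		printf("balanced: waiters = %d\n", sem.waiters);
	return 0;
}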
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dc5dcf483aa3..efff929ea49d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -59,7 +59,7 @@ void radeon_test_moves(struct radeon_device *rdev)
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- &vram_obj);
+ NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -78,7 +78,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **vram_start, **vram_end;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_cleanup;
@@ -317,7 +317,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
out_cleanup:
if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
+ radeon_semaphore_free(rdev, semaphore, NULL);
if (fence1)
radeon_fence_unref(&fence1);
@@ -437,7 +437,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
out_cleanup:
if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
+ radeon_semaphore_free(rdev, semaphore, NULL);
if (fenceA)
radeon_fence_unref(&fenceA);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f493c6403af5..c94a2257761f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -222,8 +222,9 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
- struct radeon_fence *fence;
- int r, i;
+ struct radeon_fence *fence, *old_fence;
+ struct radeon_semaphore *sem = NULL;
+ int r;
rdev = radeon_get_rdev(bo->bdev);
r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
@@ -242,6 +243,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ radeon_fence_unref(&fence);
return -EINVAL;
}
switch (new_mem->mem_type) {
@@ -253,42 +255,36 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ radeon_fence_unref(&fence);
return -EINVAL;
}
if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
+ radeon_fence_unref(&fence);
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* sync other rings */
- if (rdev->family >= CHIP_R600) {
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready)
- continue;
-
- if (!fence->semaphore) {
- r = radeon_semaphore_create(rdev, &fence->semaphore);
- /* FIXME: handle semaphore error */
- if (r)
- continue;
- }
+ old_fence = bo->sync_obj;
+ if (old_fence && old_fence->ring != fence->ring
+ && !radeon_fence_signaled(old_fence)) {
+ bool sync_to_ring[RADEON_NUM_RINGS] = { };
+ sync_to_ring[old_fence->ring] = true;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ radeon_fence_unref(&fence);
+ return r;
+ }
- r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
- /* FIXME: handle ring lock error */
- if (r)
- continue;
- radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
-
- r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
- /* FIXME: handle ring lock error */
- if (r)
- continue;
- radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
- radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
+ r = radeon_semaphore_sync_rings(rdev, sem,
+ sync_to_ring, fence->ring);
+ if (r) {
+ radeon_semaphore_free(rdev, sem, NULL);
+ radeon_fence_unref(&fence);
+ return r;
}
}
@@ -298,6 +294,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem);
+ radeon_semaphore_free(rdev, sem, fence);
radeon_fence_unref(&fence);
return r;
}
@@ -614,10 +611,18 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
+ if (slave && ttm->sg) {
+ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+ gtt->ttm.dma_address, ttm->num_pages);
+ ttm->state = tt_unbound;
+ return 0;
+ }
+
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
@@ -658,6 +663,10 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
+ bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+ if (slave)
+ return;
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
@@ -729,8 +738,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
return r;
}
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- &rdev->stollen_vga_memory);
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
}
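
The radeon_ttm.c hunk above replaces the per-ring semaphore loop with a single decision: a semaphore round-trip is only needed when the buffer's last fence lives on a different ring than the copy ring and has not signaled yet. The sketch below restates that rule in isolation; the types and the signaled flag are stand-ins, not driver structures.

/* Minimal model of the "sync other rings" decision in radeon_move_blit(). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_fence {
	int ring;
	bool signaled;
};

static bool needs_semaphore(const struct toy_fence *old_fence, int copy_ring)
{
	return old_fence &&
	       old_fence->ring != copy_ring &&
	       !old_fence->signaled;
}

int main(void)
{
	struct toy_fence last = { .ring = 0, .signaled = false };

	printf("%d\n", needs_semaphore(&last, 1));	/* 1: other ring, still pending */
	printf("%d\n", needs_semaphore(&last, 0));	/* 0: same ring orders itself */
	printf("%d\n", needs_semaphore(NULL, 1));	/* 0: nothing to wait for */
	return 0;
}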
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 4cf381b3a6d8..a464eb5e2df2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -430,12 +430,9 @@ static int rs400_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d25cf869d08d..e95c5e61d4e2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -396,7 +396,6 @@ int rs600_asic_reset(struct radeon_device *rdev)
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
- rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
@@ -553,6 +552,12 @@ int rs600_irq_set(struct radeon_device *rdev)
~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ u32 hdmi0;
+ if (ASIC_IS_DCE2(rdev))
+ hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ else
+ hdmi0 = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -579,10 +584,15 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.hpd[1]) {
hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
}
+ if (rdev->irq.afmt[0]) {
+ hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ }
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ if (ASIC_IS_DCE2(rdev))
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
return 0;
}
@@ -622,6 +632,17 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.r500.disp_int = 0;
}
+ if (ASIC_IS_DCE2(rdev)) {
+ rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+ S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+ tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+ }
+ } else
+ rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
if (irqs) {
WREG32(R_000044_GEN_INT_STATUS, irqs);
}
@@ -630,6 +651,9 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
void rs600_irq_disable(struct radeon_device *rdev)
{
+ u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+ ~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+ WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
@@ -641,15 +665,20 @@ int rs600_irq_process(struct radeon_device *rdev)
{
u32 status, msi_rearm;
bool queue_hotplug = false;
+ bool queue_hdmi = false;
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
status = rs600_irq_ack(rdev);
- if (!status && !rdev->irq.stat_regs.r500.disp_int) {
+ if (!status &&
+ !rdev->irq.stat_regs.r500.disp_int &&
+ !rdev->irq.stat_regs.r500.hdmi0_status) {
return IRQ_NONE;
}
- while (status || rdev->irq.stat_regs.r500.disp_int) {
+ while (status ||
+ rdev->irq.stat_regs.r500.disp_int ||
+ rdev->irq.stat_regs.r500.hdmi0_status) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
@@ -687,12 +716,18 @@ int rs600_irq_process(struct radeon_device *rdev)
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
+ if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+ queue_hdmi = true;
+ DRM_DEBUG("HDMI0\n");
+ }
status = rs600_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
+ if (queue_hdmi)
+ schedule_work(&rdev->audio_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
@@ -873,20 +908,17 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ r = radeon_ib_pool_start(rdev);
+ if (r)
return r;
- }
- r = radeon_ib_pool_start(rdev);
+ r = radeon_ib_ring_tests(rdev);
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index a27c13ac47c3..f1f89414dc63 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -485,6 +485,20 @@
#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
+#define R_007404_HDMI0_STATUS 0x007404
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) & 0x1) << 28)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) >> 28) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG 0xEFFFFFFF
+#define S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) & 0x1) << 29)
+#define G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) >> 29) & 0x1)
+#define C_007404_HDMI0_AZ_FORMAT_WTRIG_INT 0xDFFFFFFF
+#define R_007408_HDMI0_AUDIO_PACKET_CONTROL 0x007408
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) & 0x1) << 28)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) >> 28) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK 0xEFFFFFFF
+#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) & 0x1) << 29)
+#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) >> 29) & 0x1)
+#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK 0xDFFFFFFF
/* MC registers */
#define R_000000_MC_STATUS 0x000000
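
The new HDMI0 definitions follow the S_/G_/C_ convention used throughout rs600d.h: S_xxx() shifts a value into the field, G_xxx() extracts it, and C_xxx is the mask that clears it. The standalone snippet below shows the pattern with the ACK field added above; the register is mocked with a plain variable, where the driver would use RREG32()/WREG32().

/* Illustration of the S_/G_/C_ field-macro convention. */
#include <stdint.h>
#include <stdio.h>

#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x)	(((x) & 0x1) << 29)
#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x)	(((x) >> 29) & 0x1)
#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK	0xDFFFFFFF

int main(void)
{
	uint32_t reg = 0;

	reg |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);		/* set the ack bit */
	printf("ack = %u\n", G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(reg));	/* 1 */
	reg &= C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK;		/* clear it again */
	printf("ack = %u\n", G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(reg));	/* 0 */
	return 0;
}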
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index f2c3b9d75f18..159b6a43fda0 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,20 +637,17 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ r = radeon_ib_pool_start(rdev);
+ if (r)
return r;
- }
- r = radeon_ib_pool_start(rdev);
+ r = radeon_ib_ring_tests(rdev);
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index d8d78fe17946..7f08cedb5333 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -412,12 +412,10 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- rdev->accel_working = false;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
return r;
- }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index cdab1aeaed6e..b4f51c569c36 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if (rdev->family == CHIP_RV740)
+ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask;
- u32 enabled_backends_count;
- u32 cur_pipe;
- u32 swizzle_pipe[R7XX_MAX_PIPES];
- u32 cur_backend;
- u32 i;
- bool force_no_swizzle;
-
- if (num_tile_pipes > R7XX_MAX_PIPES)
- num_tile_pipes = R7XX_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > R7XX_MAX_BACKENDS)
- num_backends = R7XX_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
-
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
-
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
-
- switch (rdev->family) {
- case CHIP_RV770:
- case CHIP_RV730:
- force_no_swizzle = false;
- break;
- case CHIP_RV710:
- case CHIP_RV740:
- default:
- force_no_swizzle = true;
- break;
- }
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
- switch (num_tile_pipes) {
- case 1:
- swizzle_pipe[0] = 0;
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 3:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- }
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
- }
- break;
- case 5:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- }
- break;
- case 7:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
- }
- break;
- }
-
- cur_backend = 0;
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-
- backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
- cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt;
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
- u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
- u32 db_debug4;
+ u32 db_debug4, tmp;
+ u32 inactive_pipes, shader_pipe_config;
+ u32 disabled_rb_mask;
+ unsigned active_number;
/* setup chip specs */
+ rdev->config.rv770.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_RV770:
rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev)
/* setup tiling, simd, pipe config */
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+ shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+ inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+ for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+ if (!(inactive_pipes & tmp)) {
+ active_number++;
+ }
+ tmp <<= 1;
+ }
+ if (active_number == 1) {
+ WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+ } else {
+ WREG32(SPI_CONFIG_CNTL, 0);
+ }
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+ if (tmp < rdev->config.rv770.max_backends) {
+ rdev->config.rv770.max_backends = tmp;
+ }
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.rv770.max_pipes) {
+ rdev->config.rv770.max_pipes = tmp;
+ }
+ tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.rv770.max_simds) {
+ rdev->config.rv770.max_simds = tmp;
+ }
+
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
default:
- gb_tiling_config |= PIPE_TILING(0);
+ gb_tiling_config = PIPE_TILING(0);
break;
case 2:
- gb_tiling_config |= PIPE_TILING(1);
+ gb_tiling_config = PIPE_TILING(1);
break;
case 4:
- gb_tiling_config |= PIPE_TILING(2);
+ gb_tiling_config = PIPE_TILING(2);
break;
case 8:
- gb_tiling_config |= PIPE_TILING(3);
+ gb_tiling_config = PIPE_TILING(3);
break;
}
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
+ disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+ tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+ R7XX_MAX_BACKENDS, disabled_rb_mask);
+ gb_tiling_config |= tmp << 16;
+ rdev->config.rv770.backend_map = tmp;
+
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
- else
- gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ gb_tiling_config |= BANK_TILING(1);
+ else
+ gb_tiling_config |= BANK_TILING(0);
+ }
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
- rdev->config.rv770.tiling_group_size = 512;
- else
- rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
gb_tiling_config |= BANK_SWAPS(1);
-
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
- cc_rb_backend_disable |=
- BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
-
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
-
- if (rdev->family == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(rdev,
- rdev->config.rv770.max_tile_pipes,
- (R7XX_MAX_BACKENDS -
- r600_count_pipe_bits((cc_rb_backend_disable &
- R7XX_MAX_BACKENDS_MASK) >> 16)),
- (cc_rb_backend_disable >> 16));
-
rdev->config.rv770.tile_config = gb_tiling_config;
- rdev->config.rv770.backend_map = backend_map;
- gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
- num_qd_pipes =
- R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+
+ num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -776,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL));
+ if (rdev->family != CHIP_RV770)
+ WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
db_debug3 = RREG32(DB_DEBUG3);
db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
switch (rdev->family) {
@@ -809,8 +652,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(VGT_NUM_INSTANCES, 1);
- WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
-
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
WREG32(CP_PERFMON_CNTL, 0);
@@ -954,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
-
+ WREG32(VC_ENHANCE, 0);
}
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
@@ -1114,10 +955,13 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ return r;
+
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
+ DRM_ERROR("radeon: audio init failed\n");
return r;
}
@@ -1143,12 +987,6 @@ int rv770_resume(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "radeon: audio init failed\n");
- return r;
- }
-
return r;
}
@@ -1178,10 +1016,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -1261,12 +1095,6 @@ int rv770_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "radeon: audio init failed\n");
- return r;
- }
-
return 0;
}
@@ -1281,7 +1109,6 @@ void rv770_fini(struct radeon_device *rdev)
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
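
The rv770.c hunks above drop the hand-built backend map table in favour of reading the harvest fuses directly: the usable backend/pipe/SIMD counts are simply the chip maximum minus the number of bits set in the corresponding disable mask. The sketch below shows that arithmetic in isolation; count_pipe_bits() mimics r600_count_pipe_bits() and the mask value is made up for illustration.

/* Toy illustration of counting harvested backends from a disable mask. */
#include <stdio.h>

static unsigned count_pipe_bits(unsigned val)
{
	unsigned ret = 0;

	while (val) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

int main(void)
{
	unsigned max_backends = 4;
	unsigned disable_mask = 0x5;	/* hypothetical: backends 0 and 2 fused off */
	unsigned enabled = max_backends - count_pipe_bits(disable_mask);

	printf("%u of %u backends usable\n", enabled, max_backends);
	return 0;
}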
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 79fa588e9ed5..b0adfc595d75 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -106,10 +106,13 @@
#define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0
+#define PIPE_TILING__SHIFT 1
+#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_QD_PIPES_SHIFT 8
#define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000
@@ -174,6 +177,7 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
@@ -207,6 +211,7 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
+#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define CACHE_DEPTH(x) ((x) << 1)
@@ -306,6 +311,8 @@
#define TCP_CNTL 0x9610
#define TCP_CHAN_STEER 0x9614
+#define VC_ENHANCE 0x9714
+
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
#define VC_ONLY 0
@@ -353,6 +360,197 @@
#define SRBM_STATUS 0x0E50
+/* DCE 3.2 HDMI */
+#define HDMI_CONTROL 0x7400
+# define HDMI_KEEPOUT_MODE (1 << 0)
+# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
+# define HDMI_ERROR_ACK (1 << 8)
+# define HDMI_ERROR_MASK (1 << 9)
+#define HDMI_STATUS 0x7404
+# define HDMI_ACTIVE_AVMUTE (1 << 0)
+# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
+# define HDMI_VBI_PACKET_ERROR (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL 0x7408
+# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
+# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL 0x740c
+# define HDMI_ACR_SEND (1 << 0)
+# define HDMI_ACR_CONT (1 << 1)
+# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
+# define HDMI_ACR_HW 0
+# define HDMI_ACR_32 1
+# define HDMI_ACR_44 2
+# define HDMI_ACR_48 3
+# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
+# define HDMI_ACR_AUTO_SEND (1 << 12)
+#define HDMI_VBI_PACKET_CONTROL 0x7410
+# define HDMI_NULL_SEND (1 << 0)
+# define HDMI_GC_SEND (1 << 4)
+# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0 0x7414
+# define HDMI_AVI_INFO_SEND (1 << 0)
+# define HDMI_AVI_INFO_CONT (1 << 1)
+# define HDMI_AUDIO_INFO_SEND (1 << 4)
+# define HDMI_AUDIO_INFO_CONT (1 << 5)
+# define HDMI_MPEG_INFO_SEND (1 << 8)
+# define HDMI_MPEG_INFO_CONT (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1 0x7418
+# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
+# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
+# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL 0x741c
+# define HDMI_GENERIC0_SEND (1 << 0)
+# define HDMI_GENERIC0_CONT (1 << 1)
+# define HDMI_GENERIC1_SEND (1 << 4)
+# define HDMI_GENERIC1_CONT (1 << 5)
+# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
+# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
+#define HDMI_GC 0x7428
+# define HDMI_GC_AVMUTE (1 << 0)
+#define AFMT_AUDIO_PACKET_CONTROL2 0x742c
+# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
+# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
+# define AFMT_60958_CS_SOURCE (1 << 4)
+# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
+# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0 0x7454
+# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
+# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
+# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
+# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
+# define AFMT_AVI_INFO_Y_RGB 0
+# define AFMT_AVI_INFO_Y_YCBCR422 1
+# define AFMT_AVI_INFO_Y_YCBCR444 2
+# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
+# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
+# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
+# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
+# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
+# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
+# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
+# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
+# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
+# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1 0x7458
+# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2 0x745c
+# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3 0x7460
+# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
+# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0 0x7464
+# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
+# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
+# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1 0x7468
+# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
+# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
+# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR 0x746c
+#define AFMT_GENERIC0_0 0x7470
+#define AFMT_GENERIC0_1 0x7474
+#define AFMT_GENERIC0_2 0x7478
+#define AFMT_GENERIC0_3 0x747c
+#define AFMT_GENERIC0_4 0x7480
+#define AFMT_GENERIC0_5 0x7484
+#define AFMT_GENERIC0_6 0x7488
+#define AFMT_GENERIC1_HDR 0x748c
+#define AFMT_GENERIC1_0 0x7490
+#define AFMT_GENERIC1_1 0x7494
+#define AFMT_GENERIC1_2 0x7498
+#define AFMT_GENERIC1_3 0x749c
+#define AFMT_GENERIC1_4 0x74a0
+#define AFMT_GENERIC1_5 0x74a4
+#define AFMT_GENERIC1_6 0x74a8
+#define HDMI_ACR_32_0 0x74ac
+# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1 0x74b0
+# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0 0x74b4
+# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1 0x74b8
+# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0 0x74bc
+# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1 0x74c0
+# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0 0x74c4
+#define HDMI_ACR_STATUS_1 0x74c8
+#define AFMT_AUDIO_INFO0 0x74cc
+# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
+# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
+#define AFMT_AUDIO_INFO1 0x74d0
+# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
+# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
+# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
+# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define AFMT_60958_0 0x74d4
+# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
+# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
+# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
+# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
+# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
+# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
+# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
+# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
+#define AFMT_60958_1 0x74d8
+# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
+# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
+# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL 0x74dc
+# define AFMT_AUDIO_CRC_EN (1 << 0)
+#define AFMT_RAMP_CONTROL0 0x74e0
+# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_RAMP_DATA_SIGN (1 << 31)
+#define AFMT_RAMP_CONTROL1 0x74e4
+# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
+# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2 0x74e8
+# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3 0x74ec
+# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
+#define AFMT_60958_2 0x74f0
+# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
+# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
+# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
+# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
+# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
+# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
+#define AFMT_STATUS 0x7600
+# define AFMT_AUDIO_ENABLE (1 << 4)
+# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL 0x7604
+# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
+# define AFMT_AUDIO_TEST_EN (1 << 12)
+# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
+# define AFMT_60958_CS_UPDATE (1 << 26)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
+# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
+# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL 0x7608
+# define AFMT_GENERIC0_UPDATE (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0 0x760c
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
+# define AFMT_MPEG_INFO_UPDATE (1 << 10)
+#define AFMT_GENERIC0_7 0x7610
+/* second instance starts at 0x7800 */
+#define HDMI_OFFSET0 (0x7400 - 0x7400)
+#define HDMI_OFFSET1 (0x7800 - 0x7400)
+
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 27bda986fc2b..0b0279291a73 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends_per_asic,
- u32 *backend_disable_mask_per_asic,
- u32 num_shader_engines)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 num_backends_per_se;
- u32 cur_pipe;
- u32 swizzle_pipe[SI_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- /* force legal values */
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_tile_pipes > rdev->config.si.max_tile_pipes)
- num_tile_pipes = rdev->config.si.max_tile_pipes;
- if (num_shader_engines < 1)
- num_shader_engines = 1;
- if (num_shader_engines > rdev->config.si.max_shader_engines)
- num_shader_engines = rdev->config.si.max_shader_engines;
- if (num_backends_per_asic < num_shader_engines)
- num_backends_per_asic = num_shader_engines;
- if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
- num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
-
- /* make sure we have the same number of backends per se */
- num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
- /* set up the number of backends per se */
- num_backends_per_se = num_backends_per_asic / num_shader_engines;
- if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
- num_backends_per_se = rdev->config.si.max_backends_per_se;
- num_backends_per_asic = num_backends_per_se * num_shader_engines;
- }
-
- /* create enable mask and count for enabled backends */
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends_per_asic)
- break;
- }
-
- /* force the backends mask to match the current number of backends */
- if (enabled_backends_count != num_backends_per_asic) {
- u32 this_backend_enabled;
- u32 shader_engine;
- u32 backend_per_se;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- /* calc the current se */
- shader_engine = i / rdev->config.si.max_backends_per_se;
- /* calc the backend per se */
- backend_per_se = i % rdev->config.si.max_backends_per_se;
- /* default to not enabled */
- this_backend_enabled = 0;
- if ((shader_engine < num_shader_engines) &&
- (backend_per_se < num_backends_per_se))
- this_backend_enabled = 1;
- if (this_backend_enabled) {
- enabled_backends_mask |= (1 << i);
- *backend_disable_mask_per_asic &= ~(1 << i);
- ++enabled_backends_count;
- }
- }
- }
-
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- force_no_swizzle = true;
- break;
- default:
- force_no_swizzle = false;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
-static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
- u32 disable_mask_per_se,
- u32 max_disable_mask_per_se,
- u32 num_shader_engines)
-{
- u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
- u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
- if (num_shader_engines == 1)
- return disable_mask_per_asic;
- else if (num_shader_engines == 2)
- return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
- else
- return 0xffffffff;
-}
-
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
}
+static void si_select_se_sh(struct radeon_device *rdev,
+ u32 se_num, u32 sh_num)
+{
+ u32 data = INSTANCE_BROADCAST_WRITES;
+
+ if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+ data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ else if (se_num == 0xffffffff)
+ data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+ else if (sh_num == 0xffffffff)
+ data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+ else
+ data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+ WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 si_create_bitmask(u32 bit_width)
+{
+ u32 i, mask = 0;
+
+ for (i = 0; i < bit_width; i++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+ return mask;
+}
+
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ if (data & 1)
+ data &= INACTIVE_CUS_MASK;
+ else
+ data = 0;
+ data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+ data >>= INACTIVE_CUS_SHIFT;
+
+ mask = si_create_bitmask(cu_per_sh);
+
+ return ~data & mask;
+}
+
+static void si_setup_spi(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+ u32 cu_per_sh)
+{
+ int i, j, k;
+ u32 data, mask, active_cu;
+
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+ data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+ active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+ mask = 1;
+ for (k = 0; k < 16; k++) {
+ mask <<= k;
+ if (active_cu & mask) {
+ data &= ~mask;
+ WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+ break;
+ }
+ }
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+ u32 max_rb_num, u32 se_num,
+ u32 sh_per_se)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_RB_BACKEND_DISABLE);
+ if (data & 1)
+ data &= BACKEND_DISABLE_MASK;
+ else
+ data = 0;
+ data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+ data >>= BACKEND_DISABLE_SHIFT;
+
+ mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+ return data & mask;
+}
+
+static void si_setup_rb(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+ u32 max_rb_num)
+{
+ int i, j;
+ u32 data, mask;
+ u32 disabled_rbs = 0;
+ u32 enabled_rbs = 0;
+
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+ data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ mask = 1;
+ for (i = 0; i < max_rb_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
+ for (i = 0; i < se_num; i++) {
+ si_select_se_sh(rdev, i, 0xffffffff);
+ data = 0;
+ for (j = 0; j < sh_per_se; j++) {
+ switch (enabled_rbs & 3) {
+ case 1:
+ data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+ break;
+ case 2:
+ data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+ break;
+ case 3:
+ default:
+ data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+ break;
+ }
+ enabled_rbs >>= 2;
+ }
+ WREG32(PA_SC_RASTER_CONFIG, data);
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
static void si_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_array_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
- u32 cgts_tcc_disable;
u32 sx_debug_1;
- u32 gc_user_shader_array_config;
- u32 gc_user_rb_backend_disable;
- u32 cgts_user_tcc_disable;
u32 hdp_host_path_cntl;
u32 tmp;
int i, j;
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_TAHITI:
rdev->config.si.max_shader_engines = 2;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 12;
- rdev->config.si.max_simds_per_se = 8;
+ rdev->config.si.max_cu_per_sh = 8;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 12;
rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x100;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PITCAIRN:
rdev->config.si.max_shader_engines = 2;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 8;
- rdev->config.si.max_simds_per_se = 5;
+ rdev->config.si.max_cu_per_sh = 5;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 8;
rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x100;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_VERDE:
default:
rdev->config.si.max_shader_engines = 1;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 4;
- rdev->config.si.max_simds_per_se = 2;
+ rdev->config.si.max_cu_per_sh = 2;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 4;
rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x40;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
- cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
- cgts_tcc_disable = 0xffff0000;
- for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
- cgts_tcc_disable &= ~(1 << (16 + i));
- gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
- gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
- cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
- rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
- tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
- tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.si.backend_disable_mask_per_asic =
- si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
- rdev->config.si.num_shader_engines);
- rdev->config.si.backend_map =
- si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
- rdev->config.si.num_backends_per_se *
- rdev->config.si.num_shader_engines,
- &rdev->config.si.backend_disable_mask_per_asic,
- rdev->config.si.num_shader_engines);
- tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
- rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
rdev->config.si.mem_max_burst_length_bytes = 256;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.num_gpus = 1;
rdev->config.si.multi_gpu_tile_size = 64;
- gb_addr_config = 0;
- switch (rdev->config.si.num_tile_pipes) {
- case 1:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- default:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
- gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
- tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
- switch (rdev->config.si.num_gpus) {
- case 1:
- default:
- gb_addr_config |= NUM_GPUS(0);
- break;
- case 2:
- gb_addr_config |= NUM_GPUS(1);
- break;
- case 4:
- gb_addr_config |= NUM_GPUS(2);
- break;
- }
- switch (rdev->config.si.multi_gpu_tile_size) {
- case 16:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
- break;
- case 32:
- default:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
- break;
- case 64:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
- break;
- case 128:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
- break;
- }
+ /* fix up row size */
+ gb_addr_config &= ~ROW_SIZE_MASK;
switch (rdev->config.si.mem_row_size_in_kb) {
case 1:
default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
break;
}
- tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
- rdev->config.si.num_tile_pipes = (1 << tmp);
- tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
- rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
- tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
- rdev->config.si.num_shader_engines = tmp + 1;
- tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
- rdev->config.si.num_gpus = tmp + 1;
- tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
- rdev->config.si.multi_gpu_tile_size = 1 << tmp;
- tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
- rdev->config.si.mem_row_size_in_kb = 1 << tmp;
-
- gb_backend_map =
- si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
- rdev->config.si.num_backends_per_se *
- rdev->config.si.num_shader_engines,
- &rdev->config.si.backend_disable_mask_per_asic,
- rdev->config.si.num_shader_engines);
-
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.tile_config |= (3 << 0);
break;
}
- rdev->config.si.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.si.tile_config |= 1 << 4;
+ else
+ rdev->config.si.tile_config |= 0 << 4;
rdev->config.si.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.si.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
- rdev->config.si.backend_map = gb_backend_map;
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- /* primary versions */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
-
- WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+ si_tiling_mode_table_init(rdev);
- /* user versions */
- WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
+ si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+ rdev->config.si.max_sh_per_se,
+ rdev->config.si.max_backends_per_se);
- WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+ si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+ rdev->config.si.max_sh_per_se,
+ rdev->config.si.max_cu_per_sh);
- si_tiling_mode_table_init(rdev);
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -2217,8 +2064,6 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status, grbm_status2;
u32 grbm_status_se0, grbm_status_se1;
- struct r100_gpu_lockup *lockup = &rdev->config.si.lockup;
- int r;
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
@@ -2226,20 +2071,12 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, ring, 2);
- if (!r) {
- /* PACKET2 NOP */
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_unlock_commit(rdev, ring);
- }
- /* XXX deal with CP0,1,2 */
- ring->rptr = RREG32(ring->rptr_reg);
- return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
static int si_gpu_soft_reset(struct radeon_device *rdev)
@@ -2275,6 +2112,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
SOFT_RESET_GDS |
SOFT_RESET_PA |
SOFT_RESET_SC |
+ SOFT_RESET_BCI |
SOFT_RESET_SPI |
SOFT_RESET_SX |
SOFT_RESET_TC |
@@ -2527,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);
/* empty context1-15 */
- /* FIXME start with 1G, once using 2 level pt switch to full
+ /* FIXME start with 4G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
@@ -2985,7 +2823,8 @@ int si_rlc_init(struct radeon_device *rdev)
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.save_restore_obj);
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -3009,7 +2848,8 @@ int si_rlc_init(struct radeon_device *rdev)
/* clear state block */
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.clear_state_obj);
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
si_rlc_fini(rdev);
@@ -3216,6 +3056,8 @@ static int si_irq_init(struct radeon_device *rdev)
/* force the active interrupt state to all disabled */
si_disable_interrupt_state(rdev);
+ pci_set_master(rdev->pdev);
+
/* enable irqs */
si_enable_interrupts(rdev);
@@ -3994,10 +3836,6 @@ int si_init(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -4117,7 +3955,6 @@ void si_fini(struct radeon_device *rdev)
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
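
si_get_cu_enabled() above folds the hardware and user CU-disable fields together, shifts out the INACTIVE_CUS bits and inverts the result against a width mask built by si_create_bitmask(). A rough stand-alone sketch of that bit manipulation follows; the example register values and the main() harness are illustrative only, while the INACTIVE_CUS field layout and the mask loop are taken from the patch itself.

#include <stdint.h>
#include <stdio.h>

#define INACTIVE_CUS_MASK  0xFFFF0000
#define INACTIVE_CUS_SHIFT 16

/* Same loop as si_create_bitmask(): bit_width low-order ones. */
static uint32_t create_bitmask(uint32_t bit_width)
{
	uint32_t i, mask = 0;

	for (i = 0; i < bit_width; i++) {
		mask <<= 1;
		mask |= 1;
	}
	return mask;
}

int main(void)
{
	/* Hypothetical register values: bit 0 set means the fuse field is
	 * valid, and CUs 6 and 7 are fused off in the upper half-word. */
	uint32_t cc_config   = 0x00C00001;
	uint32_t user_config = 0x00000000;
	uint32_t cu_per_sh   = 8;
	uint32_t data, active;

	data = (cc_config & 1) ? (cc_config & INACTIVE_CUS_MASK) : 0;
	data |= user_config;
	data >>= INACTIVE_CUS_SHIFT;

	active = ~data & create_bitmask(cu_per_sh);
	printf("active CU mask: 0x%02x\n", active);	/* 0x3f: CUs 0-5 */
	return 0;
}
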
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
index eda938a7cb6e..501f9d431d57 100644
--- a/drivers/gpu/drm/radeon/si_reg.h
+++ b/drivers/gpu/drm/radeon/si_reg.h
@@ -30,4 +30,76 @@
#define SI_DC_GPIO_HPD_EN 0x65b8
#define SI_DC_GPIO_HPD_Y 0x65bc
+#define SI_GRPH_CONTROL 0x6804
+# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define SI_GRPH_DEPTH_8BPP 0
+# define SI_GRPH_DEPTH_16BPP 1
+# define SI_GRPH_DEPTH_32BPP 2
+# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define SI_ADDR_SURF_2_BANK 0
+# define SI_ADDR_SURF_4_BANK 1
+# define SI_ADDR_SURF_8_BANK 2
+# define SI_ADDR_SURF_16_BANK 3
+# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
+# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define SI_ADDR_SURF_BANK_WIDTH_1 0
+# define SI_ADDR_SURF_BANK_WIDTH_2 1
+# define SI_ADDR_SURF_BANK_WIDTH_4 2
+# define SI_ADDR_SURF_BANK_WIDTH_8 3
+# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define SI_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define SI_GRPH_FORMAT_ARGB1555 0
+# define SI_GRPH_FORMAT_ARGB565 1
+# define SI_GRPH_FORMAT_ARGB4444 2
+# define SI_GRPH_FORMAT_AI88 3
+# define SI_GRPH_FORMAT_MONO16 4
+# define SI_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define SI_GRPH_FORMAT_ARGB8888 0
+# define SI_GRPH_FORMAT_ARGB2101010 1
+# define SI_GRPH_FORMAT_32BPP_DIG 2
+# define SI_GRPH_FORMAT_8B_ARGB2101010 3
+# define SI_GRPH_FORMAT_BGRA1010102 4
+# define SI_GRPH_FORMAT_8B_BGRA1010102 5
+# define SI_GRPH_FORMAT_RGB111110 6
+# define SI_GRPH_FORMAT_BGR101111 7
+# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define SI_ADDR_SURF_BANK_HEIGHT_1 0
+# define SI_ADDR_SURF_BANK_HEIGHT_2 1
+# define SI_ADDR_SURF_BANK_HEIGHT_4 2
+# define SI_ADDR_SURF_BANK_HEIGHT_8 3
+# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define SI_ADDR_SURF_TILE_SPLIT_64B 0
+# define SI_ADDR_SURF_TILE_SPLIT_128B 1
+# define SI_ADDR_SURF_TILE_SPLIT_256B 2
+# define SI_ADDR_SURF_TILE_SPLIT_512B 3
+# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
+# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
+# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
+# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
+# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
+# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
+# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
+# define SI_ADDR_SURF_P2 0
+# define SI_ADDR_SURF_P4_8x16 4
+# define SI_ADDR_SURF_P4_16x16 5
+# define SI_ADDR_SURF_P4_16x32 6
+# define SI_ADDR_SURF_P4_32x32 7
+# define SI_ADDR_SURF_P8_16x16_8x16 8
+# define SI_ADDR_SURF_P8_16x32_8x16 9
+# define SI_ADDR_SURF_P8_32x32_8x16 10
+# define SI_ADDR_SURF_P8_16x32_16x16 11
+# define SI_ADDR_SURF_P8_32x32_16x16 12
+# define SI_ADDR_SURF_P8_32x32_16x32 13
+# define SI_ADDR_SURF_P8_32x64_32x32 14
+
#endif
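
The SI_GRPH_* macros above encode the fields of the SI_GRPH_CONTROL primary-surface register. Below is a hypothetical stand-alone composition of one such value, with the needed field encoders copied from the header; the chosen depth/format/bank/tiling settings are only an example and the actual register write is omitted.

#include <stdint.h>
#include <stdio.h>

/* Field encoders copied from si_reg.h above. */
#define SI_GRPH_DEPTH(x)             (((x) & 0x3) << 0)
#define SI_GRPH_DEPTH_32BPP          2
#define SI_GRPH_NUM_BANKS(x)         (((x) & 0x3) << 2)
#define SI_ADDR_SURF_8_BANK          2
#define SI_GRPH_FORMAT(x)            (((x) & 0x7) << 8)
#define SI_GRPH_FORMAT_ARGB8888      0
#define SI_GRPH_ARRAY_MODE(x)        (((x) & 0x7) << 20)
#define SI_GRPH_ARRAY_2D_TILED_THIN1 4

int main(void)
{
	/* 32bpp ARGB8888, 8 banks, 2D macro-tiled scanout surface. */
	uint32_t grph_control = SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
				SI_GRPH_NUM_BANKS(SI_ADDR_SURF_8_BANK) |
				SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
				SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_2D_TILED_THIN1);

	printf("SI_GRPH_CONTROL = 0x%08x\n", grph_control);
	return 0;
}
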
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 53ea2c42dbd6..db4067962868 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -24,6 +24,11 @@
#ifndef SI_H
#define SI_H
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -408,6 +413,12 @@
#define SOFT_RESET_IA (1 << 15)
#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SH_INDEX(x) ((x) << 8)
+#define SE_INDEX(x) ((x) << 16)
+#define SH_BROADCAST_WRITES (1 << 29)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
#define GRBM_INT_CNTL 0x8060
# define RDERR_INT_ENABLE (1 << 0)
@@ -480,6 +491,8 @@
#define VGT_TF_MEMORY_BASE 0x89B8
#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
+#define INACTIVE_CUS_MASK 0xFFFF0000
+#define INACTIVE_CUS_SHIFT 16
#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
#define PA_CL_ENHANCE 0x8A14
@@ -688,6 +701,12 @@
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
+#define PA_SC_RASTER_CONFIG 0x28350
+# define RASTER_CONFIG_RB_MAP_0 0
+# define RASTER_CONFIG_RB_MAP_1 1
+# define RASTER_CONFIG_RB_MAP_2 2
+# define RASTER_CONFIG_RB_MAP_3 3
+
#define VGT_EVENT_INITIATOR 0x28a90
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
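
The new GRBM_GFX_INDEX field macros are what si_select_se_sh() uses to steer register writes at a single shader engine/array or broadcast to all of them. A small illustrative sketch of that value composition, mirroring the two cases in si_select_se_sh(); the macros are copied from sid.h (SE_BROADCAST_WRITES made unsigned here) and the printed values are only for inspection.

#include <stdint.h>
#include <stdio.h>

/* GRBM_GFX_INDEX field macros copied from sid.h above. */
#define INSTANCE_INDEX(x)		((x) << 0)
#define SH_INDEX(x)			((x) << 8)
#define SE_INDEX(x)			((x) << 16)
#define SH_BROADCAST_WRITES		(1 << 29)
#define INSTANCE_BROADCAST_WRITES	(1 << 30)
#define SE_BROADCAST_WRITES		(1u << 31)	/* 1 << 31 in sid.h */

int main(void)
{
	/* Target SE 1, SH 0, all instances - as in si_select_se_sh(rdev, 1, 0). */
	uint32_t select = INSTANCE_BROADCAST_WRITES | SE_INDEX(1) | SH_INDEX(0);

	/* Broadcast to every SE/SH - as in si_select_se_sh(rdev, ~0, ~0). */
	uint32_t broadcast = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;

	printf("select: 0x%08x, broadcast: 0x%08x\n", select, broadcast);
	return 0;
}
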
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index cb1ee4e0050a..6eb507a5d130 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -735,7 +735,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
return -EINVAL;
}
drm_core_ioremap(dev->agp_buffer_map, dev);
- if (!dev->agp_buffer_map) {
+ if (!dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap DMA buffer region!\n");
savage_do_cleanup_bci(dev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 30d98d14b5c5..dd14cd1a0033 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
+ idr_init(&dev_priv->object_idr);
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- idr_init(&dev->object_name_idr);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1f5c67c579cf..36f4b28c1b90 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -343,6 +343,16 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
+ case ttm_bo_type_sg:
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags | TTM_PAGE_FLAG_SG,
+ glob->dummy_read_page);
+ if (unlikely(bo->ttm == NULL)) {
+ ret = -ENOMEM;
+ break;
+ }
+ bo->ttm->sg = bo->sg;
+ break;
default:
pr_err("Illegal buffer object type\n");
ret = -EINVAL;
@@ -1169,6 +1179,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bool interruptible,
struct file *persistent_swap_storage,
size_t acc_size,
+ struct sg_table *sg,
void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
@@ -1193,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
(*destroy)(bo);
else
kfree(bo);
+ ttm_mem_global_free(mem_glob, acc_size);
return -EINVAL;
}
bo->destroy = destroy;
@@ -1223,6 +1235,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->seq_valid = false;
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
+ bo->sg = sg;
atomic_inc(&bo->glob->bo_count);
ret = ttm_bo_check_placement(bo, placement);
@@ -1233,7 +1246,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (bo->type == ttm_bo_type_device) {
+ if (bo->type == ttm_bo_type_device ||
+ bo->type == ttm_bo_type_sg) {
ret = ttm_bo_setup_vm(bo);
if (ret)
goto out_err;
@@ -1294,25 +1308,17 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size;
int ret;
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (unlikely(ret != 0))
- return ret;
-
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-
- if (unlikely(bo == NULL)) {
- ttm_mem_global_free(mem_glob, acc_size);
+ if (unlikely(bo == NULL))
return -ENOMEM;
- }
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
buffer_start, interruptible,
- persistent_swap_storage, acc_size, NULL);
+ persistent_swap_storage, acc_size, NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
@@ -1821,6 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_unlock(&glob->lru_lock);
(void) ttm_bo_cleanup_refs(bo, false, false, false);
kref_put(&bo->list_kref, ttm_bo_release_list);
+ spin_lock(&glob->lru_lock);
continue;
}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 53673907a6a0..6e52069894b3 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -13,8 +13,21 @@
static struct drm_driver driver;
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff).
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips.
+ */
static struct usb_device_id id_table[] = {
- {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+ {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0x00,
+ .bInterfaceProtocol = 0x00,
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
{},
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -38,7 +51,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
drm_unplug_dev(dev);
}
-static struct vm_operations_struct udl_gem_vm_ops = {
+static const struct vm_operations_struct udl_gem_vm_ops = {
.fault = udl_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -57,7 +70,7 @@ static const struct file_operations udl_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
@@ -70,6 +83,10 @@ static struct drm_driver driver = {
.dumb_map_offset = udl_gem_mmap,
.dumb_destroy = udl_dumb_destroy,
.fops = &udl_driver_fops,
+
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = udl_gem_prime_import,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
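
The comment above describes a match policy rather than a product list. A hypothetical userspace predicate expressing the same policy follows; the struct and function names are invented for illustration and only the numeric values come from the id_table entry.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical summary of the interface fields udl matches on. */
struct usb_if_summary {
	uint16_t idVendor;
	uint8_t  bInterfaceClass;
	uint8_t  bInterfaceSubClass;
	uint8_t  bInterfaceProtocol;
};

/* Same policy as the id_table entry: DisplayLink VID 0x17e9 plus the
 * vendor-defined interface class 0xff with subclass/protocol 0x00. */
static bool udl_would_match(const struct usb_if_summary *s)
{
	return s->idVendor == 0x17e9 &&
	       s->bInterfaceClass == 0xff &&
	       s->bInterfaceSubClass == 0x00 &&
	       s->bInterfaceProtocol == 0x00;
}

int main(void)
{
	struct usb_if_summary dl  = { 0x17e9, 0xff, 0x00, 0x00 };
	struct usb_if_summary hub = { 0x17e9, 0x09, 0x00, 0x00 };

	printf("dl: %d, hub: %d\n", udl_would_match(&dl), udl_would_match(&hub));
	return 0;
}
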
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 96820d03a303..fccd361f7b50 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -66,6 +66,7 @@ struct udl_gem_object {
struct drm_gem_object base;
struct page **pages;
void *vmapping;
+ struct sg_table *sg;
};
#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -118,6 +119,8 @@ int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 4d9c3a5d8a45..ce9a61179925 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
if (!fb->active_16)
return 0;
- if (!fb->obj->vmapping)
- udl_gem_vmap(fb->obj);
+ if (!fb->obj->vmapping) {
+ ret = udl_gem_vmap(fb->obj);
+ if (ret == -ENOMEM) {
+ DRM_ERROR("failed to vmap fb\n");
+ return 0;
+ }
+ if (!fb->obj->vmapping) {
+ DRM_ERROR("failed to vmapping\n");
+ return 0;
+ }
+ }
start_cycles = get_cycles();
@@ -593,11 +602,20 @@ udl_fb_user_fb_create(struct drm_device *dev,
struct drm_gem_object *obj;
struct udl_framebuffer *ufb;
int ret;
+ uint32_t size;
obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ if (size > obj->size) {
+ DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
+ return ERR_PTR(-ENOMEM);
+ }
+
ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
if (ufb == NULL)
return ERR_PTR(-ENOMEM);
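
udl_fb_user_fb_create() now rejects framebuffers whose pitch * height, rounded up to a whole page, exceeds the backing GEM object. A small stand-alone sketch of that arithmetic; ALIGN is re-defined locally, and the 4 KiB page size and mode numbers are assumptions for the example.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t pitch = 2048, height = 768;	/* e.g. 1024x768 at 16bpp */
	uint32_t size = ALIGN(pitch * height, PAGE_SIZE);

	/* 2048 * 768 = 1572864 bytes, already page aligned (384 pages);
	 * the fb is accepted only if the object is at least that big. */
	printf("required object size: %u bytes\n", size);
	return 0;
}
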
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 92f19ef329b0..7bd65bdd15a8 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -9,6 +9,7 @@
#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size)
@@ -161,6 +162,12 @@ static void udl_gem_put_pages(struct udl_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int i;
+ if (obj->base.import_attach) {
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return;
+ }
+
for (i = 0; i < page_count; i++)
page_cache_release(obj->pages[i]);
@@ -173,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int ret;
+ if (obj->base.import_attach) {
+ ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+ 0, obj->base.size, DMA_BIDIRECTIONAL);
+ if (ret)
+ return -EINVAL;
+
+ obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+ if (!obj->vmapping)
+ return -ENOMEM;
+ return 0;
+ }
+
ret = udl_gem_get_pages(obj, GFP_KERNEL);
if (ret)
return ret;
@@ -185,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
void udl_gem_vunmap(struct udl_gem_object *obj)
{
+ if (obj->base.import_attach) {
+ dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+ dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+ obj->base.size, DMA_BIDIRECTIONAL);
+ return;
+ }
+
if (obj->vmapping)
vunmap(obj->vmapping);
@@ -198,6 +224,9 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->vmapping)
udl_gem_vunmap(obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg);
+
if (obj->pages)
udl_gem_put_pages(obj);
@@ -224,7 +253,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
ret = udl_gem_get_pages(gobj, GFP_KERNEL);
if (ret)
- return ret;
+ goto out;
if (!gobj->base.map_list.map) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
@@ -239,3 +268,66 @@ unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
+
+static int udl_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct udl_gem_object **obj_p)
+{
+ struct udl_gem_object *obj;
+ int npages;
+
+ npages = size / PAGE_SIZE;
+
+ *obj_p = NULL;
+ obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->sg = sg;
+ obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (obj->pages == NULL) {
+ DRM_ERROR("obj pages is NULL %d\n", npages);
+ return -ENOMEM;
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+ *obj_p = obj;
+ return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct udl_gem_object *uobj;
+ int ret;
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_PTR(PTR_ERR(attach));
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+ if (ret) {
+ goto fail_unmap;
+ }
+
+ uobj->base.import_attach = attach;
+
+ return &uobj->base;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index a8d5f09428c7..4c2d836a0893 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -61,7 +61,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
u8 length;
u16 key;
- key = *((u16 *) desc);
+ key = le16_to_cpu(*((u16 *) desc));
desc += sizeof(u16);
length = *desc;
desc++;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b3ecb3d12a1d..0d7816789da1 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -395,7 +395,7 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.prefer_shadow = 0;
dev->mode_config.preferred_depth = 24;
- dev->mode_config.funcs = (void *)&udl_mode_funcs;
+ dev->mode_config.funcs = &udl_mode_funcs;
drm_mode_create_dirty_info_property(dev);
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 1f182254e81e..c126182ac07e 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
+ idr_init(&dev_priv->object_idr);
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- idr_init(&dev->object_name_idr);
-
pci_set_master(dev->pdev);
ret = drm_vblank_init(dev, 1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 51c9ba5cd2fb..21ee78226560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
cmd += sizeof(remap_cmd) / sizeof(uint32);
for (i = 0; i < num_pages; ++i) {
- if (VMW_PPN_SIZE > 4)
+ if (VMW_PPN_SIZE <= 4)
*cmd = page_to_pfn(*pages++);
else
*((uint64_t *)cmd) = page_to_pfn(*pages++);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 2286d47e5022..6b0078ffa763 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1178,7 +1178,7 @@ err_out:
return &vfb->base;
}
-static struct drm_mode_config_funcs vmw_kms_funcs = {
+static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a37abb581cbb..22bf9a21ec71 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1567,7 +1567,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, 0, interruptible,
- NULL, acc_size, bo_free);
+ NULL, acc_size, NULL, bo_free);
return ret;
}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 96c83a9a76bb..f34838839b08 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
bool "Laptop Hybrid Graphics - GPU switching support"
depends on X86
depends on ACPI
+ select VGA_ARB
help
Many laptops released in 2008/9/10 have two GPUs with a multiplexer
to switch between them. This adds support for dynamic switching when
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 58434e804d91..5b3c7d135dc9 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -28,15 +28,16 @@
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
+#include <linux/vgaarb.h>
+
struct vga_switcheroo_client {
struct pci_dev *pdev;
struct fb_info *fb_info;
int pwr_state;
- void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
- void (*reprobe)(struct pci_dev *pdev);
- bool (*can_switch)(struct pci_dev *pdev);
+ const struct vga_switcheroo_client_ops *ops;
int id;
bool active;
+ struct list_head list;
};
static DEFINE_MUTEX(vgasr_mutex);
@@ -51,16 +52,23 @@ struct vgasr_priv {
struct dentry *switch_file;
int registered_clients;
- struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
+ struct list_head clients;
struct vga_switcheroo_handler *handler;
};
+#define ID_BIT_AUDIO 0x100
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c) ((c)->id == -1 || !client_is_audio(c))
+#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
+
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
/* only one switcheroo per system */
-static struct vgasr_priv vgasr_priv;
+static struct vgasr_priv vgasr_priv = {
+ .clients = LIST_HEAD_INIT(vgasr_priv.clients),
+};
int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
{
@@ -86,72 +94,132 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
static void vga_switcheroo_enable(void)
{
- int i;
int ret;
+ struct vga_switcheroo_client *client;
+
/* call the handler to init */
vgasr_priv.handler->init();
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->id != -1)
+ continue;
+ ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
- vgasr_priv.clients[i].id = ret;
+ client->id = ret;
}
vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
}
-int vga_switcheroo_register_client(struct pci_dev *pdev,
- void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
- void (*reprobe)(struct pci_dev *pdev),
- bool (*can_switch)(struct pci_dev *pdev))
+static int register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active)
{
- int index;
+ struct vga_switcheroo_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->pwr_state = VGA_SWITCHEROO_ON;
+ client->pdev = pdev;
+ client->ops = ops;
+ client->id = id;
+ client->active = active;
mutex_lock(&vgasr_mutex);
- /* don't do IGD vs DIS here */
- if (vgasr_priv.registered_clients & 1)
- index = 1;
- else
- index = 0;
-
- vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
- vgasr_priv.clients[index].pdev = pdev;
- vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
- vgasr_priv.clients[index].reprobe = reprobe;
- vgasr_priv.clients[index].can_switch = can_switch;
- vgasr_priv.clients[index].id = -1;
- if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
- vgasr_priv.clients[index].active = true;
-
- vgasr_priv.registered_clients |= (1 << index);
+ list_add_tail(&client->list, &vgasr_priv.clients);
+ if (client_is_vga(client))
+ vgasr_priv.registered_clients++;
/* if we get two clients + handler */
- if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
+ if (!vgasr_priv.active &&
+ vgasr_priv.registered_clients == 2 && vgasr_priv.handler) {
printk(KERN_INFO "vga_switcheroo: enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
return 0;
}
+
+int vga_switcheroo_register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops)
+{
+ return register_client(pdev, ops, -1,
+ pdev == vga_default_device());
+}
EXPORT_SYMBOL(vga_switcheroo_register_client);
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active)
+{
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+}
+EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
+
+static struct vga_switcheroo_client *
+find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->pdev == pdev)
+ return client;
+ return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_client_from_id(struct list_head *head, int client_id)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->id == client_id)
+ return client;
+ return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_active_client(struct list_head *head)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->active && client_is_vga(client))
+ return client;
+ return NULL;
+}
+
+int vga_switcheroo_get_client_state(struct pci_dev *pdev)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (!client)
+ return VGA_SWITCHEROO_NOT_FOUND;
+ if (!vgasr_priv.active)
+ return VGA_SWITCHEROO_INIT;
+ return client->pwr_state;
+}
+EXPORT_SYMBOL(vga_switcheroo_get_client_state);
+
void vga_switcheroo_unregister_client(struct pci_dev *pdev)
{
- int i;
+ struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].pdev == pdev) {
- vgasr_priv.registered_clients &= ~(1 << i);
- break;
- }
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (client) {
+ if (client_is_vga(client))
+ vgasr_priv.registered_clients--;
+ list_del(&client->list);
+ kfree(client);
+ }
+ if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
+ printk(KERN_INFO "vga_switcheroo: disabled\n");
+ vga_switcheroo_debugfs_fini(&vgasr_priv);
+ vgasr_priv.active = false;
}
-
- printk(KERN_INFO "vga_switcheroo: disabled\n");
- vga_switcheroo_debugfs_fini(&vgasr_priv);
- vgasr_priv.active = false;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_unregister_client);
@@ -159,29 +227,29 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_client);
void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
struct fb_info *info)
{
- int i;
+ struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].pdev == pdev) {
- vgasr_priv.clients[i].fb_info = info;
- break;
- }
- }
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (client)
+ client->fb_info = info;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
static int vga_switcheroo_show(struct seq_file *m, void *v)
{
- int i;
+ struct vga_switcheroo_client *client;
+ int i = 0;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- seq_printf(m, "%d:%s:%c:%s:%s\n", i,
- vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
- vgasr_priv.clients[i].active ? '+' : ' ',
- vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
- pci_name(vgasr_priv.clients[i].pdev));
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+ client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+ client_is_vga(client) ? "" : "-Audio",
+ client->active ? '+' : ' ',
+ client->pwr_state ? "Pwr" : "Off",
+ pci_name(client->pdev));
+ i++;
}
mutex_unlock(&vgasr_mutex);
return 0;
@@ -197,7 +265,7 @@ static int vga_switchon(struct vga_switcheroo_client *client)
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
- client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
+ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
client->pwr_state = VGA_SWITCHEROO_ON;
return 0;
}
@@ -205,34 +273,37 @@ static int vga_switchon(struct vga_switcheroo_client *client)
static int vga_switchoff(struct vga_switcheroo_client *client)
{
/* call the driver callback to turn off device */
- client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
+ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
client->pwr_state = VGA_SWITCHEROO_OFF;
return 0;
}
+static void set_audio_state(int id, int state)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO);
+ if (client && client->pwr_state != state) {
+ client->ops->set_gpu_state(client->pdev, state);
+ client->pwr_state = state;
+ }
+}
+
/* stage one happens before delay */
static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
{
- int i;
- struct vga_switcheroo_client *active = NULL;
+ struct vga_switcheroo_client *active;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active == true) {
- active = &vgasr_priv.clients[i];
- break;
- }
- }
+ active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(new_client);
- /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
- active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
- new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+ vga_set_default_device(new_client->pdev);
return 0;
}
@@ -240,20 +311,16 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
{
int ret;
- int i;
- struct vga_switcheroo_client *active = NULL;
+ struct vga_switcheroo_client *active;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active == true) {
- active = &vgasr_priv.clients[i];
- break;
- }
- }
+ active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
active->active = false;
+ set_audio_state(active->id, VGA_SWITCHEROO_OFF);
+
if (new_client->fb_info) {
struct fb_event event;
event.info = new_client->fb_info;
@@ -264,23 +331,38 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (ret)
return ret;
- if (new_client->reprobe)
- new_client->reprobe(new_client->pdev);
+ if (new_client->ops->reprobe)
+ new_client->ops->reprobe(new_client->pdev);
if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);
+ set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
new_client->active = true;
return 0;
}
+static bool check_can_switch(void)
+{
+ struct vga_switcheroo_client *client;
+
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (!client->ops->can_switch(client->pdev)) {
+ printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+ return false;
+ }
+ }
+ return true;
+}
+
static ssize_t
vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char usercmd[64];
const char *pdev_name;
- int i, ret;
+ int ret;
bool delay = false, can_switch;
bool just_mux = false;
int client_id = -1;
@@ -301,21 +383,23 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr off the device not in use */
if (strncmp(usercmd, "OFF", 3) == 0) {
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active)
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->active || client_is_audio(client))
continue;
- if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
- vga_switchoff(&vgasr_priv.clients[i]);
+ set_audio_state(client->id, VGA_SWITCHEROO_OFF);
+ if (client->pwr_state == VGA_SWITCHEROO_ON)
+ vga_switchoff(client);
}
goto out;
}
/* pwr on the device not in use */
if (strncmp(usercmd, "ON", 2) == 0) {
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active)
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->active || client_is_audio(client))
continue;
- if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
- vga_switchon(&vgasr_priv.clients[i]);
+ if (client->pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(client);
+ set_audio_state(client->id, VGA_SWITCHEROO_ON);
}
goto out;
}
@@ -348,13 +432,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (client_id == -1)
goto out;
-
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].id == client_id) {
- client = &vgasr_priv.clients[i];
- break;
- }
- }
+ client = find_client_from_id(&vgasr_priv.clients, client_id);
+ if (!client)
+ goto out;
vgasr_priv.delayed_switch_active = false;
@@ -363,23 +443,16 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
goto out;
}
- if (client->active == true)
+ if (client->active)
goto out;
/* okay we want a switch - test if devices are willing to switch */
- can_switch = true;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
- if (can_switch == false) {
- printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
- break;
- }
- }
+ can_switch = check_can_switch();
if (can_switch == false && delay == false)
goto out;
- if (can_switch == true) {
+ if (can_switch) {
pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage1(client);
if (ret)
@@ -451,10 +524,8 @@ fail:
int vga_switcheroo_process_delayed_switch(void)
{
- struct vga_switcheroo_client *client = NULL;
+ struct vga_switcheroo_client *client;
const char *pdev_name;
- bool can_switch = true;
- int i;
int ret;
int err = -EINVAL;
@@ -464,17 +535,9 @@ int vga_switcheroo_process_delayed_switch(void)
printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
- client = &vgasr_priv.clients[i];
- can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
- if (can_switch == false) {
- printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
- break;
- }
- }
-
- if (can_switch == false || client == NULL)
+ client = find_client_from_id(&vgasr_priv.clients,
+ vgasr_priv.delayed_client_id);
+ if (!client || !check_can_switch())
goto err;
pdev_name = pci_name(client->pdev);
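
Audio clients reuse the id field above: ID_BIT_AUDIO (bit 8) marks them, and client_id() masks it off to recover the VGA client they belong to. A stand-alone sketch of that encoding, with the macros adapted to take a raw id instead of a client pointer and a hypothetical VGA client id of 1.

#include <stdio.h>

/* Adapted from the vga_switcheroo macros above, operating on raw ids. */
#define ID_BIT_AUDIO		0x100
#define id_is_audio(id)		((id) & ID_BIT_AUDIO)
#define id_is_vga(id)		((id) == -1 || !id_is_audio(id))
#define id_strip_audio(id)	((id) & ~ID_BIT_AUDIO)

int main(void)
{
	int vga = 1;				/* hypothetical VGA client id */
	int audio = vga | ID_BIT_AUDIO;		/* its HDMI audio function */

	printf("audio? %d / %d, paired vga id: %d\n",
	       id_is_audio(vga) != 0, id_is_audio(audio) != 0,
	       id_strip_audio(audio));
	return 0;
}
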
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 111d956d8e7d..3df8fc0ec01a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -136,6 +136,13 @@ struct pci_dev *vga_default_device(void)
{
return vga_default;
}
+
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+ vga_default = pdev;
+}
#endif
static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
@@ -605,10 +612,12 @@ static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
goto bail;
}
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
if (vga_default == pdev) {
pci_dev_put(vga_default);
vga_default = NULL;
}
+#endif
if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
vga_decode_count--;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index bcaf3fa79e93..00445bc3019c 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1,20 +1,11 @@
#
# HID driver configuration
#
-menuconfig HID_SUPPORT
- bool "HID Devices"
- depends on INPUT
- default y
- ---help---
- Say Y here to get to see options for various computer-human interface
- device drivers. This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if HID_SUPPORT
+menu "HID support"
+ depends on INPUT
config HID
- tristate "Generic HID support"
+ tristate "HID bus support"
depends on INPUT
default y
---help---
@@ -23,14 +14,17 @@ config HID
most commonly used to refer to the USB-HID specification, but other
devices (such as, but not strictly limited to, Bluetooth) are
designed using HID specification (this involves certain keyboards,
- mice, tablets, etc). This option compiles into kernel the generic
- HID layer code (parser, usages, etc.), which can then be used by
- transport-specific HID implementation (like USB or Bluetooth).
+ mice, tablets, etc). This option adds the HID bus to the kernel,
+ together with generic HID layer code. The HID devices are added and
+ removed from the HID bus by the transport-layer drivers, such as
+ usbhid (USB_HID) and hidp (BT_HIDP).
For docs and specs, see http://www.usb.org/developers/hidpage/
If unsure, say Y.
+if HID
+
config HID_BATTERY_STRENGTH
bool "Battery level reporting for HID devices"
depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
@@ -59,23 +53,22 @@ config HIDRAW
If unsure, say Y.
-source "drivers/hid/usbhid/Kconfig"
-
-menu "Special HID drivers"
- depends on HID
-
config HID_GENERIC
tristate "Generic HID driver"
depends on HID
- default y
+ default HID
---help---
- Support for generic HID devices.
+ Support for generic devices on the HID bus. This includes most
+ keyboards and mice, joysticks, tablets and digitizers.
To compile this driver as a module, choose M here: the module
will be called hid-generic.
If unsure, say Y.
+menu "Special HID drivers"
+ depends on HID
+
config HID_A4TECH
tristate "A4 tech mice" if EXPERT
depends on USB_HID
@@ -200,10 +193,12 @@ config HID_EZKEY
Support for Ezkey BTC 8193 keyboard.
config HID_HOLTEK
- tristate "Holtek On Line Grip based game controller support"
+ tristate "Holtek HID devices"
depends on USB_HID
---help---
- Say Y here if you have a Holtek On Line Grip based game controller.
+ Support for Holtek based devices:
+ - Holtek On Line Grip based game controller
+ - Trust GXT 18 Gaming Keyboard
config HOLTEK_FF
bool "Holtek On Line Grip force feedback support"
@@ -268,6 +263,19 @@ config HID_LCPOWER
---help---
Support for LC-Power RC1000MCE RF remote control.
+config HID_LENOVO_TPKBD
+ tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
+ depends on USB_HID
+ select NEW_LEDS
+ select LEDS_CLASS
+ ---help---
+ Support for the Lenovo ThinkPad USB Keyboard with TrackPoint.
+
+ Say Y here if you have a Lenovo ThinkPad USB Keyboard with TrackPoint
+ and would like to use device-specific features like changing the
+ sensitivity of the trackpoint, using the microphone mute button or
+ controlling the mute and microphone mute LEDs.
+
config HID_LOGITECH
tristate "Logitech devices" if EXPERT
depends on USB_HID
@@ -471,7 +479,7 @@ config HID_PICOLCD_FB
select FB_SYS_FOPS
---help---
Provide access to PicoLCD's 256x64 monochrome display via a
- frambuffer device.
+ framebuffer device.
config HID_PICOLCD_BACKLIGHT
bool "Backlight control" if EXPERT
@@ -663,4 +671,8 @@ config HID_ZYDACRON
endmenu
-endif # HID_SUPPORT
+endif # HID
+
+source "drivers/hid/usbhid/Kconfig"
+
+endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index ca6cc9f0485c..02fa93896951 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -48,12 +48,14 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
+obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
+obj-$(CONFIG_HID_LENOVO_TPKBD) += hid-lenovo-tpkbd.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
@@ -69,7 +71,8 @@ obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
- hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o
+ hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o \
+ hid-roccat-savu.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index fa10f847f7db..585344b6d338 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -517,6 +517,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index b99af346fdff..a2abb8e15727 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8e3a6b261477..500844f04f93 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1194,8 +1194,10 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
goto out;
}
- for (a = 0; a < report->maxfield; a++)
- hid_input_field(hid, report->field[a], cdata, interrupt);
+ if (hid->claimed != HID_CLAIMED_HIDRAW) {
+ for (a = 0; a < report->maxfield; a++)
+ hid_input_field(hid, report->field[a], cdata, interrupt);
+ }
if (hid->claimed & HID_CLAIMED_INPUT)
hidinput_report_event(hid, report);
@@ -1243,6 +1245,10 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
goto unlock;
}
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (list_empty(&hid->debug_list))
+ goto nomem;
+
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
if (!buf)
@@ -1373,8 +1379,10 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
hdev->claimed |= HID_CLAIMED_HIDRAW;
- if (!hdev->claimed) {
- hid_err(hdev, "claimed by neither input, hiddev nor hidraw\n");
+ /* Drivers with the ->raw_event callback set are not required to connect
+ * to any other listener. */
+ if (!hdev->claimed && !hdev->driver->raw_event) {
+ hid_err(hdev, "device has no listeners, quitting\n");
return -ENODEV;
}
@@ -1503,6 +1511,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1518,10 +1529,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
@@ -1536,6 +1549,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
@@ -1544,6 +1558,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1617,6 +1632,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
@@ -1880,6 +1896,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
@@ -1994,6 +2011,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2088,6 +2106,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 2f0be4c66af7..9e43aaca9774 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
.driver_data = CP_2WHEEL_MOUSE_HACK },
{ }
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
new file mode 100644
index 000000000000..e0a5d1739fc3
--- /dev/null
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -0,0 +1,183 @@
+/*
+ * HID driver for Holtek keyboard
+ * Copyright (c) 2012 Tom Harwood
+*/
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+
+/* Holtek based keyboards (USB ID 04d9:a055) have the following issues:
+ * - The report descriptor specifies an excessively large number of consumer
+ * usages (2^15), which is more than HID_MAX_USAGES. This prevents proper
+ * parsing of the report descriptor.
+ * - The report descriptor reports on caps/scroll/num lock key presses, but
+ * doesn't have an LED output usage block.
+ *
+ * The replacement descriptor below fixes the number of consumer usages,
+ * and provides an LED output usage block. LED output events are redirected
+ * to the boot interface.
+ */
+
+static __u8 holtek_kbd_rdesc_fixed[] = {
+ /* Original report descriptor, with reduced number of consumer usages */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x80, /* Usage (Sys Control), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x19, 0x81, /* Usage Minimum (Sys Power Down), */
+ 0x29, 0x83, /* Usage Maximum (Sys Wake Up), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x05, /* Report Size (5), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0C, /* Usage Page (Consumer), */
+ 0x09, 0x01, /* Usage (Consumer Control), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x19, 0x00, /* Usage Minimum (00h), */
+ 0x2A, 0xFF, 0x2F, /* Usage Maximum (0x2FFF), previously 0x7FFF */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x2F, /* Logical Maximum (0x2FFF), previously 0x7FFF */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x81, 0x00, /* Input, */
+ 0xC0, /* End Collection, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x95, 0x38, /* Report Count (56), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
+ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
+ 0x19, 0x00, /* Usage Minimum (None), */
+ 0x29, 0x2F, /* Usage Maximum (KB Lboxbracket And Lbrace),*/
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x95, 0x38, /* Report Count (56), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0x30, /* Usage Minimum (KB Rboxbracket And Rbrace),*/
+ 0x29, 0x67, /* Usage Maximum (KP Equals), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection */
+
+ /* LED usage for the boot protocol interface */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x05, 0x08, /* Usage Page (LED), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x03, /* Usage Maximum (03h), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x95, 0x05, /* Report Count (5), */
+ 0x91, 0x01, /* Output (Constant), */
+ 0xC0, /* End Collection */
+};
+
+static __u8 *holtek_kbd_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
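+ /* Only interface 1 carries the broken descriptor; interface 0 is left untouched */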
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ rdesc = holtek_kbd_rdesc_fixed;
+ *rsize = sizeof(holtek_kbd_rdesc_fixed);
+ }
+ return rdesc;
+}
+
+static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code,
+ int value)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct usb_device *usb_dev = hid_to_usb_dev(hid);
+
+ /* Locate the boot interface, to receive the LED change events */
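+ /* The boot interface (interface 0) is assumed to always be present */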
+ struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+
+ struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
+ struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+ struct hid_input, list);
+
+ return boot_hid_input->input->event(boot_hid_input->input, type, code,
+ value);
+}
+
+static int holtek_kbd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ int ret = hid_parse(hdev);
+
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
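+ /* On interface 1, take over the input event handler so LED changes reach the boot interface */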
+ if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ struct hid_input *hidinput;
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+ hidinput->input->event = holtek_kbd_input_event;
+ }
+ }
+
+ return ret;
+}
+
+static const struct hid_device_id holtek_kbd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, holtek_kbd_devices);
+
+static struct hid_driver holtek_kbd_driver = {
+ .name = "holtek_kbd",
+ .id_table = holtek_kbd_devices,
+ .report_fixup = holtek_kbd_report_fixup,
+ .probe = holtek_kbd_probe
+};
+
+static int __init holtek_kbd_init(void)
+{
+ return hid_register_driver(&holtek_kbd_driver);
+}
+
+static void __exit holtek_kbd_exit(void)
+{
+ hid_unregister_driver(&holtek_kbd_driver);
+}
+
+module_init(holtek_kbd_init);
+module_exit(holtek_kbd_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c77bfdde94ed..41c34f21bd00 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -125,6 +125,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -160,6 +163,9 @@
#define USB_VENDOR_ID_AVERMEDIA 0x07ca
#define USB_DEVICE_ID_AVER_FM_MR800 0xb800
+#define USB_VENDOR_ID_AXENTIA 0x12cf
+#define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111
+
#define USB_VENDOR_ID_BAANTO 0x2453
#define USB_DEVICE_ID_BAANTO_MT_190W2 0x0100
@@ -202,6 +208,7 @@
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
+#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
#define USB_VENDOR_ID_CHUNGHWAT 0x2247
#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
@@ -231,6 +238,7 @@
#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
#define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
+#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
@@ -404,6 +412,9 @@
#define USB_VENDOR_ID_HOLTEK 0x1241
#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015
+#define USB_VENDOR_ID_HOLTEK_ALT 0x04d9
+#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
+
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -473,6 +484,9 @@
#define USB_DEVICE_ID_LD_HYBRID 0x2090
#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
+#define USB_VENDOR_ID_LENOVO 0x17ef
+#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
+
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
@@ -515,6 +529,9 @@
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
+#define USB_VENDOR_ID_MADCATZ 0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
@@ -644,6 +661,7 @@
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
+#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
@@ -653,6 +671,9 @@
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
+#define USB_VENDOR_ID_SENNHEISER 0x1395
+#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
+
#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 132b0019365e..811bfad64609 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -301,6 +301,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{}
};
@@ -834,6 +837,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
break;
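+ /* HP vendor-defined usage page: map its brightness usages to the standard keys */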
+ case HID_UP_HPVENDOR2:
+ set_bit(EV_REP, input->evbit);
+ switch (usage->hid & HID_USAGE) {
+ case 0x003: map_key_clear(KEY_BRIGHTNESSDOWN); break;
+ case 0x004: map_key_clear(KEY_BRIGHTNESSUP); break;
+ default: goto ignore;
+ }
+ break;
+
case HID_UP_MSVENDOR:
goto ignore;
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
new file mode 100644
index 000000000000..77d2df04c97b
--- /dev/null
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -0,0 +1,564 @@
+/*
+ * HID driver for Lenovo ThinkPad USB Keyboard with TrackPoint
+ *
+ * Copyright (c) 2012 Bernhard Seibold
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include "usbhid/usbhid.h"
+
+#include "hid-ids.h"
+
+/* This is only used for the trackpoint part of the driver, hence _tp */
+struct tpkbd_data_pointer {
+ int led_state;
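+ /* bit 0: mute LED, bit 1: micmute LED */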
+ struct led_classdev led_mute;
+ struct led_classdev led_micmute;
+ int press_to_select;
+ int dragging;
+ int release_to_select;
+ int select_right;
+ int sensitivity;
+ int press_speed;
+};
+
+#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int tpkbd_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ struct usbhid_device *uhdev;
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
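+ /* button usage 0x10 on interface 1 is the dedicated mic mute key */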
+ if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ map_key_clear(KEY_MICMUTE);
+ return 1;
+ }
+ return 0;
+}
+
+#undef map_key_clear
+
+static int tpkbd_features_set(struct hid_device *hdev)
+{
+ struct hid_report *report;
+ struct tpkbd_data_pointer *data_pointer;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4];
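+ /* feature report 4 carries the TrackPoint/pointer settings */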
+
+ report->field[0]->value[0] = data_pointer->press_to_select ? 0x01 : 0x02;
+ report->field[0]->value[0] |= data_pointer->dragging ? 0x04 : 0x08;
+ report->field[0]->value[0] |= data_pointer->release_to_select ? 0x10 : 0x20;
+ report->field[0]->value[0] |= data_pointer->select_right ? 0x80 : 0x40;
+ report->field[1]->value[0] = 0x03; /* unknown setting, imitate windows driver */
+ report->field[2]->value[0] = data_pointer->sensitivity;
+ report->field[3]->value[0] = data_pointer->press_speed;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ return 0;
+}
+
+static ssize_t pointer_press_to_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
+}
+
+static ssize_t pointer_press_to_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->press_to_select = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_dragging_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
+}
+
+static ssize_t pointer_dragging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->dragging = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_release_to_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
+}
+
+static ssize_t pointer_release_to_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->release_to_select = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_select_right_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
+}
+
+static ssize_t pointer_select_right_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->select_right = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_sensitivity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ data_pointer->sensitivity);
+}
+
+static ssize_t pointer_sensitivity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ data_pointer->sensitivity = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_press_speed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ data_pointer->press_speed);
+}
+
+static ssize_t pointer_press_speed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ data_pointer->press_speed = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_pointer_press_to_select =
+ __ATTR(press_to_select, S_IWUSR | S_IRUGO,
+ pointer_press_to_select_show,
+ pointer_press_to_select_store);
+
+static struct device_attribute dev_attr_pointer_dragging =
+ __ATTR(dragging, S_IWUSR | S_IRUGO,
+ pointer_dragging_show,
+ pointer_dragging_store);
+
+static struct device_attribute dev_attr_pointer_release_to_select =
+ __ATTR(release_to_select, S_IWUSR | S_IRUGO,
+ pointer_release_to_select_show,
+ pointer_release_to_select_store);
+
+static struct device_attribute dev_attr_pointer_select_right =
+ __ATTR(select_right, S_IWUSR | S_IRUGO,
+ pointer_select_right_show,
+ pointer_select_right_store);
+
+static struct device_attribute dev_attr_pointer_sensitivity =
+ __ATTR(sensitivity, S_IWUSR | S_IRUGO,
+ pointer_sensitivity_show,
+ pointer_sensitivity_store);
+
+static struct device_attribute dev_attr_pointer_press_speed =
+ __ATTR(press_speed, S_IWUSR | S_IRUGO,
+ pointer_press_speed_show,
+ pointer_press_speed_store);
+
+static struct attribute *tpkbd_attributes_pointer[] = {
+ &dev_attr_pointer_press_to_select.attr,
+ &dev_attr_pointer_dragging.attr,
+ &dev_attr_pointer_release_to_select.attr,
+ &dev_attr_pointer_select_right.attr,
+ &dev_attr_pointer_sensitivity.attr,
+ &dev_attr_pointer_press_speed.attr,
+ NULL
+};
+
+static const struct attribute_group tpkbd_attr_group_pointer = {
+ .attrs = tpkbd_attributes_pointer,
+};
+
+static enum led_brightness tpkbd_led_brightness_get(
+ struct led_classdev *led_cdev)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int led_nr = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (led_cdev == &data_pointer->led_micmute)
+ led_nr = 1;
+
+ return data_pointer->led_state & (1 << led_nr)
+ ? LED_FULL
+ : LED_OFF;
+}
+
+static void tpkbd_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct hid_report *report;
+ struct tpkbd_data_pointer *data_pointer;
+ int led_nr = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (led_cdev == &data_pointer->led_micmute)
+ led_nr = 1;
+
+ if (value == LED_OFF)
+ data_pointer->led_state &= ~(1 << led_nr);
+ else
+ data_pointer->led_state |= 1 << led_nr;
+
+ report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3];
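+ /* output report 3 carries the mute (bit 0) and micmute (bit 1) LED states */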
+ report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1;
+ report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1;
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+}
+
+static int tpkbd_probe_tp(struct hid_device *hdev)
+{
+ struct device *dev = &hdev->dev;
+ struct tpkbd_data_pointer *data_pointer;
+ size_t name_sz = strlen(dev_name(dev)) + 16;
+ char *name_mute, *name_micmute;
+ int ret;
+
+ if (sysfs_create_group(&hdev->dev.kobj,
+ &tpkbd_attr_group_pointer)) {
+ hid_warn(hdev, "Could not create sysfs group\n");
+ }
+
+ data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
+ if (data_pointer == NULL) {
+ hid_err(hdev, "Could not allocate memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ /* set same default values as windows driver */
+ data_pointer->sensitivity = 0xa0;
+ data_pointer->press_speed = 0x38;
+
+ name_mute = kzalloc(name_sz, GFP_KERNEL);
+ if (name_mute == NULL) {
+ hid_err(hdev, "Could not allocate memory for led data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
+
+ name_micmute = kzalloc(name_sz, GFP_KERNEL);
+ if (name_micmute == NULL) {
+ hid_err(hdev, "Could not allocate memory for led data\n");
+ ret = -ENOMEM;
+ goto err2;
+ }
+ snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
+
+ hid_set_drvdata(hdev, data_pointer);
+
+ data_pointer->led_mute.name = name_mute;
+ data_pointer->led_mute.brightness_get = tpkbd_led_brightness_get;
+ data_pointer->led_mute.brightness_set = tpkbd_led_brightness_set;
+ data_pointer->led_mute.dev = dev;
+ led_classdev_register(dev, &data_pointer->led_mute);
+
+ data_pointer->led_micmute.name = name_micmute;
+ data_pointer->led_micmute.brightness_get = tpkbd_led_brightness_get;
+ data_pointer->led_micmute.brightness_set = tpkbd_led_brightness_set;
+ data_pointer->led_micmute.dev = dev;
+ led_classdev_register(dev, &data_pointer->led_micmute);
+
+ tpkbd_features_set(hdev);
+
+ return 0;
+
+err2:
+ kfree(name_mute);
+err:
+ kfree(data_pointer);
+ return ret;
+}
+
+static int tpkbd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct usbhid_device *uhdev;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid_parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hid_hw_start failed\n");
+ goto err_free;
+ }
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
+
+ if (uhdev->ifnum == 1)
+ return tpkbd_probe_tp(hdev);
+
+ return 0;
+err_free:
+ return ret;
+}
+
+static void tpkbd_remove_tp(struct hid_device *hdev)
+{
+ struct tpkbd_data_pointer *data_pointer;
+
+ sysfs_remove_group(&hdev->dev.kobj,
+ &tpkbd_attr_group_pointer);
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ led_classdev_unregister(&data_pointer->led_micmute);
+ led_classdev_unregister(&data_pointer->led_mute);
+
+ hid_set_drvdata(hdev, NULL);
+ kfree(data_pointer);
+}
+
+static void tpkbd_remove(struct hid_device *hdev)
+{
+ struct usbhid_device *uhdev;
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
+ if (uhdev->ifnum == 1)
+ tpkbd_remove_tp(hdev);
+
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id tpkbd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, tpkbd_devices);
+
+static struct hid_driver tpkbd_driver = {
+ .name = "lenovo_tpkbd",
+ .id_table = tpkbd_devices,
+ .input_mapping = tpkbd_input_mapping,
+ .probe = tpkbd_probe,
+ .remove = tpkbd_remove,
+};
+
+static int __init tpkbd_init(void)
+{
+ return hid_register_driver(&tpkbd_driver);
+}
+
+static void __exit tpkbd_exit(void)
+{
+ hid_unregister_driver(&tpkbd_driver);
+}
+
+module_init(tpkbd_init);
+module_exit(tpkbd_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 5e8a7ed42344..0f9c146fc00d 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -436,27 +436,37 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
{
- struct dj_report dj_report;
+ struct dj_report *dj_report;
+ int retval;
- memset(&dj_report, 0, sizeof(dj_report));
- dj_report.report_id = REPORT_ID_DJ_SHORT;
- dj_report.device_index = 0xFF;
- dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
+ if (!dj_report)
+ return -ENOMEM;
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+ kfree(dj_report);
+ return retval;
}
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
- struct dj_report dj_report;
+ struct dj_report *dj_report;
+ int retval;
- memset(&dj_report, 0, sizeof(dj_report));
- dj_report.report_id = REPORT_ID_DJ_SHORT;
- dj_report.device_index = 0xFF;
- dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
- dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
- dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
+ if (!dj_report)
+ return -ENOMEM;
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+ dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+ kfree(dj_report);
+ return retval;
}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 7cf3ffe4b7bc..73647266daad 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -48,10 +49,6 @@ static bool scroll_acceleration = false;
module_param(scroll_acceleration, bool, 0644);
MODULE_PARM_DESC(scroll_acceleration, "Accelerate sequential scroll events");
-static bool report_touches = true;
-module_param(report_touches, bool, 0644);
-MODULE_PARM_DESC(report_touches, "Emit touch records (otherwise, only use them for emulation)");
-
static bool report_undeciphered;
module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
@@ -72,15 +69,6 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define SCROLL_ACCEL_DEFAULT 7
-/* Single touch emulation should only begin when no touches are currently down.
- * This is true when single_touch_id is equal to NO_TOUCHES. If multiple touches
- * are down and the touch providing for single touch emulation is lifted,
- * single_touch_id is equal to SINGLE_TOUCH_UP. While single touch emulation is
- * occurring, single_touch_id corresponds with the tracking id of the touch used.
- */
-#define NO_TOUCHES -1
-#define SINGLE_TOUCH_UP -2
-
/* Touch surface information. Dimension is in hundredths of a mm, min and max
* are in units. */
#define MOUSE_DIMENSION_X (float)9056
@@ -129,7 +117,6 @@ struct magicmouse_sc {
u8 size;
} touches[16];
int tracking_ids[16];
- int single_touch_id;
};
static int magicmouse_firm_touch(struct magicmouse_sc *msc)
@@ -268,16 +255,14 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
}
}
- if (down) {
+ if (down)
msc->ntouches++;
- if (msc->single_touch_id == NO_TOUCHES)
- msc->single_touch_id = id;
- } else if (msc->single_touch_id == id)
- msc->single_touch_id = SINGLE_TOUCH_UP;
+
+ input_mt_slot(input, id);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, down);
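+ /* one MT slot per hardware touch id; slot state replaces the old MT sync reporting */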
/* Generate the input events for this touch. */
- if (report_touches && down) {
- input_report_abs(input, ABS_MT_TRACKING_ID, id);
+ if (down) {
input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2);
input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2);
input_report_abs(input, ABS_MT_ORIENTATION, -orientation);
@@ -290,8 +275,6 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
-
- input_mt_sync(input);
}
}
@@ -312,12 +295,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
- /* We don't need an MT sync here because trackpad emits a
- * BTN_TOUCH event in a new frame when all touches are released.
- */
- if (msc->ntouches == 0)
- msc->single_touch_id = NO_TOUCHES;
-
clicks = data[1];
/* The following bits provide a device specific timestamp. They
@@ -335,9 +312,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
- if (report_touches && msc->ntouches == 0)
- input_mt_sync(input);
-
/* When emulating three-button mode, it is important
* to have the current touch information before
* generating a click event.
@@ -370,25 +344,17 @@ static int magicmouse_raw_event(struct hid_device *hdev,
input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
- input_report_key(input, BTN_TOUCH, msc->ntouches > 0);
- input_report_key(input, BTN_TOOL_FINGER, msc->ntouches == 1);
- input_report_key(input, BTN_TOOL_DOUBLETAP, msc->ntouches == 2);
- input_report_key(input, BTN_TOOL_TRIPLETAP, msc->ntouches == 3);
- input_report_key(input, BTN_TOOL_QUADTAP, msc->ntouches == 4);
- if (msc->single_touch_id >= 0) {
- input_report_abs(input, ABS_X,
- msc->touches[msc->single_touch_id].x);
- input_report_abs(input, ABS_Y,
- msc->touches[msc->single_touch_id].y);
- }
+ input_mt_report_pointer_emulation(input, true);
}
input_sync(input);
return 1;
}
-static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
+static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
+ int error;
+
__set_bit(EV_KEY, input->evbit);
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
@@ -417,60 +383,66 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
__set_bit(BTN_TOOL_QUADTAP, input->keybit);
+ __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
__set_bit(BTN_TOUCH, input->keybit);
__set_bit(INPUT_PROP_POINTER, input->propbit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
- if (report_touches) {
- __set_bit(EV_ABS, input->evbit);
- input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
+ __set_bit(EV_ABS, input->evbit);
- /* Note: Touch Y position from the device is inverted relative
- * to how pointer motion is reported (and relative to how USB
- * HID recommends the coordinates work). This driver keeps
- * the origin at the same position, and just uses the additive
- * inverse of the reported Y.
- */
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
- input_set_abs_params(input, ABS_MT_POSITION_X,
- MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y,
- MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
-
- input_abs_set_res(input, ABS_MT_POSITION_X,
- MOUSE_RES_X);
- input_abs_set_res(input, ABS_MT_POSITION_Y,
- MOUSE_RES_Y);
- } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
- input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
- TRACKPAD_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
- TRACKPAD_MAX_Y, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_X,
- TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y,
- TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
-
- input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
- input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
- input_abs_set_res(input, ABS_MT_POSITION_X,
- TRACKPAD_RES_X);
- input_abs_set_res(input, ABS_MT_POSITION_Y,
- TRACKPAD_RES_Y);
- }
+ error = input_mt_init_slots(input, 16);
+ if (error)
+ return error;
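+ /* 16 slots matches the touches[16] bookkeeping in struct magicmouse_sc */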
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
+ 4, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
+ 4, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
- input_set_events_per_packet(input, 60);
+ /* Note: Touch Y position from the device is inverted relative
+ * to how pointer motion is reported (and relative to how USB
+ * HID recommends the coordinates work). This driver keeps
+ * the origin at the same position, and just uses the additive
+ * inverse of the reported Y.
+ */
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ MOUSE_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ MOUSE_RES_Y);
+ } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+ TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+ TRACKPAD_MAX_Y, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ TRACKPAD_RES_Y);
}
+ input_set_events_per_packet(input, 60);
+
if (report_undeciphered) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
+
+ return 0;
}
static int magicmouse_input_mapping(struct hid_device *hdev,
@@ -509,8 +481,6 @@ static int magicmouse_probe(struct hid_device *hdev,
msc->quirks = id->driver_data;
hid_set_drvdata(hdev, msc);
- msc->single_touch_id = NO_TOUCHES;
-
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "magicmouse hid parse failed\n");
@@ -526,8 +496,13 @@ static int magicmouse_probe(struct hid_device *hdev,
/* We do this after hid-input is done parsing reports so that
* hid-input uses the most natural button and axis IDs.
*/
- if (msc->input)
- magicmouse_setup_input(msc->input, hdev);
+ if (msc->input) {
+ ret = magicmouse_setup_input(msc->input, hdev);
+ if (ret) {
+ hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
+ goto err_stop_hw;
+ }
+ }
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 45c3433f7986..27c8ebdfad01 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -1846,7 +1846,7 @@ static void picolcd_debug_out_report(struct picolcd_data *data,
#define BUFF_SZ 256
/* Avoid unnecessary overhead if debugfs is disabled */
- if (!hdev->debug_events)
+ if (list_empty(&hdev->debug_list))
return;
buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
@@ -2613,11 +2613,7 @@ static int picolcd_probe(struct hid_device *hdev,
goto err_cleanup_data;
}
- /* We don't use hidinput but hid_hw_start() fails if nothing is
- * claimed. So spoof claimed input. */
- hdev->claimed = HID_CLAIMED_INPUT;
error = hid_hw_start(hdev, 0);
- hdev->claimed = 0;
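+ /* spoofing ->claimed is no longer needed: hid_connect() now tolerates raw_event-only drivers */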
if (error) {
hid_err(hdev, "hardware start failed\n");
goto err_cleanup_data;
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index 093bfad00b02..327f9b8ed1f4 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -39,7 +39,7 @@ static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_MODE_KEY,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -67,7 +67,7 @@ static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
temp_buf.state = state;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_MODE_KEY,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -87,7 +87,7 @@ static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_KEY_MASK,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -115,7 +115,7 @@ static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
temp_buf.key_mask = key_mask;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_KEY_MASK,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -130,7 +130,7 @@ static int arvo_get_actual_profile(struct usb_device *usb_dev)
struct arvo_actual_profile temp_buf;
int retval;
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (retval)
@@ -170,7 +170,7 @@ static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
temp_buf.actual_profile = profile;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (!retval) {
arvo->actual_profile = profile;
@@ -194,7 +194,7 @@ static ssize_t arvo_sysfs_write(struct file *fp,
return -EINVAL;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, command, buf, real_size);
+ retval = roccat_common2_send(usb_dev, command, buf, real_size);
mutex_unlock(&arvo->arvo_lock);
return (retval ? retval : real_size);
@@ -217,7 +217,7 @@ static ssize_t arvo_sysfs_read(struct file *fp,
return -EINVAL;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, command, buf, real_size);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&arvo->arvo_lock);
return (retval ? retval : real_size);
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index a6d93992c75a..74f704032627 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -16,12 +16,12 @@
#include <linux/module.h>
#include "hid-roccat-common.h"
-static inline uint16_t roccat_common_feature_report(uint8_t report_id)
+static inline uint16_t roccat_common2_feature_report(uint8_t report_id)
{
return 0x300 | report_id;
}
-int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size)
{
char *buf;
@@ -34,16 +34,16 @@ int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
HID_REQ_GET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- roccat_common_feature_report(report_id),
+ roccat_common2_feature_report(report_id),
0, buf, size, USB_CTRL_SET_TIMEOUT);
memcpy(data, buf, size);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
-EXPORT_SYMBOL_GPL(roccat_common_receive);
+EXPORT_SYMBOL_GPL(roccat_common2_receive);
-int roccat_common_send(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size)
{
char *buf;
@@ -56,13 +56,71 @@ int roccat_common_send(struct usb_device *usb_dev, uint report_id,
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- roccat_common_feature_report(report_id),
+ roccat_common2_feature_report(report_id),
0, buf, size, USB_CTRL_SET_TIMEOUT);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
-EXPORT_SYMBOL_GPL(roccat_common_send);
+EXPORT_SYMBOL_GPL(roccat_common2_send);
+
+enum roccat_common2_control_states {
+ ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD = 0,
+ ROCCAT_COMMON_CONTROL_STATUS_OK = 1,
+ ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2,
+ ROCCAT_COMMON_CONTROL_STATUS_WAIT = 3,
+};
+
+static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct roccat_common2_control control;
+
+ do {
+ msleep(50);
+ retval = roccat_common2_receive(usb_dev,
+ ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
+
+ if (retval)
+ return retval;
+
+ switch (control.value) {
+ case ROCCAT_COMMON_CONTROL_STATUS_OK:
+ return 0;
+ case ROCCAT_COMMON_CONTROL_STATUS_WAIT:
+ msleep(500);
+ continue;
+ case ROCCAT_COMMON_CONTROL_STATUS_INVALID:
+
+ case ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD:
+ /* seems to be critical - replug necessary */
+ return -EINVAL;
+ default:
+ dev_err(&usb_dev->dev,
+ "roccat_common2_receive_control_status: "
+ "unknown response value 0x%x\n",
+ control.value);
+ return -EINVAL;
+ }
+
+ } while (1);
+}
+
+int roccat_common2_send_with_status(struct usb_device *usb_dev,
+ uint command, void const *buf, uint size)
+{
+ int retval;
+
+ retval = roccat_common2_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+
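+ /* allow time to settle things - windows driver uses 500 */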
+ msleep(100);
+
+ return roccat_common2_receive_control_status(usb_dev);
+}
+EXPORT_SYMBOL_GPL(roccat_common2_send_with_status);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat common driver");
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index 9a5bc61f9699..a97746a63b70 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -15,9 +15,21 @@
#include <linux/usb.h>
#include <linux/types.h>
-int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
+enum roccat_common2_commands {
+ ROCCAT_COMMON_COMMAND_CONTROL = 0x4,
+};
+
+struct roccat_common2_control {
+ uint8_t command;
+ uint8_t value;
+ uint8_t request; /* always 0 on requesting write check */
+} __packed;
+
+int roccat_common2_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size);
-int roccat_common_send(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size);
+int roccat_common2_send_with_status(struct usb_device *usb_dev,
+ uint command, void const *buf, uint size);
#endif
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 0e4a0ab47142..5669916c2943 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -36,51 +36,7 @@ static void isku_profile_activated(struct isku_device *isku, uint new_profile)
static int isku_receive(struct usb_device *usb_dev, uint command,
void *buf, uint size)
{
- return roccat_common_receive(usb_dev, command, buf, size);
-}
-
-static int isku_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct isku_control control;
-
- do {
- msleep(50);
- retval = isku_receive(usb_dev, ISKU_COMMAND_CONTROL,
- &control, sizeof(struct isku_control));
-
- if (retval)
- return retval;
-
- switch (control.value) {
- case ISKU_CONTROL_VALUE_STATUS_OK:
- return 0;
- case ISKU_CONTROL_VALUE_STATUS_WAIT:
- continue;
- case ISKU_CONTROL_VALUE_STATUS_INVALID:
- /* seems to be critical - replug necessary */
- case ISKU_CONTROL_VALUE_STATUS_OVERLOAD:
- return -EINVAL;
- default:
- hid_err(usb_dev, "isku_receive_control_status: "
- "unknown response value 0x%x\n",
- control.value);
- return -EINVAL;
- }
-
- } while (1);
-}
-
-static int isku_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- return isku_receive_control_status(usb_dev);
+ return roccat_common2_receive(usb_dev, command, buf, size);
}
static int isku_get_actual_profile(struct usb_device *usb_dev)
@@ -100,7 +56,8 @@ static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
buf.size = sizeof(struct isku_actual_profile);
buf.actual_profile = new_profile;
- return isku_send(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
+ return roccat_common2_send_with_status(usb_dev,
+ ISKU_COMMAND_ACTUAL_PROFILE, &buf,
sizeof(struct isku_actual_profile));
}
@@ -197,7 +154,8 @@ static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&isku->isku_lock);
- retval = isku_send(usb_dev, command, (void *)buf, real_size);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ (void *)buf, real_size);
mutex_unlock(&isku->isku_lock);
return retval ? retval : real_size;
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
index 075f6efaec58..605b3ce21638 100644
--- a/drivers/hid/hid-roccat-isku.h
+++ b/drivers/hid/hid-roccat-isku.h
@@ -25,13 +25,6 @@ struct isku_control {
uint8_t request;
} __packed;
-enum isku_control_values {
- ISKU_CONTROL_VALUE_STATUS_OVERLOAD = 0,
- ISKU_CONTROL_VALUE_STATUS_OK = 1,
- ISKU_CONTROL_VALUE_STATUS_INVALID = 2,
- ISKU_CONTROL_VALUE_STATUS_WAIT = 3,
-};
-
struct isku_actual_profile {
uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 40090d602158..9ce2d0b615a4 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -138,7 +138,7 @@ static int kone_check_write(struct usb_device *usb_dev)
return 0;
/* unknown answer */
- hid_err(usb_dev, "got retval %d when checking write\n", data);
+ dev_err(&usb_dev->dev, "got retval %d when checking write\n", data);
return -EIO;
}
@@ -503,7 +503,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
retval = kone_set_settings(usb_dev, &kone->settings);
if (retval) {
- hid_err(usb_dev, "couldn't set tcu state\n");
+ dev_err(&usb_dev->dev, "couldn't set tcu state\n");
/*
* try to reread valid settings into buffer overwriting
* first error code
@@ -519,7 +519,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
retval = size;
exit_no_settings:
- hid_err(usb_dev, "couldn't read settings\n");
+ dev_err(&usb_dev->dev, "couldn't read settings\n");
exit_unlock:
mutex_unlock(&kone->kone_lock);
return retval;
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 59e47770fa10..f5602fec4865 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -39,88 +39,26 @@ static void koneplus_profile_activated(struct koneplus_device *koneplus,
static int koneplus_send_control(struct usb_device *usb_dev, uint value,
enum koneplus_control_requests request)
{
- struct koneplus_control control;
+ struct roccat_common2_control control;
if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
- control.command = KONEPLUS_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, KONEPLUS_COMMAND_CONTROL,
- &control, sizeof(struct koneplus_control));
-}
-
-static int koneplus_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct koneplus_control control;
-
- do {
- retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_CONTROL,
- &control, sizeof(struct koneplus_control));
-
- /* check if we get a completely wrong answer */
- if (retval)
- return retval;
-
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OK)
- return 0;
-
- /* indicates that hardware needs some more time to complete action */
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
- msleep(500); /* windows driver uses 1000 */
- continue;
- }
-
- /* seems to be critical - replug necessary */
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
- return -EINVAL;
-
- hid_err(usb_dev, "koneplus_receive_control_status: "
- "unknown response value 0x%x\n", control.value);
- return -EINVAL;
- } while (1);
-}
-
-static int koneplus_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- return koneplus_receive_control_status(usb_dev);
-}
-
-static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
- enum koneplus_control_requests request)
-{
- int retval;
-
- retval = koneplus_send_control(usb_dev, number, request);
- if (retval)
- return retval;
-
- /* allow time to settle things - windows driver uses 500 */
- msleep(100);
-
- retval = koneplus_receive_control_status(usb_dev);
- if (retval)
- return retval;
-
- return 0;
+ return roccat_common2_send_with_status(usb_dev,
+ ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
}
static int koneplus_get_info(struct usb_device *usb_dev,
struct koneplus_info *buf)
{
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_INFO,
buf, sizeof(struct koneplus_info));
}
@@ -129,19 +67,20 @@ static int koneplus_get_profile_settings(struct usb_device *usb_dev,
{
int retval;
- retval = koneplus_select_profile(usb_dev, number,
+ retval = koneplus_send_control(usb_dev, number,
KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct koneplus_profile_settings));
}
static int koneplus_set_profile_settings(struct usb_device *usb_dev,
struct koneplus_profile_settings const *settings)
{
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct koneplus_profile_settings));
}
@@ -150,19 +89,20 @@ static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
{
int retval;
- retval = koneplus_select_profile(usb_dev, number,
+ retval = koneplus_send_control(usb_dev, number,
KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct koneplus_profile_buttons));
}
static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
struct koneplus_profile_buttons const *buttons)
{
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct koneplus_profile_buttons));
}
@@ -172,7 +112,7 @@ static int koneplus_get_actual_profile(struct usb_device *usb_dev)
struct koneplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -187,7 +127,8 @@ static int koneplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct koneplus_actual_profile);
buf.actual_profile = new_profile;
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
}
@@ -208,7 +149,7 @@ static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = roccat_common_receive(usb_dev, command, buf, real_size);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
@@ -231,7 +172,8 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = koneplus_send(usb_dev, command, buf, real_size);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
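
The koneplus path above (and kovaplus and pyra further down) drops its private write-then-poll loop in favour of roccat_common2_send_with_status(). A minimal sketch of that pattern, using hypothetical xfer_send()/xfer_receive() stand-ins rather than the real USB transfer helpers, and assuming the shared helper keeps the same OK/WAIT/OVERLOAD semantics as the removed code:

/* Sketch only: stand-ins for the real transport, not the kernel API. */
#include <errno.h>
#include <stdint.h>

enum { STATUS_OVERLOAD = 0, STATUS_OK = 1, STATUS_WAIT = 3 };

struct control_report {
	uint8_t command;	/* control register, 0x4 on these mice */
	uint8_t value;		/* status code when read back */
	uint8_t request;
};

/* hypothetical transport helpers standing in for roccat_common2_send/receive */
int xfer_send(uint8_t command, void const *buf, unsigned int size);
int xfer_receive(uint8_t command, void *buf, unsigned int size);
void sleep_ms(unsigned int ms);

/* write a report, then poll the control register until the device is done */
int send_with_status(uint8_t command, void const *buf, unsigned int size)
{
	struct control_report ctrl;
	int err = xfer_send(command, buf, size);

	if (err)
		return err;

	for (;;) {
		err = xfer_receive(0x04, &ctrl, sizeof(ctrl));
		if (err)
			return err;
		if (ctrl.value == STATUS_OK)
			return 0;
		if (ctrl.value == STATUS_WAIT) {	/* device still busy */
			sleep_ms(500);
			continue;
		}
		return -EINVAL;		/* OVERLOAD or unknown status: give up */
	}
}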
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
index c03332a4fa9a..7074b2a4b94b 100644
--- a/drivers/hid/hid-roccat-koneplus.h
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -20,32 +20,11 @@ struct koneplus_talk {
uint8_t data[14];
} __packed;
-/*
- * case 1: writes request 80 and reads value 1
- *
- */
-struct koneplus_control {
- uint8_t command; /* KONEPLUS_COMMAND_CONTROL */
- /*
- * value is profile number in range 0-4 for requesting settings and buttons
- * 1 if status ok for requesting status
- */
- uint8_t value;
- uint8_t request;
-} __attribute__ ((__packed__));
-
enum koneplus_control_requests {
- KONEPLUS_CONTROL_REQUEST_STATUS = 0x00,
KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x80,
KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x90,
};
-enum koneplus_control_values {
- KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0,
- KONEPLUS_CONTROL_REQUEST_STATUS_OK = 1,
- KONEPLUS_CONTROL_REQUEST_STATUS_WAIT = 3,
-};
-
struct koneplus_actual_profile {
uint8_t command; /* KONEPLUS_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
@@ -137,7 +116,6 @@ struct koneplus_tcu_image {
} __attribute__ ((__packed__));
enum koneplus_commands {
- KONEPLUS_COMMAND_CONTROL = 0x4,
KONEPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
KONEPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 112d934132c8..ca6527ac655d 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -47,69 +47,23 @@ static int kovaplus_send_control(struct usb_device *usb_dev, uint value,
enum kovaplus_control_requests request)
{
int retval;
- struct kovaplus_control control;
+ struct roccat_common2_control control;
if ((request == KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
- control.command = KOVAPLUS_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- retval = roccat_common_send(usb_dev, KOVAPLUS_COMMAND_CONTROL,
- &control, sizeof(struct kovaplus_control));
+ retval = roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
return retval;
}
-static int kovaplus_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct kovaplus_control control;
-
- do {
- retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_CONTROL,
- &control, sizeof(struct kovaplus_control));
-
- /* check if we get a completely wrong answer */
- if (retval)
- return retval;
-
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OK)
- return 0;
-
- /* indicates that hardware needs some more time to complete action */
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT) {
- msleep(500); /* windows driver uses 1000 */
- continue;
- }
-
- /* seems to be critical - replug necessary */
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
- return -EINVAL;
-
- hid_err(usb_dev, "roccat_common_receive_control_status: "
- "unknown response value 0x%x\n", control.value);
- return -EINVAL;
- } while (1);
-}
-
-static int kovaplus_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- msleep(100);
-
- return kovaplus_receive_control_status(usb_dev);
-}
-
static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
enum kovaplus_control_requests request)
{
@@ -119,7 +73,7 @@ static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
static int kovaplus_get_info(struct usb_device *usb_dev,
struct kovaplus_info *buf)
{
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
buf, sizeof(struct kovaplus_info));
}
@@ -133,14 +87,15 @@ static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct kovaplus_profile_settings));
}
static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings const *settings)
{
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct kovaplus_profile_settings));
}
@@ -154,14 +109,15 @@ static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct kovaplus_profile_buttons));
}
static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
struct kovaplus_profile_buttons const *buttons)
{
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct kovaplus_profile_buttons));
}
@@ -171,7 +127,7 @@ static int kovaplus_get_actual_profile(struct usb_device *usb_dev)
struct kovaplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -186,7 +142,8 @@ static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct kovaplus_actual_profile);
buf.actual_profile = new_profile;
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
}
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
index fb2aed44a8e0..f82daa1cdcb9 100644
--- a/drivers/hid/hid-roccat-kovaplus.h
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -14,27 +14,13 @@
#include <linux/types.h>
-struct kovaplus_control {
- uint8_t command; /* KOVAPLUS_COMMAND_CONTROL */
- uint8_t value;
- uint8_t request;
-} __packed;
-
enum kovaplus_control_requests {
- /* read after write; value = 1 */
- KOVAPLUS_CONTROL_REQUEST_STATUS = 0x0,
/* write; value = profile number range 0-4 */
KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
/* write; value = profile number range 0-4 */
KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20,
};
-enum kovaplus_control_values {
- KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0, /* supposed */
- KOVAPLUS_CONTROL_REQUEST_STATUS_OK = 1,
- KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT = 3, /* supposed */
-};
-
struct kovaplus_actual_profile {
uint8_t command; /* KOVAPLUS_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
@@ -75,7 +61,6 @@ struct kovaplus_a {
} __packed;
enum kovaplus_commands {
- KOVAPLUS_COMMAND_CONTROL = 0x4,
KOVAPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
KOVAPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KOVAPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index df05c1b1064f..1317c177a3e2 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -42,43 +42,19 @@ static void profile_activated(struct pyra_device *pyra,
static int pyra_send_control(struct usb_device *usb_dev, int value,
enum pyra_control_requests request)
{
- struct pyra_control control;
+ struct roccat_common2_control control;
if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == PYRA_CONTROL_REQUEST_PROFILE_BUTTONS) &&
(value < 0 || value > 4))
return -EINVAL;
- control.command = PYRA_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, PYRA_COMMAND_CONTROL,
- &control, sizeof(struct pyra_control));
-}
-
-static int pyra_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct pyra_control control;
-
- do {
- msleep(10);
- retval = roccat_common_receive(usb_dev, PYRA_COMMAND_CONTROL,
- &control, sizeof(struct pyra_control));
-
- /* requested too early, try again */
- } while (retval == -EPROTO);
-
- if (!retval && control.command == PYRA_COMMAND_CONTROL &&
- control.request == PYRA_CONTROL_REQUEST_STATUS &&
- control.value == 1)
- return 0;
- else {
- hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n",
- control.request, control.value);
- return retval ? retval : -EINVAL;
- }
+ return roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
}
static int pyra_get_profile_settings(struct usb_device *usb_dev,
@@ -89,7 +65,7 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct pyra_profile_settings));
}
@@ -101,51 +77,44 @@ static int pyra_get_profile_buttons(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct pyra_profile_buttons));
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
- return roccat_common_receive(usb_dev, PYRA_COMMAND_SETTINGS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS,
buf, sizeof(struct pyra_settings));
}
static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
{
- return roccat_common_receive(usb_dev, PYRA_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO,
buf, sizeof(struct pyra_info));
}
-static int pyra_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
- return pyra_receive_control_status(usb_dev);
-}
-
static int pyra_set_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings const *settings)
{
- return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS, settings,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_PROFILE_SETTINGS, settings,
sizeof(struct pyra_profile_settings));
}
static int pyra_set_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons const *buttons)
{
- return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS, buttons,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_PROFILE_BUTTONS, buttons,
sizeof(struct pyra_profile_buttons));
}
static int pyra_set_settings(struct usb_device *usb_dev,
struct pyra_settings const *settings)
{
- return pyra_send(usb_dev, PYRA_COMMAND_SETTINGS, settings,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_SETTINGS, settings,
sizeof(struct pyra_settings));
}
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index 0442d7fa2dcf..eada7830fa99 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -20,18 +20,7 @@ struct pyra_b {
uint8_t unknown; /* 1 */
} __attribute__ ((__packed__));
-struct pyra_control {
- uint8_t command; /* PYRA_COMMAND_CONTROL */
- /*
- * value is profile number for request_settings and request_buttons
- * 1 if status ok for request_status
- */
- uint8_t value; /* Range 0-4 */
- uint8_t request;
-} __attribute__ ((__packed__));
-
enum pyra_control_requests {
- PYRA_CONTROL_REQUEST_STATUS = 0x00,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20
};
@@ -75,7 +64,6 @@ struct pyra_info {
} __attribute__ ((__packed__));
enum pyra_commands {
- PYRA_COMMAND_CONTROL = 0x4,
PYRA_COMMAND_SETTINGS = 0x5,
PYRA_COMMAND_PROFILE_SETTINGS = 0x6,
PYRA_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
new file mode 100644
index 000000000000..014afba407e0
--- /dev/null
+++ b/drivers/hid/hid-roccat-savu.c
@@ -0,0 +1,316 @@
+/*
+ * Roccat Savu driver for Linux
+ *
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/* Roccat Savu is a gamer mouse with macro keys that can be configured in
+ * 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-savu.h"
+
+static struct class *savu_class;
+
+static ssize_t savu_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&savu->savu_lock);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&savu->savu_lock);
+
+ return retval ? retval : real_size;
+}
+
+static ssize_t savu_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&savu->savu_lock);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ (void *)buf, real_size);
+ mutex_unlock(&savu->savu_lock);
+
+ return retval ? retval : real_size;
+}
+
+#define SAVU_SYSFS_W(thingy, THINGY) \
+static ssize_t savu_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return savu_sysfs_write(fp, kobj, buf, off, count, \
+ SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
+}
+
+#define SAVU_SYSFS_R(thingy, THINGY) \
+static ssize_t savu_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return savu_sysfs_read(fp, kobj, buf, off, count, \
+ SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
+}
+
+#define SAVU_SYSFS_RW(thingy, THINGY) \
+SAVU_SYSFS_W(thingy, THINGY) \
+SAVU_SYSFS_R(thingy, THINGY)
+
+#define SAVU_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .read = savu_sysfs_read_ ## thingy, \
+ .write = savu_sysfs_write_ ## thingy \
+}
+
+#define SAVU_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .read = savu_sysfs_read_ ## thingy, \
+}
+
+#define SAVU_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .write = savu_sysfs_write_ ## thingy \
+}
+
+SAVU_SYSFS_W(control, CONTROL)
+SAVU_SYSFS_RW(profile, PROFILE)
+SAVU_SYSFS_RW(general, GENERAL)
+SAVU_SYSFS_RW(buttons, BUTTONS)
+SAVU_SYSFS_RW(macro, MACRO)
+SAVU_SYSFS_R(info, INFO)
+SAVU_SYSFS_RW(sensor, SENSOR)
+
+static struct bin_attribute savu_bin_attributes[] = {
+ SAVU_BIN_ATTRIBUTE_W(control, CONTROL),
+ SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE),
+ SAVU_BIN_ATTRIBUTE_RW(general, GENERAL),
+ SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS),
+ SAVU_BIN_ATTRIBUTE_RW(macro, MACRO),
+ SAVU_BIN_ATTRIBUTE_R(info, INFO),
+ SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR),
+ __ATTR_NULL
+};
+
+static int savu_init_savu_device_struct(struct usb_device *usb_dev,
+ struct savu_device *savu)
+{
+ mutex_init(&savu->savu_lock);
+
+ return 0;
+}
+
+static int savu_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct savu_device *savu;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ savu = kzalloc(sizeof(*savu), GFP_KERNEL);
+ if (!savu) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, savu);
+
+ retval = savu_init_savu_device_struct(usb_dev, savu);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct savu_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(savu_class, hdev,
+ sizeof(struct savu_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ savu->chrdev_minor = retval;
+ savu->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(savu);
+ return retval;
+}
+
+static void savu_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct savu_device *savu;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return;
+
+ savu = hid_get_drvdata(hdev);
+ if (savu->roccat_claimed)
+ roccat_disconnect(savu->chrdev_minor);
+ kfree(savu);
+}
+
+static int savu_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = savu_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void savu_remove(struct hid_device *hdev)
+{
+ savu_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void savu_report_to_chrdev(struct savu_device const *savu,
+ u8 const *data)
+{
+ struct savu_roccat_report roccat_report;
+ struct savu_mouse_report_special const *special_report;
+
+ if (data[0] != SAVU_MOUSE_REPORT_NUMBER_SPECIAL)
+ return;
+
+ special_report = (struct savu_mouse_report_special const *)data;
+
+ roccat_report.type = special_report->type;
+ roccat_report.data[0] = special_report->data[0];
+ roccat_report.data[1] = special_report->data[1];
+ roccat_report_event(savu->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int savu_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct savu_device *savu = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return 0;
+
+ if (savu == NULL)
+ return 0;
+
+ if (savu->roccat_claimed)
+ savu_report_to_chrdev(savu, data);
+
+ return 0;
+}
+
+static const struct hid_device_id savu_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, savu_devices);
+
+static struct hid_driver savu_driver = {
+ .name = "savu",
+ .id_table = savu_devices,
+ .probe = savu_probe,
+ .remove = savu_remove,
+ .raw_event = savu_raw_event
+};
+
+static int __init savu_init(void)
+{
+ int retval;
+
+ savu_class = class_create(THIS_MODULE, "savu");
+ if (IS_ERR(savu_class))
+ return PTR_ERR(savu_class);
+ savu_class->dev_bin_attrs = savu_bin_attributes;
+
+ retval = hid_register_driver(&savu_driver);
+ if (retval)
+ class_destroy(savu_class);
+ return retval;
+}
+
+static void __exit savu_exit(void)
+{
+ hid_unregister_driver(&savu_driver);
+ class_destroy(savu_class);
+}
+
+module_init(savu_init);
+module_exit(savu_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Savu driver");
+MODULE_LICENSE("GPL v2");
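
The SAVU_SYSFS_* and SAVU_BIN_ATTRIBUTE_* macros in the new driver stamp out one read/write wrapper pair and one bin_attribute entry per report type. For reference, SAVU_SYSFS_RW(profile, PROFILE) and SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE) expand to the equivalent of:

/* Expansion of SAVU_SYSFS_RW(profile, PROFILE), shown for illustration. */
static ssize_t savu_sysfs_write_profile(struct file *fp, struct kobject *kobj,
		struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
	return savu_sysfs_write(fp, kobj, buf, off, count,
			SAVU_SIZE_PROFILE, SAVU_COMMAND_PROFILE);
}

static ssize_t savu_sysfs_read_profile(struct file *fp, struct kobject *kobj,
		struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
	return savu_sysfs_read(fp, kobj, buf, off, count,
			SAVU_SIZE_PROFILE, SAVU_COMMAND_PROFILE);
}

/* ...and the matching table entry from SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE): */
{
	.attr = { .name = "profile", .mode = 0660 },
	.size = SAVU_SIZE_PROFILE,		/* 0x03 per hid-roccat-savu.h */
	.read = savu_sysfs_read_profile,
	.write = savu_sysfs_write_profile
},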
diff --git a/drivers/hid/hid-roccat-savu.h b/drivers/hid/hid-roccat-savu.h
new file mode 100644
index 000000000000..9120ba72087f
--- /dev/null
+++ b/drivers/hid/hid-roccat-savu.h
@@ -0,0 +1,87 @@
+#ifndef __HID_ROCCAT_SAVU_H
+#define __HID_ROCCAT_SAVU_H
+
+/*
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+ SAVU_SIZE_CONTROL = 0x03,
+ SAVU_SIZE_PROFILE = 0x03,
+ SAVU_SIZE_GENERAL = 0x10,
+ SAVU_SIZE_BUTTONS = 0x2f,
+ SAVU_SIZE_MACRO = 0x0823,
+ SAVU_SIZE_INFO = 0x08,
+ SAVU_SIZE_SENSOR = 0x04,
+};
+
+enum savu_control_requests {
+ SAVU_CONTROL_REQUEST_GENERAL = 0x80,
+ SAVU_CONTROL_REQUEST_BUTTONS = 0x90,
+};
+
+enum savu_commands {
+ SAVU_COMMAND_CONTROL = 0x4,
+ SAVU_COMMAND_PROFILE = 0x5,
+ SAVU_COMMAND_GENERAL = 0x6,
+ SAVU_COMMAND_BUTTONS = 0x7,
+ SAVU_COMMAND_MACRO = 0x8,
+ SAVU_COMMAND_INFO = 0x9,
+ SAVU_COMMAND_SENSOR = 0xc,
+};
+
+struct savu_mouse_report_special {
+ uint8_t report_number; /* always 3 */
+ uint8_t zero;
+ uint8_t type;
+ uint8_t data[2];
+} __packed;
+
+enum {
+ SAVU_MOUSE_REPORT_NUMBER_SPECIAL = 3,
+};
+
+enum savu_mouse_report_button_types {
+ /* data1 = new profile range 1-5 */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_PROFILE = 0x20,
+
+ /* data1 = button number range 1-24; data2 = action */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 0x60,
+
+ /* data1 = button number range 1-24; data2 = action */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_TIMER = 0x80,
+
+ /* data1 = setting number range 1-5 */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_CPI = 0xb0,
+
+ /* data1 and data2 = range 0x1-0xb */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 0xc0,
+
+ /* data1 = 22 = next track...
+ * data2 = action
+ */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+};
+
+struct savu_roccat_report {
+ uint8_t type;
+ uint8_t data[2];
+} __packed;
+
+struct savu_device {
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex savu_lock;
+};
+
+#endif
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index aa958706c0e5..0a1805c9b0e5 100644
--- a/drivers/hid/hid-wiimote-ext.c
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -77,7 +77,7 @@ static __u16 wiiext_keymap[] = {
BTN_TR, /* WIIEXT_KEY_RT */
};
-/* diable all extensions */
+/* disable all extensions */
static void ext_disable(struct wiimote_ext *ext)
{
unsigned long flags;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 36fa77b40ffb..3b6f7bf5a77e 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -96,6 +96,7 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
}
kfree(list->buffer[list->tail].value);
+ list->buffer[list->tail].value = NULL;
list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
}
out:
@@ -300,6 +301,7 @@ static int hidraw_release(struct inode * inode, struct file * file)
struct hidraw *dev;
struct hidraw_list *list = file->private_data;
int ret;
+ int i;
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
@@ -317,6 +319,9 @@ static int hidraw_release(struct inode * inode, struct file * file)
kfree(list->hidraw);
}
}
+
+ for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
+ kfree(list->buffer[i].value);
kfree(list);
ret = 0;
unlock:
@@ -446,12 +451,17 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
int ret = 0;
list_for_each_entry(list, &dev->list, node) {
+ int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
+
+ if (new_head == list->tail)
+ continue;
+
if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) {
ret = -ENOMEM;
break;
}
list->buffer[list->head].len = len;
- list->head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
+ list->head = new_head;
kill_fasync(&list->fasync, SIGIO, POLL_IN);
}
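
The hidraw change above reserves one slot in the report ring: a report is dropped when advancing head would collide with tail, so the writer can never overtake the reader. A minimal standalone sketch of the same power-of-two ring-buffer full test:

/* Standalone illustration of the head/tail check added above. */
#include <stdbool.h>

#define BUFFER_SIZE 64	/* must be a power of two, like HIDRAW_BUFFER_SIZE */

struct ring {
	int head;	/* next slot to write */
	int tail;	/* next slot to read */
};

/* true if one more element fits without overtaking the reader */
bool ring_can_push(const struct ring *r)
{
	int new_head = (r->head + 1) & (BUFFER_SIZE - 1);

	return new_head != r->tail;	/* full when head would land on tail */
}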
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 0f20fd17cf06..0108c5991a04 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -1,13 +1,13 @@
-comment "USB Input Devices"
+menu "USB HID support"
depends on USB
config USB_HID
- tristate "USB Human Interface Device (full HID) support"
+ tristate "USB HID transport layer"
default y
depends on USB && INPUT
select HID
---help---
- Say Y here if you want full HID support to connect USB keyboards,
+ Say Y here if you want to connect USB keyboards,
mice, joysticks, graphic tablets, or any other HID based devices
to your computer via USB, as well as Uninterruptible Power Supply
(UPS) and monitor control devices.
@@ -81,4 +81,4 @@ config USB_MOUSE
endmenu
-
+endmenu
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 482f936fc29b..dedd8e4e5c6d 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -84,7 +84,7 @@ static int hid_start_in(struct hid_device *hid)
spin_lock_irqsave(&usbhid->lock, flags);
if (hid->open > 0 &&
!test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
- !test_bit(HID_REPORTED_IDLE, &usbhid->iofl) &&
+ !test_bit(HID_SUSPENDED, &usbhid->iofl) &&
!test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC);
if (rc != 0) {
@@ -207,15 +207,27 @@ static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
int kicked;
int r;
- if (!hid)
+ if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) ||
+ test_bit(HID_SUSPENDED, &usbhid->iofl))
return 0;
if ((kicked = (usbhid->outhead != usbhid->outtail))) {
hid_dbg(hid, "Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);
+ /* Try to wake up from autosuspend... */
r = usb_autopm_get_interface_async(usbhid->intf);
if (r < 0)
return r;
+
+ /*
+ * If still suspended, don't submit. Submission will
+ * occur if/when resume drains the queue.
+ */
+ if (test_bit(HID_SUSPENDED, &usbhid->iofl)) {
+ usb_autopm_put_interface_no_suspend(usbhid->intf);
+ return r;
+ }
+
/* Asynchronously flush queue. */
set_bit(HID_OUT_RUNNING, &usbhid->iofl);
if (hid_submit_out(hid)) {
@@ -234,15 +246,27 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
int r;
WARN_ON(hid == NULL);
- if (!hid)
+ if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) ||
+ test_bit(HID_SUSPENDED, &usbhid->iofl))
return 0;
if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
hid_dbg(hid, "Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);
+ /* Try to wake up from autosuspend... */
r = usb_autopm_get_interface_async(usbhid->intf);
if (r < 0)
return r;
+
+ /*
+ * If still suspended, don't submit. Submission will
+ * occur if/when resume drains the queue.
+ */
+ if (test_bit(HID_SUSPENDED, &usbhid->iofl)) {
+ usb_autopm_put_interface_no_suspend(usbhid->intf);
+ return r;
+ }
+
/* Asynchronously flush queue. */
set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
if (hid_submit_ctrl(hid)) {
@@ -331,9 +355,12 @@ static int hid_submit_out(struct hid_device *hid)
usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
1 + (report->id > 0);
usbhid->urbout->dev = hid_to_usb_dev(hid);
- memcpy(usbhid->outbuf, raw_report,
- usbhid->urbout->transfer_buffer_length);
- kfree(raw_report);
+ if (raw_report) {
+ memcpy(usbhid->outbuf, raw_report,
+ usbhid->urbout->transfer_buffer_length);
+ kfree(raw_report);
+ usbhid->out[usbhid->outtail].raw_report = NULL;
+ }
dbg_hid("submitting out urb\n");
@@ -362,8 +389,11 @@ static int hid_submit_ctrl(struct hid_device *hid)
if (dir == USB_DIR_OUT) {
usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
usbhid->urbctrl->transfer_buffer_length = len;
- memcpy(usbhid->ctrlbuf, raw_report, len);
- kfree(raw_report);
+ if (raw_report) {
+ memcpy(usbhid->ctrlbuf, raw_report, len);
+ kfree(raw_report);
+ usbhid->ctrl[usbhid->ctrltail].raw_report = NULL;
+ }
} else {
int maxpacket, padlen;
@@ -407,16 +437,6 @@ static int hid_submit_ctrl(struct hid_device *hid)
* Output interrupt completion handler.
*/
-static int irq_out_pump_restart(struct hid_device *hid)
-{
- struct usbhid_device *usbhid = hid->driver_data;
-
- if (usbhid->outhead != usbhid->outtail)
- return hid_submit_out(hid);
- else
- return -1;
-}
-
static void hid_irq_out(struct urb *urb)
{
struct hid_device *hid = urb->context;
@@ -441,15 +461,17 @@ static void hid_irq_out(struct urb *urb)
spin_lock_irqsave(&usbhid->lock, flags);
- if (unplug)
+ if (unplug) {
usbhid->outtail = usbhid->outhead;
- else
+ } else {
usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1);
- if (!irq_out_pump_restart(hid)) {
- /* Successfully submitted next urb in queue */
- spin_unlock_irqrestore(&usbhid->lock, flags);
- return;
+ if (usbhid->outhead != usbhid->outtail &&
+ hid_submit_out(hid) == 0) {
+ /* Successfully submitted next urb in queue */
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+ return;
+ }
}
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
@@ -461,15 +483,6 @@ static void hid_irq_out(struct urb *urb)
/*
* Control pipe completion handler.
*/
-static int ctrl_pump_restart(struct hid_device *hid)
-{
- struct usbhid_device *usbhid = hid->driver_data;
-
- if (usbhid->ctrlhead != usbhid->ctrltail)
- return hid_submit_ctrl(hid);
- else
- return -1;
-}
static void hid_ctrl(struct urb *urb)
{
@@ -498,15 +511,17 @@ static void hid_ctrl(struct urb *urb)
hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
- if (unplug)
+ if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
- else
+ } else {
usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
- if (!ctrl_pump_restart(hid)) {
- /* Successfully submitted next urb in queue */
- spin_unlock(&usbhid->lock);
- return;
+ if (usbhid->ctrlhead != usbhid->ctrltail &&
+ hid_submit_ctrl(hid) == 0) {
+ /* Successfully submitted next urb in queue */
+ spin_unlock(&usbhid->lock);
+ return;
+ }
}
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
@@ -540,49 +555,36 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->out[usbhid->outhead].report = report;
usbhid->outhead = head;
- /* Try to awake from autosuspend... */
- if (usb_autopm_get_interface_async(usbhid->intf) < 0)
- return;
+ /* If the queue isn't running, restart it */
+ if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
+ usbhid_restart_out_queue(usbhid);
- /*
- * But if still suspended, leave urb enqueued, don't submit.
- * Submission will occur if/when resume() drains the queue.
- */
- if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
- return;
+ /* Otherwise see if an earlier request has timed out */
+ } else if (time_after(jiffies, usbhid->last_out + HZ * 5)) {
+
+ /* Prevent autosuspend following the unlink */
+ usb_autopm_get_interface_no_resume(usbhid->intf);
- if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
- if (hid_submit_out(hid)) {
- clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- usb_autopm_put_interface_async(usbhid->intf);
- }
- wake_up(&usbhid->wait);
- } else {
/*
- * the queue is known to run
- * but an earlier request may be stuck
- * we may need to time out
- * no race because the URB is blocked under
- * spinlock
+ * Prevent resubmission in case the URB completes
+ * before we can unlink it. We don't want to cancel
+ * the wrong transfer!
*/
- if (time_after(jiffies, usbhid->last_out + HZ * 5)) {
- usb_block_urb(usbhid->urbout);
- /* drop lock to not deadlock if the callback is called */
- spin_unlock(&usbhid->lock);
- usb_unlink_urb(usbhid->urbout);
- spin_lock(&usbhid->lock);
- usb_unblock_urb(usbhid->urbout);
- /*
- * if the unlinking has already completed
- * the pump will have been stopped
- * it must be restarted now
- */
- if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- if (!irq_out_pump_restart(hid))
- set_bit(HID_OUT_RUNNING, &usbhid->iofl);
+ usb_block_urb(usbhid->urbout);
+ /* Drop lock to avoid deadlock if the callback runs */
+ spin_unlock(&usbhid->lock);
- }
+ usb_unlink_urb(usbhid->urbout);
+ spin_lock(&usbhid->lock);
+ usb_unblock_urb(usbhid->urbout);
+
+ /* Unlink might have stopped the queue */
+ if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+
+ /* Now we can allow autosuspend again */
+ usb_autopm_put_interface_async(usbhid->intf);
}
return;
}
@@ -604,47 +606,36 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->ctrl[usbhid->ctrlhead].dir = dir;
usbhid->ctrlhead = head;
- /* Try to awake from autosuspend... */
- if (usb_autopm_get_interface_async(usbhid->intf) < 0)
- return;
+ /* If the queue isn't running, restart it */
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
+ usbhid_restart_ctrl_queue(usbhid);
- /*
- * If already suspended, leave urb enqueued, but don't submit.
- * Submission will occur if/when resume() drains the queue.
- */
- if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
- return;
+ /* Otherwise see if an earlier request has timed out */
+ } else if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) {
+
+ /* Prevent autosuspend following the unlink */
+ usb_autopm_get_interface_no_resume(usbhid->intf);
- if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
- if (hid_submit_ctrl(hid)) {
- clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- usb_autopm_put_interface_async(usbhid->intf);
- }
- wake_up(&usbhid->wait);
- } else {
/*
- * the queue is known to run
- * but an earlier request may be stuck
- * we may need to time out
- * no race because the URB is blocked under
- * spinlock
+ * Prevent resubmission in case the URB completes
+ * before we can unlink it. We don't want to cancel
+ * the wrong transfer!
*/
- if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) {
- usb_block_urb(usbhid->urbctrl);
- /* drop lock to not deadlock if the callback is called */
- spin_unlock(&usbhid->lock);
- usb_unlink_urb(usbhid->urbctrl);
- spin_lock(&usbhid->lock);
- usb_unblock_urb(usbhid->urbctrl);
- /*
- * if the unlinking has already completed
- * the pump will have been stopped
- * it must be restarted now
- */
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- if (!ctrl_pump_restart(hid))
- set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- }
+ usb_block_urb(usbhid->urbctrl);
+
+ /* Drop lock to avoid deadlock if the callback runs */
+ spin_unlock(&usbhid->lock);
+
+ usb_unlink_urb(usbhid->urbctrl);
+ spin_lock(&usbhid->lock);
+ usb_unblock_urb(usbhid->urbctrl);
+
+ /* Unlink might have stopped the queue */
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+
+ /* Now we can allow autosuspend again */
+ usb_autopm_put_interface_async(usbhid->intf);
}
}
@@ -1002,9 +993,10 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
static void usbhid_restart_queues(struct usbhid_device *usbhid)
{
- if (usbhid->urbout)
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
usbhid_restart_out_queue(usbhid);
- usbhid_restart_ctrl_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
}
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
@@ -1471,11 +1463,38 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
+static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int status;
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
+ test_bit(HID_RESET_PENDING, &usbhid->iofl))
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+
+ usbhid_restart_queues(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+
+ status = hid_start_in(hid);
+ if (status < 0)
+ hid_io_error(hid);
+
+ if (driver_suspended && hid->driver && hid->driver->resume)
+ status = hid->driver->resume(hid);
+ return status;
+}
+
static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
int status;
+ bool driver_suspended = false;
if (PMSG_IS_AUTO(message)) {
spin_lock_irq(&usbhid->lock); /* Sync with error handler */
@@ -1486,13 +1505,14 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
&& !test_bit(HID_KEYS_PRESSED, &usbhid->iofl)
&& (!usbhid->ledcount || ignoreled))
{
- set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ set_bit(HID_SUSPENDED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
if (hid->driver && hid->driver->suspend) {
status = hid->driver->suspend(hid, message);
if (status < 0)
- return status;
+ goto failed;
}
+ driver_suspended = true;
} else {
usbhid_mark_busy(usbhid);
spin_unlock_irq(&usbhid->lock);
@@ -1505,11 +1525,14 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
if (status < 0)
return status;
}
+ driver_suspended = true;
spin_lock_irq(&usbhid->lock);
- set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ set_bit(HID_SUSPENDED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
- if (usbhid_wait_io(hid) < 0)
- return -EIO;
+ if (usbhid_wait_io(hid) < 0) {
+ status = -EIO;
+ goto failed;
+ }
}
hid_cancel_delayed_stuff(usbhid);
@@ -1517,14 +1540,15 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
/* lost race against keypresses */
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_mark_busy(usbhid);
- return -EBUSY;
+ status = -EBUSY;
+ goto failed;
}
dev_dbg(&intf->dev, "suspend\n");
return 0;
+
+ failed:
+ hid_resume_common(hid, driver_suspended);
+ return status;
}
static int hid_resume(struct usb_interface *intf)
@@ -1536,23 +1560,7 @@ static int hid_resume(struct usb_interface *intf)
if (!test_bit(HID_STARTED, &usbhid->iofl))
return 0;
- clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
-
- if (status >= 0 && hid->driver && hid->driver->resume) {
- int ret = hid->driver->resume(hid);
- if (ret < 0)
- status = ret;
- }
+ status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
}
@@ -1563,7 +1571,7 @@ static int hid_reset_resume(struct usb_interface *intf)
struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
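
After this rework, __usbhid_submit_report() only enqueues the request and then either kicks an idle queue or recovers one that has been stuck on a single transfer for more than five seconds; the restart helpers refuse to submit while HID_SUSPENDED or HID_RESET_PENDING is set, and hid_resume_common() drains the queues on wakeup. A rough sketch of that enqueue-or-recover shape, with generic names rather than the usbhid API:

/* Sketch of the pattern only; not the actual usbhid data structures. */
#include <stdbool.h>
#include <time.h>

struct queue {
	bool running;		/* a request for this queue is in flight */
	time_t last_submit;	/* when the in-flight request was started */
};

int restart_queue(struct queue *q);	/* submits next request unless suspended */
void cancel_inflight(struct queue *q);	/* block + unlink the stuck request */

void submit(struct queue *q, time_t now)
{
	if (!q->running) {
		/* idle queue: kick it; restart_queue() bails out while
		 * suspended and resume drains the queue later instead */
		restart_queue(q);
	} else if (now - q->last_submit > 5) {
		/* queue is running but one request has been stuck too long */
		cancel_inflight(q);
		if (!q->running)	/* the cancel may have stopped the pump */
			restart_queue(q);
	}
	/* otherwise the request stays queued; the completion handler picks it up */
}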
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0597ee604f6e..903eef3d3e10 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -76,6 +76,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 1883d7b94870..bd87a61e5303 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -53,7 +53,6 @@ struct usb_interface *usbhid_find_interface(int minor);
#define HID_CLEAR_HALT 6
#define HID_DISCONNECTED 7
#define HID_STARTED 8
-#define HID_REPORTED_IDLE 9
#define HID_KEYS_PRESSED 10
#define HID_NO_BANDWIDTH 11
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8deedc1b9840..6f1d167cb1ea 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -318,7 +318,7 @@ config SENSORS_EXYNOS4_TMU
tristate "Temperature sensor on Samsung EXYNOS4"
depends on ARCH_EXYNOS4
help
- If you say yes here you get support for TMU (Thermal Managment
+ If you say yes here you get support for TMU (Thermal Management
Unit) on SAMSUNG EXYNOS4 series of SoC.
This driver can also be built as a module. If so, the module
@@ -1036,8 +1036,9 @@ config SENSORS_SCH56XX_COMMON
config SENSORS_SCH5627
tristate "SMSC SCH5627"
- depends on !PPC
+ depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
+ select WATCHDOG_CORE
help
If you say yes here you get support for the hardware monitoring
features of the SMSC SCH5627 Super-I/O chip including support for
@@ -1048,8 +1049,9 @@ config SENSORS_SCH5627
config SENSORS_SCH5636
tristate "SMSC SCH5636"
- depends on !PPC
+ depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
+ select WATCHDOG_CORE
help
SMSC SCH5636 Super I/O chips include an embedded microcontroller for
hardware monitoring solutions, allowing motherboard manufacturers to
@@ -1102,6 +1104,19 @@ config SENSORS_AMC6821
This driver can also be build as a module. If so, the module
will be called amc6821.
+config SENSORS_INA2XX
+ tristate "Texas Instruments INA219, INA226"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for INA219 and INA226 power
+ monitor chips.
+
+ The INA2xx driver is configured for the default configuration of
+ the part as described in the datasheet.
+ Default value for Rshunt is 10 mOhms.
+ This driver can also be built as a module. If so, the module
+ will be called ina2xx.
+
config SENSORS_THMC50
tristate "Texas Instruments THMC50 / Analog Devices ADM1022"
depends on I2C
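
The help text notes the 10 mOhm default shunt. The current the driver reports follows directly from Ohm's law on the shunt-voltage register; the arithmetic below is illustration only, and the 10 uV/LSB scaling is an assumption, not taken from this patch:

/* Back-of-the-envelope check of shunt voltage -> current with Rshunt = 10 mOhm. */
#include <stdio.h>

int main(void)
{
	long shunt_uv_per_lsb = 10;	/* assumed shunt-voltage LSB in microvolts */
	long raw = 2000;		/* example register reading */
	long rshunt_mohm = 10;		/* driver default named in the help text */

	long shunt_uv = raw * shunt_uv_per_lsb;		/* 20000 uV = 20 mV */
	long current_ma = shunt_uv / rshunt_mohm;	/* I = V/R -> 2000 mA */

	printf("shunt %ld uV -> %ld mA\n", shunt_uv, current_ma);
	return 0;
}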
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 6d3f11f71815..e1eeac13b851 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
+obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 9140236a0182..34ad5a27a7e9 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -107,15 +107,7 @@ struct acpi_power_meter_resource {
struct kobject *holders_dir;
};
-struct ro_sensor_template {
- char *label;
- ssize_t (*show)(struct device *dev,
- struct device_attribute *devattr,
- char *buf);
- int index;
-};
-
-struct rw_sensor_template {
+struct sensor_template {
char *label;
ssize_t (*show)(struct device *dev,
struct device_attribute *devattr,
@@ -469,52 +461,67 @@ static ssize_t show_name(struct device *dev,
return sprintf(buf, "%s\n", ACPI_POWER_METER_NAME);
}
+#define RO_SENSOR_TEMPLATE(_label, _show, _index) \
+ { \
+ .label = _label, \
+ .show = _show, \
+ .index = _index, \
+ }
+
+#define RW_SENSOR_TEMPLATE(_label, _show, _set, _index) \
+ { \
+ .label = _label, \
+ .show = _show, \
+ .set = _set, \
+ .index = _index, \
+ }
+
/* Sensor descriptions. If you add a sensor, update NUM_SENSORS above! */
-static struct ro_sensor_template meter_ro_attrs[] = {
-{POWER_AVERAGE_NAME, show_power, 0},
-{"power1_accuracy", show_accuracy, 0},
-{"power1_average_interval_min", show_val, 0},
-{"power1_average_interval_max", show_val, 1},
-{"power1_is_battery", show_val, 5},
-{NULL, NULL, 0},
+static struct sensor_template meter_attrs[] = {
+ RO_SENSOR_TEMPLATE(POWER_AVERAGE_NAME, show_power, 0),
+ RO_SENSOR_TEMPLATE("power1_accuracy", show_accuracy, 0),
+ RO_SENSOR_TEMPLATE("power1_average_interval_min", show_val, 0),
+ RO_SENSOR_TEMPLATE("power1_average_interval_max", show_val, 1),
+ RO_SENSOR_TEMPLATE("power1_is_battery", show_val, 5),
+ RW_SENSOR_TEMPLATE(POWER_AVG_INTERVAL_NAME, show_avg_interval,
+ set_avg_interval, 0),
+ {},
};
-static struct rw_sensor_template meter_rw_attrs[] = {
-{POWER_AVG_INTERVAL_NAME, show_avg_interval, set_avg_interval, 0},
-{NULL, NULL, NULL, 0},
+static struct sensor_template misc_cap_attrs[] = {
+ RO_SENSOR_TEMPLATE("power1_cap_min", show_val, 2),
+ RO_SENSOR_TEMPLATE("power1_cap_max", show_val, 3),
+ RO_SENSOR_TEMPLATE("power1_cap_hyst", show_val, 4),
+ RO_SENSOR_TEMPLATE(POWER_ALARM_NAME, show_val, 6),
+ {},
};
-static struct ro_sensor_template misc_cap_attrs[] = {
-{"power1_cap_min", show_val, 2},
-{"power1_cap_max", show_val, 3},
-{"power1_cap_hyst", show_val, 4},
-{POWER_ALARM_NAME, show_val, 6},
-{NULL, NULL, 0},
+static struct sensor_template ro_cap_attrs[] = {
+ RO_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, 0),
+ {},
};
-static struct ro_sensor_template ro_cap_attrs[] = {
-{POWER_CAP_NAME, show_cap, 0},
-{NULL, NULL, 0},
+static struct sensor_template rw_cap_attrs[] = {
+ RW_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, set_cap, 0),
+ {},
};
-static struct rw_sensor_template rw_cap_attrs[] = {
-{POWER_CAP_NAME, show_cap, set_cap, 0},
-{NULL, NULL, NULL, 0},
+static struct sensor_template trip_attrs[] = {
+ RW_SENSOR_TEMPLATE("power1_average_min", show_val, set_trip, 7),
+ RW_SENSOR_TEMPLATE("power1_average_max", show_val, set_trip, 8),
+ {},
};
-static struct rw_sensor_template trip_attrs[] = {
-{"power1_average_min", show_val, set_trip, 7},
-{"power1_average_max", show_val, set_trip, 8},
-{NULL, NULL, NULL, 0},
+static struct sensor_template misc_attrs[] = {
+ RO_SENSOR_TEMPLATE("name", show_name, 0),
+ RO_SENSOR_TEMPLATE("power1_model_number", show_str, 0),
+ RO_SENSOR_TEMPLATE("power1_oem_info", show_str, 2),
+ RO_SENSOR_TEMPLATE("power1_serial_number", show_str, 1),
+ {},
};
-static struct ro_sensor_template misc_attrs[] = {
-{"name", show_name, 0},
-{"power1_model_number", show_str, 0},
-{"power1_oem_info", show_str, 2},
-{"power1_serial_number", show_str, 1},
-{NULL, NULL, 0},
-};
+#undef RO_SENSOR_TEMPLATE
+#undef RW_SENSOR_TEMPLATE
/* Read power domain data */
static void remove_domain_devices(struct acpi_power_meter_resource *resource)
@@ -619,49 +626,24 @@ end:
}
/* Registration and deregistration */
-static int register_ro_attrs(struct acpi_power_meter_resource *resource,
- struct ro_sensor_template *ro)
+static int register_attrs(struct acpi_power_meter_resource *resource,
+ struct sensor_template *attrs)
{
struct device *dev = &resource->acpi_dev->dev;
struct sensor_device_attribute *sensors =
&resource->sensors[resource->num_sensors];
int res = 0;
- while (ro->label) {
- sensors->dev_attr.attr.name = ro->label;
+ while (attrs->label) {
+ sensors->dev_attr.attr.name = attrs->label;
sensors->dev_attr.attr.mode = S_IRUGO;
- sensors->dev_attr.show = ro->show;
- sensors->index = ro->index;
+ sensors->dev_attr.show = attrs->show;
+ sensors->index = attrs->index;
- sysfs_attr_init(&sensors->dev_attr.attr);
- res = device_create_file(dev, &sensors->dev_attr);
- if (res) {
- sensors->dev_attr.attr.name = NULL;
- goto error;
+ if (attrs->set) {
+ sensors->dev_attr.attr.mode |= S_IWUSR;
+ sensors->dev_attr.store = attrs->set;
}
- sensors++;
- resource->num_sensors++;
- ro++;
- }
-
-error:
- return res;
-}
-
-static int register_rw_attrs(struct acpi_power_meter_resource *resource,
- struct rw_sensor_template *rw)
-{
- struct device *dev = &resource->acpi_dev->dev;
- struct sensor_device_attribute *sensors =
- &resource->sensors[resource->num_sensors];
- int res = 0;
-
- while (rw->label) {
- sensors->dev_attr.attr.name = rw->label;
- sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
- sensors->dev_attr.show = rw->show;
- sensors->dev_attr.store = rw->set;
- sensors->index = rw->index;
sysfs_attr_init(&sensors->dev_attr.attr);
res = device_create_file(dev, &sensors->dev_attr);
@@ -671,7 +653,7 @@ static int register_rw_attrs(struct acpi_power_meter_resource *resource,
}
sensors++;
resource->num_sensors++;
- rw++;
+ attrs++;
}
error:
@@ -703,10 +685,7 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
return res;
if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
- res = register_ro_attrs(resource, meter_ro_attrs);
- if (res)
- goto error;
- res = register_rw_attrs(resource, meter_rw_attrs);
+ res = register_attrs(resource, meter_attrs);
if (res)
goto error;
}
@@ -718,28 +697,27 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
goto skip_unsafe_cap;
}
- if (resource->caps.configurable_cap) {
- res = register_rw_attrs(resource, rw_cap_attrs);
- if (res)
- goto error;
- } else {
- res = register_ro_attrs(resource, ro_cap_attrs);
- if (res)
- goto error;
- }
- res = register_ro_attrs(resource, misc_cap_attrs);
+ if (resource->caps.configurable_cap)
+ res = register_attrs(resource, rw_cap_attrs);
+ else
+ res = register_attrs(resource, ro_cap_attrs);
+
+ if (res)
+ goto error;
+
+ res = register_attrs(resource, misc_cap_attrs);
if (res)
goto error;
}
-skip_unsafe_cap:
+skip_unsafe_cap:
if (resource->caps.flags & POWER_METER_CAN_TRIP) {
- res = register_rw_attrs(resource, trip_attrs);
+ res = register_attrs(resource, trip_attrs);
if (res)
goto error;
}
- res = register_ro_attrs(resource, misc_attrs);
+ res = register_attrs(resource, misc_attrs);
if (res)
goto error;
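
With the read-only and read-write template types folded into one, register_attrs() derives the sysfs mode from whether a .set handler is present (S_IRUGO, plus S_IWUSR when .set is non-NULL). For reference, the two macros introduced above expand to plain designated initializers:

/* Expansion of one RO and one RW entry from the macros defined in this patch. */
static struct sensor_template example_attrs[] = {
	/* RO_SENSOR_TEMPLATE("power1_accuracy", show_accuracy, 0) */
	{ .label = "power1_accuracy", .show = show_accuracy, .index = 0 },
	/* RW_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, set_cap, 0) */
	{ .label = POWER_CAP_NAME, .show = show_cap, .set = set_cap, .index = 0 },
	{},	/* empty .label terminates the registration loop */
};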
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index f85ce70d9677..cfec802cf9ca 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -18,21 +18,14 @@
#include <linux/hwmon-sysfs.h>
/*
- * AD7314 power mode
- */
-#define AD7314_PD 0x2000
-
-/*
* AD7314 temperature masks
*/
-#define AD7314_TEMP_SIGN 0x200
#define AD7314_TEMP_MASK 0x7FE0
-#define AD7314_TEMP_OFFSET 5
+#define AD7314_TEMP_SHIFT 5
/*
* ADT7301 and ADT7302 temperature masks
*/
-#define ADT7301_TEMP_SIGN 0x2000
#define ADT7301_TEMP_MASK 0x3FFF
enum ad7314_variant {
@@ -73,7 +66,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
return ret;
switch (spi_get_device_id(chip->spi_dev)->driver_data) {
case ad7314:
- data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
+ data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
data = (data << 6) >> 6;
return sprintf(buf, "%d\n", 250 * data);
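
The (data << 6) >> 6 idiom kept in the ad7314 path sign-extends the 10-bit two's-complement temperature field by shifting bit 9 into the sign position of a 16-bit value and back (relying, as the driver does, on arithmetic right shift of signed values). A standalone check:

/* Illustration of 10-bit sign extension via paired shifts. */
#include <stdint.h>
#include <stdio.h>

int16_t sign_extend10(int16_t raw10)
{
	return (int16_t)(raw10 << 6) >> 6;	/* bit 9 -> sign bit -> back */
}

int main(void)
{
	printf("%d\n", sign_extend10(0x3FF));	/* -1 */
	printf("%d\n", sign_extend10(0x200));	/* -512 */
	printf("%d\n", sign_extend10(0x0FF));	/* 255 */
	return 0;
}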
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f082e48ab113..2cde9ecf7731 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -8,7 +8,7 @@
*
* Based on hdaps.c driver:
* Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
*
* Fan control based on smcFanControl:
* Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com>
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
int i;
if (send_command(cmd) || send_argument(key)) {
- pr_warn("%s: read arg fail\n", key);
+ pr_warn("%.4s: read arg fail\n", key);
return -EIO;
}
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
for (i = 0; i < len; i++) {
if (__wait_status(0x05)) {
- pr_warn("%s: read data fail\n", key);
+ pr_warn("%.4s: read data fail\n", key);
return -EIO;
}
buffer[i] = inb(APPLESMC_DATA_PORT);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index b9d512331ed4..637c51c11b44 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev,
return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}
+struct tjmax {
+ char const *id;
+ int tjmax;
+};
+
+static struct tjmax __cpuinitconst tjmax_table[] = {
+ { "CPU D410", 100000 },
+ { "CPU D425", 100000 },
+ { "CPU D510", 100000 },
+ { "CPU D525", 100000 },
+ { "CPU N450", 100000 },
+ { "CPU N455", 100000 },
+ { "CPU N470", 100000 },
+ { "CPU N475", 100000 },
+ { "CPU 230", 100000 },
+ { "CPU 330", 125000 },
+};
+
static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
struct device *dev)
{
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
int err;
u32 eax, edx;
struct pci_dev *host_bridge;
+ int i;
+
+ /* explicit tjmax table entries override heuristics */
+ for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
+ if (strstr(c->x86_model_id, tjmax_table[i].id))
+ return tjmax_table[i].tjmax;
+ }
/* Early chips have no MSR for TjMax */
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
/* Atom CPUs */
- if (c->x86_model == 0x1c) {
+ if (c->x86_model == 0x1c || c->x86_model == 0x26
+ || c->x86_model == 0x27) {
usemsr_ee = 0;
host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
tjmax = 90000;
pci_dev_put(host_bridge);
+ } else if (c->x86_model == 0x36) {
+ usemsr_ee = 0;
+ tjmax = 100000;
}
if (c->x86_model > 0xe && usemsr_ee) {
@@ -664,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
* sensors. We check this bit only, all the early CPUs
* without thermal sensors will be filtered out.
*/
- if (!cpu_has(c, X86_FEATURE_DTS))
+ if (!cpu_has(c, X86_FEATURE_DTHERM))
return;
if (!pdev) {
@@ -765,14 +794,14 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
};
static const struct x86_cpu_id coretemp_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
static int __init coretemp_init(void)
{
- int i, err = -ENODEV;
+ int i, err;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 9691f664c76e..e7d234b59312 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
data->fan_rpm_control = true;
break;
default:
- mutex_unlock(&data->update_lock);
- return -EINVAL;
+ count = -EINVAL;
+ goto err;
}
- read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+ result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+ if (result) {
+ count = result;
+ goto err;
+ }
if (data->fan_rpm_control)
conf_reg |= 0x80;
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
conf_reg &= ~0x80;
i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
-
+err:
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index e8e18cab1fb8..6b13f1a4dc27 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -257,15 +257,4 @@ static struct pci_driver fam15h_power_driver = {
.remove = __devexit_p(fam15h_power_remove),
};
-static int __init fam15h_power_init(void)
-{
- return pci_register_driver(&fam15h_power_driver);
-}
-
-static void __exit fam15h_power_exit(void)
-{
- pci_unregister_driver(&fam15h_power_driver);
-}
-
-module_init(fam15h_power_init)
-module_exit(fam15h_power_exit)
+module_pci_driver(fam15h_power_driver);
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
new file mode 100644
index 000000000000..7f3f4a385729
--- /dev/null
+++ b/drivers/hwmon/ina2xx.c
@@ -0,0 +1,368 @@
+/*
+ * Driver for Texas Instruments INA219, INA226 power monitor chips
+ *
+ * INA219:
+ * Zero Drift Bi-Directional Current/Power Monitor with I2C Interface
+ * Datasheet: http://www.ti.com/product/ina219
+ *
+ * INA226:
+ * Bi-Directional Current/Power Monitor with I2C Interface
+ * Datasheet: http://www.ti.com/product/ina226
+ *
+ * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Thanks to Jan Volkering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <linux/platform_data/ina2xx.h>
+
+/* common register definitions */
+#define INA2XX_CONFIG 0x00
+#define INA2XX_SHUNT_VOLTAGE 0x01 /* readonly */
+#define INA2XX_BUS_VOLTAGE 0x02 /* readonly */
+#define INA2XX_POWER 0x03 /* readonly */
+#define INA2XX_CURRENT 0x04 /* readonly */
+#define INA2XX_CALIBRATION 0x05
+
+/* INA226 register definitions */
+#define INA226_MASK_ENABLE 0x06
+#define INA226_ALERT_LIMIT 0x07
+#define INA226_DIE_ID 0xFF
+
+
+/* register count */
+#define INA219_REGISTERS 6
+#define INA226_REGISTERS 8
+
+#define INA2XX_MAX_REGISTERS 8
+
+/* settings - depend on use case */
+#define INA219_CONFIG_DEFAULT 0x399F /* PGA=8 */
+#define INA226_CONFIG_DEFAULT 0x4527 /* averages=16 */
+
+/* worst case is 68.10 ms (~14.6Hz, ina219) */
+#define INA2XX_CONVERSION_RATE 15
+
+enum ina2xx_ids { ina219, ina226 };
+
+struct ina2xx_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated;
+
+ int kind;
+ int registers;
+ u16 regs[INA2XX_MAX_REGISTERS];
+};
+
+int ina2xx_read_word(struct i2c_client *client, int reg)
+{
+ int val = i2c_smbus_read_word_data(client, reg);
+ if (unlikely(val < 0)) {
+ dev_dbg(&client->dev,
+ "Failed to read register: %d\n", reg);
+ return val;
+ }
+ return be16_to_cpu(val);
+}
+
+void ina2xx_write_word(struct i2c_client *client, int reg, int data)
+{
+ i2c_smbus_write_word_data(client, reg, cpu_to_be16(data));
+}
+
+static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ina2xx_data *data = i2c_get_clientdata(client);
+ struct ina2xx_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated +
+ HZ / INA2XX_CONVERSION_RATE) || !data->valid) {
+
+ int i;
+
+ dev_dbg(&client->dev, "Starting ina2xx update\n");
+
+ /* Read all registers */
+ for (i = 0; i < data->registers; i++) {
+ int rv = ina2xx_read_word(client, i);
+ if (rv < 0) {
+ ret = ERR_PTR(rv);
+ goto abort;
+ }
+ data->regs[i] = rv;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static int ina219_get_value(struct ina2xx_data *data, u8 reg)
+{
+ /*
+ * calculate exact value for the given register
+ * we assume default power-on reset settings:
+ * bus voltage range 32V
+ * gain = /8
+ * adc 1 & 2 -> conversion time 532uS
+ * mode is continuous shunt and bus
+ * calibration value is INA219_CALIBRATION_VALUE
+ */
+ int val = data->regs[reg];
+
+ switch (reg) {
+ case INA2XX_SHUNT_VOLTAGE:
+ /* LSB=10uV. Convert to mV. */
+ val = DIV_ROUND_CLOSEST(val, 100);
+ break;
+ case INA2XX_BUS_VOLTAGE:
+ /* LSB=4mV. Register is not right aligned, convert to mV. */
+ val = (val >> 3) * 4;
+ break;
+ case INA2XX_POWER:
+ /* LSB=20mW. Convert to uW */
+ val = val * 20 * 1000;
+ break;
+ case INA2XX_CURRENT:
+ /* LSB=1mA (selected). Value is already in mA. */
+ break;
+ default:
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ val = 0;
+ break;
+ }
+
+ return val;
+}
+
+static int ina226_get_value(struct ina2xx_data *data, u8 reg)
+{
+ /*
+ * calculate exact value for the given register
+ * we assume default power-on reset settings:
+ * bus voltage range 32V
+ * gain = /8
+ * adc 1 & 2 -> conversion time 532uS
+ * mode is continuous shunt and bus
+ * calibration value is INA226_CALIBRATION_VALUE
+ */
+ int val = data->regs[reg];
+
+ switch (reg) {
+ case INA2XX_SHUNT_VOLTAGE:
+ /* LSB=2.5uV. Convert to mV. */
+ val = DIV_ROUND_CLOSEST(val, 400);
+ break;
+ case INA2XX_BUS_VOLTAGE:
+ /* LSB=1.25mV. Convert to mV. */
+ val = val + DIV_ROUND_CLOSEST(val, 4);
+ break;
+ case INA2XX_POWER:
+ /* LSB=25mW. Convert to uW */
+ val = val * 25 * 1000;
+ break;
+ case INA2XX_CURRENT:
+ /* LSB=1mA (selected). Value is already in mA. */
+ break;
+ default:
+ /* programmer goofed */
+ WARN_ON_ONCE(1);
+ val = 0;
+ break;
+ }
+
+ return val;
+}
+
+static ssize_t ina2xx_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ina2xx_data *data = ina2xx_update_device(dev);
+ int value = 0;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ switch (data->kind) {
+ case ina219:
+ value = ina219_get_value(data, attr->index);
+ break;
+ case ina226:
+ value = ina226_get_value(data, attr->index);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+/* shunt voltage */
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, \
+ ina2xx_show_value, NULL, INA2XX_SHUNT_VOLTAGE);
+
+/* bus voltage */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, \
+ ina2xx_show_value, NULL, INA2XX_BUS_VOLTAGE);
+
+/* calculated current */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, \
+ ina2xx_show_value, NULL, INA2XX_CURRENT);
+
+/* calculated power */
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, \
+ ina2xx_show_value, NULL, INA2XX_POWER);
+
+/* pointers to created device attributes */
+static struct attribute *ina2xx_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ina2xx_group = {
+ .attrs = ina2xx_attributes,
+};
+
+static int ina2xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct ina2xx_data *data;
+ struct ina2xx_platform_data *pdata;
+ int ret = 0;
+ long shunt = 10000; /* default shunt value 10mOhms */
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (client->dev.platform_data) {
+ pdata =
+ (struct ina2xx_platform_data *)client->dev.platform_data;
+ shunt = pdata->shunt_uohms;
+ }
+
+ if (shunt <= 0)
+ return -ENODEV;
+
+ /* set the device type */
+ data->kind = id->driver_data;
+
+ switch (data->kind) {
+ case ina219:
+ /* device configuration */
+ ina2xx_write_word(client, INA2XX_CONFIG, INA219_CONFIG_DEFAULT);
+
+ /* set current LSB to 1mA, shunt is in uOhms */
+ /* (equation 13 in datasheet) */
+ ina2xx_write_word(client, INA2XX_CALIBRATION, 40960000 / shunt);
+ dev_info(&client->dev,
+ "power monitor INA219 (Rshunt = %li uOhm)\n", shunt);
+ data->registers = INA219_REGISTERS;
+ break;
+ case ina226:
+ /* device configuration */
+ ina2xx_write_word(client, INA2XX_CONFIG, INA226_CONFIG_DEFAULT);
+
+ /* set current LSB to 1mA, shunt is in uOhms */
+ /* (equation 1 in datasheet) */
+ ina2xx_write_word(client, INA2XX_CALIBRATION, 5120000 / shunt);
+ dev_info(&client->dev,
+ "power monitor INA226 (Rshunt = %li uOhm)\n", shunt);
+ data->registers = INA226_REGISTERS;
+ break;
+ default:
+ /* unknown device id */
+ return -ENODEV;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ ret = sysfs_create_group(&client->dev.kobj, &ina2xx_group);
+ if (ret)
+ return ret;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_err_hwmon;
+ }
+
+ return 0;
+
+out_err_hwmon:
+ sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
+ return ret;
+}
+
+static int ina2xx_remove(struct i2c_client *client)
+{
+ struct ina2xx_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
+
+ return 0;
+}
+
+static const struct i2c_device_id ina2xx_id[] = {
+ { "ina219", ina219 },
+ { "ina226", ina226 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ina2xx_id);
+
+static struct i2c_driver ina2xx_driver = {
+ .driver = {
+ .name = "ina2xx",
+ },
+ .probe = ina2xx_probe,
+ .remove = ina2xx_remove,
+ .id_table = ina2xx_id,
+};
+
+static int __init ina2xx_init(void)
+{
+ return i2c_add_driver(&ina2xx_driver);
+}
+
+static void __exit ina2xx_exit(void)
+{
+ i2c_del_driver(&ina2xx_driver);
+}
+
+MODULE_AUTHOR("Lothar Felten <l-felten@ti.com>");
+MODULE_DESCRIPTION("ina2xx driver");
+MODULE_LICENSE("GPL");
+
+module_init(ina2xx_init);
+module_exit(ina2xx_exit);
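The calibration writes in ina2xx_probe() follow directly from the datasheet equations once the current LSB is fixed at 1 mA; a standalone sketch of the arithmetic, assuming the driver's 10 mOhm default shunt purely for illustration:

    #include <stdio.h>

    int main(void)
    {
            long shunt_uohm = 10000;                /* 10 mOhm default */

            /* INA219, eq. 13: cal = 0.04096 / (current_lsb * Rshunt);
             * with current_lsb = 1 mA and Rshunt in uOhm this reduces to: */
            long cal219 = 40960000 / shunt_uohm;    /* = 4096 */

            /* INA226, eq. 1: cal = 0.00512 / (current_lsb * Rshunt) */
            long cal226 = 5120000 / shunt_uohm;     /* = 512 */

            printf("INA219 cal=%ld, INA226 cal=%ld\n", cal219, cal226);
            return 0;
    }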
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 0b204e4cf51c..f1de3979181f 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -19,6 +19,8 @@
* IT8726F Super I/O chip w/LPC interface
* IT8728F Super I/O chip w/LPC interface
* IT8758E Super I/O chip w/LPC interface
+ * IT8782F Super I/O chip w/LPC interface
+ * IT8783E/F Super I/O chip w/LPC interface
* Sis950 A clone of the IT8705F
*
* Copyright (C) 2001 Chris Gauthron
@@ -59,7 +61,8 @@
#define DRVNAME "it87"
-enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8782,
+ it8783 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -137,13 +140,18 @@ static inline void superio_exit(void)
#define IT8721F_DEVID 0x8721
#define IT8726F_DEVID 0x8726
#define IT8728F_DEVID 0x8728
+#define IT8782F_DEVID 0x8782
+#define IT8783E_DEVID 0x8783
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
/* Logical device 7 registers (IT8712F and later) */
+#define IT87_SIO_GPIO1_REG 0x25
#define IT87_SIO_GPIO3_REG 0x27
#define IT87_SIO_GPIO5_REG 0x29
+#define IT87_SIO_PINX1_REG 0x2a /* Pin selection */
#define IT87_SIO_PINX2_REG 0x2c /* Pin selection */
+#define IT87_SIO_SPI_REG 0xef /* SPI function pin select */
#define IT87_SIO_VID_REG 0xfc /* VID value */
#define IT87_SIO_BEEP_PIN_REG 0xf6 /* Beep pin mapping */
@@ -210,6 +218,7 @@ static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
#define IT87_REG_VIN_ENABLE 0x50
#define IT87_REG_TEMP_ENABLE 0x51
+#define IT87_REG_TEMP_EXTRA 0x55
#define IT87_REG_BEEP_ENABLE 0x5c
#define IT87_REG_CHIPID 0x58
@@ -226,9 +235,11 @@ struct it87_sio_data {
u8 beep_pin;
u8 internal; /* Internal sensors can be labeled */
/* Features skipped based on config or DMI */
+ u16 skip_in;
u8 skip_vid;
u8 skip_fan;
u8 skip_pwm;
+ u8 skip_temp;
};
/*
@@ -253,6 +264,7 @@ struct it87_data {
u8 has_fan; /* Bitfield, fans enabled */
u16 fan[5]; /* Register values, possibly combined */
u16 fan_min[5]; /* Register values, possibly combined */
+ u8 has_temp; /* Bitfield, temp sensors enabled */
s8 temp[3]; /* Register value */
s8 temp_high[3]; /* Register value */
s8 temp_low[3]; /* Register value */
@@ -304,31 +316,23 @@ static inline int has_newer_autopwm(const struct it87_data *data)
|| data->type == it8728;
}
-static u8 in_to_reg(const struct it87_data *data, int nr, long val)
+static int adc_lsb(const struct it87_data *data, int nr)
{
- long lsb;
-
- if (has_12mv_adc(data)) {
- if (data->in_scaled & (1 << nr))
- lsb = 24;
- else
- lsb = 12;
- } else
- lsb = 16;
+ int lsb = has_12mv_adc(data) ? 12 : 16;
+ if (data->in_scaled & (1 << nr))
+ lsb <<= 1;
+ return lsb;
+}
- val = DIV_ROUND_CLOSEST(val, lsb);
+static u8 in_to_reg(const struct it87_data *data, int nr, long val)
+{
+ val = DIV_ROUND_CLOSEST(val, adc_lsb(data, nr));
return SENSORS_LIMIT(val, 0, 255);
}
static int in_from_reg(const struct it87_data *data, int nr, int val)
{
- if (has_12mv_adc(data)) {
- if (data->in_scaled & (1 << nr))
- return val * 24;
- else
- return val * 12;
- } else
- return val * 16;
+ return val * adc_lsb(data, nr);
}
static inline u8 FAN_TO_REG(long rpm, int div)
@@ -407,7 +411,9 @@ static inline int has_16bit_fans(const struct it87_data *data)
|| data->type == it8718
|| data->type == it8720
|| data->type == it8721
- || data->type == it8728;
+ || data->type == it8728
+ || data->type == it8782
+ || data->type == it8783;
}
static inline int has_old_autopwm(const struct it87_data *data)
@@ -1369,57 +1375,103 @@ static ssize_t show_name(struct device *dev, struct device_attribute
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static struct attribute *it87_attributes[] = {
+static struct attribute *it87_attributes_in[9][5] = {
+{
&sensor_dev_attr_in0_input.dev_attr.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in8_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
- &sensor_dev_attr_in1_min.dev_attr.attr,
- &sensor_dev_attr_in2_min.dev_attr.attr,
- &sensor_dev_attr_in3_min.dev_attr.attr,
- &sensor_dev_attr_in4_min.dev_attr.attr,
- &sensor_dev_attr_in5_min.dev_attr.attr,
- &sensor_dev_attr_in6_min.dev_attr.attr,
- &sensor_dev_attr_in7_min.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
- &sensor_dev_attr_in1_max.dev_attr.attr,
- &sensor_dev_attr_in2_max.dev_attr.attr,
- &sensor_dev_attr_in3_max.dev_attr.attr,
- &sensor_dev_attr_in4_max.dev_attr.attr,
- &sensor_dev_attr_in5_max.dev_attr.attr,
- &sensor_dev_attr_in6_max.dev_attr.attr,
- &sensor_dev_attr_in7_max.dev_attr.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in5_min.dev_attr.attr,
+ &sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in6_min.dev_attr.attr,
+ &sensor_dev_attr_in6_max.dev_attr.attr,
&sensor_dev_attr_in6_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in7_min.dev_attr.attr,
+ &sensor_dev_attr_in7_max.dev_attr.attr,
&sensor_dev_attr_in7_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_in8_input.dev_attr.attr,
+ NULL
+} };
+static const struct attribute_group it87_group_in[9] = {
+ { .attrs = it87_attributes_in[0] },
+ { .attrs = it87_attributes_in[1] },
+ { .attrs = it87_attributes_in[2] },
+ { .attrs = it87_attributes_in[3] },
+ { .attrs = it87_attributes_in[4] },
+ { .attrs = it87_attributes_in[5] },
+ { .attrs = it87_attributes_in[6] },
+ { .attrs = it87_attributes_in[7] },
+ { .attrs = it87_attributes_in[8] },
+};
+
+static struct attribute *it87_attributes_temp[3][6] = {
+{
&sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp1_type.dev_attr.attr,
- &sensor_dev_attr_temp2_type.dev_attr.attr,
- &sensor_dev_attr_temp3_type.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_type.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_type.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
+ NULL
+} };
+
+static const struct attribute_group it87_group_temp[3] = {
+ { .attrs = it87_attributes_temp[0] },
+ { .attrs = it87_attributes_temp[1] },
+ { .attrs = it87_attributes_temp[2] },
+};
+static struct attribute *it87_attributes[] = {
&dev_attr_alarms.attr,
&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
&dev_attr_name.attr,
@@ -1430,7 +1482,7 @@ static const struct attribute_group it87_group = {
.attrs = it87_attributes,
};
-static struct attribute *it87_attributes_beep[] = {
+static struct attribute *it87_attributes_in_beep[] = {
&sensor_dev_attr_in0_beep.dev_attr.attr,
&sensor_dev_attr_in1_beep.dev_attr.attr,
&sensor_dev_attr_in2_beep.dev_attr.attr,
@@ -1439,15 +1491,13 @@ static struct attribute *it87_attributes_beep[] = {
&sensor_dev_attr_in5_beep.dev_attr.attr,
&sensor_dev_attr_in6_beep.dev_attr.attr,
&sensor_dev_attr_in7_beep.dev_attr.attr,
+ NULL
+};
+static struct attribute *it87_attributes_temp_beep[] = {
&sensor_dev_attr_temp1_beep.dev_attr.attr,
&sensor_dev_attr_temp2_beep.dev_attr.attr,
&sensor_dev_attr_temp3_beep.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group it87_group_beep = {
- .attrs = it87_attributes_beep,
};
static struct attribute *it87_attributes_fan16[5][3+1] = { {
@@ -1651,6 +1701,12 @@ static int __init it87_find(unsigned short *address,
case IT8728F_DEVID:
sio_data->type = it8728;
break;
+ case IT8782F_DEVID:
+ sio_data->type = it8782;
+ break;
+ case IT8783E_DEVID:
+ sio_data->type = it8783;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -1686,16 +1742,86 @@ static int __init it87_find(unsigned short *address,
/* The IT8705F has a different LD number for GPIO */
superio_select(5);
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ } else if (sio_data->type == it8783) {
+ int reg25, reg27, reg2A, reg2C, regEF;
+
+ sio_data->skip_vid = 1; /* No VID */
+
+ superio_select(GPIO);
+
+ reg25 = superio_inb(IT87_SIO_GPIO1_REG);
+ reg27 = superio_inb(IT87_SIO_GPIO3_REG);
+ reg2A = superio_inb(IT87_SIO_PINX1_REG);
+ reg2C = superio_inb(IT87_SIO_PINX2_REG);
+ regEF = superio_inb(IT87_SIO_SPI_REG);
+
+ /* Check if fan3 is there or not */
+ if ((reg27 & (1 << 0)) || !(reg2C & (1 << 2)))
+ sio_data->skip_fan |= (1 << 2);
+ if ((reg25 & (1 << 4))
+ || (!(reg2A & (1 << 1)) && (regEF & (1 << 0))))
+ sio_data->skip_pwm |= (1 << 2);
+
+ /* Check if fan2 is there or not */
+ if (reg27 & (1 << 7))
+ sio_data->skip_fan |= (1 << 1);
+ if (reg27 & (1 << 3))
+ sio_data->skip_pwm |= (1 << 1);
+
+ /* VIN5 */
+ if ((reg27 & (1 << 0)) || (reg2C & (1 << 2)))
+ sio_data->skip_in |= (1 << 5); /* No VIN5 */
+
+ /* VIN6 */
+ if (reg27 & (1 << 1))
+ sio_data->skip_in |= (1 << 6); /* No VIN6 */
+
+ /*
+ * VIN7
+ * Does not depend on bit 2 of Reg2C, contrary to datasheet.
+ */
+ if (reg27 & (1 << 2)) {
+ /*
+ * The data sheet is a bit unclear regarding the
+ * internal voltage divider for VCCH5V. It says
+ * "This bit enables and switches VIN7 (pin 91) to the
+ * internal voltage divider for VCCH5V".
+ * This is different to other chips, where the internal
+ * voltage divider would connect VIN7 to an internal
+ * voltage source. Maybe that is the case here as well.
+ *
+ * Since we don't know for sure, re-route it if that is
+ * not the case, and ask the user to report if the
+ * resulting voltage is sane.
+ */
+ if (!(reg2C & (1 << 1))) {
+ reg2C |= (1 << 1);
+ superio_outb(IT87_SIO_PINX2_REG, reg2C);
+ pr_notice("Routing internal VCCH5V to in7.\n");
+ }
+ pr_notice("in7 routed to internal voltage divider, with external pin disabled.\n");
+ pr_notice("Please report if it displays a reasonable voltage.\n");
+ }
+
+ if (reg2C & (1 << 0))
+ sio_data->internal |= (1 << 0);
+ if (reg2C & (1 << 1))
+ sio_data->internal |= (1 << 1);
+
+ sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+
} else {
int reg;
+ bool uart6;
superio_select(GPIO);
reg = superio_inb(IT87_SIO_GPIO3_REG);
- if (sio_data->type == it8721 || sio_data->type == it8728) {
+ if (sio_data->type == it8721 || sio_data->type == it8728 ||
+ sio_data->type == it8782) {
/*
- * The IT8721F/IT8758E doesn't have VID pins at all,
- * not sure about the IT8728F.
+ * The IT8721F/IT8758E and IT8782F don't have VID pins
+ * at all; not sure about the IT8728F.
*/
sio_data->skip_vid = 1;
} else {
@@ -1724,6 +1850,9 @@ static int __init it87_find(unsigned short *address,
sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
reg = superio_inb(IT87_SIO_PINX2_REG);
+
+ uart6 = sio_data->type == it8782 && (reg & (1 << 2));
+
/*
* The IT8720F has no VIN7 pin, so VCCH should always be
* routed internally to VIN7 with an internal divider.
@@ -1733,8 +1862,12 @@ static int __init it87_find(unsigned short *address,
* configured, even though the IT8720F datasheet claims
* that the internal routing of VCCH to VIN7 is the default
* setting. So we force the internal routing in this case.
+ *
+ * On IT8782F, VIN7 is multiplexed with one of the UART6 pins.
+ * If UART6 is enabled, re-route VIN7 to the internal divider
+ * if that is not already the case.
*/
- if (sio_data->type == it8720 && !(reg & (1 << 1))) {
+ if ((sio_data->type == it8720 || uart6) && !(reg & (1 << 1))) {
reg |= (1 << 1);
superio_outb(IT87_SIO_PINX2_REG, reg);
pr_notice("Routing internal VCCH to in7\n");
@@ -1745,6 +1878,20 @@ static int __init it87_find(unsigned short *address,
sio_data->type == it8728)
sio_data->internal |= (1 << 1);
+ /*
+ * On IT8782F, UART6 pins overlap with VIN5, VIN6, and VIN7.
+ * While VIN7 can be routed to the internal voltage divider,
+ * VIN5 and VIN6 are not available if UART6 is enabled.
+ *
+ * Also, temp3 is not available if UART6 is enabled and TEMPIN3
+ * is the temperature source. Since we can not read the
+ * temperature source here, skip_temp is preliminary.
+ */
+ if (uart6) {
+ sio_data->skip_in |= (1 << 5) | (1 << 6);
+ sio_data->skip_temp |= (1 << 2);
+ }
+
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
}
if (sio_data->beep_pin)
@@ -1782,8 +1929,22 @@ static void it87_remove_files(struct device *dev)
int i;
sysfs_remove_group(&dev->kobj, &it87_group);
- if (sio_data->beep_pin)
- sysfs_remove_group(&dev->kobj, &it87_group_beep);
+ for (i = 0; i < 9; i++) {
+ if (sio_data->skip_in & (1 << i))
+ continue;
+ sysfs_remove_group(&dev->kobj, &it87_group_in[i]);
+ if (it87_attributes_in_beep[i])
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_in_beep[i]);
+ }
+ for (i = 0; i < 3; i++) {
+ if (!(data->has_temp & (1 << i)))
+ continue;
+ sysfs_remove_group(&dev->kobj, &it87_group_temp[i]);
+ if (sio_data->beep_pin)
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_temp_beep[i]);
+ }
for (i = 0; i < 5; i++) {
if (!(data->has_fan & (1 << i)))
continue;
@@ -1823,22 +1984,22 @@ static int __devinit it87_probe(struct platform_device *pdev)
"it8720",
"it8721",
"it8728",
+ "it8782",
+ "it8783",
};
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, IT87_EC_EXTENT, DRVNAME)) {
+ if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
+ DRVNAME)) {
dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
(unsigned long)res->start,
(unsigned long)(res->start + IT87_EC_EXTENT - 1));
- err = -EBUSY;
- goto ERROR0;
+ return -EBUSY;
}
- data = kzalloc(sizeof(struct it87_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto ERROR1;
- }
+ data = devm_kzalloc(&pdev->dev, sizeof(struct it87_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
data->addr = res->start;
data->type = sio_data->type;
@@ -1847,10 +2008,8 @@ static int __devinit it87_probe(struct platform_device *pdev)
/* Now, we do the remaining detection. */
if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
- || it87_read_value(data, IT87_REG_CHIPID) != 0x90) {
- err = -ENODEV;
- goto ERROR2;
- }
+ || it87_read_value(data, IT87_REG_CHIPID) != 0x90)
+ return -ENODEV;
platform_set_drvdata(pdev, data);
@@ -1867,6 +2026,18 @@ static int __devinit it87_probe(struct platform_device *pdev)
data->in_scaled |= (1 << 7); /* in7 is VSB */
if (sio_data->internal & (1 << 2))
data->in_scaled |= (1 << 8); /* in8 is Vbat */
+ } else if (sio_data->type == it8782 || sio_data->type == it8783) {
+ if (sio_data->internal & (1 << 0))
+ data->in_scaled |= (1 << 3); /* in3 is VCC5V */
+ if (sio_data->internal & (1 << 1))
+ data->in_scaled |= (1 << 7); /* in7 is VCCH5V */
+ }
+
+ data->has_temp = 0x07;
+ if (sio_data->skip_temp & (1 << 2)) {
+ if (sio_data->type == it8782
+ && !(it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x80))
+ data->has_temp &= ~(1 << 2);
}
/* Initialize the IT87 chip */
@@ -1875,12 +2046,34 @@ static int __devinit it87_probe(struct platform_device *pdev)
/* Register sysfs hooks */
err = sysfs_create_group(&dev->kobj, &it87_group);
if (err)
- goto ERROR2;
+ return err;
- if (sio_data->beep_pin) {
- err = sysfs_create_group(&dev->kobj, &it87_group_beep);
+ for (i = 0; i < 9; i++) {
+ if (sio_data->skip_in & (1 << i))
+ continue;
+ err = sysfs_create_group(&dev->kobj, &it87_group_in[i]);
if (err)
- goto ERROR4;
+ goto error;
+ if (sio_data->beep_pin && it87_attributes_in_beep[i]) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_in_beep[i]);
+ if (err)
+ goto error;
+ }
+ }
+
+ for (i = 0; i < 3; i++) {
+ if (!(data->has_temp & (1 << i)))
+ continue;
+ err = sysfs_create_group(&dev->kobj, &it87_group_temp[i]);
+ if (err)
+ goto error;
+ if (sio_data->beep_pin) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_temp_beep[i]);
+ if (err)
+ goto error;
+ }
}
/* Do not create fan files for disabled fans */
@@ -1891,13 +2084,13 @@ static int __devinit it87_probe(struct platform_device *pdev)
continue;
err = sysfs_create_group(&dev->kobj, &fan_group[i]);
if (err)
- goto ERROR4;
+ goto error;
if (sio_data->beep_pin) {
err = sysfs_create_file(&dev->kobj,
it87_attributes_fan_beep[i]);
if (err)
- goto ERROR4;
+ goto error;
if (!fan_beep_need_rw)
continue;
@@ -1922,14 +2115,14 @@ static int __devinit it87_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj,
&it87_group_pwm[i]);
if (err)
- goto ERROR4;
+ goto error;
if (!has_old_autopwm(data))
continue;
err = sysfs_create_group(&dev->kobj,
&it87_group_autopwm[i]);
if (err)
- goto ERROR4;
+ goto error;
}
}
@@ -1939,7 +2132,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
data->vid = sio_data->vid_value;
err = sysfs_create_group(&dev->kobj, &it87_group_vid);
if (err)
- goto ERROR4;
+ goto error;
}
/* Export labels for internal sensors */
@@ -1949,25 +2142,19 @@ static int __devinit it87_probe(struct platform_device *pdev)
err = sysfs_create_file(&dev->kobj,
it87_attributes_label[i]);
if (err)
- goto ERROR4;
+ goto error;
}
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR4;
+ goto error;
}
return 0;
-ERROR4:
+error:
it87_remove_files(dev);
-ERROR2:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-ERROR1:
- release_region(res->start, IT87_EC_EXTENT);
-ERROR0:
return err;
}
@@ -1978,10 +2165,6 @@ static int __devexit it87_remove(struct platform_device *pdev)
hwmon_device_unregister(data->hwmon_dev);
it87_remove_files(&pdev->dev);
- release_region(data->addr, IT87_EC_EXTENT);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
return 0;
}
@@ -2143,8 +2326,9 @@ static void __devinit it87_init_device(struct platform_device *pdev)
it87_write_value(data, IT87_REG_FAN_16BIT,
tmp | 0x07);
}
- /* IT8705F only supports three fans. */
- if (data->type != it87) {
+ /* IT8705F, IT8782F, and IT8783E/F only support three fans. */
+ if (data->type != it87 && data->type != it8782 &&
+ data->type != it8783) {
if (tmp & (1 << 4))
data->has_fan |= (1 << 3); /* fan4 enabled */
if (tmp & (1 << 5))
@@ -2157,7 +2341,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
/* Start monitoring */
it87_write_value(data, IT87_REG_CONFIG,
- (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
| (update_vbat ? 0x41 : 0x01));
}
@@ -2233,6 +2417,8 @@ static struct it87_data *it87_update_device(struct device *dev)
}
}
for (i = 0; i < 3; i++) {
+ if (!(data->has_temp & (1 << i)))
+ continue;
data->temp[i] =
it87_read_value(data, IT87_REG_TEMP(i));
data->temp_high[i] =
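The adc_lsb() helper introduced above folds the old 12 mV / 24 mV / 16 mV special cases into a single place; a small sketch of the same voltage conversion outside the driver, where has_12mv_adc and in_scaled stand in for the per-chip data:

    #include <stdio.h>

    static int adc_lsb(int has_12mv_adc, unsigned int in_scaled, int nr)
    {
            int lsb = has_12mv_adc ? 12 : 16;       /* mV per ADC step */

            if (in_scaled & (1 << nr))
                    lsb <<= 1;                      /* divided inputs count double */
            return lsb;
    }

    int main(void)
    {
            /* raw register value 192 on a 12 mV/step chip (e.g. IT8721F): */
            printf("in0: %d mV\n", 192 * adc_lsb(1, 0x00, 0));      /* 2304 mV */
            /* same reading on a scaled input such as in7 = VCCH5V: */
            printf("in7: %d mV\n", 192 * adc_lsb(1, 0x80, 7));      /* 4608 mV */
            return 0;
    }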
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index a9bfd6736d9a..e72ba5d2a824 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -590,6 +590,6 @@ abort:
module_i2c_driver(jc42_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("JC42 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 307bb325dde9..7356b5ec8f67 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -225,15 +225,4 @@ static struct pci_driver k10temp_driver = {
.remove = __devexit_p(k10temp_remove),
};
-static int __init k10temp_init(void)
-{
- return pci_register_driver(&k10temp_driver);
-}
-
-static void __exit k10temp_exit(void)
-{
- pci_unregister_driver(&k10temp_driver);
-}
-
-module_init(k10temp_init)
-module_exit(k10temp_exit)
+module_pci_driver(k10temp_driver);
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 575101988751..35aac82ee8eb 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -339,19 +339,8 @@ static struct pci_driver k8temp_driver = {
.remove = __devexit_p(k8temp_remove),
};
-static int __init k8temp_init(void)
-{
- return pci_register_driver(&k8temp_driver);
-}
-
-static void __exit k8temp_exit(void)
-{
- pci_unregister_driver(&k8temp_driver);
-}
+module_pci_driver(k8temp_driver);
MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
MODULE_DESCRIPTION("AMD K8 core temperature monitor");
MODULE_LICENSE("GPL");
-
-module_init(k8temp_init)
-module_exit(k8temp_exit)
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index d264937c7f5e..bd75d2415432 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -567,6 +567,6 @@ static struct i2c_driver pem_driver = {
module_i2c_driver(pem_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 069b7d34d8f9..77476a575c4e 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -292,6 +292,6 @@ static struct i2c_driver ltc4261_driver = {
module_i2c_driver(ltc4261_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("LTC4261 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 822261be84dd..019427d7a5fd 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -692,6 +692,6 @@ static struct i2c_driver max16065_driver = {
module_i2c_driver(max16065_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("MAX16065 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 9b382ec2c3bd..6da9696e1827 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -134,8 +134,7 @@ static inline u64 div64_u64_safe(u64 dividend, u64 divisor)
return div64_u64(dividend, divisor);
}
-static unsigned int get_ohm_of_thermistor(struct ntc_data *data,
- unsigned int uV)
+static int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uV)
{
struct ntc_thermistor_platform_data *pdata = data->pdata;
u64 mV = uV / 1000;
@@ -146,12 +145,12 @@ static unsigned int get_ohm_of_thermistor(struct ntc_data *data,
if (mV == 0) {
if (pdata->connect == NTC_CONNECTED_POSITIVE)
- return UINT_MAX;
+ return INT_MAX;
return 0;
}
if (mV >= pmV)
return (pdata->connect == NTC_CONNECTED_POSITIVE) ?
- 0 : UINT_MAX;
+ 0 : INT_MAX;
if (pdata->connect == NTC_CONNECTED_POSITIVE && puO == 0)
N = div64_u64_safe(pdO * (pmV - mV), mV);
@@ -163,113 +162,109 @@ static unsigned int get_ohm_of_thermistor(struct ntc_data *data,
else
N = div64_u64_safe(pdO * puO * mV, pdO * (pmV - mV) - puO * mV);
- return (unsigned int) N;
+ if (N > INT_MAX)
+ N = INT_MAX;
+ return N;
}
-static int lookup_comp(struct ntc_data *data,
- unsigned int ohm, int *i_low, int *i_high)
+static void lookup_comp(struct ntc_data *data, unsigned int ohm,
+ int *i_low, int *i_high)
{
- int start, end, mid = -1;
+ int start, end, mid;
+
+ /*
+ * Handle special cases: Resistance is higher than or equal to
+ * the resistance in the first table entry, or resistance is lower than
+ * or equal to the resistance in the last table entry.
+ * In these cases, return i_low == i_high, either pointing to the
+ * beginning or to the end of the table depending on the condition.
+ */
+ if (ohm >= data->comp[0].ohm) {
+ *i_low = 0;
+ *i_high = 0;
+ return;
+ }
+ if (ohm <= data->comp[data->n_comp - 1].ohm) {
+ *i_low = data->n_comp - 1;
+ *i_high = data->n_comp - 1;
+ return;
+ }
/* Do a binary search on compensation table */
start = 0;
end = data->n_comp;
-
- while (end > start) {
+ while (start < end) {
mid = start + (end - start) / 2;
- if (data->comp[mid].ohm < ohm)
+ /*
+ * start <= mid < end
+ * data->comp[start].ohm > ohm >= data->comp[end].ohm
+ *
+ * We could check for "ohm == data->comp[mid].ohm" here, but
+ * that is quite an unlikely condition, and we would have to
+ * check again after updating start. Check it at the end instead
+ * for simplicity.
+ */
+ if (ohm >= data->comp[mid].ohm) {
end = mid;
- else if (data->comp[mid].ohm > ohm)
- start = mid + 1;
- else
- break;
- }
-
- if (mid == 0) {
- if (data->comp[mid].ohm > ohm) {
- *i_high = mid;
- *i_low = mid + 1;
- return 0;
- } else {
- *i_low = mid;
- *i_high = -1;
- return -EINVAL;
- }
- }
- if (mid == (data->n_comp - 1)) {
- if (data->comp[mid].ohm <= ohm) {
- *i_low = mid;
- *i_high = mid - 1;
- return 0;
} else {
- *i_low = -1;
- *i_high = mid;
- return -EINVAL;
+ start = mid + 1;
+ /*
+ * ohm >= data->comp[start].ohm might be true here,
+ * since we set start to mid + 1. In that case, we are
+ * done. We could keep going, but the condition is quite
+ * likely to occur, so it is worth checking for it.
+ */
+ if (ohm >= data->comp[start].ohm)
+ end = start;
}
+ /*
+ * start <= end
+ * data->comp[start].ohm >= ohm >= data->comp[end].ohm
+ */
}
-
- if (data->comp[mid].ohm <= ohm) {
- *i_low = mid;
- *i_high = mid - 1;
- } else {
- *i_low = mid + 1;
- *i_high = mid;
- }
-
- return 0;
+ /*
+ * start == end
+ * ohm >= data->comp[end].ohm
+ */
+ *i_low = end;
+ if (ohm == data->comp[end].ohm)
+ *i_high = end;
+ else
+ *i_high = end - 1;
}
-static int get_temp_mC(struct ntc_data *data, unsigned int ohm, int *temp)
+static int get_temp_mC(struct ntc_data *data, unsigned int ohm)
{
int low, high;
- int ret;
+ int temp;
- ret = lookup_comp(data, ohm, &low, &high);
- if (ret) {
+ lookup_comp(data, ohm, &low, &high);
+ if (low == high) {
/* Unable to use linear approximation */
- if (low != -1)
- *temp = data->comp[low].temp_C * 1000;
- else if (high != -1)
- *temp = data->comp[high].temp_C * 1000;
- else
- return ret;
+ temp = data->comp[low].temp_C * 1000;
} else {
- *temp = data->comp[low].temp_C * 1000 +
+ temp = data->comp[low].temp_C * 1000 +
((data->comp[high].temp_C - data->comp[low].temp_C) *
1000 * ((int)ohm - (int)data->comp[low].ohm)) /
((int)data->comp[high].ohm - (int)data->comp[low].ohm);
}
-
- return 0;
+ return temp;
}
-static int ntc_thermistor_read(struct ntc_data *data, int *temp)
+static int ntc_thermistor_get_ohm(struct ntc_data *data)
{
- int ret;
- int read_ohm, read_uV;
- unsigned int ohm = 0;
-
- if (data->pdata->read_ohm) {
- read_ohm = data->pdata->read_ohm();
- if (read_ohm < 0)
- return read_ohm;
- ohm = (unsigned int)read_ohm;
- }
+ int read_uV;
+
+ if (data->pdata->read_ohm)
+ return data->pdata->read_ohm();
if (data->pdata->read_uV) {
read_uV = data->pdata->read_uV();
if (read_uV < 0)
return read_uV;
- ohm = get_ohm_of_thermistor(data, (unsigned int)read_uV);
- }
-
- ret = get_temp_mC(data, ohm, temp);
- if (ret) {
- dev_dbg(data->dev, "Sensor reading function not available.\n");
- return ret;
+ return get_ohm_of_thermistor(data, read_uV);
}
-
- return 0;
+ return -EINVAL;
}
static ssize_t ntc_show_name(struct device *dev,
@@ -290,12 +285,13 @@ static ssize_t ntc_show_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ntc_data *data = dev_get_drvdata(dev);
- int temp, ret;
+ int ohm;
- ret = ntc_thermistor_read(data, &temp);
- if (ret)
- return ret;
- return sprintf(buf, "%d\n", temp);
+ ohm = ntc_thermistor_get_ohm(data);
+ if (ohm < 0)
+ return ohm;
+
+ return sprintf(buf, "%d\n", get_temp_mC(data, ohm));
}
static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0);
@@ -326,14 +322,14 @@ static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
/* Either one of the two is required. */
if (!pdata->read_uV && !pdata->read_ohm) {
- dev_err(&pdev->dev, "Both read_uV and read_ohm missing."
- "Need either one of the two.\n");
+ dev_err(&pdev->dev,
+ "Both read_uV and read_ohm missing. Need either one of the two.\n");
return -EINVAL;
}
if (pdata->read_uV && pdata->read_ohm) {
- dev_warn(&pdev->dev, "Only one of read_uV and read_ohm "
- "is needed; ignoring read_uV.\n");
+ dev_warn(&pdev->dev,
+ "Only one of read_uV and read_ohm is needed; ignoring read_uV.\n");
pdata->read_uV = NULL;
}
@@ -344,12 +340,12 @@ static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
NTC_CONNECTED_POSITIVE) ||
(pdata->connect != NTC_CONNECTED_POSITIVE &&
pdata->connect != NTC_CONNECTED_GROUND))) {
- dev_err(&pdev->dev, "Required data to use read_uV not "
- "supplied.\n");
+ dev_err(&pdev->dev,
+ "Required data to use read_uV not supplied.\n");
return -EINVAL;
}
- data = kzalloc(sizeof(struct ntc_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct ntc_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -370,8 +366,7 @@ static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
pdev->id_entry->driver_data,
pdev->id_entry->name);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
platform_set_drvdata(pdev, data);
@@ -379,13 +374,13 @@ static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group);
if (ret) {
dev_err(data->dev, "unable to create sysfs files\n");
- goto err;
+ return ret;
}
data->hwmon_dev = hwmon_device_register(data->dev);
- if (IS_ERR_OR_NULL(data->hwmon_dev)) {
+ if (IS_ERR(data->hwmon_dev)) {
dev_err(data->dev, "unable to register as hwmon device.\n");
- ret = -EINVAL;
+ ret = PTR_ERR(data->hwmon_dev);
goto err_after_sysfs;
}
@@ -395,8 +390,6 @@ static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
return 0;
err_after_sysfs:
sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
-err:
- kfree(data);
return ret;
}
@@ -408,8 +401,6 @@ static int __devexit ntc_thermistor_remove(struct platform_device *pdev)
sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
platform_set_drvdata(pdev, NULL);
- kfree(data);
-
return 0;
}
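With the reworked lookup_comp(), get_temp_mC() either clamps at the table ends or interpolates linearly between two neighbouring rows; a self-contained sketch of that interpolation, with two table rows invented purely for illustration:

    #include <stdio.h>

    struct comp_entry { unsigned int ohm; int temp_C; };

    /* NTC tables are sorted by falling resistance / rising temperature */
    static const struct comp_entry hi = { 26515, 25 }; /* higher resistance */
    static const struct comp_entry lo = { 21237, 30 }; /* lower resistance  */

    static int interp_mC(unsigned int ohm)
    {
            /* same formula as the driver, result in millidegrees C */
            return lo.temp_C * 1000 +
                   (hi.temp_C - lo.temp_C) * 1000 *
                   ((int)ohm - (int)lo.ohm) / ((int)hi.ohm - (int)lo.ohm);
    }

    int main(void)
    {
            printf("%d\n", interp_mC(23876));  /* 27500, i.e. 27.5 degrees C */
            return 0;
    }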
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 8ec6dfbccb64..8342275378b8 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -579,7 +579,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(data->addr,
+ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
(build_code << 24) | (build_id << 8) | hwmon_rev,
&data->update_lock, 1);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 906d4ed32d81..96a7e68718ca 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -510,7 +510,7 @@ static int __devinit sch5636_probe(struct platform_device *pdev)
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(data->addr,
+ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
(revision[0] << 8) | revision[1],
&data->update_lock, 0);
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index ce52fc57d41d..4380f5d07be2 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -66,15 +66,10 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
struct sch56xx_watchdog_data {
u16 addr;
- u32 revision;
struct mutex *io_lock;
- struct mutex watchdog_lock;
- struct list_head list; /* member of the watchdog_data_list */
struct kref kref;
- struct miscdevice watchdog_miscdev;
- unsigned long watchdog_is_open;
- char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
- char watchdog_expect_close;
+ struct watchdog_info wdinfo;
+ struct watchdog_device wddev;
u8 watchdog_preset;
u8 watchdog_control;
u8 watchdog_output_enable;
@@ -82,15 +77,6 @@ struct sch56xx_watchdog_data {
static struct platform_device *sch56xx_pdev;
-/*
- * Somewhat ugly :( global data pointer list with all sch56xx devices, so that
- * we can find our device data as when using misc_register there is no other
- * method to get to ones device data from the open fop.
- */
-static LIST_HEAD(watchdog_data_list);
-/* Note this lock not only protect list access, but also data.kref access */
-static DEFINE_MUTEX(watchdog_data_mutex);
-
/* Super I/O functions */
static inline int superio_inb(int base, int reg)
{
@@ -272,22 +258,22 @@ EXPORT_SYMBOL(sch56xx_read_virtual_reg12);
* Watchdog routines
*/
-/*
- * Release our data struct when the platform device has been released *and*
- * all references to our watchdog device are released.
- */
-static void sch56xx_watchdog_release_resources(struct kref *r)
+/* Release our data struct when we're unregistered *and*
+ all references to our watchdog device are released */
+static void watchdog_release_resources(struct kref *r)
{
struct sch56xx_watchdog_data *data =
container_of(r, struct sch56xx_watchdog_data, kref);
kfree(data);
}
-static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
- int timeout)
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+ unsigned int timeout)
{
- int ret, resolution;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
+ unsigned int resolution;
u8 control;
+ int ret;
/* 1 second or 60 second resolution? */
if (timeout <= 255)
@@ -298,12 +284,6 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
if (timeout < resolution || timeout > (resolution * 255))
return -EINVAL;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave;
- }
-
if (resolution == 1)
control = data->watchdog_control | SCH56XX_WDOG_TIME_BASE_SEC;
else
@@ -316,7 +296,7 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
control);
mutex_unlock(data->io_lock);
if (ret)
- goto leave;
+ return ret;
data->watchdog_control = control;
}
@@ -326,38 +306,17 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
* the watchdog countdown.
*/
data->watchdog_preset = DIV_ROUND_UP(timeout, resolution);
+ wddev->timeout = data->watchdog_preset * resolution;
- ret = data->watchdog_preset * resolution;
-leave:
- mutex_unlock(&data->watchdog_lock);
- return ret;
-}
-
-static int watchdog_get_timeout(struct sch56xx_watchdog_data *data)
-{
- int timeout;
-
- mutex_lock(&data->watchdog_lock);
- if (data->watchdog_control & SCH56XX_WDOG_TIME_BASE_SEC)
- timeout = data->watchdog_preset;
- else
- timeout = data->watchdog_preset * 60;
- mutex_unlock(&data->watchdog_lock);
-
- return timeout;
+ return 0;
}
-static int watchdog_start(struct sch56xx_watchdog_data *data)
+static int watchdog_start(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret;
u8 val;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave_unlock_watchdog;
- }
-
/*
* The sch56xx's watchdog cannot really be started / stopped
* it is always running, but we can avoid the timer expiring
@@ -385,18 +344,14 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
if (ret)
goto leave;
- /* 2. Enable output (if not already enabled) */
- if (!(data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
- val = data->watchdog_output_enable |
- SCH56XX_WDOG_OUTPUT_ENABLE;
- ret = sch56xx_write_virtual_reg(data->addr,
- SCH56XX_REG_WDOG_OUTPUT_ENABLE,
- val);
- if (ret)
- goto leave;
+ /* 2. Enable output */
+ val = data->watchdog_output_enable | SCH56XX_WDOG_OUTPUT_ENABLE;
+ ret = sch56xx_write_virtual_reg(data->addr,
+ SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+ if (ret)
+ goto leave;
- data->watchdog_output_enable = val;
- }
+ data->watchdog_output_enable = val;
/* 3. Clear the watchdog event bit if set */
val = inb(data->addr + 9);
@@ -405,234 +360,70 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
leave:
mutex_unlock(data->io_lock);
-leave_unlock_watchdog:
- mutex_unlock(&data->watchdog_lock);
return ret;
}
-static int watchdog_trigger(struct sch56xx_watchdog_data *data)
+static int watchdog_trigger(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave;
- }
-
/* Reset the watchdog countdown counter */
mutex_lock(data->io_lock);
ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET,
data->watchdog_preset);
mutex_unlock(data->io_lock);
-leave:
- mutex_unlock(&data->watchdog_lock);
+
return ret;
}
-static int watchdog_stop_unlocked(struct sch56xx_watchdog_data *data)
+static int watchdog_stop(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret = 0;
u8 val;
- if (!data->addr)
- return -ENODEV;
-
- if (data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) {
- val = data->watchdog_output_enable &
- ~SCH56XX_WDOG_OUTPUT_ENABLE;
- mutex_lock(data->io_lock);
- ret = sch56xx_write_virtual_reg(data->addr,
- SCH56XX_REG_WDOG_OUTPUT_ENABLE,
- val);
- mutex_unlock(data->io_lock);
- if (ret)
- return ret;
-
- data->watchdog_output_enable = val;
- }
-
- return ret;
-}
-
-static int watchdog_stop(struct sch56xx_watchdog_data *data)
-{
- int ret;
-
- mutex_lock(&data->watchdog_lock);
- ret = watchdog_stop_unlocked(data);
- mutex_unlock(&data->watchdog_lock);
-
- return ret;
-}
-
-static int watchdog_release(struct inode *inode, struct file *filp)
-{
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- if (data->watchdog_expect_close) {
- watchdog_stop(data);
- data->watchdog_expect_close = 0;
- } else {
- watchdog_trigger(data);
- pr_crit("unexpected close, not stopping watchdog!\n");
- }
-
- clear_bit(0, &data->watchdog_is_open);
-
- mutex_lock(&watchdog_data_mutex);
- kref_put(&data->kref, sch56xx_watchdog_release_resources);
- mutex_unlock(&watchdog_data_mutex);
+ val = data->watchdog_output_enable & ~SCH56XX_WDOG_OUTPUT_ENABLE;
+ mutex_lock(data->io_lock);
+ ret = sch56xx_write_virtual_reg(data->addr,
+ SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+ mutex_unlock(data->io_lock);
+ if (ret)
+ return ret;
+ data->watchdog_output_enable = val;
return 0;
}
-static int watchdog_open(struct inode *inode, struct file *filp)
+static void watchdog_ref(struct watchdog_device *wddev)
{
- struct sch56xx_watchdog_data *pos, *data = NULL;
- int ret, watchdog_is_open;
-
- /*
- * We get called from drivers/char/misc.c with misc_mtx hold, and we
- * call misc_register() from sch56xx_watchdog_probe() with
- * watchdog_data_mutex hold, as misc_register() takes the misc_mtx
- * lock, this is a possible deadlock, so we use mutex_trylock here.
- */
- if (!mutex_trylock(&watchdog_data_mutex))
- return -ERESTARTSYS;
- list_for_each_entry(pos, &watchdog_data_list, list) {
- if (pos->watchdog_miscdev.minor == iminor(inode)) {
- data = pos;
- break;
- }
- }
- /* Note we can never not have found data, so we don't check for this */
- watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
- if (!watchdog_is_open)
- kref_get(&data->kref);
- mutex_unlock(&watchdog_data_mutex);
-
- if (watchdog_is_open)
- return -EBUSY;
-
- filp->private_data = data;
-
- /* Start the watchdog */
- ret = watchdog_start(data);
- if (ret) {
- watchdog_release(inode, filp);
- return ret;
- }
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
- return nonseekable_open(inode, filp);
+ kref_get(&data->kref);
}
-static ssize_t watchdog_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *offset)
+static void watchdog_unref(struct watchdog_device *wddev)
{
- int ret;
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- if (count) {
- if (!nowayout) {
- size_t i;
-
- /* Clear it in case it was set with a previous write */
- data->watchdog_expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- data->watchdog_expect_close = 1;
- }
- }
- ret = watchdog_trigger(data);
- if (ret)
- return ret;
- }
- return count;
-}
-
-static long watchdog_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
- .identity = "sch56xx watchdog"
- };
- int i, ret = 0;
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ident.firmware_version = data->revision;
- if (!nowayout)
- ident.options |= WDIOF_MAGICCLOSE;
- if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
- ret = -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int __user *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- ret = watchdog_trigger(data);
- break;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
- case WDIOC_GETTIMEOUT:
- i = watchdog_get_timeout(data);
- ret = put_user(i, (int __user *)arg);
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(i, (int __user *)arg)) {
- ret = -EFAULT;
- break;
- }
- ret = watchdog_set_timeout(data, i);
- if (ret >= 0)
- ret = put_user(ret, (int __user *)arg);
- break;
-
- case WDIOC_SETOPTIONS:
- if (get_user(i, (int __user *)arg)) {
- ret = -EFAULT;
- break;
- }
-
- if (i & WDIOS_DISABLECARD)
- ret = watchdog_stop(data);
- else if (i & WDIOS_ENABLECARD)
- ret = watchdog_trigger(data);
- else
- ret = -EINVAL;
- break;
-
- default:
- ret = -ENOTTY;
- }
- return ret;
+ kref_put(&data->kref, watchdog_release_resources);
}
-static const struct file_operations watchdog_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = watchdog_open,
- .release = watchdog_release,
- .write = watchdog_write,
- .unlocked_ioctl = watchdog_ioctl,
+static const struct watchdog_ops watchdog_ops = {
+ .owner = THIS_MODULE,
+ .start = watchdog_start,
+ .stop = watchdog_stop,
+ .ping = watchdog_trigger,
+ .set_timeout = watchdog_set_timeout,
+ .ref = watchdog_ref,
+ .unref = watchdog_unref,
};
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
{
struct sch56xx_watchdog_data *data;
- int i, err, control, output_enable;
- const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+ int err, control, output_enable;
/* Cache the watchdog registers */
mutex_lock(io_lock);
@@ -656,82 +447,55 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(
return NULL;
data->addr = addr;
- data->revision = revision;
data->io_lock = io_lock;
- data->watchdog_control = control;
- data->watchdog_output_enable = output_enable;
- mutex_init(&data->watchdog_lock);
- INIT_LIST_HEAD(&data->list);
kref_init(&data->kref);
- err = watchdog_set_timeout(data, 60);
- if (err < 0)
- goto error;
-
- /*
- * We take the data_mutex lock early so that watchdog_open() cannot
- * run when misc_register() has completed, but we've not yet added
- * our data to the watchdog_data_list.
- */
- mutex_lock(&watchdog_data_mutex);
- for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
- /* Register our watchdog part */
- snprintf(data->watchdog_name, sizeof(data->watchdog_name),
- "watchdog%c", (i == 0) ? '\0' : ('0' + i));
- data->watchdog_miscdev.name = data->watchdog_name;
- data->watchdog_miscdev.fops = &watchdog_fops;
- data->watchdog_miscdev.minor = watchdog_minors[i];
- err = misc_register(&data->watchdog_miscdev);
- if (err == -EBUSY)
- continue;
- if (err)
- break;
+ strlcpy(data->wdinfo.identity, "sch56xx watchdog",
+ sizeof(data->wdinfo.identity));
+ data->wdinfo.firmware_version = revision;
+ data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
+ if (!nowayout)
+ data->wdinfo.options |= WDIOF_MAGICCLOSE;
+
+ data->wddev.info = &data->wdinfo;
+ data->wddev.ops = &watchdog_ops;
+ data->wddev.parent = parent;
+ data->wddev.timeout = 60;
+ data->wddev.min_timeout = 1;
+ data->wddev.max_timeout = 255 * 60;
+ if (nowayout)
+ set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+ if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
+ set_bit(WDOG_ACTIVE, &data->wddev.status);
+
+ /* Since the watchdog uses a downcounter, there is no register to read
+ the BIOS-set timeout from (if any was set at all), so choose a
+ preset which will give us a 1 minute timeout. */
+ if (control & SCH56XX_WDOG_TIME_BASE_SEC)
+ data->watchdog_preset = 60; /* seconds */
+ else
+ data->watchdog_preset = 1; /* minute */
- list_add(&data->list, &watchdog_data_list);
- pr_info("Registered /dev/%s chardev major 10, minor: %d\n",
- data->watchdog_name, watchdog_minors[i]);
- break;
- }
- mutex_unlock(&watchdog_data_mutex);
+ data->watchdog_control = control;
+ data->watchdog_output_enable = output_enable;
+ watchdog_set_drvdata(&data->wddev, data);
+ err = watchdog_register_device(&data->wddev);
if (err) {
pr_err("Registering watchdog chardev: %d\n", err);
- goto error;
- }
- if (i == ARRAY_SIZE(watchdog_minors)) {
- pr_warn("Couldn't register watchdog (no free minor)\n");
- goto error;
+ kfree(data);
+ return NULL;
}
return data;
-
-error:
- kfree(data);
- return NULL;
}
EXPORT_SYMBOL(sch56xx_watchdog_register);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
{
- mutex_lock(&watchdog_data_mutex);
- misc_deregister(&data->watchdog_miscdev);
- list_del(&data->list);
- mutex_unlock(&watchdog_data_mutex);
-
- mutex_lock(&data->watchdog_lock);
- if (data->watchdog_is_open) {
- pr_warn("platform device unregistered with watchdog "
- "open! Stopping watchdog.\n");
- watchdog_stop_unlocked(data);
- }
- /* Tell the wdog start/stop/trigger functions our dev is gone */
- data->addr = 0;
- data->io_lock = NULL;
- mutex_unlock(&data->watchdog_lock);
-
- mutex_lock(&watchdog_data_mutex);
- kref_put(&data->kref, sch56xx_watchdog_release_resources);
- mutex_unlock(&watchdog_data_mutex);
+ watchdog_unregister_device(&data->wddev);
+ kref_put(&data->kref, watchdog_release_resources);
+ /* Don't touch data after this it may have been free-ed! */
}
EXPORT_SYMBOL(sch56xx_watchdog_unregister);
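The sch56xx conversion above replaces the hand-rolled misc chardev with the generic watchdog framework; in outline the registration pattern looks like this (a sketch with hypothetical my_*() callbacks, not the driver's exact code):

    #include <linux/module.h>
    #include <linux/watchdog.h>

    static int my_start(struct watchdog_device *w) { return 0; }
    static int my_stop(struct watchdog_device *w) { return 0; }
    static int my_ping(struct watchdog_device *w) { return 0; }
    static int my_set_timeout(struct watchdog_device *w, unsigned int t)
    {
            w->timeout = t;
            return 0;
    }

    static const struct watchdog_info my_wdinfo = {
            .options  = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
            .identity = "example watchdog",
    };

    static const struct watchdog_ops my_wdops = {
            .owner       = THIS_MODULE,
            .start       = my_start,
            .stop        = my_stop,
            .ping        = my_ping,
            .set_timeout = my_set_timeout,
    };

    static struct watchdog_device my_wddev = {
            .info        = &my_wdinfo,
            .ops         = &my_wdops,
            .timeout     = 60,
            .min_timeout = 1,
            .max_timeout = 255 * 60,
    };

    /* probe:  watchdog_set_drvdata(&my_wddev, drvdata);
     *         err = watchdog_register_device(&my_wddev);
     * remove: watchdog_unregister_device(&my_wddev);
     */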
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
index 7475086eb978..704ea2c6d28a 100644
--- a/drivers/hwmon/sch56xx-common.h
+++ b/drivers/hwmon/sch56xx-common.h
@@ -27,6 +27,6 @@ int sch56xx_read_virtual_reg16(u16 addr, u16 reg);
int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
int high_nibble);
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 61c9cf15fa52..1201a15784c3 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
spin_lock_init(&hwlock->lock);
hwlock->bank = bank;
- ret = hwspin_lock_register_single(hwlock, i);
+ ret = hwspin_lock_register_single(hwlock, base_id + i);
if (ret)
goto reg_failed;
}
@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
reg_failed:
while (--i >= 0)
- hwspin_lock_unregister_single(i);
+ hwspin_lock_unregister_single(base_id + i);
return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
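
The change above registers each lock under its global id (bank base_id + index) rather than the bank-local index, so a consumer asking for a specific lock by global number now gets the right one. A small consumer sketch, assuming a bank was registered with base_id 8:

    #include <linux/hwspinlock.h>

    /* The bank's second lock has global id base_id + 1 = 9. */
    static struct hwspinlock *request_second_lock(void)
    {
            return hwspin_lock_request_specific(9);
    }
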
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 5f13c62e64b4..5a3bb3d738d8 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -49,7 +49,6 @@ config I2C_CHARDEV
config I2C_MUX
tristate "I2C bus multiplexing support"
- depends on EXPERIMENTAL
help
Say Y here if you want the I2C core to support the ability to
handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 7f0b83219744..fad22b0bb5b0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -608,7 +608,7 @@ bailout:
static u32 bit_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d2c5095deeac..7244c8be6063 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -351,7 +351,7 @@ config I2C_DAVINCI
For details please see http://www.ti.com/davinci
config I2C_DESIGNWARE_PLATFORM
- tristate "Synopsys DesignWare Platfrom"
+ tristate "Synopsys DesignWare Platform"
depends on HAVE_CLK
help
If you say yes to this option, support will be included for the
@@ -445,20 +445,6 @@ config I2C_IOP3XX
This driver can also be built as a module. If so, the module
will be called i2c-iop3xx.
-config I2C_IXP2000
- tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)"
- depends on ARCH_IXP2000
- select I2C_ALGOBIT
- help
- Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based
- system and are using GPIO lines for an I2C bus.
-
- This support is also available as a module. If so, the module
- will be called i2c-ixp2000.
-
- This driver is deprecated and will be dropped soon. Use i2c-gpio
- instead.
-
config I2C_MPC
tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
depends on PPC
@@ -483,6 +469,7 @@ config I2C_MV64XXX
config I2C_MXS
tristate "Freescale i.MX28 I2C interface"
depends on SOC_IMX28
+ select STMP_DEVICE
help
Say Y here if you want to use the I2C bus controller on
the Freescale i.MX28 processors.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 569567b0d027..ce3c2be7fb40 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_INTEL_MID) += i2c-intel-mid.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
-obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a76d85fa3ad7..79b4bcb3b85c 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -755,7 +755,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
dev->clk = NULL;
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
- free_irq(IRQ_I2C, dev);
+ free_irq(dev->irq, dev);
iounmap(dev->base);
kfree(dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index df8799241009..1e48bec80edf 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -164,9 +164,15 @@ static char *abort_sources[] = {
u32 dw_readl(struct dw_i2c_dev *dev, int offset)
{
- u32 value = readl(dev->base + offset);
+ u32 value;
- if (dev->swab)
+ if (dev->accessor_flags & ACCESS_16BIT)
+ value = readw(dev->base + offset) |
+ (readw(dev->base + offset + 2) << 16);
+ else
+ value = readl(dev->base + offset);
+
+ if (dev->accessor_flags & ACCESS_SWAP)
return swab32(value);
else
return value;
@@ -174,10 +180,15 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
- if (dev->swab)
+ if (dev->accessor_flags & ACCESS_SWAP)
b = swab32(b);
- writel(b, dev->base + offset);
+ if (dev->accessor_flags & ACCESS_16BIT) {
+ writew((u16)b, dev->base + offset);
+ writew((u16)(b >> 16), dev->base + offset + 2);
+ } else {
+ writel(b, dev->base + offset);
+ }
}
static u32
@@ -251,14 +262,14 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
input_clock_khz = dev->get_clk_rate_khz(dev);
- /* Configure register endianess access */
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
- dev->swab = 1;
- reg = DW_IC_COMP_TYPE_VALUE;
- }
-
- if (reg != DW_IC_COMP_TYPE_VALUE) {
+ /* Configure register endianness access */
+ dev->accessor_flags |= ACCESS_SWAP;
+ } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
+ /* Configure register access mode 16bit */
+ dev->accessor_flags |= ACCESS_16BIT;
+ } else if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 02d1a2ddd853..9c1840ee09c7 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -82,7 +82,7 @@ struct dw_i2c_dev {
unsigned int status;
u32 abort_source;
int irq;
- int swab;
+ u32 accessor_flags;
struct i2c_adapter adapter;
u32 functionality;
u32 master_cfg;
@@ -90,6 +90,9 @@ struct dw_i2c_dev {
unsigned int rx_fifo_depth;
};
+#define ACCESS_SWAP 0x00000001
+#define ACCESS_16BIT 0x00000002
+
extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
extern int i2c_dw_init(struct dw_i2c_dev *dev);
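
The new ACCESS_16BIT path splits every 32-bit register into two halfword accesses, low half at offset and high half at offset + 2, with ACCESS_SWAP still applied afterwards. A standalone sketch of the same read-side assembly (illustrative only, mirroring dw_readl() above):

    #include <linux/io.h>
    #include <linux/swab.h>

    static u32 read_reg_16bit_bus(void __iomem *base, int offset, u32 flags)
    {
            u32 value;

            if (flags & ACCESS_16BIT)
                    value = readw(base + offset) |
                            (readw(base + offset + 2) << 16);
            else
                    value = readl(base + offset);

            if (flags & ACCESS_SWAP)
                    value = swab32(value);

            return value;
    }
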
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4ba589ab8614..0506fef8dc00 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -36,6 +36,7 @@
#include <linux/interrupt.h>
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "i2c-designware-core.h"
@@ -95,7 +96,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
r = -ENODEV;
goto err_free_mem;
}
- clk_enable(dev->clk);
+ clk_prepare_enable(dev->clk);
dev->functionality =
I2C_FUNC_I2C |
@@ -155,7 +156,7 @@ err_free_irq:
err_iounmap:
iounmap(dev->base);
err_unuse_clocks:
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
err_free_mem:
@@ -177,7 +178,7 @@ static int __devexit dw_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
@@ -198,6 +199,31 @@ static const struct of_device_id dw_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#endif
+#ifdef CONFIG_PM
+static int dw_i2c_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(i_dev->clk);
+
+ return 0;
+}
+
+static int dw_i2c_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+ clk_prepare_enable(i_dev->clk);
+ i2c_dw_init(i_dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -207,6 +233,7 @@ static struct platform_driver dw_i2c_driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(dw_i2c_of_match),
+ .pm = &dw_i2c_dev_pm_ops,
},
};
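
clk_prepare_enable() and clk_disable_unprepare() used above are the common-clock two-step sequences rolled into single calls; a sketch of the equivalent explicit calls:

    #include <linux/clk.h>

    static int my_clk_on(struct clk *clk)
    {
            int ret;

            ret = clk_prepare(clk);         /* may sleep */
            if (ret)
                    return ret;

            ret = clk_enable(clk);          /* safe in atomic context */
            if (ret)
                    clk_unprepare(clk);

            return ret;
    }

    static void my_clk_off(struct clk *clk)
    {
            clk_disable(clk);
            clk_unprepare(clk);
    }
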
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index c811289b61e2..2f74ae872e1e 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -263,11 +263,6 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
init_waitqueue_head(&pch_event);
}
-static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
-{
- return cmp1.tv64 < cmp2.tv64;
-}
-
/**
* pch_i2c_wait_for_bus_idle() - check the status of bus.
* @adap: Pointer to struct i2c_algo_pch_data.
@@ -317,33 +312,6 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap)
}
/**
- * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
- * @adap: Pointer to struct i2c_algo_pch_data.
- */
-static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
-{
- long ret;
- ret = wait_event_timeout(pch_event,
- (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
-
- if (ret == 0) {
- pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
- adap->pch_event_flag = 0;
- return -ETIMEDOUT;
- }
-
- if (adap->pch_event_flag & I2C_ERROR_MASK) {
- pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
- adap->pch_event_flag = 0;
- return -EIO;
- }
-
- adap->pch_event_flag = 0;
-
- return 0;
-}
-
-/**
* pch_i2c_getack() - to confirm ACK/NACK
* @adap: Pointer to struct i2c_algo_pch_data.
*/
@@ -373,6 +341,40 @@ static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
}
+static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap)
+{
+ long ret;
+
+ ret = wait_event_timeout(pch_event,
+ (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
+ if (!ret) {
+ pch_err(adap, "%s:wait-event timeout\n", __func__);
+ adap->pch_event_flag = 0;
+ pch_i2c_stop(adap);
+ pch_i2c_init(adap);
+ return -ETIMEDOUT;
+ }
+
+ if (adap->pch_event_flag & I2C_ERROR_MASK) {
+ pch_err(adap, "Lost Arbitration\n");
+ adap->pch_event_flag = 0;
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
+ pch_i2c_init(adap);
+ return -EAGAIN;
+ }
+
+ adap->pch_event_flag = 0;
+
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address"
+ "setting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* pch_i2c_repstart() - generate repeated start condition in normal mode
* @adap: Pointer to struct i2c_algo_pch_data.
@@ -427,27 +429,12 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_8_lsb = (addr & I2C_ADDR_MSK);
- iowrite32(addr_8_lsb, p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_8_lsb = (addr & I2C_ADDR_MSK);
+ iowrite32(addr_8_lsb, p + PCH_I2CDR);
} else {
/* set 7 bit slave address and R/W bit as 0 */
iowrite32(addr << 1, p + PCH_I2CDR);
@@ -455,44 +442,21 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
pch_i2c_start(adap);
}
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
for (wrcount = 0; wrcount < length; ++wrcount) {
/* write buffer value to I2C data register */
iowrite32(buf[wrcount], p + PCH_I2CDR);
pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMCF_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMCF_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
}
/* check if this is the last message */
@@ -580,50 +544,21 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_8_lsb = (addr & I2C_ADDR_MSK);
- iowrite32(addr_8_lsb, p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_8_lsb = (addr & I2C_ADDR_MSK);
+ iowrite32(addr_8_lsb, p + PCH_I2CDR);
+
pch_i2c_restart(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_2_msb |= I2C_RD;
- iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK,
- p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_2_msb |= I2C_RD;
+ iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
} else {
/* 7 address bits + R/W bit */
addr = (((addr) << 1) | (I2C_RD));
@@ -634,23 +569,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
if (length == 0) {
pch_i2c_stop(adap);
@@ -669,18 +590,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (loop != 1)
read_index++;
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave"
- "address setting\n");
- return -EIO;
- }
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
-
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
} /* end for */
pch_i2c_sendnack(adap);
@@ -690,17 +602,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (length != 1)
read_index++;
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave"
- "address setting\n");
- return -EIO;
- }
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
if (last)
pch_i2c_stop(adap);
@@ -790,7 +694,7 @@ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
ret = mutex_lock_interruptible(&pch_mutex);
if (ret)
- return -ERESTARTSYS;
+ return ret;
if (adap->p_adapter_info->pch_i2c_suspended) {
mutex_unlock(&pch_mutex);
@@ -909,7 +813,7 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->owner = THIS_MODULE;
pch_adap->class = I2C_CLASS_HWMON;
- strcpy(pch_adap->name, KBUILD_MODNAME);
+ strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
pch_adap->algo = &pch_algorithm;
pch_adap->algo_data = &adap_info->pch_data[i];
@@ -963,7 +867,7 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
for (i = 0; i < adap_info->ch_num; i++)
- adap_info->pch_data[i].pch_base_address = 0;
+ adap_info->pch_data[i].pch_base_address = NULL;
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c0330a41db03..e62d2d938628 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -190,12 +190,7 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
- /*
- * If "dev->id" is negative we consider it as zero.
- * The reason to do so is to avoid sysfs names that only make
- * sense when there are multiple adapters.
- */
- adap->nr = (pdev->id != -1) ? pdev->id : 0;
+ adap->nr = pdev->id;
ret = i2c_bit_add_numbered_bus(adap);
if (ret)
goto err_add_bus;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 56bce9a8bcbb..8d6b504d65c4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -512,7 +512,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
}
/* Setup i2c_imx driver structure */
- strcpy(i2c_imx->adapter.name, pdev->name);
+ strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
i2c_imx->adapter.owner = THIS_MODULE;
i2c_imx->adapter.algo = &i2c_imx_algo;
i2c_imx->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
deleted file mode 100644
index 5d263f9014d6..000000000000
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * drivers/i2c/busses/i2c-ixp2000.c
- *
- * I2C adapter for IXP2000 systems using GPIOs for I2C bus
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- * Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com>
- * Made generic by: Jeff Daly <jeffrey.daly@intel.com>
- *
- * Copyright (c) 2003-2004 MontaVista Software Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
- * From Jeff Daly:
- *
- * I2C adapter driver for Intel IXDP2xxx platforms. This should work for any
- * IXP2000 platform if it uses the HW GPIO in the same manner. Basically,
- * SDA and SCL GPIOs have external pullups. Setting the respective GPIO to
- * an input will make the signal a '1' via the pullup. Setting them to
- * outputs will pull them down.
- *
- * The GPIOs are open drain signals and are used as configuration strap inputs
- * during power-up so there's generally a buffer on the board that needs to be
- * 'enabled' to drive the GPIOs.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
-#include <mach/gpio-ixp2000.h>
-
-static inline int ixp2000_scl_pin(void *data)
-{
- return ((struct ixp2000_i2c_pins*)data)->scl_pin;
-}
-
-static inline int ixp2000_sda_pin(void *data)
-{
- return ((struct ixp2000_i2c_pins*)data)->sda_pin;
-}
-
-
-static void ixp2000_bit_setscl(void *data, int val)
-{
- int i = 5000;
-
- if (val) {
- gpio_line_config(ixp2000_scl_pin(data), GPIO_IN);
- while(!gpio_line_get(ixp2000_scl_pin(data)) && i--);
- } else {
- gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT);
- }
-}
-
-static void ixp2000_bit_setsda(void *data, int val)
-{
- if (val) {
- gpio_line_config(ixp2000_sda_pin(data), GPIO_IN);
- } else {
- gpio_line_config(ixp2000_sda_pin(data), GPIO_OUT);
- }
-}
-
-static int ixp2000_bit_getscl(void *data)
-{
- return gpio_line_get(ixp2000_scl_pin(data));
-}
-
-static int ixp2000_bit_getsda(void *data)
-{
- return gpio_line_get(ixp2000_sda_pin(data));
-}
-
-struct ixp2000_i2c_data {
- struct ixp2000_i2c_pins *gpio_pins;
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo_data;
-};
-
-static int ixp2000_i2c_remove(struct platform_device *plat_dev)
-{
- struct ixp2000_i2c_data *drv_data = platform_get_drvdata(plat_dev);
-
- platform_set_drvdata(plat_dev, NULL);
-
- i2c_del_adapter(&drv_data->adapter);
-
- kfree(drv_data);
-
- return 0;
-}
-
-static int ixp2000_i2c_probe(struct platform_device *plat_dev)
-{
- int err;
- struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data;
- struct ixp2000_i2c_data *drv_data =
- kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL);
-
- if (!drv_data)
- return -ENOMEM;
- drv_data->gpio_pins = gpio;
-
- drv_data->algo_data.data = gpio;
- drv_data->algo_data.setsda = ixp2000_bit_setsda;
- drv_data->algo_data.setscl = ixp2000_bit_setscl;
- drv_data->algo_data.getsda = ixp2000_bit_getsda;
- drv_data->algo_data.getscl = ixp2000_bit_getscl;
- drv_data->algo_data.udelay = 6;
- drv_data->algo_data.timeout = HZ;
-
- strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
- sizeof(drv_data->adapter.name));
- drv_data->adapter.algo_data = &drv_data->algo_data,
-
- drv_data->adapter.dev.parent = &plat_dev->dev;
-
- gpio_line_config(gpio->sda_pin, GPIO_IN);
- gpio_line_config(gpio->scl_pin, GPIO_IN);
- gpio_line_set(gpio->scl_pin, 0);
- gpio_line_set(gpio->sda_pin, 0);
-
- if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
- dev_err(&plat_dev->dev, "Could not install, error %d\n", err);
- kfree(drv_data);
- return err;
- }
-
- platform_set_drvdata(plat_dev, drv_data);
-
- return 0;
-}
-
-static struct platform_driver ixp2000_i2c_driver = {
- .probe = ixp2000_i2c_probe,
- .remove = ixp2000_i2c_remove,
- .driver = {
- .name = "IXP2000-I2C",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(ixp2000_i2c_driver);
-
-MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:IXP2000-I2C");
-
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 206caacd30d7..b76731edbf10 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -64,6 +64,9 @@ struct mpc_i2c {
struct i2c_adapter adap;
int irq;
u32 real_clk;
+#ifdef CONFIG_PM
+ u8 fdr, dfsrr;
+#endif
};
struct mpc_i2c_divider {
@@ -703,6 +706,30 @@ static int __devexit fsl_i2c_remove(struct platform_device *op)
return 0;
};
+#ifdef CONFIG_PM
+static int mpc_i2c_suspend(struct device *dev)
+{
+ struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+ i2c->fdr = readb(i2c->base + MPC_I2C_FDR);
+ i2c->dfsrr = readb(i2c->base + MPC_I2C_DFSRR);
+
+ return 0;
+}
+
+static int mpc_i2c_resume(struct device *dev)
+{
+ struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+ writeb(i2c->fdr, i2c->base + MPC_I2C_FDR);
+ writeb(i2c->dfsrr, i2c->base + MPC_I2C_DFSRR);
+
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+#endif
+
static struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
.setup = mpc_i2c_setup_512x,
};
@@ -747,6 +774,9 @@ static struct platform_driver mpc_i2c_driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
+#ifdef CONFIG_PM
+ .pm = &mpc_i2c_pm_ops,
+#endif
},
};
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 7fa73eed84a7..04eb441b6ce1 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -27,8 +27,10 @@
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
-
-#include <mach/common.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_i2c.h>
#define DRIVER_NAME "mxs-i2c"
@@ -112,13 +114,9 @@ struct mxs_i2c_dev {
struct i2c_adapter adapter;
};
-/*
- * TODO: check if calls to here are really needed. If not, we could get rid of
- * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
- */
static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
{
- mxs_reset_block(i2c->regs);
+ stmp_reset_block(i2c->regs);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
i2c->regs + MXS_I2C_QUEUECTRL_SET);
@@ -371,6 +369,7 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
adap->algo = &mxs_i2c_algo;
adap->dev.parent = dev;
adap->nr = pdev->id;
+ adap->dev.of_node = pdev->dev.of_node;
i2c_set_adapdata(adap, i2c);
err = i2c_add_numbered_adapter(adap);
if (err) {
@@ -380,6 +379,8 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
return err;
}
+ of_i2c_register_devices(adap);
+
return 0;
}
@@ -399,10 +400,17 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+ { .compatible = "fsl,imx28-i2c", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
static struct platform_driver mxs_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mxs_i2c_dt_ids,
},
.remove = __devexit_p(mxs_i2c_remove),
};
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 03b615778887..a26dfb8cd586 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -502,7 +502,8 @@ static int nuc900_i2c_xfer(struct i2c_adapter *adap,
/* declare our i2c functionality */
static u32 nuc900_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 18068dee48f1..75194c579b6d 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -55,6 +55,7 @@
#include <linux/i2c-ocores.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of_i2c.h>
struct ocores_i2c {
void __iomem *base;
@@ -343,6 +344,8 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
i2c_new_device(&i2c->adap, pdata->devices + i);
+ } else {
+ of_i2c_register_devices(&i2c->adap);
}
return 0;
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 2adbf1a8fdea..675878f49f76 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -171,7 +171,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
i2c->io_size = resource_size(res);
i2c->irq = irq;
- i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
+ i2c->adap.nr = pdev->id;
i2c->adap.owner = THIS_MODULE;
snprintf(i2c->adap.name, sizeof(i2c->adap.name),
"PCA9564/PCA9665 at 0x%08lx",
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 7b397c6f607e..31c47e18d83c 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -227,6 +227,72 @@ static int __devexit i2c_powermac_remove(struct platform_device *dev)
return 0;
}
+static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
+ struct pmac_i2c_bus *bus)
+{
+ struct i2c_client *newdev;
+ struct device_node *node;
+
+ for_each_child_of_node(adap->dev.of_node, node) {
+ struct i2c_board_info info = {};
+ struct dev_archdata dev_ad = {};
+ const __be32 *reg;
+ char tmp[16];
+ u32 addr;
+ int len;
+
+ /* Get address & channel */
+ reg = of_get_property(node, "reg", &len);
+ if (!reg || (len < sizeof(int))) {
+ dev_err(&adap->dev, "i2c-powermac: invalid reg on %s\n",
+ node->full_name);
+ continue;
+ }
+ addr = be32_to_cpup(reg);
+
+ /* Multibus setup, check channel */
+ if (!pmac_i2c_match_adapter(node, adap))
+ continue;
+
+ dev_dbg(&adap->dev, "i2c-powermac: register %s\n",
+ node->full_name);
+
+ /* Make up a modalias. Note: we do _NOT_ want the standard
+ * i2c drivers to match with any of our powermac stuff
+ * unless they have been specifically modified to handle
+ * it on a case by case basis. For example, for thermal
+ * control, things like lm75 etc... shall match with their
+ * corresponding windfarm drivers, _NOT_ the generic ones,
+ * so we force a prefix of AAPL, onto the modalias to
+ * make that happen
+ */
+ if (of_modalias_node(node, tmp, sizeof(tmp)) < 0) {
+ dev_err(&adap->dev, "i2c-powermac: modalias failure"
+ " on %s\n", node->full_name);
+ continue;
+ }
+ snprintf(info.type, sizeof(info.type), "MAC,%s", tmp);
+
+ /* Fill out the rest of the info structure */
+ info.addr = (addr & 0xff) >> 1;
+ info.irq = irq_of_parse_and_map(node, 0);
+ info.of_node = of_node_get(node);
+ info.archdata = &dev_ad;
+
+ newdev = i2c_new_device(adap, &info);
+ if (!newdev) {
+ dev_err(&adap->dev, "i2c-powermac: Failure to register"
+ " %s\n", node->full_name);
+ of_node_put(node);
+ /* We do not dispose of the interrupt mapping on
+ * purpose. It's not necessary (interrupt cannot be
+ * re-used) and somebody else might have grabbed it
+ * via direct DT lookup so let's not bother
+ */
+ continue;
+ }
+ }
+}
static int __devinit i2c_powermac_probe(struct platform_device *dev)
{
@@ -272,6 +338,7 @@ static int __devinit i2c_powermac_probe(struct platform_device *dev)
adapter->algo = &i2c_powermac_algorithm;
i2c_set_adapdata(adapter, bus);
adapter->dev.parent = &dev->dev;
+ adapter->dev.of_node = dev->dev.of_node;
rc = i2c_add_adapter(adapter);
if (rc) {
printk(KERN_ERR "i2c-powermac: Adapter %s registration "
@@ -281,33 +348,10 @@ static int __devinit i2c_powermac_probe(struct platform_device *dev)
printk(KERN_INFO "PowerMac i2c bus %s registered\n", adapter->name);
- if (!strncmp(basename, "uni-n", 5)) {
- struct device_node *np;
- const u32 *prop;
- struct i2c_board_info info;
-
- /* Instantiate I2C motion sensor if present */
- np = of_find_node_by_name(NULL, "accelerometer");
- if (np && of_device_is_compatible(np, "AAPL,accelerometer_1") &&
- (prop = of_get_property(np, "reg", NULL))) {
- int i2c_bus;
- const char *tmp_bus;
-
- /* look for bus either using "reg" or by path */
- tmp_bus = strstr(np->full_name, "/i2c-bus@");
- if (tmp_bus)
- i2c_bus = *(tmp_bus + 9) - '0';
- else
- i2c_bus = ((*prop) >> 8) & 0x0f;
-
- if (pmac_i2c_get_channel(bus) == i2c_bus) {
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = ((*prop) & 0xff) >> 1;
- strlcpy(info.type, "ams", I2C_NAME_SIZE);
- i2c_new_device(adapter, &info);
- }
- }
- }
+ /* Cannot use of_i2c_register_devices() due to Apple device-tree
+ * funkiness
+ */
+ i2c_powermac_register_devices(adapter, bus);
return rc;
}
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f6733267fa9c..a997c7d3f95d 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1131,11 +1131,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- /*
- * If "dev->id" is negative we consider it as zero.
- * The reason to do so is to avoid sysfs names that only make
- * sense when there are multiple adapters.
- */
i2c->adap.nr = dev->id;
snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
i2c->adap.nr);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 737f7218a32c..01959154572d 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -44,8 +44,12 @@
#include <plat/regs-iic.h>
#include <plat/iic.h>
-/* i2c controller state */
+/* Treat S3C2410 as baseline hardware, anything else is supported via quirks */
+#define QUIRK_S3C2440 (1 << 0)
+#define QUIRK_HDMIPHY (1 << 1)
+#define QUIRK_NO_GPIO (1 << 2)
+/* i2c controller state */
enum s3c24xx_i2c_state {
STATE_IDLE,
STATE_START,
@@ -54,14 +58,10 @@ enum s3c24xx_i2c_state {
STATE_STOP
};
-enum s3c24xx_i2c_type {
- TYPE_S3C2410,
- TYPE_S3C2440,
-};
-
struct s3c24xx_i2c {
spinlock_t lock;
wait_queue_head_t wait;
+ unsigned int quirks;
unsigned int suspended:1;
struct i2c_msg *msg;
@@ -88,26 +88,45 @@ struct s3c24xx_i2c {
#endif
};
-/* default platform data removed, dev should always carry data. */
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-i2c",
+ .driver_data = 0,
+ }, {
+ .name = "s3c2440-i2c",
+ .driver_data = QUIRK_S3C2440,
+ }, {
+ .name = "s3c2440-hdmiphy-i2c",
+ .driver_data = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO,
+ }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id s3c24xx_i2c_match[] = {
+ { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
+ { .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
+ { .compatible = "samsung,s3c2440-hdmiphy-i2c",
+ .data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
+#endif
-/* s3c24xx_i2c_is2440()
+/* s3c24xx_get_device_quirks
*
- * return true is this is an s3c2440
+ * Get controller type either from device tree or platform device variant.
*/
-static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
+static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(i2c->dev);
- enum s3c24xx_i2c_type type;
-
-#ifdef CONFIG_OF
- if (i2c->dev->of_node)
- return of_device_is_compatible(i2c->dev->of_node,
- "samsung,s3c2440-i2c");
-#endif
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(&s3c24xx_i2c_match, pdev->dev.of_node);
+ return (unsigned int)match->data;
+ }
- type = platform_get_device_id(pdev)->driver_data;
- return type == TYPE_S3C2440;
+ return platform_get_device_id(pdev)->driver_data;
}
/* s3c24xx_i2c_master_complete
@@ -471,6 +490,13 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
unsigned long iicstat;
int timeout = 400;
+ /* the timeout for HDMIPHY is reduced to 10 ms because
+ * the hangup is expected to happen, so waiting 400 ms
+ * causes only unnecessary system hangup
+ */
+ if (i2c->quirks & QUIRK_HDMIPHY)
+ timeout = 10;
+
while (timeout-- > 0) {
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
@@ -480,6 +506,15 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
msleep(1);
}
+ /* hang-up of bus dedicated for HDMIPHY occurred, resetting */
+ if (i2c->quirks & QUIRK_HDMIPHY) {
+ writel(0, i2c->regs + S3C2410_IICCON);
+ writel(0, i2c->regs + S3C2410_IICSTAT);
+ writel(0, i2c->regs + S3C2410_IICDS);
+
+ return 0;
+ }
+
return -ETIMEDOUT;
}
@@ -591,7 +626,8 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
@@ -676,7 +712,7 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
writel(iiccon, i2c->regs + S3C2410_IICCON);
- if (s3c24xx_i2c_is2440(i2c)) {
+ if (i2c->quirks & QUIRK_S3C2440) {
unsigned long sda_delay;
if (pdata->sda_delay) {
@@ -761,6 +797,9 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
int idx, gpio, ret;
+ if (i2c->quirks & QUIRK_NO_GPIO)
+ return 0;
+
for (idx = 0; idx < 2; idx++) {
gpio = of_get_gpio(i2c->dev->of_node, idx);
if (!gpio_is_valid(gpio)) {
@@ -785,6 +824,10 @@ free_gpio:
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
{
unsigned int idx;
+
+ if (i2c->quirks & QUIRK_NO_GPIO)
+ return;
+
for (idx = 0; idx < 2; idx++)
gpio_free(i2c->gpios[idx]);
}
@@ -906,6 +949,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_noclk;
}
+ i2c->quirks = s3c24xx_get_device_quirks(pdev);
if (pdata)
memcpy(i2c->pdata, pdata, sizeof(*pdata));
else
@@ -1110,28 +1154,6 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
/* device driver for platform bus bits */
-static struct platform_device_id s3c24xx_driver_ids[] = {
- {
- .name = "s3c2410-i2c",
- .driver_data = TYPE_S3C2410,
- }, {
- .name = "s3c2440-i2c",
- .driver_data = TYPE_S3C2440,
- }, { },
-};
-MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-
-#ifdef CONFIG_OF
-static const struct of_device_id s3c24xx_i2c_match[] = {
- { .compatible = "samsung,s3c2410-i2c" },
- { .compatible = "samsung,s3c2440-i2c" },
- {},
-};
-MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
-#else
-#define s3c24xx_i2c_match NULL
-#endif
-
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
@@ -1140,7 +1162,7 @@ static struct platform_driver s3c24xx_i2c_driver = {
.owner = THIS_MODULE,
.name = "s3c-i2c",
.pm = S3C24XX_DEV_PM_OPS,
- .of_match_table = s3c24xx_i2c_match,
+ .of_match_table = of_match_ptr(s3c24xx_i2c_match),
},
};
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index c64ba736f480..b76a29d1f8e4 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -3,7 +3,7 @@
*
* Description: Driver for S6000 Family I2C Interface
* Copyright (c) 2008 emlix GmbH
- * Author: Oskar Schirmer <os@emlix.com>
+ * Author: Oskar Schirmer <oskar@scara.com>
*
* Partially based on i2c-bfin-twi.c driver by <sonic.zhang@analog.com>
* Copyright (c) 2005-2007 Analog Devices, Inc.
diff --git a/drivers/i2c/busses/i2c-s6000.h b/drivers/i2c/busses/i2c-s6000.h
index ff23b81ded44..4936f9f2256f 100644
--- a/drivers/i2c/busses/i2c-s6000.h
+++ b/drivers/i2c/busses/i2c-s6000.h
@@ -6,7 +6,7 @@
* for more details.
*
* Copyright (C) 2008 Emlix GmbH <info@emlix.com>
- * Author: Oskar Schirmer <os@emlix.com>
+ * Author: Oskar Schirmer <oskar@scara.com>
*/
#ifndef __DRIVERS_I2C_BUSSES_I2C_S6000_H
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 675c9692d148..8110ca45f342 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -27,6 +27,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/of_i2c.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
@@ -653,6 +654,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->dev.parent = &dev->dev;
adap->retries = 5;
adap->nr = dev->id;
+ adap->dev.of_node = dev->dev.of_node;
strlcpy(adap->name, dev->name, sizeof(adap->name));
@@ -667,6 +669,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
adap->nr, pd->bus_speed);
+
+ of_i2c_register_devices(adap);
return 0;
err_all:
@@ -710,11 +714,18 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
.runtime_resume = sh_mobile_i2c_runtime_nop,
};
+static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
+ { .compatible = "renesas,rmobile-iic", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
+
static struct platform_driver sh_mobile_i2c_driver = {
.driver = {
.name = "i2c-sh_mobile",
.owner = THIS_MODULE,
.pm = &sh_mobile_i2c_dev_pm_ops,
+ .of_match_table = sh_mobile_i2c_dt_ids,
},
.probe = sh_mobile_i2c_probe,
.remove = sh_mobile_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 55e5ea62ccee..8b2e555a9563 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -401,8 +401,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
disable_irq_nosync(i2c_dev->irq);
i2c_dev->irq_disabled = 1;
}
-
- complete(&i2c_dev->msg_complete);
goto err;
}
@@ -411,7 +409,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
i2c_dev->msg_err |= I2C_ERR_NO_ACK;
if (status & I2C_INT_ARBITRATION_LOST)
i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
- complete(&i2c_dev->msg_complete);
goto err;
}
@@ -429,14 +426,14 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
}
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
if (status & I2C_INT_PACKET_XFER_COMPLETE) {
BUG_ON(i2c_dev->msg_buf_remaining);
complete(&i2c_dev->msg_complete);
}
-
- i2c_writel(i2c_dev, status, I2C_INT_STATUS);
- if (i2c_dev->is_dvc)
- dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
return IRQ_HANDLED;
err:
/* An error occurred, mask all interrupts */
@@ -446,6 +443,8 @@ err:
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
if (i2c_dev->is_dvc)
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
+ complete(&i2c_dev->msg_complete);
return IRQ_HANDLED;
}
@@ -476,12 +475,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
packet_header = msg->len - 1;
i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
- packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
- packet_header |= I2C_HEADER_IE_ENABLE;
+ packet_header = I2C_HEADER_IE_ENABLE;
if (!stop)
packet_header |= I2C_HEADER_REPEAT_START;
- if (msg->flags & I2C_M_TEN)
+ if (msg->flags & I2C_M_TEN) {
+ packet_header |= msg->addr;
packet_header |= I2C_HEADER_10BIT_ADDR;
+ } else {
+ packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ }
if (msg->flags & I2C_M_IGNORE_NAK)
packet_header |= I2C_HEADER_CONT_ON_NAK;
if (msg->flags & I2C_M_RD)
@@ -557,7 +559,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 tegra_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
}
static const struct i2c_algorithm tegra_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index f585aead50cc..eec20db6246f 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -104,13 +104,8 @@ static int i2c_versatile_probe(struct platform_device *dev)
i2c->algo = i2c_versatile_algo;
i2c->algo.data = i2c;
- if (dev->id >= 0) {
- /* static bus numbering */
- i2c->adap.nr = dev->id;
- ret = i2c_bit_add_numbered_bus(&i2c->adap);
- } else
- /* dynamic bus numbering */
- ret = i2c_bit_add_bus(&i2c->adap);
+ i2c->adap.nr = dev->id;
+ ret = i2c_bit_add_numbered_bus(&i2c->adap);
if (ret >= 0) {
platform_set_drvdata(dev, i2c);
of_i2c_register_devices(&i2c->adap);
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 2bded7647ef2..641d0e5e3303 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -40,6 +40,7 @@
#include <linux/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of_i2c.h>
#define DRIVER_NAME "xiic-i2c"
@@ -705,8 +706,6 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
goto resource_missing;
pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
- if (!pdata)
- return -EINVAL;
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -730,6 +729,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
i2c->adap = xiic_adapter;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
xiic_reinit(i2c);
@@ -748,9 +748,13 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
goto add_adapter_failed;
}
- /* add in known devices to the bus */
- for (i = 0; i < pdata->num_devices; i++)
- i2c_new_device(&i2c->adap, pdata->devices + i);
+ if (pdata) {
+ /* add in known devices to the bus */
+ for (i = 0; i < pdata->num_devices; i++)
+ i2c_new_device(&i2c->adap, pdata->devices + i);
+ }
+
+ of_i2c_register_devices(&i2c->adap);
return 0;
@@ -795,12 +799,21 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id xiic_of_match[] __devinitconst = {
+ { .compatible = "xlnx,xps-iic-2.00.a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xiic_of_match);
+#endif
+
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = __devexit_p(xiic_i2c_remove),
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(xiic_of_match),
},
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index feb7dc359186..a6ad32bc0a96 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -772,6 +772,23 @@ struct device_type i2c_adapter_type = {
};
EXPORT_SYMBOL_GPL(i2c_adapter_type);
+/**
+ * i2c_verify_adapter - return parameter as i2c_adapter or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * When traversing the driver model tree, perhaps using driver model
+ * iterators like @device_for_each_child(), you can't assume very much
+ * about the nodes you find. Use this function to avoid oopses caused
+ * by wrongly treating some non-I2C device as an i2c_adapter.
+ */
+struct i2c_adapter *i2c_verify_adapter(struct device *dev)
+{
+ return (dev->type == &i2c_adapter_type)
+ ? to_i2c_adapter(dev)
+ : NULL;
+}
+EXPORT_SYMBOL(i2c_verify_adapter);
+
#ifdef CONFIG_I2C_COMPAT
static struct class_compat *i2c_adapter_compat_class;
#endif
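
A short usage sketch for the new helper: when walking the driver model, filter out anything that is not actually an adapter before touching it (callback and counter names are illustrative):

    #include <linux/device.h>
    #include <linux/i2c.h>

    /* device_for_each_child() callback: counts only real I2C adapters. */
    static int count_if_adapter(struct device *dev, void *data)
    {
            struct i2c_adapter *adap = i2c_verify_adapter(dev);

            if (adap)
                    (*(int *)data)++;
            return 0;
    }

    /* Usage: device_for_each_child(parent, &count, count_if_adapter); */
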
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 45048323b75e..5ec2261574ec 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -265,19 +265,41 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
res = 0;
for (i = 0; i < rdwr_arg.nmsgs; i++) {
- /* Limit the size of the message to a sane amount;
- * and don't let length change either. */
- if ((rdwr_pa[i].len > 8192) ||
- (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
+ /* Limit the size of the message to a sane amount */
+ if (rdwr_pa[i].len > 8192) {
res = -EINVAL;
break;
}
+
data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
if (IS_ERR(rdwr_pa[i].buf)) {
res = PTR_ERR(rdwr_pa[i].buf);
break;
}
+
+ /*
+ * If the message length is received from the slave (similar
+ * to SMBus block read), we must ensure that the buffer will
+ * be large enough to cope with a message length of
+ * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus
+ * drivers allow. The first byte in the buffer must be
+ * pre-filled with the number of extra bytes, which must be
+ * at least one to hold the message length, but can be
+ * greater (for example to account for a checksum byte at
+ * the end of the message.)
+ */
+ if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
+ if (!(rdwr_pa[i].flags & I2C_M_RD) ||
+ rdwr_pa[i].buf[0] < 1 ||
+ rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+ I2C_SMBUS_BLOCK_MAX) {
+ res = -EINVAL;
+ break;
+ }
+
+ rdwr_pa[i].len = rdwr_pa[i].buf[0];
+ }
}
if (res < 0) {
int j;
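
From userspace, the documented I2C_M_RECV_LEN rule means the receive buffer must already be sized for the worst case. A hedged sketch of a block-style read through /dev/i2c-N (helper name and two-message layout are illustrative; the exact headers may differ per libc/i2c-tools setup):

    #include <linux/i2c.h>
    #include <linux/i2c-dev.h>
    #include <sys/ioctl.h>

    static int block_read(int fd, __u16 addr, __u8 cmd, __u8 *buf)
    {
            /* buf must hold 1 length byte + up to I2C_SMBUS_BLOCK_MAX data */
            struct i2c_msg msgs[2] = {
                    { .addr = addr, .flags = 0, .len = 1, .buf = &cmd },
                    { .addr = addr, .flags = I2C_M_RD | I2C_M_RECV_LEN,
                      .len = 1 + I2C_SMBUS_BLOCK_MAX, .buf = buf },
            };
            struct i2c_rdwr_ioctl_data rdwr = { .msgs = msgs, .nmsgs = 2 };

            buf[0] = 1;     /* one extra byte reserved for the length itself */
            return ioctl(fd, I2C_RDWR, &rdwr);
    }
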
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d7a4833be416..1038c381aea5 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
/* multiplexer per channel data */
struct i2c_mux_priv {
@@ -31,11 +33,11 @@ struct i2c_mux_priv {
struct i2c_algorithm algo;
struct i2c_adapter *parent;
- void *mux_dev; /* the mux chip/device */
+ void *mux_priv; /* the mux chip/device */
u32 chan_id; /* the channel id */
- int (*select)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
- int (*deselect)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
+ int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+ int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
};
static int i2c_mux_master_xfer(struct i2c_adapter *adap,
@@ -47,11 +49,11 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
/* Switch to the right mux port and perform the transfer. */
- ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+ ret = priv->select(parent, priv->mux_priv, priv->chan_id);
if (ret >= 0)
ret = parent->algo->master_xfer(parent, msgs, num);
if (priv->deselect)
- priv->deselect(parent, priv->mux_dev, priv->chan_id);
+ priv->deselect(parent, priv->mux_priv, priv->chan_id);
return ret;
}
@@ -67,12 +69,12 @@ static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
/* Select the right mux port and perform the transfer. */
- ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+ ret = priv->select(parent, priv->mux_priv, priv->chan_id);
if (ret >= 0)
ret = parent->algo->smbus_xfer(parent, addr, flags,
read_write, command, size, data);
if (priv->deselect)
- priv->deselect(parent, priv->mux_dev, priv->chan_id);
+ priv->deselect(parent, priv->mux_priv, priv->chan_id);
return ret;
}
@@ -87,7 +89,8 @@ static u32 i2c_mux_functionality(struct i2c_adapter *adap)
}
struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
- void *mux_dev, u32 force_nr, u32 chan_id,
+ struct device *mux_dev,
+ void *mux_priv, u32 force_nr, u32 chan_id,
int (*select) (struct i2c_adapter *,
void *, u32),
int (*deselect) (struct i2c_adapter *,
@@ -102,7 +105,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
/* Set up private adapter data */
priv->parent = parent;
- priv->mux_dev = mux_dev;
+ priv->mux_priv = mux_priv;
priv->chan_id = chan_id;
priv->select = select;
priv->deselect = deselect;
@@ -124,6 +127,25 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
priv->adap.algo_data = priv;
priv->adap.dev.parent = &parent->dev;
+ /*
+ * Try to populate the mux adapter's of_node, expands to
+ * nothing if !CONFIG_OF.
+ */
+ if (mux_dev->of_node) {
+ struct device_node *child;
+ u32 reg;
+
+ for_each_child_of_node(mux_dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret)
+ continue;
+ if (chan_id == reg) {
+ priv->adap.dev.of_node = child;
+ break;
+ }
+ }
+ }
+
if (force_nr) {
priv->adap.nr = force_nr;
ret = i2c_add_numbered_adapter(&priv->adap);
@@ -141,6 +163,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
i2c_adapter_id(&priv->adap));
+ of_i2c_register_devices(&priv->adap);
+
return &priv->adap;
}
EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
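
A sketch of a mux chip driver adopting the widened i2c_add_mux_adapter() call: passing the physical mux device lets the core attach the matching DT child node (by its "reg" property) to every adapter it creates. Function and driver names here are illustrative:

    #include <linux/i2c.h>
    #include <linux/i2c-mux.h>

    static int my_mux_select(struct i2c_adapter *adap, void *priv, u32 chan)
    {
            /* program the mux hardware (via priv) to route channel 'chan' */
            return 0;
    }

    static int my_mux_add_channels(struct i2c_client *client,
                                   struct i2c_adapter *parent, unsigned nchans)
    {
            struct i2c_adapter *child;
            unsigned i;

            for (i = 0; i < nchans; i++) {
                    /* force_nr == 0 lets the core pick dynamic bus numbers */
                    child = i2c_add_mux_adapter(parent, &client->dev, client,
                                                0, i, my_mux_select, NULL);
                    if (!child)
                            return -ENODEV;
            }
            return 0;
    }
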
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 90b7a0163899..a0edd9854218 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -15,7 +15,7 @@ config I2C_MUX_GPIO
through GPIO pins.
This driver can also be built as a module. If so, the module
- will be called gpio-i2cmux.
+ will be called i2c-mux-gpio.
config I2C_MUX_PCA9541
tristate "NXP PCA9541 I2C Master Selector"
@@ -25,7 +25,7 @@ config I2C_MUX_PCA9541
I2C Master Selector.
This driver can also be built as a module. If so, the module
- will be called pca9541.
+ will be called i2c-mux-pca9541.
config I2C_MUX_PCA954x
tristate "Philips PCA954x I2C Mux/switches"
@@ -35,6 +35,18 @@ config I2C_MUX_PCA954x
I2C mux/switch devices.
This driver can also be built as a module. If so, the module
- will be called pca954x.
+ will be called i2c-mux-pca954x.
+
+config I2C_MUX_PINCTRL
+ tristate "pinctrl-based I2C multiplexer"
+ depends on PINCTRL
+ help
+ If you say yes to this option, support will be included for an I2C
+ multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
+ This is useful for SoCs whose I2C module's signals can be routed to
+ different sets of pins at run-time.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-mux-pinctrl.
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 4640436ea61f..76da8692afff 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,8 +1,9 @@
#
# Makefile for multiplexer I2C chip drivers.
-obj-$(CONFIG_I2C_MUX_GPIO) += gpio-i2cmux.o
-obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o
-obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o
+obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
+obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
+obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index e5fa695eb0fa..68b1f8ec3436 100644
--- a/drivers/i2c/muxes/gpio-i2cmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -10,7 +10,7 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/gpio-i2cmux.h>
+#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -20,10 +20,10 @@
struct gpiomux {
struct i2c_adapter *parent;
struct i2c_adapter **adap; /* child busses */
- struct gpio_i2cmux_platform_data data;
+ struct i2c_mux_gpio_platform_data data;
};
-static void gpiomux_set(const struct gpiomux *mux, unsigned val)
+static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
{
int i;
@@ -31,28 +31,28 @@ static void gpiomux_set(const struct gpiomux *mux, unsigned val)
gpio_set_value(mux->data.gpios[i], val & (1 << i));
}
-static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
{
struct gpiomux *mux = data;
- gpiomux_set(mux, mux->data.values[chan]);
+ i2c_mux_gpio_set(mux, mux->data.values[chan]);
return 0;
}
-static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
{
struct gpiomux *mux = data;
- gpiomux_set(mux, mux->data.idle);
+ i2c_mux_gpio_set(mux, mux->data.idle);
return 0;
}
-static int __devinit gpiomux_probe(struct platform_device *pdev)
+static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
{
struct gpiomux *mux;
- struct gpio_i2cmux_platform_data *pdata;
+ struct i2c_mux_gpio_platform_data *pdata;
struct i2c_adapter *parent;
int (*deselect) (struct i2c_adapter *, void *, u32);
unsigned initial_state;
@@ -86,16 +86,16 @@ static int __devinit gpiomux_probe(struct platform_device *pdev)
goto alloc_failed2;
}
- if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
+ if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
initial_state = pdata->idle;
- deselect = gpiomux_deselect;
+ deselect = i2c_mux_gpio_deselect;
} else {
initial_state = pdata->values[0];
deselect = NULL;
}
for (i = 0; i < pdata->n_gpios; i++) {
- ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
+ ret = gpio_request(pdata->gpios[i], "i2c-mux-gpio");
if (ret)
goto err_request_gpio;
gpio_direction_output(pdata->gpios[i],
@@ -105,8 +105,8 @@ static int __devinit gpiomux_probe(struct platform_device *pdev)
for (i = 0; i < pdata->n_values; i++) {
u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
- mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
- gpiomux_select, deselect);
+ mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, i,
+ i2c_mux_gpio_select, deselect);
if (!mux->adap[i]) {
ret = -ENODEV;
dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
@@ -137,7 +137,7 @@ alloc_failed:
return ret;
}
-static int __devexit gpiomux_remove(struct platform_device *pdev)
+static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
{
struct gpiomux *mux = platform_get_drvdata(pdev);
int i;
@@ -156,18 +156,18 @@ static int __devexit gpiomux_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver gpiomux_driver = {
- .probe = gpiomux_probe,
- .remove = __devexit_p(gpiomux_remove),
+static struct platform_driver i2c_mux_gpio_driver = {
+ .probe = i2c_mux_gpio_probe,
+ .remove = __devexit_p(i2c_mux_gpio_remove),
.driver = {
.owner = THIS_MODULE,
- .name = "gpio-i2cmux",
+ .name = "i2c-mux-gpio",
},
};
-module_platform_driver(gpiomux_driver);
+module_platform_driver(i2c_mux_gpio_driver);
MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:gpio-i2cmux");
+MODULE_ALIAS("platform:i2c-mux-gpio");
diff --git a/drivers/i2c/muxes/pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index e0df9b6c66b3..8aacde1516ac 100644
--- a/drivers/i2c/muxes/pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -353,7 +353,8 @@ static int pca9541_probe(struct i2c_client *client,
force = 0;
if (pdata)
force = pdata->modes[0].adap_id;
- data->mux_adap = i2c_add_mux_adapter(adap, client, force, 0,
+ data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
+ force, 0,
pca9541_select_chan,
pca9541_release_chan);
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 0e37ef27aa12..f2dfe0d8fcce 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -226,7 +226,7 @@ static int pca954x_probe(struct i2c_client *client,
}
data->virt_adaps[num] =
- i2c_add_mux_adapter(adap, client,
+ i2c_add_mux_adapter(adap, &client->dev, client,
force, num, pca954x_select_chan,
(pdata && pdata->modes[num].deselect_on_exit)
? pca954x_deselect_mux : NULL);
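Both PCA muxes, like the GPIO and pinctrl muxes in this series, now pass the device that owns the mux as a second argument. Reconstructed from these call sites, the prototype appears to be the following; treat it as a sketch and verify against include/linux/i2c-mux.h:

struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
			struct device *mux_dev, void *mux_priv,
			u32 force_nr, u32 chan_id,
			int (*select)(struct i2c_adapter *, void *, u32),
			int (*deselect)(struct i2c_adapter *, void *, u32));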
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644
index 000000000000..46a669763476
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -0,0 +1,279 @@
+/*
+ * I2C multiplexer using pinctrl API
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/i2c-mux-pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct i2c_mux_pinctrl {
+ struct device *dev;
+ struct i2c_mux_pinctrl_platform_data *pdata;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state **states;
+ struct pinctrl_state *state_idle;
+ struct i2c_adapter *parent;
+ struct i2c_adapter **busses;
+};
+
+static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
+ u32 chan)
+{
+ struct i2c_mux_pinctrl *mux = data;
+
+ return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
+}
+
+static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
+ u32 chan)
+{
+ struct i2c_mux_pinctrl *mux = data;
+
+ return pinctrl_select_state(mux->pinctrl, mux->state_idle);
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int num_names, i, ret;
+ struct device_node *adapter_np;
+ struct i2c_adapter *adapter;
+
+ if (!np)
+ return 0;
+
+ mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
+ if (!mux->pdata) {
+ dev_err(mux->dev,
+ "Cannot allocate i2c_mux_pinctrl_platform_data\n");
+ return -ENOMEM;
+ }
+
+ num_names = of_property_count_strings(np, "pinctrl-names");
+ if (num_names < 0) {
+ dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+ num_names);
+ return num_names;
+ }
+
+ mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->pdata->pinctrl_states) * num_names,
+ GFP_KERNEL);
+ if (!mux->pdata->pinctrl_states) {
+ dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_names; i++) {
+ ret = of_property_read_string_index(np, "pinctrl-names", i,
+ &mux->pdata->pinctrl_states[mux->pdata->bus_count]);
+ if (ret < 0) {
+ dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+ ret);
+ return ret;
+ }
+ if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
+ "idle")) {
+ if (i != num_names - 1) {
+ dev_err(mux->dev, "idle state must be last\n");
+ return -EINVAL;
+ }
+ mux->pdata->pinctrl_state_idle = "idle";
+ } else {
+ mux->pdata->bus_count++;
+ }
+ }
+
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(mux->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ if (!adapter) {
+ dev_err(mux->dev, "Cannot find parent bus\n");
+ return -ENODEV;
+ }
+ mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
+ put_device(&adapter->dev);
+
+ return 0;
+}
+#else
+static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+{
+ struct i2c_mux_pinctrl *mux;
+ int (*deselect)(struct i2c_adapter *, void *, u32);
+ int i, ret;
+
+ mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ platform_set_drvdata(pdev, mux);
+
+ mux->dev = &pdev->dev;
+
+ mux->pdata = pdev->dev.platform_data;
+ if (!mux->pdata) {
+ ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
+ if (ret < 0)
+ goto err;
+ }
+ if (!mux->pdata) {
+ dev_err(&pdev->dev, "Missing platform data\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ mux->states = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->states) * mux->pdata->bus_count,
+ GFP_KERNEL);
+ if (!mux->states) {
+ dev_err(&pdev->dev, "Cannot allocate states\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mux->busses = devm_kzalloc(&pdev->dev,
+ sizeof(mux->busses) * mux->pdata->bus_count,
+ GFP_KERNEL);
+ if (!mux->busses) {
+ dev_err(&pdev->dev, "Cannot allocate busses\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mux->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(mux->pinctrl)) {
+ ret = PTR_ERR(mux->pinctrl);
+ dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
+ goto err;
+ }
+ for (i = 0; i < mux->pdata->bus_count; i++) {
+ mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
+ mux->pdata->pinctrl_states[i]);
+ if (IS_ERR(mux->states[i])) {
+ ret = PTR_ERR(mux->states[i]);
+ dev_err(&pdev->dev,
+ "Cannot look up pinctrl state %s: %d\n",
+ mux->pdata->pinctrl_states[i], ret);
+ goto err;
+ }
+ }
+ if (mux->pdata->pinctrl_state_idle) {
+ mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
+ mux->pdata->pinctrl_state_idle);
+ if (IS_ERR(mux->state_idle)) {
+ ret = PTR_ERR(mux->state_idle);
+ dev_err(&pdev->dev,
+ "Cannot look up pinctrl state %s: %d\n",
+ mux->pdata->pinctrl_state_idle, ret);
+ goto err;
+ }
+
+ deselect = i2c_mux_pinctrl_deselect;
+ } else {
+ deselect = NULL;
+ }
+
+ mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+ if (!mux->parent) {
+ dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+ mux->pdata->parent_bus_num);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ for (i = 0; i < mux->pdata->bus_count; i++) {
+ u32 bus = mux->pdata->base_bus_num ?
+ (mux->pdata->base_bus_num + i) : 0;
+
+ mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
+ mux, bus, i,
+ i2c_mux_pinctrl_select,
+ deselect);
+ if (!mux->busses[i]) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+ goto err_del_adapter;
+ }
+ }
+
+ return 0;
+
+err_del_adapter:
+ for (; i > 0; i--)
+ i2c_del_mux_adapter(mux->busses[i - 1]);
+ i2c_put_adapter(mux->parent);
+err:
+ return ret;
+}
+
+static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+{
+ struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < mux->pdata->bus_count; i++)
+ i2c_del_mux_adapter(mux->busses[i]);
+
+ i2c_put_adapter(mux->parent);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+ { .compatible = "i2c-mux-pinctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
+#endif
+
+static struct platform_driver i2c_mux_pinctrl_driver = {
+ .driver = {
+ .name = "i2c-mux-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+ },
+ .probe = i2c_mux_pinctrl_probe,
+ .remove = __devexit_p(i2c_mux_pinctrl_remove),
+};
+module_platform_driver(i2c_mux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-mux-pinctrl");
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 8716066a2f2b..bcb507b0cfd4 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = {
*/
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- unsigned long cycle_time;
+ unsigned long cycle_time = 0;
int use_dma_info = 0;
const u8 xfer_mode = drive->dma_mode;
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
ide_set_drivedata(drive, (void *)cycle_time);
- printk("%s: %s selected (peak %dMB/s)\n", drive->name,
- ide_xfer_verbose(xfer_mode),
- 2000 / (unsigned long)ide_get_drivedata(drive));
+ printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
+ drive->name, ide_xfer_verbose(xfer_mode),
+ 2000 / (cycle_time ? cycle_time : (unsigned long) -1));
}
static const struct ide_port_ops icside_v6_port_ops = {
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = {
.dma_test_irq = icside_dma_test_irq,
.dma_lost_irq = ide_dma_lost_irq,
};
-#else
-#define icside_v6_dma_ops NULL
#endif
static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@@ -456,7 +454,6 @@ err_free:
static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
- .dma_ops = &icside_v6_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
.mwdma_mask = ATA_MWDMA2,
.swdma_mask = ATA_SWDMA2,
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
ecard_set_drvdata(ec, state);
+#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
d.init_dma = icside_dma_init;
d.port_ops = &icside_v6_port_ops;
- } else
- d.dma_ops = NULL;
+ d.dma_ops = &icside_v6_dma_ops;
+ }
+#endif
ret = ide_host_register(host, &d, hws);
if (ret)
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 28e344ea514c..f1e922e2479a 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
{
int *is_kme = priv_data;
- if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
+ if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
+ != IO_DATA_PATH_WIDTH_8) {
pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
}
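The old test treated IO_DATA_PATH_WIDTH_8 as a flag, but the data-path width is a multi-bit field selected by the IO_DATA_PATH_WIDTH mask; if, as the fix implies, the 8-bit code is the all-zero value of that field, the old condition was unconditionally true. A small standalone illustration with assumed values (not the real pcmcia definitions):

/*
 * Illustration only: a multi-bit width field cannot be tested like a
 * single flag; compare the masked field against the value instead.
 */
#define WIDTH_MASK	0x18
#define WIDTH_8		0x00	/* assumed encoding of the 8-bit code */
#define WIDTH_AUTO	0x10

static int width_is_not_8bit(unsigned long flags)
{
	/* old form: !(flags & WIDTH_8) is always true when WIDTH_8 == 0 */
	return (flags & WIDTH_MASK) != WIDTH_8;	/* compare the masked field */
}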
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 56eecefcec75..2ec93da41e2c 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -8,8 +8,7 @@ menuconfig IIO
help
The industrial I/O subsystem provides a unified framework for
drivers for many different types of embedded sensors using a
- number of different physical interfaces (i2c, spi, etc). See
- Documentation/iio for more information.
+ number of different physical interfaces (i2c, spi, etc).
if IIO
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 1ddd8861c71b..4f947e4377ef 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -661,7 +661,6 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
* New channel registration method - relies on the fact a group does
* not need to be initialized if its name is NULL.
*/
- INIT_LIST_HEAD(&indio_dev->channel_attr_list);
if (indio_dev->channels)
for (i = 0; i < indio_dev->num_channels; i++) {
ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
static void iio_dev_release(struct device *device)
{
struct iio_dev *indio_dev = dev_to_iio_dev(device);
- cdev_del(&indio_dev->chrdev);
+ if (indio_dev->chrdev.dev)
+ cdev_del(&indio_dev->chrdev);
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
iio_device_unregister_trigger_consumer(indio_dev);
iio_device_unregister_eventset(indio_dev);
iio_device_unregister_sysfs(indio_dev);
iio_device_unregister_debugfs(indio_dev);
+
+ ida_simple_remove(&iio_ida, indio_dev->id);
+ kfree(indio_dev);
}
static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
dev_set_drvdata(&dev->dev, (void *)dev);
mutex_init(&dev->mlock);
mutex_init(&dev->info_exist_lock);
+ INIT_LIST_HEAD(&dev->channel_attr_list);
dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
if (dev->id < 0) {
@@ -778,10 +782,8 @@ EXPORT_SYMBOL(iio_device_alloc);
void iio_device_free(struct iio_dev *dev)
{
- if (dev) {
- ida_simple_remove(&iio_ida, dev->id);
- kfree(dev);
- }
+ if (dev)
+ put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);
@@ -902,7 +904,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
mutex_lock(&indio_dev->info_exist_lock);
indio_dev->info = NULL;
mutex_unlock(&indio_dev->info_exist_lock);
- device_unregister(&indio_dev->dev);
+ device_del(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
subsys_initcall(iio_init);
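With release() now doing the ida_simple_remove() and kfree(), iio_device_free() becomes a plain put_device() and iio_device_unregister() only needs device_del(), so the structure survives until the last chrdev user drops its reference. Driver teardown looks the same on the surface; a short sketch with illustrative names:

/*
 * Sketch of a typical driver remove path against the reworked lifetime;
 * example_remove and the drvdata layout are illustrative only.
 */
#include <linux/platform_device.h>
#include <linux/iio/iio.h>

static int example_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);

	iio_device_unregister(indio_dev);	/* now device_del() only */
	iio_device_free(indio_dev);		/* put_device(): kfree happens in release */
	return 0;
}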
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 55d5642eb10a..2e826f9702c6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
- return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+ return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
(ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
(id->qp_type == IB_QPT_UD)) ||
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 55ab284e22f2..b18870c455ad 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
struct net_device *pdev;
pdev = ip_dev_find(&init_net, peer_ip);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out;
+ }
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, 0);
if (!ep->l2t)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ee1c577238f7..3530c41fcd1f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
- props->max_qp_wr = dev->dev->caps.max_wqes;
+ props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg);
props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
@@ -1084,12 +1084,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
int total_eqs = 0;
int i, j, eq;
- /* Init eq table */
- ibdev->eq_table = NULL;
- ibdev->eq_added = 0;
-
- /* Legacy mode? */
- if (dev->caps.comp_pool == 0)
+ /* Legacy mode or comp_pool is not large enough */
+ if (dev->caps.comp_pool == 0 ||
+ dev->caps.num_ports > dev->caps.comp_pool)
return;
eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
@@ -1135,7 +1132,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
int i;
- int total_eqs;
+
+ /* no additional eqs were added */
+ if (!ibdev->eq_table)
+ return;
/* Reset the advertised EQ number */
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
@@ -1148,12 +1148,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
mlx4_release_eq(dev, ibdev->eq_table[i]);
}
- total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
- memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
kfree(ibdev->eq_table);
-
- ibdev->eq_table = NULL;
- ibdev->eq_added = 0;
}
static void *mlx4_ib_add(struct mlx4_dev *dev)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e62297cc77cc..ff36655d23d3 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -44,6 +44,14 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+enum {
+ MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
+ MLX4_IB_MAX_HEADROOM = 2048
+};
+
+#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
+#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
+
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
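For reference, with MLX4_IB_SQ_MIN_WQE_SHIFT = 6 and MLX4_IB_MAX_HEADROOM = 2048 this works out to MLX4_IB_SQ_MAX_SPARE = MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 33, so the main.c and qp.c hunks above advertise and enforce a limit 33 WQEs below the raw caps.max_wqes value.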
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ceb33327091a..8d4ed24aef93 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
/* Sanity check RQ size before proceeding */
- if (cap->max_recv_wr > dev->dev->caps.max_wqes ||
- cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+ if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
+ cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
return -EINVAL;
if (!has_rq) {
@@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
}
- cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
- cap->max_recv_sge = qp->rq.max_gs;
+ /* leave userspace return values as they were, so as not to break ABI */
+ if (is_user) {
+ cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
+ cap->max_recv_sge = qp->rq.max_gs;
+ } else {
+ cap->max_recv_wr = qp->rq.max_post =
+ min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
+ cap->max_recv_sge = min(qp->rq.max_gs,
+ min(dev->dev->caps.max_sq_sg,
+ dev->dev->caps.max_rq_sg));
+ }
return 0;
}
@@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
int s;
/* Sanity check SQ size before proceeding */
- if (cap->max_send_wr > dev->dev->caps.max_wqes ||
- cap->max_send_sge > dev->dev->caps.max_sq_sg ||
+ if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
+ cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
return -EINVAL;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 85a69c958559..48970af23679 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
u32 max_inline_data;
int max_send_sge;
int max_recv_sge;
+ int max_srq_sge;
int max_mr;
u64 max_mr_size;
u32 max_num_mr_pbl;
@@ -231,7 +232,6 @@ struct ocrdma_qp_hwq_info {
u32 entry_size;
u32 max_cnt;
u32 max_wqe_idx;
- u32 free_delta;
u16 dbid; /* qid, where to ring the doorbell. */
u32 len;
dma_addr_t pa;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index a411a4e3193d..517ab20b727c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp {
u32 rsvd1;
u32 num_wqe_allocated;
u32 num_rqe_allocated;
- u32 free_wqe_delta;
- u32 free_rqe_delta;
u32 db_sq_offset;
u32 db_rq_offset;
u32 db_shift;
@@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp {
u32 db_rq_offset;
u32 db_shift;
- u32 free_rqe_delta;
- u32 rsvd2;
+ u64 rsvd2;
u64 rsvd3;
} __packed;
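Size check for the SRQ response: the removed u32 free_rqe_delta plus the old u32 rsvd2 occupied 8 bytes, exactly the size of the new u64 rsvd2, so the __packed layout seen by userspace keeps its length after the change.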
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 9b204b1ba336..71942af4fce9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
break;
case OCRDMA_SRQ_LIMIT_EVENT:
ib_evt.element.srq = &qp->srq->ibsrq;
- ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
srq_event = 1;
qp_event = 0;
break;
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
struct ocrdma_dev_attr *attr,
struct ocrdma_mbx_query_config *rsp)
{
- int max_q_mem;
-
attr->max_pd =
(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_recv_sge = (rsp->max_write_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+ attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_inline_data =
attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
sizeof(struct ocrdma_sge));
- max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
- /* hw can queue one less then the configured size,
- * so publish less by one to stack.
- */
if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
attr->ird = 1;
attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
- } else
- dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
- dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+ }
+ dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+ OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+ dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}
static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
@@ -1990,19 +1988,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
max_wqe_allocated = 1 << max_wqe_allocated;
max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
- if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- qp->sq.free_delta = 0;
- qp->rq.free_delta = 1;
- } else
- qp->sq.free_delta = 1;
-
qp->sq.max_cnt = max_wqe_allocated;
qp->sq.max_wqe_idx = max_wqe_allocated - 1;
if (!attrs->srq) {
qp->rq.max_cnt = max_rqe_allocated;
qp->rq.max_wqe_idx = max_rqe_allocated - 1;
- qp->rq.free_delta = 1;
}
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index a20d16eaae71..b050e629e9c3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -26,7 +26,6 @@
*******************************************************************/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/idr.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -98,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
sgid->raw[15] = mac_addr[5];
}
-static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
bool is_vlan, u16 vlan_id)
{
int i;
- bool found = false;
union ib_gid new_sgid;
- int free_idx = OCRDMA_MAX_SGID;
unsigned long flags;
memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -116,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
sizeof(union ib_gid))) {
/* found free entry */
- if (!found) {
- free_idx = i;
- found = true;
- break;
- }
+ memcpy(&dev->sgid_tbl[i], &new_sgid,
+ sizeof(union ib_gid));
+ spin_unlock_irqrestore(&dev->sgid_lock, flags);
+ return true;
} else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
sizeof(union ib_gid))) {
/* entry already present, no addition is required. */
spin_unlock_irqrestore(&dev->sgid_lock, flags);
- return;
+ return false;
}
}
- /* if entry doesn't exist and if table has some space, add entry */
- if (found)
- memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
- sizeof(union ib_gid));
spin_unlock_irqrestore(&dev->sgid_lock, flags);
+ return false;
}
static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -168,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
ocrdma_get_guid(dev, &sgid->raw[8]);
}
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
{
struct net_device *netdev, *tmp;
u16 vlan_id;
@@ -176,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
netdev = dev->nic_info.netdev;
- ocrdma_add_default_sgid(dev);
-
rcu_read_lock();
for_each_netdev_rcu(&init_net, tmp) {
if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -195,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
}
}
rcu_read_unlock();
+}
+#else
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+{
+
+}
+#endif /* VLAN */
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+ ocrdma_add_default_sgid(dev);
+ ocrdma_add_vlan_sgids(dev);
return 0;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
+defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
@@ -209,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
struct ib_event gid_event;
struct ocrdma_dev *dev;
bool found = false;
+ bool updated = false;
bool is_vlan = false;
u16 vid = 0;
@@ -234,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
mutex_lock(&dev->dev_lock);
switch (event) {
case NETDEV_UP:
- ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
break;
case NETDEV_DOWN:
- found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
- if (found) {
- /* found the matching entry, notify
- * the consumers about it
- */
- gid_event.device = &dev->ibdev;
- gid_event.element.port_num = 1;
- gid_event.event = IB_EVENT_GID_CHANGE;
- ib_dispatch_event(&gid_event);
- }
+ updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
break;
default:
break;
}
+ if (updated) {
+ /* GID table updated, notify the consumers about it */
+ gid_event.device = &dev->ibdev;
+ gid_event.element.port_num = 1;
+ gid_event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&gid_event);
+ }
mutex_unlock(&dev->dev_lock);
return NOTIFY_OK;
}
@@ -259,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
.notifier_call = ocrdma_inet6addr_event
};
-#endif /* IPV6 */
+#endif /* IPV6 and VLAN */
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
u8 port_num)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 7fd80cc0f037..c75cbdfa87e7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -418,6 +418,9 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0,
OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF <<
- OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16,
OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF <<
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e9f74d1b48f6..2e2e7aecc990 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
dev = get_ocrdma_dev(ibdev);
memset(sgid, 0, sizeof(*sgid));
- if (index > OCRDMA_MAX_SGID)
+ if (index >= OCRDMA_MAX_SGID)
return -EINVAL;
memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY;
- attr->max_sge = dev->attr.max_send_sge;
- attr->max_sge_rd = dev->attr.max_send_sge;
+ attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
+ attr->max_sge_rd = 0;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
attr->max_srq = (dev->attr.max_qp - 1);
- attr->max_srq_sge = attr->max_sge;
+ attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
attr->max_fast_reg_page_list_len = 0;
@@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
uresp.db_shift = 16;
}
- uresp.free_wqe_delta = qp->sq.free_delta;
- uresp.free_rqe_delta = qp->rq.free_delta;
if (qp->dpp_enabled) {
uresp.dpp_credit = dpp_credit_lmt;
@@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
free_cnt = (q->max_cnt - q->head) + q->tail;
else
free_cnt = q->tail - q->head;
- if (q->free_delta)
- free_cnt -= q->free_delta;
return free_cnt;
}
@@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
(srq->pd->id * srq->dev->nic_info.db_page_size);
uresp.db_page_size = srq->dev->nic_info.db_page_size;
uresp.num_rqe_allocated = srq->rq.max_cnt;
- uresp.free_rqe_delta = 1;
if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
uresp.db_shift = 24;
@@ -2306,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
*stop = true;
expand = false;
}
- } else
+ } else {
+ *polled = true;
expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+ }
return expand;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index e6483439f25f..633f03d80274 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -28,7 +28,6 @@
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__
-#include <linux/version.h>
int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
struct ib_send_wr **bad_wr);
int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5c1bc995e560..f10221f40803 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
skb_frag_size_set(frag, size);
skb->data_len += size;
- skb->truesize += size;
+ skb->truesize += PAGE_SIZE;
} else
skb_put(skb, length);
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int buf_size;
+ int tailroom;
u64 *mapping;
- if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
buf_size = IPOIB_UD_HEAD_SIZE;
- else
+ tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+ } else {
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ tailroom = 0;
+ }
- skb = dev_alloc_skb(buf_size + 4);
+ skb = dev_alloc_skb(buf_size + tailroom + 4);
if (unlikely(!skb))
return NULL;
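The truesize change accounts for the whole fragment page rather than only the bytes copied into it: on a 4 KiB-page system (assumed for the example) a 100-byte tail landing in a page fragment still pins 4096 bytes, and undercounting that inflates how much data the socket layer believes it can queue.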
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 332597980817..55f7e57d4e42 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -25,10 +25,6 @@ config INPUT
if INPUT
-config INPUT_OF_MATRIX_KEYMAP
- depends on USE_OF
- bool
-
config INPUT_FF_MEMLESS
tristate "Support for memoryless force-feedback devices"
help
@@ -68,6 +64,19 @@ config INPUT_SPARSEKMAP
To compile this driver as a module, choose M here: the
module will be called sparse-keymap.
+config INPUT_MATRIXKMAP
+ tristate "Matrix keymap support library"
+ help
+ Say Y here if you are using a driver for an input
+ device that uses matrix keymap. This option is only
+ useful for out-of-tree drivers since in-tree drivers
+ select it automatically.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called matrix-keymap.
+
comment "Userland interfaces"
config INPUT_MOUSEDEV
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index b173a13a73ca..5ca3f631497f 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -10,6 +10,7 @@ input-core-y := input.o input-compat.o input-mt.o ff-core.o
obj-$(CONFIG_INPUT_FF_MEMLESS) += ff-memless.o
obj-$(CONFIG_INPUT_POLLDEV) += input-polldev.o
obj-$(CONFIG_INPUT_SPARSEKMAP) += sparse-keymap.o
+obj-$(CONFIG_INPUT_MATRIXKMAP) += matrix-keymap.o
obj-$(CONFIG_INPUT_MOUSEDEV) += mousedev.o
obj-$(CONFIG_INPUT_JOYDEV) += joydev.o
@@ -24,4 +25,3 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
-obj-$(CONFIG_INPUT_OF_MATRIX_KEYMAP) += of_keymap.o
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4b2e10d5d641..6c58bfff01a3 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -180,7 +180,10 @@ static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
{
- if (evdev->grab != client)
+ struct evdev_client *grab = rcu_dereference_protected(evdev->grab,
+ lockdep_is_held(&evdev->mutex));
+
+ if (grab != client)
return -EINVAL;
rcu_assign_pointer(evdev->grab, NULL);
@@ -259,8 +262,7 @@ static int evdev_release(struct inode *inode, struct file *file)
struct evdev *evdev = client->evdev;
mutex_lock(&evdev->mutex);
- if (evdev->grab == client)
- evdev_ungrab(evdev, client);
+ evdev_ungrab(evdev, client);
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
@@ -343,7 +345,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
struct input_event event;
int retval = 0;
- if (count < input_event_size())
+ if (count != 0 && count < input_event_size())
return -EINVAL;
retval = mutex_lock_interruptible(&evdev->mutex);
@@ -355,7 +357,8 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
goto out;
}
- do {
+ while (retval + input_event_size() <= count) {
+
if (input_event_from_user(buffer + retval, &event)) {
retval = -EFAULT;
goto out;
@@ -364,7 +367,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
input_inject_event(&evdev->handle,
event.type, event.code, event.value);
- } while (retval + input_event_size() <= count);
+ }
out:
mutex_unlock(&evdev->mutex);
@@ -395,35 +398,49 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
struct input_event event;
- int retval = 0;
+ size_t read = 0;
+ int error;
- if (count < input_event_size())
+ if (count != 0 && count < input_event_size())
return -EINVAL;
- if (!(file->f_flags & O_NONBLOCK)) {
- retval = wait_event_interruptible(evdev->wait,
- client->packet_head != client->tail ||
- !evdev->exist);
- if (retval)
- return retval;
- }
+ for (;;) {
+ if (!evdev->exist)
+ return -ENODEV;
- if (!evdev->exist)
- return -ENODEV;
+ if (client->packet_head == client->tail &&
+ (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
- while (retval + input_event_size() <= count &&
- evdev_fetch_next_event(client, &event)) {
+ /*
+ * count == 0 is special - no IO is done but we check
+ * for error conditions (see above).
+ */
+ if (count == 0)
+ break;
- if (input_event_to_user(buffer + retval, &event))
- return -EFAULT;
+ while (read + input_event_size() <= count &&
+ evdev_fetch_next_event(client, &event)) {
- retval += input_event_size();
- }
+ if (input_event_to_user(buffer + read, &event))
+ return -EFAULT;
- if (retval == 0 && (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
+ read += input_event_size();
+ }
- return retval;
+ if (read)
+ break;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ error = wait_event_interruptible(evdev->wait,
+ client->packet_head != client->tail ||
+ !evdev->exist);
+ if (error)
+ return error;
+ }
+ }
+
+ return read;
}
/* No kernel lock - fine */
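The rewritten evdev_read() gives a read() contract worth spelling out: a count smaller than one struct input_event is rejected with EINVAL unless it is zero (a zero count only probes for errors), O_NONBLOCK on an empty queue yields EAGAIN, and a vanished device yields ENODEV. A small userspace sketch exercising that contract (device path illustrative):

/*
 * Userspace sketch of the read() behaviour implemented above; the
 * device path and poll interval are illustrative only.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev[16];
	ssize_t n;
	int fd = open("/dev/input/event0", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;
	for (;;) {
		n = read(fd, ev, sizeof(ev));
		if (n < 0) {
			if (errno == EAGAIN) {
				usleep(10000);	/* queue empty, poll again */
				continue;
			}
			break;			/* ENODEV once the device is gone */
		}
		for (size_t i = 0; i < n / sizeof(ev[0]); i++)
			printf("type %u code %u value %d\n",
			       ev[i].type, ev[i].code, ev[i].value);
	}
	close(fd);
	return 0;
}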
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 117a59aaa70e..5f558851d646 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -31,8 +31,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-
-#include "fixp-arith.h"
+#include <linux/fixp-arith.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@gmail.com>");
diff --git a/drivers/input/fixp-arith.h b/drivers/input/fixp-arith.h
deleted file mode 100644
index 3089d7382325..000000000000
--- a/drivers/input/fixp-arith.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef _FIXP_ARITH_H
-#define _FIXP_ARITH_H
-
-/*
- * Simplistic fixed-point arithmetics.
- * Hmm, I'm probably duplicating some code :(
- *
- * Copyright (c) 2002 Johann Deneux
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so by
- * e-mail - mail your message to <johann.deneux@gmail.com>
- */
-
-#include <linux/types.h>
-
-/* The type representing fixed-point values */
-typedef s16 fixp_t;
-
-#define FRAC_N 8
-#define FRAC_MASK ((1<<FRAC_N)-1)
-
-/* Not to be used directly. Use fixp_{cos,sin} */
-static const fixp_t cos_table[46] = {
- 0x0100, 0x00FF, 0x00FF, 0x00FE, 0x00FD, 0x00FC, 0x00FA, 0x00F8,
- 0x00F6, 0x00F3, 0x00F0, 0x00ED, 0x00E9, 0x00E6, 0x00E2, 0x00DD,
- 0x00D9, 0x00D4, 0x00CF, 0x00C9, 0x00C4, 0x00BE, 0x00B8, 0x00B1,
- 0x00AB, 0x00A4, 0x009D, 0x0096, 0x008F, 0x0087, 0x0080, 0x0078,
- 0x0070, 0x0068, 0x005F, 0x0057, 0x004F, 0x0046, 0x003D, 0x0035,
- 0x002C, 0x0023, 0x001A, 0x0011, 0x0008, 0x0000
-};
-
-
-/* a: 123 -> 123.0 */
-static inline fixp_t fixp_new(s16 a)
-{
- return a<<FRAC_N;
-}
-
-/* a: 0xFFFF -> -1.0
- 0x8000 -> 1.0
- 0x0000 -> 0.0
-*/
-static inline fixp_t fixp_new16(s16 a)
-{
- return ((s32)a)>>(16-FRAC_N);
-}
-
-static inline fixp_t fixp_cos(unsigned int degrees)
-{
- int quadrant = (degrees / 90) & 3;
- unsigned int i = degrees % 90;
-
- if (quadrant == 1 || quadrant == 3)
- i = 90 - i;
-
- i >>= 1;
-
- return (quadrant == 1 || quadrant == 2)? -cos_table[i] : cos_table[i];
-}
-
-static inline fixp_t fixp_sin(unsigned int degrees)
-{
- return -fixp_cos(degrees + 90);
-}
-
-static inline fixp_t fixp_mult(fixp_t a, fixp_t b)
-{
- return ((s32)(a*b))>>FRAC_N;
-}
-
-#endif
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index 422aa0a6b77f..daceafe7ee7d 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -125,15 +125,4 @@ static struct pci_driver emu_driver = {
.remove = __devexit_p(emu_remove),
};
-static int __init emu_init(void)
-{
- return pci_register_driver(&emu_driver);
-}
-
-static void __exit emu_exit(void)
-{
- pci_unregister_driver(&emu_driver);
-}
-
-module_init(emu_init);
-module_exit(emu_exit);
+module_pci_driver(emu_driver);
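This and the following conversions lean on the module_pci_driver()/module_gameport_driver()/module_serio_driver() helpers, which generate the init/exit boilerplate being deleted. Roughly, and as a sketch rather than the literal macro text, module_pci_driver(emu_driver) reduces to:

static int __init emu_driver_init(void)
{
	return pci_register_driver(&emu_driver);
}
module_init(emu_driver_init);

static void __exit emu_driver_exit(void)
{
	pci_unregister_driver(&emu_driver);
}
module_exit(emu_driver_exit);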
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index a3b70ff21018..48ad3829ff20 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -144,6 +144,7 @@ static const struct pci_device_id fm801_gp_id_table[] = {
{ PCI_VENDOR_ID_FORTEMEDIA, PCI_DEVICE_ID_FM801_GP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0 }
};
+MODULE_DEVICE_TABLE(pci, fm801_gp_id_table);
static struct pci_driver fm801_gp_driver = {
.name = "FM801_gameport",
@@ -152,20 +153,7 @@ static struct pci_driver fm801_gp_driver = {
.remove = __devexit_p(fm801_gp_remove),
};
-static int __init fm801_gp_init(void)
-{
- return pci_register_driver(&fm801_gp_driver);
-}
-
-static void __exit fm801_gp_exit(void)
-{
- pci_unregister_driver(&fm801_gp_driver);
-}
-
-module_init(fm801_gp_init);
-module_exit(fm801_gp_exit);
-
-MODULE_DEVICE_TABLE(pci, fm801_gp_id_table);
+module_pci_driver(fm801_gp_driver);
MODULE_DESCRIPTION("FM801 gameport driver");
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 1639ab2b94b7..85bc8dc07cfc 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -413,15 +413,4 @@ static struct gameport_driver a3d_drv = {
.disconnect = a3d_disconnect,
};
-static int __init a3d_init(void)
-{
- return gameport_register_driver(&a3d_drv);
-}
-
-static void __exit a3d_exit(void)
-{
- gameport_unregister_driver(&a3d_drv);
-}
-
-module_init(a3d_init);
-module_exit(a3d_exit);
+module_gameport_driver(a3d_drv);
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index b992fbf91f2f..0cbfd2dfabf4 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -557,10 +557,6 @@ static void adi_disconnect(struct gameport *gameport)
kfree(port);
}
-/*
- * The gameport device structure.
- */
-
static struct gameport_driver adi_drv = {
.driver = {
.name = "adi",
@@ -570,15 +566,4 @@ static struct gameport_driver adi_drv = {
.disconnect = adi_disconnect,
};
-static int __init adi_init(void)
-{
- return gameport_register_driver(&adi_drv);
-}
-
-static void __exit adi_exit(void)
-{
- gameport_unregister_driver(&adi_drv);
-}
-
-module_init(adi_init);
-module_exit(adi_exit);
+module_gameport_driver(adi_drv);
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index 3063464474bf..c96653b58867 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -231,6 +231,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
}
if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING)) {
dev_err(&client->dev,
"need i2c bus that supports protocol mangling\n");
@@ -281,7 +282,8 @@ static int __devinit as5011_probe(struct i2c_client *client,
error = request_threaded_irq(as5011->button_irq,
NULL, as5011_button_interrupt,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"as5011_button", as5011);
if (error < 0) {
dev_err(&client->dev,
@@ -295,7 +297,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
error = request_threaded_irq(as5011->axis_irq, NULL,
as5011_axis_interrupt,
- plat_data->axis_irqflags,
+ plat_data->axis_irqflags | IRQF_ONESHOT,
"as5011_joystick", as5011);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index 3497b87c3d05..65367e44d715 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -261,15 +261,4 @@ static struct gameport_driver cobra_drv = {
.disconnect = cobra_disconnect,
};
-static int __init cobra_init(void)
-{
- return gameport_register_driver(&cobra_drv);
-}
-
-static void __exit cobra_exit(void)
-{
- gameport_unregister_driver(&cobra_drv);
-}
-
-module_init(cobra_init);
-module_exit(cobra_exit);
+module_gameport_driver(cobra_drv);
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 0536b1b2f018..ab1cf2882004 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -373,15 +373,4 @@ static struct gameport_driver gf2k_drv = {
.disconnect = gf2k_disconnect,
};
-static int __init gf2k_init(void)
-{
- return gameport_register_driver(&gf2k_drv);
-}
-
-static void __exit gf2k_exit(void)
-{
- gameport_unregister_driver(&gf2k_drv);
-}
-
-module_init(gf2k_init);
-module_exit(gf2k_exit);
+module_gameport_driver(gf2k_drv);
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index fc55899ba6c5..9e1beff57c33 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -424,15 +424,4 @@ static struct gameport_driver grip_drv = {
.disconnect = grip_disconnect,
};
-static int __init grip_init(void)
-{
- return gameport_register_driver(&grip_drv);
-}
-
-static void __exit grip_exit(void)
-{
- gameport_unregister_driver(&grip_drv);
-}
-
-module_init(grip_init);
-module_exit(grip_exit);
+module_gameport_driver(grip_drv);
diff --git a/drivers/input/joystick/grip_mp.c b/drivers/input/joystick/grip_mp.c
index 2d47baf47769..c0f9c7b7eb4e 100644
--- a/drivers/input/joystick/grip_mp.c
+++ b/drivers/input/joystick/grip_mp.c
@@ -687,15 +687,4 @@ static struct gameport_driver grip_drv = {
.disconnect = grip_disconnect,
};
-static int __init grip_init(void)
-{
- return gameport_register_driver(&grip_drv);
-}
-
-static void __exit grip_exit(void)
-{
- gameport_unregister_driver(&grip_drv);
-}
-
-module_init(grip_init);
-module_exit(grip_exit);
+module_gameport_driver(grip_drv);
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index 4058d4b272fe..55196f730af6 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -281,15 +281,4 @@ static struct gameport_driver guillemot_drv = {
.disconnect = guillemot_disconnect,
};
-static int __init guillemot_init(void)
-{
- return gameport_register_driver(&guillemot_drv);
-}
-
-static void __exit guillemot_exit(void)
-{
- gameport_unregister_driver(&guillemot_drv);
-}
-
-module_init(guillemot_init);
-module_exit(guillemot_exit);
+module_gameport_driver(guillemot_drv);
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 16fb19d1ca25..88c22623a2e8 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -311,15 +311,4 @@ static struct gameport_driver interact_drv = {
.disconnect = interact_disconnect,
};
-static int __init interact_init(void)
-{
- return gameport_register_driver(&interact_drv);
-}
-
-static void __exit interact_exit(void)
-{
- gameport_unregister_driver(&interact_drv);
-}
-
-module_init(interact_init);
-module_exit(interact_exit);
+module_gameport_driver(interact_drv);
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index cd894a0564a2..7eb878bab968 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -159,15 +159,4 @@ static struct gameport_driver joydump_drv = {
.disconnect = joydump_disconnect,
};
-static int __init joydump_init(void)
-{
- return gameport_register_driver(&joydump_drv);
-}
-
-static void __exit joydump_exit(void)
-{
- gameport_unregister_driver(&joydump_drv);
-}
-
-module_init(joydump_init);
-module_exit(joydump_exit);
+module_gameport_driver(joydump_drv);
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index 40e40780747d..9fb153eef2fc 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -222,19 +222,4 @@ static struct serio_driver magellan_drv = {
.disconnect = magellan_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init magellan_init(void)
-{
- return serio_register_driver(&magellan_drv);
-}
-
-static void __exit magellan_exit(void)
-{
- serio_unregister_driver(&magellan_drv);
-}
-
-module_init(magellan_init);
-module_exit(magellan_exit);
+module_serio_driver(magellan_drv);
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index b8d86115644b..04c69af37148 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -820,15 +820,4 @@ static struct gameport_driver sw_drv = {
.disconnect = sw_disconnect,
};
-static int __init sw_init(void)
-{
- return gameport_register_driver(&sw_drv);
-}
-
-static void __exit sw_exit(void)
-{
- gameport_unregister_driver(&sw_drv);
-}
-
-module_init(sw_init);
-module_exit(sw_exit);
+module_gameport_driver(sw_drv);
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index 0cd9b29356a8..80a7b27a457a 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -296,19 +296,4 @@ static struct serio_driver spaceball_drv = {
.disconnect = spaceball_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init spaceball_init(void)
-{
- return serio_register_driver(&spaceball_drv);
-}
-
-static void __exit spaceball_exit(void)
-{
- serio_unregister_driver(&spaceball_drv);
-}
-
-module_init(spaceball_init);
-module_exit(spaceball_exit);
+module_serio_driver(spaceball_drv);
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index a694bf8e557b..a41f291652e6 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -237,19 +237,4 @@ static struct serio_driver spaceorb_drv = {
.disconnect = spaceorb_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init spaceorb_init(void)
-{
- return serio_register_driver(&spaceorb_drv);
-}
-
-static void __exit spaceorb_exit(void)
-{
- serio_unregister_driver(&spaceorb_drv);
-}
-
-module_init(spaceorb_init);
-module_exit(spaceorb_exit);
+module_serio_driver(spaceorb_drv);
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index e0db9f5e4b41..0f51a60e14a7 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -208,19 +208,4 @@ static struct serio_driver stinger_drv = {
.disconnect = stinger_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init stinger_init(void)
-{
- return serio_register_driver(&stinger_drv);
-}
-
-static void __exit stinger_exit(void)
-{
- serio_unregister_driver(&stinger_drv);
-}
-
-module_init(stinger_init);
-module_exit(stinger_exit);
+module_serio_driver(stinger_drv);
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index d6c609807115..5ef9bcdb0345 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -436,15 +436,4 @@ static struct gameport_driver tmdc_drv = {
.disconnect = tmdc_disconnect,
};
-static int __init tmdc_init(void)
-{
- return gameport_register_driver(&tmdc_drv);
-}
-
-static void __exit tmdc_exit(void)
-{
- gameport_unregister_driver(&tmdc_drv);
-}
-
-module_init(tmdc_init);
-module_exit(tmdc_exit);
+module_gameport_driver(tmdc_drv);
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 3f4ec73c9553..2556a8193579 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -257,19 +257,4 @@ static struct serio_driver twidjoy_drv = {
.disconnect = twidjoy_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init twidjoy_init(void)
-{
- return serio_register_driver(&twidjoy_drv);
-}
-
-static void __exit twidjoy_exit(void)
-{
- serio_unregister_driver(&twidjoy_drv);
-}
-
-module_init(twidjoy_init);
-module_exit(twidjoy_exit);
+module_serio_driver(twidjoy_drv);
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index f72c83e15e60..23b3071abb6e 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -217,19 +217,4 @@ static struct serio_driver warrior_drv = {
.disconnect = warrior_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init warrior_init(void)
-{
- return serio_register_driver(&warrior_drv);
-}
-
-static void __exit warrior_exit(void)
-{
- serio_unregister_driver(&warrior_drv);
-}
-
-module_init(warrior_init);
-module_exit(warrior_exit);
+module_serio_driver(warrior_drv);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ee16fb67b7ae..83811e45d633 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -142,6 +142,7 @@ static const struct xpad_device {
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
{ 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
+ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
{ }
};
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index b5853125c898..c4de4388fd7f 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -225,19 +225,4 @@ static struct serio_driver zhenhua_drv = {
.disconnect = zhenhua_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init zhenhua_init(void)
-{
- return serio_register_driver(&zhenhua_drv);
-}
-
-static void __exit zhenhua_exit(void)
-{
- serio_unregister_driver(&zhenhua_drv);
-}
-
-module_init(zhenhua_init);
-module_exit(zhenhua_exit);
+module_serio_driver(zhenhua_drv);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f354813a13e8..c0e11ecc646f 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -166,6 +166,7 @@ config KEYBOARD_LKKBD
config KEYBOARD_EP93XX
tristate "EP93xx Matrix Keypad support"
depends on ARCH_EP93XX
+ select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on the Cirrus EP93XX.
@@ -224,6 +225,7 @@ config KEYBOARD_TCA6416
config KEYBOARD_TCA8418
tristate "TCA8418 Keypad Support"
depends on I2C
+ select INPUT_MATRIXKMAP
help
This driver implements basic keypad functionality
for keys connected through TCA8418 keypad decoder.
@@ -240,6 +242,7 @@ config KEYBOARD_TCA8418
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
depends on GENERIC_GPIO
+ select INPUT_MATRIXKMAP
help
Enable support for GPIO driven matrix keypad.
@@ -309,6 +312,17 @@ config KEYBOARD_LM8323
To compile this driver as a module, choose M here: the
module will be called lm8323.
+config KEYBOARD_LM8333
+ tristate "LM8333 keypad chip"
+ depends on I2C
+ select INPUT_MATRIXKMAP
+ help
+ If you say yes here you get support for the National Semiconductor
+ LM8333 keypad controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lm8333.
+
config KEYBOARD_LOCOMO
tristate "LoCoMo Keyboard Support"
depends on SHARP_LOCOMO
@@ -366,6 +380,7 @@ config KEYBOARD_MPR121
config KEYBOARD_IMX
tristate "IMX keypad support"
depends on ARCH_MXC
+ select INPUT_MATRIXKMAP
help
Enable support for IMX keypad port.
@@ -384,6 +399,7 @@ config KEYBOARD_NEWTON
config KEYBOARD_NOMADIK
tristate "ST-Ericsson Nomadik SKE keyboard"
depends on PLAT_NOMADIK
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use a keypad provided on the SKE controller
used on the Ux500 and Nomadik platforms
@@ -394,7 +410,7 @@ config KEYBOARD_NOMADIK
config KEYBOARD_TEGRA
tristate "NVIDIA Tegra internal matrix keyboard controller support"
depends on ARCH_TEGRA
- select INPUT_OF_MATRIX_KEYMAP if USE_OF
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use a matrix keyboard connected directly
to the internal keyboard controller on Tegra SoCs.
@@ -432,6 +448,7 @@ config KEYBOARD_PXA930_ROTARY
config KEYBOARD_PMIC8XXX
tristate "Qualcomm PMIC8XXX keypad support"
depends on MFD_PM8XXX
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to enable the driver for the PMIC8XXX
keypad provided as a reference design from Qualcomm. This is intended
@@ -443,6 +460,7 @@ config KEYBOARD_PMIC8XXX
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
depends on HAVE_CLK
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad on your Samsung mobile
device.
@@ -485,6 +503,7 @@ config KEYBOARD_SH_KEYSC
config KEYBOARD_STMPE
tristate "STMPE keypad support"
depends on MFD_STMPE
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on STMPE I/O
expanders.
@@ -505,6 +524,7 @@ config KEYBOARD_DAVINCI
config KEYBOARD_OMAP
tristate "TI OMAP keypad support"
depends on (ARCH_OMAP1 || ARCH_OMAP2)
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the OMAP keypad.
@@ -512,9 +532,10 @@ config KEYBOARD_OMAP
module will be called omap-keypad.
config KEYBOARD_OMAP4
- tristate "TI OMAP4 keypad support"
+ tristate "TI OMAP4+ keypad support"
+ select INPUT_MATRIXKMAP
help
- Say Y here if you want to use the OMAP4 keypad.
+ Say Y here if you want to use the OMAP4+ keypad.
To compile this driver as a module, choose M here: the
module will be called omap4-keypad.
@@ -522,6 +543,7 @@ config KEYBOARD_OMAP4
config KEYBOARD_SPEAR
tristate "ST SPEAR keyboard support"
depends on PLAT_SPEAR
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the SPEAR keyboard.
@@ -531,6 +553,7 @@ config KEYBOARD_SPEAR
config KEYBOARD_TC3589X
tristate "TC3589X Keypad support"
depends on MFD_TC3589X
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on
TC35892/3 I/O expander.
@@ -541,6 +564,7 @@ config KEYBOARD_TC3589X
config KEYBOARD_TNETV107X
tristate "TI TNETV107X keypad support"
depends on ARCH_DAVINCI_TNETV107X
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use the TNETV107X keypad.
@@ -550,6 +574,7 @@ config KEYBOARD_TNETV107X
config KEYBOARD_TWL4030
tristate "TI TWL4030/TWL5030/TPS659x0 keypad support"
depends on TWL4030_CORE
+ select INPUT_MATRIXKMAP
help
Say Y here if your board use the keypad controller on
TWL4030 family chips. It's safe to say enable this
@@ -573,6 +598,7 @@ config KEYBOARD_XTKBD
config KEYBOARD_W90P910
tristate "W90P910 Matrix Keypad support"
depends on ARCH_W90X900
+ select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on evaluation board
based on W90P910.
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index df7061f12918..b03b02456a82 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
obj-$(CONFIG_KEYBOARD_LM8323) += lm8323.o
+obj-$(CONFIG_KEYBOARD_LM8333) += lm8333.o
obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 39ebffac207e..b083bf10f139 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -197,6 +197,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.base = gpio_data->gpio_start;
kpad->gc.label = kpad->client->name;
kpad->gc.owner = THIS_MODULE;
+ kpad->gc.names = gpio_data->names;
mutex_init(&kpad->gpio_lock);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index e05a2e7073c6..add5ffd9fe26 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -433,7 +433,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
if (printk_ratelimit())
dev_warn(&serio->dev,
"Spurious %s on %s. "
- "Some program might be trying access hardware directly.\n",
+ "Some program might be trying to access hardware directly.\n",
data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
goto out;
case ATKBD_RET_ERR:
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 0ba69f3fcb52..c46fc8185469 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -182,16 +182,10 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
}
-#ifdef CONFIG_PM
-/*
- * NOTE: I don't know if this is correct, or will work on the ep93xx.
- *
- * None of the existing ep93xx drivers have power management support.
- * But, this is basically what the pxa27x_keypad driver does.
- */
-static int ep93xx_keypad_suspend(struct platform_device *pdev,
- pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int ep93xx_keypad_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
@@ -210,8 +204,9 @@ static int ep93xx_keypad_suspend(struct platform_device *pdev,
return 0;
}
-static int ep93xx_keypad_resume(struct platform_device *pdev)
+static int ep93xx_keypad_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
@@ -232,10 +227,10 @@ static int ep93xx_keypad_resume(struct platform_device *pdev)
return 0;
}
-#else /* !CONFIG_PM */
-#define ep93xx_keypad_suspend NULL
-#define ep93xx_keypad_resume NULL
-#endif /* !CONFIG_PM */
+#endif
+
+static SIMPLE_DEV_PM_OPS(ep93xx_keypad_pm_ops,
+ ep93xx_keypad_suspend, ep93xx_keypad_resume);
static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
{
@@ -308,19 +303,16 @@ static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
input_dev->open = ep93xx_keypad_open;
input_dev->close = ep93xx_keypad_close;
input_dev->dev.parent = &pdev->dev;
- input_dev->keycode = keypad->keycodes;
- input_dev->keycodesize = sizeof(keypad->keycodes[0]);
- input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
- input_set_drvdata(input_dev, keypad);
+ err = matrix_keypad_build_keymap(keymap_data, NULL,
+ EP93XX_MATRIX_ROWS, EP93XX_MATRIX_COLS,
+ keypad->keycodes, input_dev);
+ if (err)
+ goto failed_free_dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
if (keypad->pdata->flags & EP93XX_KEYPAD_AUTOREPEAT)
- input_dev->evbit[0] |= BIT_MASK(EV_REP);
-
- matrix_keypad_build_keymap(keymap_data, 3,
- input_dev->keycode, input_dev->keybit);
- platform_set_drvdata(pdev, keypad);
+ __set_bit(EV_REP, input_dev->evbit);
+ input_set_drvdata(input_dev, keypad);
err = request_irq(keypad->irq, ep93xx_keypad_irq_handler,
0, pdev->name, keypad);
@@ -331,6 +323,7 @@ static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
if (err)
goto failed_free_irq;
+ platform_set_drvdata(pdev, keypad);
device_init_wakeup(&pdev->dev, 1);
return 0;
@@ -384,11 +377,10 @@ static struct platform_driver ep93xx_keypad_driver = {
.driver = {
.name = "ep93xx-keypad",
.owner = THIS_MODULE,
+ .pm = &ep93xx_keypad_pm_ops,
},
.probe = ep93xx_keypad_probe,
.remove = __devexit_p(ep93xx_keypad_remove),
- .suspend = ep93xx_keypad_suspend,
- .resume = ep93xx_keypad_resume,
};
module_platform_driver(ep93xx_keypad_driver);
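The ep93xx change above follows the pattern used throughout this series: legacy .suspend/.resume platform callbacks are replaced by a struct dev_pm_ops built with SIMPLE_DEV_PM_OPS() and guarded by CONFIG_PM_SLEEP. A trimmed sketch of that wiring, with placeholder foo_* names:

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		/* quiesce the hardware */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* reprogram the hardware */
		return 0;
	}
	#endif

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
			.pm	= &foo_pm_ops,
		},
	};

The callbacks carry the CONFIG_PM_SLEEP guard rather than CONFIG_PM because SIMPLE_DEV_PM_OPS only references them when sleep support is enabled, so non-sleep builds neither warn nor fail.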
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index fed31e0947a1..589e3c258f3f 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -583,15 +583,4 @@ static struct serio_driver hil_serio_drv = {
.interrupt = hil_dev_interrupt
};
-static int __init hil_dev_init(void)
-{
- return serio_register_driver(&hil_serio_drv);
-}
-
-static void __exit hil_dev_exit(void)
-{
- serio_unregister_driver(&hil_serio_drv);
-}
-
-module_init(hil_dev_init);
-module_exit(hil_dev_exit);
+module_serio_driver(hil_serio_drv);
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index fb87b3bcadb9..6ee7421e2321 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -481,7 +481,7 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
}
if (keypad->rows_en_mask > ((1 << MAX_MATRIX_KEY_ROWS) - 1) ||
- keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
+ keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
dev_err(&pdev->dev,
"invalid key data (too many rows or colums)\n");
error = -EINVAL;
@@ -496,14 +496,17 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
input_dev->dev.parent = &pdev->dev;
input_dev->open = imx_keypad_open;
input_dev->close = imx_keypad_close;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
- input_dev->keycode = keypad->keycodes;
- input_dev->keycodesize = sizeof(keypad->keycodes[0]);
- input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
- matrix_keypad_build_keymap(keymap_data, MATRIX_ROW_SHIFT,
- keypad->keycodes, input_dev->keybit);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ MAX_MATRIX_KEY_ROWS,
+ MAX_MATRIX_KEY_COLS,
+ keypad->keycodes, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto failed_clock_put;
+ }
+ __set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad);
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index fa9bb6d235e2..fc0a63c2f278 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -731,19 +731,4 @@ static struct serio_driver lkkbd_drv = {
.interrupt = lkkbd_interrupt,
};
-/*
- * The functions for insering/removing us as a module.
- */
-static int __init lkkbd_init(void)
-{
- return serio_register_driver(&lkkbd_drv);
-}
-
-static void __exit lkkbd_exit(void)
-{
- serio_unregister_driver(&lkkbd_drv);
-}
-
-module_init(lkkbd_init);
-module_exit(lkkbd_exit);
-
+module_serio_driver(lkkbd_drv);
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
new file mode 100644
index 000000000000..ca168a6679de
--- /dev/null
+++ b/drivers/input/keyboard/lm8333.c
@@ -0,0 +1,235 @@
+/*
+ * LM8333 keypad driver
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/input/lm8333.h>
+
+#define LM8333_FIFO_READ 0x20
+#define LM8333_DEBOUNCE 0x22
+#define LM8333_READ_INT 0xD0
+#define LM8333_ACTIVE 0xE4
+#define LM8333_READ_ERROR 0xF0
+
+#define LM8333_KEYPAD_IRQ (1 << 0)
+#define LM8333_ERROR_IRQ (1 << 3)
+
+#define LM8333_ERROR_KEYOVR 0x04
+#define LM8333_ERROR_FIFOOVR 0x40
+
+#define LM8333_FIFO_TRANSFER_SIZE 16
+
+#define LM8333_NUM_ROWS 8
+#define LM8333_NUM_COLS 16
+#define LM8333_ROW_SHIFT 4
+
+struct lm8333 {
+ struct i2c_client *client;
+ struct input_dev *input;
+ unsigned short keycodes[LM8333_NUM_ROWS << LM8333_ROW_SHIFT];
+};
+
+/* The accessors try twice because the first access may be needed for wakeup */
+#define LM8333_READ_RETRIES 2
+
+int lm8333_read8(struct lm8333 *lm8333, u8 cmd)
+{
+ int retries = 0, ret;
+
+ do {
+ ret = i2c_smbus_read_byte_data(lm8333->client, cmd);
+ } while (ret < 0 && retries++ < LM8333_READ_RETRIES);
+
+ return ret;
+}
+
+int lm8333_write8(struct lm8333 *lm8333, u8 cmd, u8 val)
+{
+ int retries = 0, ret;
+
+ do {
+ ret = i2c_smbus_write_byte_data(lm8333->client, cmd, val);
+ } while (ret < 0 && retries++ < LM8333_READ_RETRIES);
+
+ return ret;
+}
+
+int lm8333_read_block(struct lm8333 *lm8333, u8 cmd, u8 len, u8 *buf)
+{
+ int retries = 0, ret;
+
+ do {
+ ret = i2c_smbus_read_i2c_block_data(lm8333->client,
+ cmd, len, buf);
+ } while (ret < 0 && retries++ < LM8333_READ_RETRIES);
+
+ return ret;
+}
+
+static void lm8333_key_handler(struct lm8333 *lm8333)
+{
+ struct input_dev *input = lm8333->input;
+ u8 keys[LM8333_FIFO_TRANSFER_SIZE];
+ u8 code, pressed;
+ int i, ret;
+
+ ret = lm8333_read_block(lm8333, LM8333_FIFO_READ,
+ LM8333_FIFO_TRANSFER_SIZE, keys);
+ if (ret != LM8333_FIFO_TRANSFER_SIZE) {
+ dev_err(&lm8333->client->dev,
+ "Error %d while reading FIFO\n", ret);
+ return;
+ }
+
+ for (i = 0; keys[i] && i < LM8333_FIFO_TRANSFER_SIZE; i++) {
+ pressed = keys[i] & 0x80;
+ code = keys[i] & 0x7f;
+
+ input_event(input, EV_MSC, MSC_SCAN, code);
+ input_report_key(input, lm8333->keycodes[code], pressed);
+ }
+
+ input_sync(input);
+}
+
+static irqreturn_t lm8333_irq_thread(int irq, void *data)
+{
+ struct lm8333 *lm8333 = data;
+ u8 status = lm8333_read8(lm8333, LM8333_READ_INT);
+
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & LM8333_ERROR_IRQ) {
+ u8 err = lm8333_read8(lm8333, LM8333_READ_ERROR);
+
+ if (err & (LM8333_ERROR_KEYOVR | LM8333_ERROR_FIFOOVR)) {
+ u8 dummy[LM8333_FIFO_TRANSFER_SIZE];
+
+ lm8333_read_block(lm8333, LM8333_FIFO_READ,
+ LM8333_FIFO_TRANSFER_SIZE, dummy);
+ }
+ dev_err(&lm8333->client->dev, "Got error %02x\n", err);
+ }
+
+ if (status & LM8333_KEYPAD_IRQ)
+ lm8333_key_handler(lm8333);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit lm8333_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct lm8333_platform_data *pdata = client->dev.platform_data;
+ struct lm8333 *lm8333;
+ struct input_dev *input;
+ int err, active_time;
+
+ if (!pdata)
+ return -EINVAL;
+
+ active_time = pdata->active_time ?: 500;
+ if (active_time / 3 <= pdata->debounce_time / 3) {
+ dev_err(&client->dev, "Active time not big enough!\n");
+ return -EINVAL;
+ }
+
+ lm8333 = kzalloc(sizeof(*lm8333), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!lm8333 || !input) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ lm8333->client = client;
+ lm8333->input = input;
+
+ input->name = client->name;
+ input->dev.parent = &client->dev;
+ input->id.bustype = BUS_I2C;
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ err = matrix_keypad_build_keymap(pdata->matrix_data, NULL,
+ LM8333_NUM_ROWS, LM8333_NUM_COLS,
+ lm8333->keycodes, input);
+ if (err)
+ goto free_mem;
+
+ if (pdata->debounce_time) {
+ err = lm8333_write8(lm8333, LM8333_DEBOUNCE,
+ pdata->debounce_time / 3);
+ if (err)
+ dev_warn(&client->dev, "Unable to set debounce time\n");
+ }
+
+ if (pdata->active_time) {
+ err = lm8333_write8(lm8333, LM8333_ACTIVE,
+ pdata->active_time / 3);
+ if (err)
+ dev_warn(&client->dev, "Unable to set active time\n");
+ }
+
+ err = request_threaded_irq(client->irq, NULL, lm8333_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "lm8333", lm8333);
+ if (err)
+ goto free_mem;
+
+ err = input_register_device(input);
+ if (err)
+ goto free_irq;
+
+ i2c_set_clientdata(client, lm8333);
+ return 0;
+
+ free_irq:
+ free_irq(client->irq, lm8333);
+ free_mem:
+ input_free_device(input);
+ kfree(lm8333);
+ return err;
+}
+
+static int __devexit lm8333_remove(struct i2c_client *client)
+{
+ struct lm8333 *lm8333 = i2c_get_clientdata(client);
+
+ free_irq(client->irq, lm8333);
+ input_unregister_device(lm8333->input);
+ kfree(lm8333);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm8333_id[] = {
+ { "lm8333", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm8333_id);
+
+static struct i2c_driver lm8333_driver = {
+ .driver = {
+ .name = "lm8333",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm8333_probe,
+ .remove = __devexit_p(lm8333_remove),
+ .id_table = lm8333_id,
+};
+module_i2c_driver(lm8333_driver);
+
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_DESCRIPTION("LM8333 keyboard driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 9b223d73de32..18b72372028a 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -27,7 +27,6 @@
struct matrix_keypad {
const struct matrix_keypad_platform_data *pdata;
struct input_dev *input_dev;
- unsigned short *keycodes;
unsigned int row_shift;
DECLARE_BITMAP(disabled_gpios, MATRIX_MAX_ROWS);
@@ -38,6 +37,8 @@ struct matrix_keypad {
bool scan_pending;
bool stopped;
bool gpio_all_disabled;
+
+ unsigned short keycodes[];
};
/*
@@ -224,7 +225,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
disable_row_irqs(keypad);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static void matrix_keypad_enable_wakeup(struct matrix_keypad *keypad)
{
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
@@ -293,16 +294,16 @@ static int matrix_keypad_resume(struct device *dev)
return 0;
}
-
-static const SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
- matrix_keypad_suspend, matrix_keypad_resume);
#endif
-static int __devinit init_matrix_gpio(struct platform_device *pdev,
- struct matrix_keypad *keypad)
+static SIMPLE_DEV_PM_OPS(matrix_keypad_pm_ops,
+ matrix_keypad_suspend, matrix_keypad_resume);
+
+static int __devinit matrix_keypad_init_gpio(struct platform_device *pdev,
+ struct matrix_keypad *keypad)
{
const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- int i, err = -EINVAL;
+ int i, err;
/* initialized strobe lines as outputs, activated */
for (i = 0; i < pdata->num_col_gpios; i++) {
@@ -348,8 +349,7 @@ static int __devinit init_matrix_gpio(struct platform_device *pdev,
"matrix-keypad", keypad);
if (err) {
dev_err(&pdev->dev,
- "Unable to acquire interrupt "
- "for GPIO line %i\n",
+ "Unable to acquire interrupt for GPIO line %i\n",
pdata->row_gpios[i]);
goto err_free_irqs;
}
@@ -375,14 +375,33 @@ err_free_cols:
return err;
}
+static void matrix_keypad_free_gpio(struct matrix_keypad *keypad)
+{
+ const struct matrix_keypad_platform_data *pdata = keypad->pdata;
+ int i;
+
+ if (pdata->clustered_irq > 0) {
+ free_irq(pdata->clustered_irq, keypad);
+ } else {
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
+ }
+
+ for (i = 0; i < pdata->num_row_gpios; i++)
+ gpio_free(pdata->row_gpios[i]);
+
+ for (i = 0; i < pdata->num_col_gpios; i++)
+ gpio_free(pdata->col_gpios[i]);
+}
+
static int __devinit matrix_keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
const struct matrix_keymap_data *keymap_data;
struct matrix_keypad *keypad;
struct input_dev *input_dev;
- unsigned short *keycodes;
unsigned int row_shift;
+ size_t keymap_size;
int err;
pdata = pdev->dev.platform_data;
@@ -398,20 +417,18 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
}
row_shift = get_count_order(pdata->num_col_gpios);
-
- keypad = kzalloc(sizeof(struct matrix_keypad), GFP_KERNEL);
- keycodes = kzalloc((pdata->num_row_gpios << row_shift) *
- sizeof(*keycodes),
- GFP_KERNEL);
+ keymap_size = (pdata->num_row_gpios << row_shift) *
+ sizeof(keypad->keycodes[0]);
+ keypad = kzalloc(sizeof(struct matrix_keypad) + keymap_size,
+ GFP_KERNEL);
input_dev = input_allocate_device();
- if (!keypad || !keycodes || !input_dev) {
+ if (!keypad || !input_dev) {
err = -ENOMEM;
goto err_free_mem;
}
keypad->input_dev = input_dev;
keypad->pdata = pdata;
- keypad->keycodes = keycodes;
keypad->row_shift = row_shift;
keypad->stopped = true;
INIT_DELAYED_WORK(&keypad->work, matrix_keypad_scan);
@@ -420,38 +437,38 @@ static int __devinit matrix_keypad_probe(struct platform_device *pdev)
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
- if (!pdata->no_autorepeat)
- input_dev->evbit[0] |= BIT_MASK(EV_REP);
input_dev->open = matrix_keypad_start;
input_dev->close = matrix_keypad_stop;
- input_dev->keycode = keycodes;
- input_dev->keycodesize = sizeof(*keycodes);
- input_dev->keycodemax = pdata->num_row_gpios << row_shift;
-
- matrix_keypad_build_keymap(keymap_data, row_shift,
- input_dev->keycode, input_dev->keybit);
+ err = matrix_keypad_build_keymap(keymap_data, NULL,
+ pdata->num_row_gpios,
+ pdata->num_col_gpios,
+ keypad->keycodes, input_dev);
+ if (err)
+ goto err_free_mem;
+ if (!pdata->no_autorepeat)
+ __set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad);
- err = init_matrix_gpio(pdev, keypad);
+ err = matrix_keypad_init_gpio(pdev, keypad);
if (err)
goto err_free_mem;
err = input_register_device(keypad->input_dev);
if (err)
- goto err_free_mem;
+ goto err_free_gpio;
device_init_wakeup(&pdev->dev, pdata->wakeup);
platform_set_drvdata(pdev, keypad);
return 0;
+err_free_gpio:
+ matrix_keypad_free_gpio(keypad);
err_free_mem:
input_free_device(input_dev);
- kfree(keycodes);
kfree(keypad);
return err;
}
@@ -459,29 +476,15 @@ err_free_mem:
static int __devexit matrix_keypad_remove(struct platform_device *pdev)
{
struct matrix_keypad *keypad = platform_get_drvdata(pdev);
- const struct matrix_keypad_platform_data *pdata = keypad->pdata;
- int i;
device_init_wakeup(&pdev->dev, 0);
- if (pdata->clustered_irq > 0) {
- free_irq(pdata->clustered_irq, keypad);
- } else {
- for (i = 0; i < pdata->num_row_gpios; i++)
- free_irq(gpio_to_irq(pdata->row_gpios[i]), keypad);
- }
-
- for (i = 0; i < pdata->num_row_gpios; i++)
- gpio_free(pdata->row_gpios[i]);
-
- for (i = 0; i < pdata->num_col_gpios; i++)
- gpio_free(pdata->col_gpios[i]);
-
+ matrix_keypad_free_gpio(keypad);
input_unregister_device(keypad->input_dev);
- platform_set_drvdata(pdev, NULL);
- kfree(keypad->keycodes);
kfree(keypad);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
@@ -491,9 +494,7 @@ static struct platform_driver matrix_keypad_driver = {
.driver = {
.name = "matrix-keypad",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &matrix_keypad_pm_ops,
-#endif
},
};
module_platform_driver(matrix_keypad_driver);
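Two recurring conversions show up in the matrix_keypad change above and in the keypad drivers that follow: the separately allocated keycode array becomes a flexible array member sized in a single allocation, and the keymap is built with the six-argument matrix_keypad_build_keymap(), whose return value must now be checked. A condensed sketch of both, using a hypothetical foo_keypad structure:

	struct foo_keypad {
		struct input_dev *input_dev;
		unsigned int row_shift;
		unsigned short keycodes[];	/* sized at allocation time */
	};

	row_shift = get_count_order(num_cols);
	keymap_size = (num_rows << row_shift) * sizeof(keypad->keycodes[0]);
	keypad = kzalloc(sizeof(*keypad) + keymap_size, GFP_KERNEL);
	if (!keypad)
		return -ENOMEM;

	err = matrix_keypad_build_keymap(keymap_data, NULL,
					 num_rows, num_cols,
					 keypad->keycodes, input_dev);
	if (err)
		goto err_free_mem;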
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 64a0ca4c92f3..0d77f6c84950 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -178,7 +178,8 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
}
error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
- IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_mem;
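The IRQF_ONESHOT addition here repeats across the I2C keypad drivers below (mpr121, qt1070, tca6416, tca8418, tnetv107x). When a threaded interrupt is requested with no primary handler, the IRQ core installs its default hard-IRQ handler and requires IRQF_ONESHOT so the line stays masked until the thread finishes; without the flag, request_threaded_irq() rejects the request. A minimal sketch with placeholder names:

	/* Illustrative only: a NULL primary handler requires IRQF_ONESHOT */
	error = request_threaded_irq(client->irq, NULL, foo_irq_thread,
				     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				     "foo", foo_data);
	if (error) {
		dev_err(&client->dev, "Failed to request IRQ\n");
		goto err_free_mem;
	}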
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index caa218a51b5a..7613f1cac951 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -248,7 +248,7 @@ static int __devinit mpr_touchkey_probe(struct i2c_client *client,
error = request_threaded_irq(client->irq, NULL,
mpr_touchkey_interrupt,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->dev.driver->name, mpr121);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index 48d1cab0aa1c..f971898ad591 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -166,15 +166,4 @@ static struct serio_driver nkbd_drv = {
.disconnect = nkbd_disconnect,
};
-static int __init nkbd_init(void)
-{
- return serio_register_driver(&nkbd_drv);
-}
-
-static void __exit nkbd_exit(void)
-{
- serio_unregister_driver(&nkbd_drv);
-}
-
-module_init(nkbd_init);
-module_exit(nkbd_exit);
+module_serio_driver(nkbd_drv);
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 101e245944e7..4ea4341a68c5 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -39,7 +39,8 @@
#define SKE_KPRISA (0x1 << 2)
#define SKE_KEYPAD_ROW_SHIFT 3
-#define SKE_KPD_KEYMAP_SIZE (8 * 8)
+#define SKE_KPD_NUM_ROWS 8
+#define SKE_KPD_NUM_COLS 8
/* keypad auto scan registers */
#define SKE_ASR0 0x20
@@ -63,7 +64,7 @@ struct ske_keypad {
void __iomem *reg_base;
struct input_dev *input;
const struct ske_keypad_platform_data *board;
- unsigned short keymap[SKE_KPD_KEYMAP_SIZE];
+ unsigned short keymap[SKE_KPD_NUM_ROWS * SKE_KPD_NUM_COLS];
struct clk *clk;
spinlock_t ske_keypad_lock;
};
@@ -261,19 +262,18 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
input->name = "ux500-ske-keypad";
input->dev.parent = &pdev->dev;
- input->keycode = keypad->keymap;
- input->keycodesize = sizeof(keypad->keymap[0]);
- input->keycodemax = ARRAY_SIZE(keypad->keymap);
+ error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ SKE_KPD_NUM_ROWS, SKE_KPD_NUM_COLS,
+ keypad->keymap, input);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to build keymap\n");
+ goto err_iounmap;
+ }
input_set_capability(input, EV_MSC, MSC_SCAN);
-
- __set_bit(EV_KEY, input->evbit);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- matrix_keypad_build_keymap(plat->keymap_data, SKE_KEYPAD_ROW_SHIFT,
- input->keycode, input->keybit);
-
clk_enable(keypad->clk);
/* go through board initialization helpers */
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 6b630d9d3dff..a0222db4dc86 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -61,6 +61,7 @@ struct omap_kp {
unsigned int cols;
unsigned long delay;
unsigned int debounce;
+ unsigned short keymap[];
};
static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0);
@@ -316,13 +317,6 @@ static int __devinit omap_kp_probe(struct platform_device *pdev)
if (!cpu_is_omap24xx())
omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
- input_dev->keycode = &omap_kp[1];
- input_dev->keycodesize = sizeof(unsigned short);
- input_dev->keycodemax = keycodemax;
-
- if (pdata->rep)
- __set_bit(EV_REP, input_dev->evbit);
-
if (pdata->delay)
omap_kp->delay = pdata->delay;
@@ -371,9 +365,6 @@ static int __devinit omap_kp_probe(struct platform_device *pdev)
goto err2;
/* setup input device */
- __set_bit(EV_KEY, input_dev->evbit);
- matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
- input_dev->keycode, input_dev->keybit);
input_dev->name = "omap-keypad";
input_dev->phys = "omap-keypad/input0";
input_dev->dev.parent = &pdev->dev;
@@ -383,6 +374,15 @@ static int __devinit omap_kp_probe(struct platform_device *pdev)
input_dev->id.product = 0x0001;
input_dev->id.version = 0x0100;
+ if (pdata->rep)
+ __set_bit(EV_REP, input_dev->evbit);
+
+ ret = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
+ pdata->rows, pdata->cols,
+ omap_kp->keymap, input_dev);
+ if (ret < 0)
+ goto err3;
+
ret = input_register_device(omap_kp->input);
if (ret < 0) {
printk(KERN_ERR "Unable to register omap-keypad input device\n");
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index e809ac095a38..aed5f6999ce2 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -68,19 +68,52 @@
#define OMAP4_MASK_IRQSTATUSDISABLE 0xFFFF
+enum {
+ KBD_REVISION_OMAP4 = 0,
+ KBD_REVISION_OMAP5,
+};
+
struct omap4_keypad {
struct input_dev *input;
void __iomem *base;
- int irq;
+ unsigned int irq;
unsigned int rows;
unsigned int cols;
+ u32 reg_offset;
+ u32 irqreg_offset;
unsigned int row_shift;
unsigned char key_state[8];
unsigned short keymap[];
};
+static int kbd_readl(struct omap4_keypad *keypad_data, u32 offset)
+{
+ return __raw_readl(keypad_data->base +
+ keypad_data->reg_offset + offset);
+}
+
+static void kbd_writel(struct omap4_keypad *keypad_data, u32 offset, u32 value)
+{
+ __raw_writel(value,
+ keypad_data->base + keypad_data->reg_offset + offset);
+}
+
+static int kbd_read_irqreg(struct omap4_keypad *keypad_data, u32 offset)
+{
+ return __raw_readl(keypad_data->base +
+ keypad_data->irqreg_offset + offset);
+}
+
+static void kbd_write_irqreg(struct omap4_keypad *keypad_data,
+ u32 offset, u32 value)
+{
+ __raw_writel(value,
+ keypad_data->base + keypad_data->irqreg_offset + offset);
+}
+
+
/* Interrupt handler */
static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
{
@@ -91,12 +124,11 @@ static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
u32 *new_state = (u32 *) key_state;
/* Disable interrupts */
- __raw_writel(OMAP4_VAL_IRQDISABLE,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_VAL_IRQDISABLE);
- *new_state = __raw_readl(keypad_data->base + OMAP4_KBD_FULLCODE31_0);
- *(new_state + 1) = __raw_readl(keypad_data->base
- + OMAP4_KBD_FULLCODE63_32);
+ *new_state = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE31_0);
+ *(new_state + 1) = kbd_readl(keypad_data, OMAP4_KBD_FULLCODE63_32);
for (row = 0; row < keypad_data->rows; row++) {
changed = key_state[row] ^ keypad_data->key_state[row];
@@ -121,12 +153,13 @@ static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id)
sizeof(keypad_data->key_state));
/* clear pending interrupts */
- __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS),
- keypad_data->base + OMAP4_KBD_IRQSTATUS);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+ kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
/* enable interrupts */
- __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_DEF_IRQENABLE_EVENTEN |
+ OMAP4_DEF_IRQENABLE_LONGKEY);
return IRQ_HANDLED;
}
@@ -139,16 +172,17 @@ static int omap4_keypad_open(struct input_dev *input)
disable_irq(keypad_data->irq);
- __raw_writel(OMAP4_VAL_FUNCTIONALCFG,
- keypad_data->base + OMAP4_KBD_CTRL);
- __raw_writel(OMAP4_VAL_DEBOUNCINGTIME,
- keypad_data->base + OMAP4_KBD_DEBOUNCINGTIME);
- __raw_writel(OMAP4_VAL_IRQDISABLE,
- keypad_data->base + OMAP4_KBD_IRQSTATUS);
- __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN | OMAP4_DEF_IRQENABLE_LONGKEY,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
- __raw_writel(OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA,
- keypad_data->base + OMAP4_KBD_WAKEUPENABLE);
+ kbd_writel(keypad_data, OMAP4_KBD_CTRL,
+ OMAP4_VAL_FUNCTIONALCFG);
+ kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
+ OMAP4_VAL_DEBOUNCINGTIME);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+ OMAP4_VAL_IRQDISABLE);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_DEF_IRQENABLE_EVENTEN |
+ OMAP4_DEF_IRQENABLE_LONGKEY);
+ kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE,
+ OMAP4_DEF_WUP_EVENT_ENA | OMAP4_DEF_WUP_LONG_KEY_ENA);
enable_irq(keypad_data->irq);
@@ -162,12 +196,12 @@ static void omap4_keypad_close(struct input_dev *input)
disable_irq(keypad_data->irq);
/* Disable interrupts */
- __raw_writel(OMAP4_VAL_IRQDISABLE,
- keypad_data->base + OMAP4_KBD_IRQENABLE);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
+ OMAP4_VAL_IRQDISABLE);
/* clear pending interrupts */
- __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS),
- keypad_data->base + OMAP4_KBD_IRQSTATUS);
+ kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+ kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
enable_irq(keypad_data->irq);
@@ -182,6 +216,7 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
struct resource *res;
resource_size_t size;
unsigned int row_shift, max_keys;
+ int rev;
int irq;
int error;
@@ -241,11 +276,40 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
keypad_data->rows = pdata->rows;
keypad_data->cols = pdata->cols;
+ /*
+ * Enable clocks for the keypad module so that we can read the
+ * revision register.
+ */
+ pm_runtime_enable(&pdev->dev);
+ error = pm_runtime_get_sync(&pdev->dev);
+ if (error) {
+ dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
+ goto err_unmap;
+ }
+ rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
+ rev &= 0x03 << 30;
+ rev >>= 30;
+ switch (rev) {
+ case KBD_REVISION_OMAP4:
+ keypad_data->reg_offset = 0x00;
+ keypad_data->irqreg_offset = 0x00;
+ break;
+ case KBD_REVISION_OMAP5:
+ keypad_data->reg_offset = 0x10;
+ keypad_data->irqreg_offset = 0x0c;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "Keypad reports unsupported revision %d", rev);
+ error = -EINVAL;
+ goto err_pm_put_sync;
+ }
+
/* input device allocation */
keypad_data->input = input_dev = input_allocate_device();
if (!input_dev) {
error = -ENOMEM;
- goto err_unmap;
+ goto err_pm_put_sync;
}
input_dev->name = pdev->name;
@@ -258,20 +322,19 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
input_dev->open = omap4_keypad_open;
input_dev->close = omap4_keypad_close;
- input_dev->keycode = keypad_data->keymap;
- input_dev->keycodesize = sizeof(keypad_data->keymap[0]);
- input_dev->keycodemax = max_keys;
+ error = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
+ pdata->rows, pdata->cols,
+ keypad_data->keymap, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto err_free_input;
+ }
- __set_bit(EV_KEY, input_dev->evbit);
__set_bit(EV_REP, input_dev->evbit);
-
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, keypad_data);
- matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
- input_dev->keycode, input_dev->keybit);
-
error = request_irq(keypad_data->irq, omap4_keypad_interrupt,
IRQF_TRIGGER_RISING,
"omap4-keypad", keypad_data);
@@ -280,7 +343,7 @@ static int __devinit omap4_keypad_probe(struct platform_device *pdev)
goto err_free_input;
}
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
error = input_register_device(keypad_data->input);
if (error < 0) {
@@ -296,6 +359,8 @@ err_pm_disable:
free_irq(keypad_data->irq, keypad_data);
err_free_input:
input_free_device(input_dev);
+err_pm_put_sync:
+ pm_runtime_put_sync(&pdev->dev);
err_unmap:
iounmap(keypad_data->base);
err_release_mem:
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 01a1c9f8a383..52c34657d301 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -626,21 +626,21 @@ static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
kp->input->id.product = 0x0001;
kp->input->id.vendor = 0x0001;
- kp->input->evbit[0] = BIT_MASK(EV_KEY);
-
- if (pdata->rep)
- __set_bit(EV_REP, kp->input->evbit);
-
- kp->input->keycode = kp->keycodes;
- kp->input->keycodemax = PM8XXX_MATRIX_MAX_SIZE;
- kp->input->keycodesize = sizeof(kp->keycodes);
kp->input->open = pmic8xxx_kp_open;
kp->input->close = pmic8xxx_kp_close;
- matrix_keypad_build_keymap(keymap_data, PM8XXX_ROW_SHIFT,
- kp->input->keycode, kp->input->keybit);
+ rc = matrix_keypad_build_keymap(keymap_data, NULL,
+ PM8XXX_MAX_ROWS, PM8XXX_MAX_COLS,
+ kp->keycodes, kp->input);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto err_get_irq;
+ }
+ if (pdata->rep)
+ __set_bit(EV_REP, kp->input->evbit);
input_set_capability(kp->input, EV_MSC, MSC_SCAN);
+
input_set_drvdata(kp->input, kp);
/* initialize keypad state */
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 29fe1b2be1c1..7f7b72464a37 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -311,7 +311,15 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
if (pdata->enable_rotary0 || pdata->enable_rotary1)
pxa27x_keypad_scan_rotary(keypad);
- new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+ /*
+ * KPDK_DK only reports the raw key pin level, so the polarity is
+ * board-specific and a low level may be the active state.
+ */
+ if (pdata->direct_key_low_active)
+ new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
+ else
+ new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+
bits_changed = keypad->direct_key_state ^ new_state;
if (bits_changed == 0)
@@ -383,7 +391,14 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
if (pdata->direct_key_num > direct_key_num)
direct_key_num = pdata->direct_key_num;
- keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
+ /*
+ * Direct key usage may not start at KP_DKIN0, so check the platform
+ * mask data to configure the specific pins in use.
+ */
+ if (pdata->direct_key_mask)
+ keypad->direct_key_mask = pdata->direct_key_mask;
+ else
+ keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
/* enable direct key */
if (direct_key_num)
@@ -399,7 +414,7 @@ static int pxa27x_keypad_open(struct input_dev *dev)
struct pxa27x_keypad *keypad = input_get_drvdata(dev);
/* Enable unit clock */
- clk_enable(keypad->clk);
+ clk_prepare_enable(keypad->clk);
pxa27x_keypad_config(keypad);
return 0;
@@ -410,7 +425,7 @@ static void pxa27x_keypad_close(struct input_dev *dev)
struct pxa27x_keypad *keypad = input_get_drvdata(dev);
/* Disable clock unit */
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
}
#ifdef CONFIG_PM
@@ -419,10 +434,14 @@ static int pxa27x_keypad_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
- clk_disable(keypad->clk);
-
+ /*
+ * If the keypad is used as a wake-up source, the clock cannot be
+ * disabled; otherwise key presses could not be detected.
+ */
if (device_may_wakeup(&pdev->dev))
enable_irq_wake(keypad->irq);
+ else
+ clk_disable_unprepare(keypad->clk);
return 0;
}
@@ -433,19 +452,24 @@ static int pxa27x_keypad_resume(struct device *dev)
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
- if (device_may_wakeup(&pdev->dev))
+ /*
+ * If the keypad is used as a wake-up source, the clock was never
+ * turned off, so it does not need to be configured again.
+ */
+ if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(keypad->irq);
+ } else {
+ mutex_lock(&input_dev->mutex);
- mutex_lock(&input_dev->mutex);
+ if (input_dev->users) {
+ /* Enable unit clock */
+ clk_prepare_enable(keypad->clk);
+ pxa27x_keypad_config(keypad);
+ }
- if (input_dev->users) {
- /* Enable unit clock */
- clk_enable(keypad->clk);
- pxa27x_keypad_config(keypad);
+ mutex_unlock(&input_dev->mutex);
}
- mutex_unlock(&input_dev->mutex);
-
return 0;
}
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 0b7b2f891752..ca68f2992d72 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -201,7 +201,8 @@ static int __devinit qt1070_probe(struct i2c_client *client,
msleep(QT1070_RESET_TIME);
err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
- IRQF_TRIGGER_NONE, client->dev.driver->name, data);
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (err) {
dev_err(&client->dev, "fail to request irq\n");
goto err_free_mem;
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 2391ae884fee..a061ba603a29 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -454,23 +454,23 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
- input_set_drvdata(input_dev, keypad);
input_dev->open = samsung_keypad_open;
input_dev->close = samsung_keypad_close;
- input_dev->evbit[0] = BIT_MASK(EV_KEY);
- if (!pdata->no_autorepeat)
- input_dev->evbit[0] |= BIT_MASK(EV_REP);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ pdata->rows, pdata->cols,
+ keypad->keycodes, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto err_put_clk;
+ }
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ if (!pdata->no_autorepeat)
+ __set_bit(EV_REP, input_dev->evbit);
- input_dev->keycode = keypad->keycodes;
- input_dev->keycodesize = sizeof(keypad->keycodes[0]);
- input_dev->keycodemax = pdata->rows << row_shift;
-
- matrix_keypad_build_keymap(keymap_data, row_shift,
- input_dev->keycode, input_dev->keybit);
+ input_set_drvdata(input_dev, keypad);
keypad->irq = platform_get_irq(pdev, 0);
if (keypad->irq < 0) {
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 3b6b528f02fd..6f287f7e1538 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/slab.h>
@@ -49,7 +50,9 @@
#define KEY_VALUE 0x00FFFFFF
#define ROW_MASK 0xF0
#define COLUMN_MASK 0x0F
-#define ROW_SHIFT 4
+#define NUM_ROWS 16
+#define NUM_COLS 16
+
#define KEY_MATRIX_SHIFT 6
struct spear_kbd {
@@ -60,7 +63,8 @@ struct spear_kbd {
unsigned int irq;
unsigned int mode;
unsigned short last_key;
- unsigned short keycodes[256];
+ unsigned short keycodes[NUM_ROWS * NUM_COLS];
+ bool rep;
};
static irqreturn_t spear_kbd_interrupt(int irq, void *dev_id)
@@ -136,27 +140,49 @@ static void spear_kbd_close(struct input_dev *dev)
kbd->last_key = KEY_RESERVED;
}
-static int __devinit spear_kbd_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static int __devinit spear_kbd_parse_dt(struct platform_device *pdev,
+ struct spear_kbd *kbd)
{
- const struct kbd_platform_data *pdata = pdev->dev.platform_data;
- const struct matrix_keymap_data *keymap;
- struct spear_kbd *kbd;
- struct input_dev *input_dev;
- struct resource *res;
- int irq;
+ struct device_node *np = pdev->dev.of_node;
int error;
+ u32 val;
- if (!pdata) {
- dev_err(&pdev->dev, "Invalid platform data\n");
+ if (!np) {
+ dev_err(&pdev->dev, "Missing DT data\n");
return -EINVAL;
}
- keymap = pdata->keymap;
- if (!keymap) {
- dev_err(&pdev->dev, "no keymap defined\n");
- return -EINVAL;
+ if (of_property_read_bool(np, "autorepeat"))
+ kbd->rep = true;
+
+ error = of_property_read_u32(np, "st,mode", &val);
+ if (error) {
+ dev_err(&pdev->dev, "DT: Invalid or missing mode\n");
+ return error;
}
+ kbd->mode = val;
+ return 0;
+}
+#else
+static inline int spear_kbd_parse_dt(struct platform_device *pdev,
+ struct spear_kbd *kbd)
+{
+ return -ENOSYS;
+}
+#endif
+
+static int __devinit spear_kbd_probe(struct platform_device *pdev)
+{
+ struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
+ struct spear_kbd *kbd;
+ struct input_dev *input_dev;
+ struct resource *res;
+ int irq;
+ int error;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "no keyboard resource defined\n");
@@ -179,7 +205,15 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
kbd->input = input_dev;
kbd->irq = irq;
- kbd->mode = pdata->mode;
+
+ if (!pdata) {
+ error = spear_kbd_parse_dt(pdev, kbd);
+ if (error)
+ goto err_free_mem;
+ } else {
+ kbd->mode = pdata->mode;
+ kbd->rep = pdata->rep;
+ }
kbd->res = request_mem_region(res->start, resource_size(res),
pdev->name);
@@ -212,18 +246,17 @@ static int __devinit spear_kbd_probe(struct platform_device *pdev)
input_dev->open = spear_kbd_open;
input_dev->close = spear_kbd_close;
- __set_bit(EV_KEY, input_dev->evbit);
- if (pdata->rep)
+ error = matrix_keypad_build_keymap(keymap, NULL, NUM_ROWS, NUM_COLS,
+ kbd->keycodes, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to build keymap\n");
+ goto err_put_clk;
+ }
+
+ if (kbd->rep)
__set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- input_dev->keycode = kbd->keycodes;
- input_dev->keycodesize = sizeof(kbd->keycodes[0]);
- input_dev->keycodemax = ARRAY_SIZE(kbd->keycodes);
-
- matrix_keypad_build_keymap(keymap, ROW_SHIFT,
- input_dev->keycode, input_dev->keybit);
-
input_set_drvdata(input_dev, kbd);
error = request_irq(irq, spear_kbd_interrupt, 0, "keyboard", kbd);
@@ -317,6 +350,14 @@ static int spear_kbd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(spear_kbd_pm_ops, spear_kbd_suspend, spear_kbd_resume);
+#ifdef CONFIG_OF
+static const struct of_device_id spear_kbd_id_table[] = {
+ { .compatible = "st,spear300-kbd" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
+#endif
+
static struct platform_driver spear_kbd_driver = {
.probe = spear_kbd_probe,
.remove = __devexit_p(spear_kbd_remove),
@@ -324,6 +365,7 @@ static struct platform_driver spear_kbd_driver = {
.name = "keyboard",
.owner = THIS_MODULE,
.pm = &spear_kbd_pm_ops,
+ .of_match_table = of_match_ptr(spear_kbd_id_table),
},
};
module_platform_driver(spear_kbd_driver);
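The spear-keyboard change adds device-tree probing alongside platform data: spear_kbd_parse_dt() reads the "autorepeat" and "st,mode" properties, and the driver gains an of_device_id table wrapped in of_match_ptr() so non-OF builds drop the reference. A trimmed sketch of that wiring, with hypothetical foo names and compatible string:

	#ifdef CONFIG_OF
	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-kbd" },	/* placeholder compatible */
		{ }
	};
	MODULE_DEVICE_TABLE(of, foo_of_match);
	#endif

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name		= "foo",
			.owner		= THIS_MODULE,
			.of_match_table	= of_match_ptr(foo_of_match),
		},
	};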
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index 9397cf9c625c..470a8778dec1 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -289,19 +289,17 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev)
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- input_set_capability(input, EV_MSC, MSC_SCAN);
+ ret = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ STMPE_KEYPAD_MAX_ROWS,
+ STMPE_KEYPAD_MAX_COLS,
+ keypad->keymap, input);
+ if (ret)
+ goto out_freeinput;
- __set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- input->keycode = keypad->keymap;
- input->keycodesize = sizeof(keypad->keymap[0]);
- input->keycodemax = ARRAY_SIZE(keypad->keymap);
-
- matrix_keypad_build_keymap(plat->keymap_data, STMPE_KEYPAD_ROW_SHIFT,
- input->keycode, input->keybit);
-
for (i = 0; i < plat->keymap_data->keymap_size; i++) {
unsigned int key = plat->keymap_data->keymap[i];
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index 7437219370b1..cc612c5d5427 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -170,15 +170,4 @@ static struct serio_driver skbd_drv = {
.disconnect = skbd_disconnect,
};
-static int __init skbd_init(void)
-{
- return serio_register_driver(&skbd_drv);
-}
-
-static void __exit skbd_exit(void)
-{
- serio_unregister_driver(&skbd_drv);
-}
-
-module_init(skbd_init);
-module_exit(skbd_exit);
+module_serio_driver(skbd_drv);
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index a99a04b03ee4..5f836b1638c1 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -369,19 +369,4 @@ static struct serio_driver sunkbd_drv = {
.disconnect = sunkbd_disconnect,
};
-/*
- * The functions for insering/removing us as a module.
- */
-
-static int __init sunkbd_init(void)
-{
- return serio_register_driver(&sunkbd_drv);
-}
-
-static void __exit sunkbd_exit(void)
-{
- serio_unregister_driver(&sunkbd_drv);
-}
-
-module_init(sunkbd_init);
-module_exit(sunkbd_exit);
+module_serio_driver(sunkbd_drv);
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 2dee3e4e7c6f..7d498e698508 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -78,7 +78,7 @@
* @input: pointer to input device object
* @board: keypad platform device
* @krow: number of rows
- * @kcol: number of coloumns
+ * @kcol: number of columns
* @keymap: matrix scan code table for keycodes
* @keypad_stopped: holds keypad status
*/
@@ -96,21 +96,15 @@ static int tc3589x_keypad_init_key_hardware(struct tc_keypad *keypad)
{
int ret;
struct tc3589x *tc3589x = keypad->tc3589x;
- u8 settle_time = keypad->board->settle_time;
- u8 dbounce_period = keypad->board->debounce_period;
- u8 rows = keypad->board->krow & 0xf; /* mask out the nibble */
- u8 column = keypad->board->kcol & 0xf; /* mask out the nibble */
-
- /* validate platform configurations */
- if (keypad->board->kcol > TC3589x_MAX_KPCOL ||
- keypad->board->krow > TC3589x_MAX_KPROW ||
- keypad->board->debounce_period > TC3589x_MAX_DEBOUNCE_SETTLE ||
- keypad->board->settle_time > TC3589x_MAX_DEBOUNCE_SETTLE)
+ const struct tc3589x_keypad_platform_data *board = keypad->board;
+
+ /* validate platform configuration */
+ if (board->kcol > TC3589x_MAX_KPCOL || board->krow > TC3589x_MAX_KPROW)
return -EINVAL;
/* configure KBDSIZE 4 LSbits for cols and 4 MSbits for rows */
ret = tc3589x_reg_write(tc3589x, TC3589x_KBDSIZE,
- (rows << KP_ROW_SHIFT) | column);
+ (board->krow << KP_ROW_SHIFT) | board->kcol);
if (ret < 0)
return ret;
@@ -124,12 +118,14 @@ static int tc3589x_keypad_init_key_hardware(struct tc_keypad *keypad)
return ret;
/* Configure settle time */
- ret = tc3589x_reg_write(tc3589x, TC3589x_KBDSETTLE_REG, settle_time);
+ ret = tc3589x_reg_write(tc3589x, TC3589x_KBDSETTLE_REG,
+ board->settle_time);
if (ret < 0)
return ret;
/* Configure debounce time */
- ret = tc3589x_reg_write(tc3589x, TC3589x_KBDBOUNCE, dbounce_period);
+ ret = tc3589x_reg_write(tc3589x, TC3589x_KBDBOUNCE,
+ board->debounce_period);
if (ret < 0)
return ret;
@@ -337,23 +333,22 @@ static int __devinit tc3589x_keypad_probe(struct platform_device *pdev)
input->name = pdev->name;
input->dev.parent = &pdev->dev;
- input->keycode = keypad->keymap;
- input->keycodesize = sizeof(keypad->keymap[0]);
- input->keycodemax = ARRAY_SIZE(keypad->keymap);
-
input->open = tc3589x_keypad_open;
input->close = tc3589x_keypad_close;
- input_set_drvdata(input, keypad);
+ error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
+ TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL,
+ keypad->keymap, input);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to build keymap\n");
+ goto err_free_mem;
+ }
input_set_capability(input, EV_MSC, MSC_SCAN);
-
- __set_bit(EV_KEY, input->evbit);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- matrix_keypad_build_keymap(plat->keymap_data, 0x3,
- input->keycode, input->keybit);
+ input_set_drvdata(input, keypad);
error = request_threaded_irq(irq, NULL,
tc3589x_keypad_irq, plat->irqtype,
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 3afea3f89718..c355cdde8d22 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -278,7 +278,8 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,
error = request_threaded_irq(chip->irqnum, NULL,
tca6416_keys_isr,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
"tca6416-keypad", chip);
if (error) {
dev_dbg(&client->dev,
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 958ec107bfbc..893869b29ed9 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -342,26 +342,25 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
input->id.product = 0x001;
input->id.version = 0x0001;
- input->keycode = keypad_data->keymap;
- input->keycodesize = sizeof(keypad_data->keymap[0]);
- input->keycodemax = max_keys;
+ error = matrix_keypad_build_keymap(pdata->keymap_data, NULL,
+ pdata->rows, pdata->cols,
+ keypad_data->keymap, input);
+ if (error) {
+ dev_dbg(&client->dev, "Failed to build keymap\n");
+ goto fail2;
+ }
- __set_bit(EV_KEY, input->evbit);
if (pdata->rep)
__set_bit(EV_REP, input->evbit);
-
input_set_capability(input, EV_MSC, MSC_SCAN);
input_set_drvdata(input, keypad_data);
- matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
- input->keycode, input->keybit);
-
if (pdata->irq_is_gpio)
client->irq = gpio_to_irq(client->irq);
error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, keypad_data);
if (error) {
dev_dbg(&client->dev,
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index fe4ac95ca6c8..4ffe64d53107 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -619,8 +619,8 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
}
#ifdef CONFIG_OF
-static struct tegra_kbc_platform_data * __devinit
-tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
+static struct tegra_kbc_platform_data * __devinit tegra_kbc_dt_parse_pdata(
+ struct platform_device *pdev)
{
struct tegra_kbc_platform_data *pdata;
struct device_node *np = pdev->dev.of_node;
@@ -660,10 +660,6 @@ tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
pdata->pin_cfg[KBC_MAX_ROW + i].type = PIN_CFG_COL;
}
- pdata->keymap_data = matrix_keyboard_of_fill_keymap(np, "linux,keymap");
-
- /* FIXME: Add handling of linux,fn-keymap here */
-
return pdata;
}
#else
@@ -674,10 +670,36 @@ static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
}
#endif
+static int __devinit tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
+{
+ const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+ const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+ unsigned int keymap_rows = KBC_MAX_KEY;
+ int retval;
+
+ if (keymap_data && pdata->use_fn_map)
+ keymap_rows *= 2;
+
+ retval = matrix_keypad_build_keymap(keymap_data, NULL,
+ keymap_rows, KBC_MAX_COL,
+ kbc->keycode, kbc->idev);
+ if (retval == -ENOSYS || retval == -ENOENT) {
+ /*
+ * If there is no OF support in kernel or keymap
+ * property is missing, use default keymap.
+ */
+ retval = matrix_keypad_build_keymap(
+ &tegra_kbc_default_keymap_data, NULL,
+ keymap_rows, KBC_MAX_COL,
+ kbc->keycode, kbc->idev);
+ }
+
+ return retval;
+}
+
static int __devinit tegra_kbc_probe(struct platform_device *pdev)
{
const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
- const struct matrix_keymap_data *keymap_data;
struct tegra_kbc *kbc;
struct input_dev *input_dev;
struct resource *res;
@@ -757,29 +779,26 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
+ kbc->wakeup_key = pdata->wakeup_key;
+ kbc->use_fn_map = pdata->use_fn_map;
+ kbc->use_ghost_filter = pdata->use_ghost_filter;
+
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
input_dev->open = tegra_kbc_open;
input_dev->close = tegra_kbc_close;
- input_set_drvdata(input_dev, kbc);
+ err = tegra_kbd_setup_keymap(kbc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup keymap\n");
+ goto err_put_clk;
+ }
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ __set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- input_dev->keycode = kbc->keycode;
- input_dev->keycodesize = sizeof(kbc->keycode[0]);
- input_dev->keycodemax = KBC_MAX_KEY;
- if (pdata->use_fn_map)
- input_dev->keycodemax *= 2;
-
- kbc->use_fn_map = pdata->use_fn_map;
- kbc->use_ghost_filter = pdata->use_ghost_filter;
- keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
- matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
- input_dev->keycode, input_dev->keybit);
- kbc->wakeup_key = pdata->wakeup_key;
+ input_set_drvdata(input_dev, kbc);
err = request_irq(kbc->irq, tegra_kbc_isr,
IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
@@ -799,9 +818,6 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, kbc);
device_init_wakeup(&pdev->dev, pdata->wakeup);
- if (!pdev->dev.platform_data)
- matrix_keyboard_of_free_keymap(pdata->keymap_data);
-
return 0;
err_free_irq:
@@ -816,10 +832,8 @@ err_free_mem:
input_free_device(input_dev);
kfree(kbc);
err_free_pdata:
- if (!pdev->dev.platform_data) {
- matrix_keyboard_of_free_keymap(pdata->keymap_data);
+ if (!pdev->dev.platform_data)
kfree(pdata);
- }
return err;
}
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index fb39c94b6fdd..4c34f21fbe2d 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -227,15 +227,15 @@ static int __devinit keypad_probe(struct platform_device *pdev)
goto error_clk;
}
- error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
- dev_name(dev), kp);
+ error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
+ IRQF_ONESHOT, dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad press key irq\n");
goto error_irq_press;
}
- error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
- dev_name(dev), kp);
+ error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
+ IRQF_ONESHOT, dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad release key irq\n");
goto error_irq_release;
@@ -247,15 +247,11 @@ static int __devinit keypad_probe(struct platform_device *pdev)
error = -ENOMEM;
goto error_input;
}
- input_set_drvdata(kp->input_dev, kp);
kp->input_dev->name = pdev->name;
kp->input_dev->dev.parent = &pdev->dev;
kp->input_dev->open = keypad_start;
kp->input_dev->close = keypad_stop;
- kp->input_dev->evbit[0] = BIT_MASK(EV_KEY);
- if (!pdata->no_autorepeat)
- kp->input_dev->evbit[0] |= BIT_MASK(EV_REP);
clk_enable(kp->clk);
rev = keypad_read(kp, rev);
@@ -264,15 +260,20 @@ static int __devinit keypad_probe(struct platform_device *pdev)
kp->input_dev->id.version = ((rev >> 16) & 0xfff);
clk_disable(kp->clk);
- kp->input_dev->keycode = kp->keycodes;
- kp->input_dev->keycodesize = sizeof(kp->keycodes[0]);
- kp->input_dev->keycodemax = kp->rows << kp->row_shift;
-
- matrix_keypad_build_keymap(keymap_data, kp->row_shift, kp->keycodes,
- kp->input_dev->keybit);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ kp->rows, kp->cols,
+ kp->keycodes, kp->input_dev);
+ if (error) {
+ dev_err(dev, "Failed to build keymap\n");
+ goto error_reg;
+ }
+ if (!pdata->no_autorepeat)
+ kp->input_dev->evbit[0] |= BIT_MASK(EV_REP);
input_set_capability(kp->input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(kp->input_dev, kp);
+
error = input_register_device(kp->input_dev);
if (error < 0) {
dev_err(dev, "Could not register input device\n");
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 67bec14e8b96..a2c6f79aa101 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -361,14 +361,6 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
kp->irq = platform_get_irq(pdev, 0);
/* setup input device */
- __set_bit(EV_KEY, input->evbit);
-
- /* Enable auto repeat feature of Linux input subsystem */
- if (pdata->rep)
- __set_bit(EV_REP, input->evbit);
-
- input_set_capability(input, EV_MSC, MSC_SCAN);
-
input->name = "TWL4030 Keypad";
input->phys = "twl4030_keypad/input0";
input->dev.parent = &pdev->dev;
@@ -378,12 +370,19 @@ static int __devinit twl4030_kp_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0003;
- input->keycode = kp->keymap;
- input->keycodesize = sizeof(kp->keymap[0]);
- input->keycodemax = ARRAY_SIZE(kp->keymap);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ TWL4030_MAX_ROWS,
+ 1 << TWL4030_ROW_SHIFT,
+ kp->keymap, input);
+ if (error) {
+ dev_err(kp->dbg_dev, "Failed to build keymap\n");
+ goto err1;
+ }
- matrix_keypad_build_keymap(keymap_data, TWL4030_ROW_SHIFT,
- input->keycode, input->keybit);
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+ /* Enable auto repeat feature of Linux input subsystem */
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
error = input_register_device(input);
if (error) {
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index 99bbb7e775ae..085ede4d972d 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -42,7 +42,8 @@
#define KGET_RAW(n) (((n) & KEY0R) >> 3)
#define KGET_COLUMN(n) ((n) & KEY0C)
-#define W90P910_MAX_KEY_NUM (8 * 8)
+#define W90P910_NUM_ROWS 8
+#define W90P910_NUM_COLS 8
#define W90P910_ROW_SHIFT 3
struct w90p910_keypad {
@@ -51,7 +52,7 @@ struct w90p910_keypad {
struct input_dev *input_dev;
void __iomem *mmio_base;
int irq;
- unsigned short keymap[W90P910_MAX_KEY_NUM];
+ unsigned short keymap[W90P910_NUM_ROWS * W90P910_NUM_COLS];
};
static void w90p910_keypad_scan_matrix(struct w90p910_keypad *keypad,
@@ -190,17 +191,13 @@ static int __devinit w90p910_keypad_probe(struct platform_device *pdev)
input_dev->close = w90p910_keypad_close;
input_dev->dev.parent = &pdev->dev;
- input_dev->keycode = keypad->keymap;
- input_dev->keycodesize = sizeof(keypad->keymap[0]);
- input_dev->keycodemax = ARRAY_SIZE(keypad->keymap);
-
- input_set_drvdata(input_dev, keypad);
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
- input_set_capability(input_dev, EV_MSC, MSC_SCAN);
-
- matrix_keypad_build_keymap(keymap_data, W90P910_ROW_SHIFT,
- input_dev->keycode, input_dev->keybit);
+ error = matrix_keypad_build_keymap(keymap_data, NULL,
+ W90P910_NUM_ROWS, W90P910_NUM_COLS,
+ keypad->keymap, input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to build keymap\n");
+ goto failed_put_clk;
+ }
error = request_irq(keypad->irq, w90p910_keypad_irq_handler,
0, pdev->name, keypad);
@@ -209,6 +206,10 @@ static int __devinit w90p910_keypad_probe(struct platform_device *pdev)
goto failed_put_clk;
}
+ __set_bit(EV_REP, input_dev->evbit);
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input_dev, keypad);
+
/* Register the input device */
error = input_register_device(input_dev);
if (error) {
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 37b01d777a4a..d050d9d0011b 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -169,15 +169,4 @@ static struct serio_driver xtkbd_drv = {
.disconnect = xtkbd_disconnect,
};
-static int __init xtkbd_init(void)
-{
- return serio_register_driver(&xtkbd_drv);
-}
-
-static void __exit xtkbd_exit(void)
-{
- serio_unregister_driver(&xtkbd_drv);
-}
-
-module_init(xtkbd_init);
-module_exit(xtkbd_exit);
+module_serio_driver(xtkbd_drv);
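The module_serio_driver() conversions in this series (xtkbd here, and sermouse, vsxxxaa, ps2mult and serio_raw further down) all lean on the same helper macro; as a rough sketch, assuming the generic module_driver() wrapper, the one-liner expands to the very init/exit boilerplate each driver used to spell out by hand:

/*
 * Illustrative expansion of module_serio_driver(xtkbd_drv); the generated
 * function names are an assumption, only serio_register_driver() and
 * serio_unregister_driver() are taken from the removed code above.
 */
static int __init xtkbd_drv_init(void)
{
	return serio_register_driver(&xtkbd_drv);
}
module_init(xtkbd_drv_init);

static void __exit xtkbd_drv_exit(void)
{
	serio_unregister_driver(&xtkbd_drv);
}
module_exit(xtkbd_drv_exit);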
diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
new file mode 100644
index 000000000000..443ad64b7f2a
--- /dev/null
+++ b/drivers/input/matrix-keymap.c
@@ -0,0 +1,163 @@
+/*
+ * Helpers for matrix keyboard bindings
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * Author:
+ * Olof Johansson <olof@lixom.net>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/of.h>
+#include <linux/export.h>
+#include <linux/input/matrix_keypad.h>
+
+static bool matrix_keypad_map_key(struct input_dev *input_dev,
+ unsigned int rows, unsigned int cols,
+ unsigned int row_shift, unsigned int key)
+{
+ unsigned short *keymap = input_dev->keycode;
+ unsigned int row = KEY_ROW(key);
+ unsigned int col = KEY_COL(key);
+ unsigned short code = KEY_VAL(key);
+
+ if (row >= rows || col >= cols) {
+ dev_err(input_dev->dev.parent,
+ "%s: invalid keymap entry 0x%x (row: %d, col: %d, rows: %d, cols: %d)\n",
+ __func__, key, row, col, rows, cols);
+ return false;
+ }
+
+ keymap[MATRIX_SCAN_CODE(row, col, row_shift)] = code;
+ __set_bit(code, input_dev->keybit);
+
+ return true;
+}
+
+#ifdef CONFIG_OF
+static int matrix_keypad_parse_of_keymap(const char *propname,
+ unsigned int rows, unsigned int cols,
+ struct input_dev *input_dev)
+{
+ struct device *dev = input_dev->dev.parent;
+ struct device_node *np = dev->of_node;
+ unsigned int row_shift = get_count_order(cols);
+ unsigned int max_keys = rows << row_shift;
+ unsigned int proplen, i, size;
+ const __be32 *prop;
+
+ if (!np)
+ return -ENOENT;
+
+ if (!propname)
+ propname = "linux,keymap";
+
+ prop = of_get_property(np, propname, &proplen);
+ if (!prop) {
+ dev_err(dev, "OF: %s property not defined in %s\n",
+ propname, np->full_name);
+ return -ENOENT;
+ }
+
+ if (proplen % sizeof(u32)) {
+ dev_err(dev, "OF: Malformed keycode property %s in %s\n",
+ propname, np->full_name);
+ return -EINVAL;
+ }
+
+ size = proplen / sizeof(u32);
+ if (size > max_keys) {
+ dev_err(dev, "OF: %s size overflow\n", propname);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < size; i++) {
+ unsigned int key = be32_to_cpup(prop + i);
+
+ if (!matrix_keypad_map_key(input_dev, rows, cols,
+ row_shift, key))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#else
+static int matrix_keypad_parse_of_keymap(const char *propname,
+ unsigned int rows, unsigned int cols,
+ struct input_dev *input_dev)
+{
+ return -ENOSYS;
+}
+#endif
+
+/**
+ * matrix_keypad_build_keymap - convert platform keymap into matrix keymap
+ * @keymap_data: keymap supplied by the platform code
+ * @keymap_name: name of device tree property containing keymap (if device
+ * tree support is enabled).
+ * @rows: number of rows in target keymap array
+ * @cols: number of cols in target keymap array
+ * @keymap: expanded version of keymap that is suitable for use by
+ * matrix keyboard driver
+ * @input_dev: input device for which we are setting up the keymap
+ *
+ * This function converts platform keymap (encoded with KEY() macro) into
+ * an array of keycodes that is suitable for use in a standard matrix
+ * keyboard driver that uses row and col as indices.
+ *
+ * If @keymap_data is not supplied and device tree support is enabled
+ * it will attempt to load the keymap from the property specified by @keymap_name
+ * argument (or "linux,keymap" if @keymap_name is %NULL).
+ *
+ * Callers are expected to set up input_dev->dev.parent before calling this
+ * function.
+ */
+int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
+ const char *keymap_name,
+ unsigned int rows, unsigned int cols,
+ unsigned short *keymap,
+ struct input_dev *input_dev)
+{
+ unsigned int row_shift = get_count_order(cols);
+ int i;
+ int error;
+
+ input_dev->keycode = keymap;
+ input_dev->keycodesize = sizeof(*keymap);
+ input_dev->keycodemax = rows << row_shift;
+
+ __set_bit(EV_KEY, input_dev->evbit);
+
+ if (keymap_data) {
+ for (i = 0; i < keymap_data->keymap_size; i++) {
+ unsigned int key = keymap_data->keymap[i];
+
+ if (!matrix_keypad_map_key(input_dev, rows, cols,
+ row_shift, key))
+ return -EINVAL;
+ }
+ } else {
+ error = matrix_keypad_parse_of_keymap(keymap_name, rows, cols,
+ input_dev);
+ if (error)
+ return error;
+ }
+
+ __clear_bit(KEY_RESERVED, input_dev->keybit);
+
+ return 0;
+}
+EXPORT_SYMBOL(matrix_keypad_build_keymap);
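The drivers converted in this series (tegra-kbc, tnetv107x, twl4030, w90p910) all follow the same call pattern; a minimal usage sketch, assuming a hypothetical 4x4 keypad driver — the example_* names and geometry are made up, only matrix_keypad_build_keymap() itself comes from the new file above:

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/platform_device.h>

#define EXAMPLE_ROWS	4
#define EXAMPLE_COLS	4

/* rows << get_count_order(cols) entries: 4 << 2 = 16 for a 4x4 matrix */
static unsigned short example_keycodes[EXAMPLE_ROWS << 2];

static int example_keypad_setup(struct platform_device *pdev,
				struct input_dev *input,
				const struct matrix_keymap_data *keymap_data)
{
	int error;

	/* the helper expects dev.parent to be set before it is called */
	input->dev.parent = &pdev->dev;

	/* a NULL keymap_data makes the helper parse "linux,keymap" from DT */
	error = matrix_keypad_build_keymap(keymap_data, NULL,
					   EXAMPLE_ROWS, EXAMPLE_COLS,
					   example_keycodes, input);
	if (error)
		return error;

	return input_register_device(input);
}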
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index 0ac75bbad4d6..2e5d5e1de647 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -972,6 +972,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
struct ad714x_platform_data *plat_data = dev->platform_data;
struct ad714x_chip *ad714x;
void *drv_mem;
+ unsigned long irqflags;
struct ad714x_button_drv *bt_drv;
struct ad714x_slider_drv *sd_drv;
@@ -1162,10 +1163,11 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
alloc_idx++;
}
+ irqflags = plat_data->irqflags ?: IRQF_TRIGGER_FALLING;
+ irqflags |= IRQF_ONESHOT;
+
error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
- plat_data->irqflags ?
- plat_data->irqflags : IRQF_TRIGGER_FALLING,
- "ad714x_captouch", ad714x);
+ irqflags, "ad714x_captouch", ad714x);
if (error) {
dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
goto err_unreg_dev;
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 06517e60e50c..a3735a01e9fd 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -318,7 +318,7 @@ struct cma3000_accl_data *cma3000_init(struct device *dev, int irq,
mutex_init(&data->mutex);
data->mode = pdata->mode;
- if (data->mode < CMAMODE_DEFAULT || data->mode > CMAMODE_POFF) {
+ if (data->mode > CMAMODE_POFF) {
data->mode = CMAMODE_MOTDET;
dev_warn(dev,
"Invalid mode specified, assuming Motion Detect\n");
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 35083c6836c3..c1313d8535c3 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -213,7 +213,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
/* REVISIT: flush the event queue? */
status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
- IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&pdev->dev), keys);
if (status < 0)
goto fail2;
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 5403c571b6a5..306f84c2d8fb 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -367,7 +367,7 @@ static int __devinit mpu3050_probe(struct i2c_client *client,
error = request_threaded_irq(client->irq,
NULL, mpu3050_interrupt_thread,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"mpu3050", sensor);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 14e94f56cb7d..c34f6c0371c4 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -27,6 +27,7 @@
*/
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/workqueue.h>
#include <linux/input.h>
#include <linux/mfd/twl6040.h>
@@ -258,10 +259,13 @@ static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
{
struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
+ struct device_node *node = pdev->dev.of_node;
struct vibra_info *info;
+ int vddvibl_uV = 0;
+ int vddvibr_uV = 0;
int ret;
- if (!pdata) {
+ if (!pdata && !node) {
dev_err(&pdev->dev, "platform_data not available\n");
return -EINVAL;
}
@@ -273,11 +277,26 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
}
info->dev = &pdev->dev;
+
info->twl6040 = dev_get_drvdata(pdev->dev.parent);
- info->vibldrv_res = pdata->vibldrv_res;
- info->vibrdrv_res = pdata->vibrdrv_res;
- info->viblmotor_res = pdata->viblmotor_res;
- info->vibrmotor_res = pdata->vibrmotor_res;
+ if (pdata) {
+ info->vibldrv_res = pdata->vibldrv_res;
+ info->vibrdrv_res = pdata->vibrdrv_res;
+ info->viblmotor_res = pdata->viblmotor_res;
+ info->vibrmotor_res = pdata->vibrmotor_res;
+ vddvibl_uV = pdata->vddvibl_uV;
+ vddvibr_uV = pdata->vddvibr_uV;
+ } else {
+ of_property_read_u32(node, "vibldrv_res", &info->vibldrv_res);
+ of_property_read_u32(node, "vibrdrv_res", &info->vibrdrv_res);
+ of_property_read_u32(node, "viblmotor_res",
+ &info->viblmotor_res);
+ of_property_read_u32(node, "vibrmotor_res",
+ &info->vibrmotor_res);
+ of_property_read_u32(node, "vddvibl_uV", &vddvibl_uV);
+ of_property_read_u32(node, "vddvibr_uV", &vddvibr_uV);
+ }
+
if ((!info->vibldrv_res && !info->viblmotor_res) ||
(!info->vibrdrv_res && !info->vibrmotor_res)) {
dev_err(info->dev, "invalid vibra driver/motor resistance\n");
@@ -339,10 +358,9 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
goto err_regulator;
}
- if (pdata->vddvibl_uV) {
+ if (vddvibl_uV) {
ret = regulator_set_voltage(info->supplies[0].consumer,
- pdata->vddvibl_uV,
- pdata->vddvibl_uV);
+ vddvibl_uV, vddvibl_uV);
if (ret) {
dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
ret);
@@ -350,10 +368,9 @@ static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
}
}
- if (pdata->vddvibr_uV) {
+ if (vddvibr_uV) {
ret = regulator_set_voltage(info->supplies[1].consumer,
- pdata->vddvibr_uV,
- pdata->vddvibr_uV);
+ vddvibr_uV, vddvibr_uV);
if (ret) {
dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
ret);
@@ -401,6 +418,12 @@ static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id twl6040_vibra_of_match[] = {
+ {.compatible = "ti,twl6040-vibra", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl6040_vibra_of_match);
+
static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
.remove = __devexit_p(twl6040_vibra_remove),
@@ -408,6 +431,7 @@ static struct platform_driver twl6040_vibra_driver = {
.name = "twl6040-vibra",
.owner = THIS_MODULE,
.pm = &twl6040_vibra_pm_ops,
+ .of_match_table = twl6040_vibra_of_match,
},
};
module_platform_driver(twl6040_vibra_driver);
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 47f18d6bce46..6790a812a1db 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -73,7 +73,7 @@ static int __devinit wm831x_on_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_on *wm831x_on;
- int irq = platform_get_irq(pdev, 0);
+ int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
int ret;
wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 9b8db821d5f0..cd6268cf7cd5 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -339,4 +339,16 @@ config MOUSE_SYNAPTICS_USB
To compile this driver as a module, choose M here: the
module will be called synaptics_usb.
+config MOUSE_NAVPOINT_PXA27x
+ tristate "Synaptics NavPoint (PXA27x SSP/SPI)"
+ depends on PXA27x && PXA_SSP
+ help
+ This driver adds support for the Synaptics NavPoint touchpad connected
+ to a PXA27x SSP port in SPI slave mode. The device emulates a mouse;
+ a tap or tap-and-a-half drag gesture emulates the left mouse button.
+ For example, use the xf86-input-evdev driver for an X pointing device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called navpoint.
+
endif
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 4718effeb8d9..46ba7556fd4f 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o
obj-$(CONFIG_MOUSE_INPORT) += inport.o
obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o
obj-$(CONFIG_MOUSE_MAPLE) += maplemouse.o
+obj-$(CONFIG_MOUSE_NAVPOINT_PXA27x) += navpoint.o
obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o
obj-$(CONFIG_MOUSE_PS2) += psmouse.o
obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 4c6a72d3d48c..4a1347e91bdc 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -553,10 +553,7 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
- input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
- input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
- input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
- input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
+ input_mt_report_finger_count(dev, fingers);
input_report_key(dev, BTN_LEFT, left);
input_report_key(dev, BTN_RIGHT, right);
@@ -604,10 +601,54 @@ static void alps_process_packet_v3(struct psmouse *psmouse)
static void alps_process_packet_v4(struct psmouse *psmouse)
{
+ struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
+ int offset;
int x, y, z;
int left, right;
+ int x1, y1, x2, y2;
+ int fingers = 0;
+ unsigned int x_bitmap, y_bitmap;
+
+ /*
+ * v4 has a 6-byte encoding for bitmap data, but this data is
+ * broken up between 3 normal packets. Use priv->multi_packet to
+ * track our position in the bitmap packet.
+ */
+ if (packet[6] & 0x40) {
+ /* sync, reset position */
+ priv->multi_packet = 0;
+ }
+
+ if (WARN_ON_ONCE(priv->multi_packet > 2))
+ return;
+
+ offset = 2 * priv->multi_packet;
+ priv->multi_data[offset] = packet[6];
+ priv->multi_data[offset + 1] = packet[7];
+
+ if (++priv->multi_packet > 2) {
+ priv->multi_packet = 0;
+
+ x_bitmap = ((priv->multi_data[2] & 0x1f) << 10) |
+ ((priv->multi_data[3] & 0x60) << 3) |
+ ((priv->multi_data[0] & 0x3f) << 2) |
+ ((priv->multi_data[1] & 0x60) >> 5);
+ y_bitmap = ((priv->multi_data[5] & 0x01) << 10) |
+ ((priv->multi_data[3] & 0x1f) << 5) |
+ (priv->multi_data[1] & 0x1f);
+
+ fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ &x1, &y1, &x2, &y2);
+
+ /* Store MT data. */
+ priv->fingers = fingers;
+ priv->x1 = x1;
+ priv->x2 = x2;
+ priv->y1 = y1;
+ priv->y2 = y2;
+ }
left = packet[4] & 0x01;
right = packet[4] & 0x02;
@@ -617,21 +658,41 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
z = packet[5] & 0x7f;
+ /*
+ * If there were no contacts in the bitmap, use ST
+ * points in MT reports.
+ * If there were two contacts or more, report MT data.
+ */
+ if (priv->fingers < 2) {
+ x1 = x;
+ y1 = y;
+ fingers = z > 0 ? 1 : 0;
+ } else {
+ fingers = priv->fingers;
+ x1 = priv->x1;
+ x2 = priv->x2;
+ y1 = priv->y1;
+ y2 = priv->y2;
+ }
+
if (z >= 64)
input_report_key(dev, BTN_TOUCH, 1);
else
input_report_key(dev, BTN_TOUCH, 0);
+ alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
+
+ input_mt_report_finger_count(dev, fingers);
+
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+
if (z > 0) {
input_report_abs(dev, ABS_X, x);
input_report_abs(dev, ABS_Y, y);
}
input_report_abs(dev, ABS_PRESSURE, z);
- input_report_key(dev, BTN_TOOL_FINGER, z > 0);
- input_report_key(dev, BTN_LEFT, left);
- input_report_key(dev, BTN_RIGHT, right);
-
input_sync(dev);
}
@@ -1557,6 +1618,7 @@ int alps_init(struct psmouse *psmouse)
input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
break;
case ALPS_PROTO_V3:
+ case ALPS_PROTO_V4:
set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
input_mt_init_slots(dev1, 2);
input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
@@ -1565,8 +1627,7 @@ int alps_init(struct psmouse *psmouse)
set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
- /* fall through */
- case ALPS_PROTO_V4:
+
input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
break;
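The v4 bitmap assembly added to alps.c above spreads the contact bitmap across three saved byte pairs; a standalone worked sketch of that packing, with invented sample bytes (the shift/mask logic mirrors alps_process_packet_v4(), everything else is illustrative):

#include <stdio.h>

int main(void)
{
	/* three saved byte pairs, as filled in over three normal packets */
	unsigned char multi_data[6] = { 0x3f, 0x7f, 0x1f, 0x7f, 0x00, 0x01 };
	unsigned int x_bitmap, y_bitmap;

	/* 15-bit x contact bitmap */
	x_bitmap = ((multi_data[2] & 0x1f) << 10) |
		   ((multi_data[3] & 0x60) << 3) |
		   ((multi_data[0] & 0x3f) << 2) |
		   ((multi_data[1] & 0x60) >> 5);
	/* 11-bit y contact bitmap */
	y_bitmap = ((multi_data[5] & 0x01) << 10) |
		   ((multi_data[3] & 0x1f) << 5) |
		   (multi_data[1] & 0x1f);

	printf("x_bitmap=0x%05x y_bitmap=0x%03x\n", x_bitmap, y_bitmap);
	return 0;
}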
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index a00a4ab92a0f..ae1ac354c778 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -39,6 +39,8 @@ struct alps_data {
int prev_fin; /* Finger bit from previous packet */
int multi_packet; /* Multi-packet data in progress */
unsigned char multi_data[6]; /* Saved multi-packet data */
+ int x1, x2, y1, y2; /* Coordinates from last MT report */
+ int fingers; /* Number of fingers from MT report */
u8 quirks;
struct timer_list timer;
};
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 2cf681d98c0d..d528c23e194f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -79,6 +79,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+ /* MacbookPro10,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
/* Terminating entry */
{}
};
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4620, 5140 },
{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+ },
{}
};
diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c
new file mode 100644
index 000000000000..c29ae7654d5e
--- /dev/null
+++ b/drivers/input/mouse/navpoint.c
@@ -0,0 +1,369 @@
+/*
+ * Synaptics NavPoint (PXA27x SSP/SPI) driver.
+ *
+ * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/input/navpoint.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/slab.h>
+
+/*
+ * Synaptics Modular Embedded Protocol: Module Packet Format.
+ * Module header bits 2:0 = Length (# bytes that follow)
+ * Module header bits 4:3 = Control
+ * Module header bits 7:5 = Module Address
+ */
+#define HEADER_LENGTH(byte) ((byte) & 0x07)
+#define HEADER_CONTROL(byte) (((byte) >> 3) & 0x03)
+#define HEADER_ADDRESS(byte) ((byte) >> 5)
+
+struct navpoint {
+ struct ssp_device *ssp;
+ struct input_dev *input;
+ struct device *dev;
+ int gpio;
+ int index;
+ u8 data[1 + HEADER_LENGTH(0xff)];
+};
+
+/*
+ * Initialization values for SSCR0_x, SSCR1_x, SSSR_x.
+ */
+static const u32 sscr0 = 0
+ | SSCR0_TUM /* TIM = 1; No TUR interrupts */
+ | SSCR0_RIM /* RIM = 1; No ROR interrupts */
+ | SSCR0_SSE /* SSE = 1; SSP enabled */
+ | SSCR0_Motorola /* FRF = 0; Motorola SPI */
+ | SSCR0_DataSize(16) /* DSS = 15; Data size = 16-bit */
+ ;
+static const u32 sscr1 = 0
+ | SSCR1_SCFR /* SCFR = 1; SSPSCLK only during transfers */
+ | SSCR1_SCLKDIR /* SCLKDIR = 1; Slave mode */
+ | SSCR1_SFRMDIR /* SFRMDIR = 1; Slave mode */
+ | SSCR1_RWOT /* RWOT = 1; Receive without transmit mode */
+ | SSCR1_RxTresh(1) /* RFT = 0; Receive FIFO threshold = 1 */
+ | SSCR1_SPH /* SPH = 1; SSPSCLK inactive 0.5 + 1 cycles */
+ | SSCR1_RIE /* RIE = 1; Receive FIFO interrupt enabled */
+ ;
+static const u32 sssr = 0
+ | SSSR_BCE /* BCE = 1; Clear BCE */
+ | SSSR_TUR /* TUR = 1; Clear TUR */
+ | SSSR_EOC /* EOC = 1; Clear EOC */
+ | SSSR_TINT /* TINT = 1; Clear TINT */
+ | SSSR_PINT /* PINT = 1; Clear PINT */
+ | SSSR_ROR /* ROR = 1; Clear ROR */
+ ;
+
+/*
+ * MEP Query $22: Touchpad Coordinate Range Query is not supported by
+ * the NavPoint module, so sampled values provide the default limits.
+ */
+#define NAVPOINT_X_MIN 1278
+#define NAVPOINT_X_MAX 5340
+#define NAVPOINT_Y_MIN 1572
+#define NAVPOINT_Y_MAX 4396
+#define NAVPOINT_PRESSURE_MIN 0
+#define NAVPOINT_PRESSURE_MAX 255
+
+static void navpoint_packet(struct navpoint *navpoint)
+{
+ int finger;
+ int gesture;
+ int x, y, z;
+
+ switch (navpoint->data[0]) {
+ case 0xff: /* Garbage (packet?) between reset and Hello packet */
+ case 0x00: /* Module 0, NULL packet */
+ break;
+
+ case 0x0e: /* Module 0, Absolute packet */
+ finger = (navpoint->data[1] & 0x01);
+ gesture = (navpoint->data[1] & 0x02);
+ x = ((navpoint->data[2] & 0x1f) << 8) | navpoint->data[3];
+ y = ((navpoint->data[4] & 0x1f) << 8) | navpoint->data[5];
+ z = navpoint->data[6];
+ input_report_key(navpoint->input, BTN_TOUCH, finger);
+ input_report_abs(navpoint->input, ABS_X, x);
+ input_report_abs(navpoint->input, ABS_Y, y);
+ input_report_abs(navpoint->input, ABS_PRESSURE, z);
+ input_report_key(navpoint->input, BTN_TOOL_FINGER, finger);
+ input_report_key(navpoint->input, BTN_LEFT, gesture);
+ input_sync(navpoint->input);
+ break;
+
+ case 0x19: /* Module 0, Hello packet */
+ if ((navpoint->data[1] & 0xf0) == 0x10)
+ break;
+ /* FALLTHROUGH */
+ default:
+ dev_warn(navpoint->dev,
+ "spurious packet: data=0x%02x,0x%02x,...\n",
+ navpoint->data[0], navpoint->data[1]);
+ break;
+ }
+}
+
+static irqreturn_t navpoint_irq(int irq, void *dev_id)
+{
+ struct navpoint *navpoint = dev_id;
+ struct ssp_device *ssp = navpoint->ssp;
+ irqreturn_t ret = IRQ_NONE;
+ u32 status;
+
+ status = pxa_ssp_read_reg(ssp, SSSR);
+ if (status & sssr) {
+ dev_warn(navpoint->dev,
+ "unexpected interrupt: status=0x%08x\n", status);
+ pxa_ssp_write_reg(ssp, SSSR, (status & sssr));
+ ret = IRQ_HANDLED;
+ }
+
+ while (status & SSSR_RNE) {
+ u32 data;
+
+ data = pxa_ssp_read_reg(ssp, SSDR);
+ navpoint->data[navpoint->index + 0] = (data >> 8);
+ navpoint->data[navpoint->index + 1] = data;
+ navpoint->index += 2;
+ if (HEADER_LENGTH(navpoint->data[0]) < navpoint->index) {
+ navpoint_packet(navpoint);
+ navpoint->index = 0;
+ }
+ status = pxa_ssp_read_reg(ssp, SSSR);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static void navpoint_up(struct navpoint *navpoint)
+{
+ struct ssp_device *ssp = navpoint->ssp;
+ int timeout;
+
+ clk_prepare_enable(ssp->clk);
+
+ pxa_ssp_write_reg(ssp, SSCR1, sscr1);
+ pxa_ssp_write_reg(ssp, SSSR, sssr);
+ pxa_ssp_write_reg(ssp, SSTO, 0);
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0); /* SSCR0_SSE written last */
+
+ /* Wait until SSP port is ready for slave clock operations */
+ for (timeout = 100; timeout != 0; --timeout) {
+ if (!(pxa_ssp_read_reg(ssp, SSSR) & SSSR_CSS))
+ break;
+ msleep(1);
+ }
+
+ if (timeout == 0)
+ dev_err(navpoint->dev,
+ "timeout waiting for SSSR[CSS] to clear\n");
+
+ if (gpio_is_valid(navpoint->gpio))
+ gpio_set_value(navpoint->gpio, 1);
+}
+
+static void navpoint_down(struct navpoint *navpoint)
+{
+ struct ssp_device *ssp = navpoint->ssp;
+
+ if (gpio_is_valid(navpoint->gpio))
+ gpio_set_value(navpoint->gpio, 0);
+
+ pxa_ssp_write_reg(ssp, SSCR0, 0);
+
+ clk_disable_unprepare(ssp->clk);
+}
+
+static int navpoint_open(struct input_dev *input)
+{
+ struct navpoint *navpoint = input_get_drvdata(input);
+
+ navpoint_up(navpoint);
+
+ return 0;
+}
+
+static void navpoint_close(struct input_dev *input)
+{
+ struct navpoint *navpoint = input_get_drvdata(input);
+
+ navpoint_down(navpoint);
+}
+
+static int __devinit navpoint_probe(struct platform_device *pdev)
+{
+ const struct navpoint_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct ssp_device *ssp;
+ struct input_dev *input;
+ struct navpoint *navpoint;
+ int error;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (gpio_is_valid(pdata->gpio)) {
+ error = gpio_request_one(pdata->gpio, GPIOF_OUT_INIT_LOW,
+ "SYNAPTICS_ON");
+ if (error)
+ return error;
+ }
+
+ ssp = pxa_ssp_request(pdata->port, pdev->name);
+ if (!ssp) {
+ error = -ENODEV;
+ goto err_free_gpio;
+ }
+
+ /* HaRET does not disable devices before jumping into Linux */
+ if (pxa_ssp_read_reg(ssp, SSCR0) & SSCR0_SSE) {
+ pxa_ssp_write_reg(ssp, SSCR0, 0);
+ dev_warn(&pdev->dev, "ssp%d already enabled\n", pdata->port);
+ }
+
+ navpoint = kzalloc(sizeof(*navpoint), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!navpoint || !input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ navpoint->ssp = ssp;
+ navpoint->input = input;
+ navpoint->dev = &pdev->dev;
+ navpoint->gpio = pdata->gpio;
+
+ input->name = pdev->name;
+ input->dev.parent = &pdev->dev;
+
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(BTN_LEFT, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+
+ input_set_abs_params(input, ABS_X,
+ NAVPOINT_X_MIN, NAVPOINT_X_MAX, 0, 0);
+ input_set_abs_params(input, ABS_Y,
+ NAVPOINT_Y_MIN, NAVPOINT_Y_MAX, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE,
+ NAVPOINT_PRESSURE_MIN, NAVPOINT_PRESSURE_MAX,
+ 0, 0);
+
+ input->open = navpoint_open;
+ input->close = navpoint_close;
+
+ input_set_drvdata(input, navpoint);
+
+ error = request_irq(ssp->irq, navpoint_irq, 0, pdev->name, navpoint);
+ if (error)
+ goto err_free_mem;
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, navpoint);
+ dev_dbg(&pdev->dev, "ssp%d, irq %d\n", pdata->port, ssp->irq);
+
+ return 0;
+
+err_free_irq:
+ free_irq(ssp->irq, &pdev->dev);
+err_free_mem:
+ input_free_device(input);
+ kfree(navpoint);
+ pxa_ssp_free(ssp);
+err_free_gpio:
+ if (gpio_is_valid(pdata->gpio))
+ gpio_free(pdata->gpio);
+
+ return error;
+}
+
+static int __devexit navpoint_remove(struct platform_device *pdev)
+{
+ const struct navpoint_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct navpoint *navpoint = platform_get_drvdata(pdev);
+ struct ssp_device *ssp = navpoint->ssp;
+
+ free_irq(ssp->irq, navpoint);
+
+ input_unregister_device(navpoint->input);
+ kfree(navpoint);
+
+ pxa_ssp_free(ssp);
+
+ if (gpio_is_valid(pdata->gpio))
+ gpio_free(pdata->gpio);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int navpoint_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct navpoint *navpoint = platform_get_drvdata(pdev);
+ struct input_dev *input = navpoint->input;
+
+ mutex_lock(&input->mutex);
+ if (input->users)
+ navpoint_down(navpoint);
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static int navpoint_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct navpoint *navpoint = platform_get_drvdata(pdev);
+ struct input_dev *input = navpoint->input;
+
+ mutex_lock(&input->mutex);
+ if (input->users)
+ navpoint_up(navpoint);
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(navpoint_pm_ops, navpoint_suspend, navpoint_resume);
+
+static struct platform_driver navpoint_driver = {
+ .probe = navpoint_probe,
+ .remove = __devexit_p(navpoint_remove),
+ .driver = {
+ .name = "navpoint",
+ .owner = THIS_MODULE,
+ .pm = &navpoint_pm_ops,
+ },
+};
+
+module_platform_driver(navpoint_driver);
+
+MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
+MODULE_DESCRIPTION("Synaptics NavPoint (PXA27x SSP/SPI) driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:navpoint");
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 661a0ca3b3d6..3f5649f19082 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -41,7 +41,7 @@
#define GET_ABS_Y(packet) ((packet[2] << 2) | (packet[3] & 0x03))
/** Driver version. */
-static const char fsp_drv_ver[] = "1.0.0-K";
+static const char fsp_drv_ver[] = "1.1.0-K";
/*
* Make sure that the value being sent to FSP will not conflict with
@@ -303,6 +303,27 @@ static int fsp_get_revision(struct psmouse *psmouse, int *rev)
return 0;
}
+static int fsp_get_sn(struct psmouse *psmouse, int *sn)
+{
+ int v0, v1, v2;
+ int rc = -EIO;
+
+ /* the production number, available since Cx, lives at 0x0b40 ~ 0x0b42 */
+ if (fsp_page_reg_write(psmouse, FSP_PAGE_0B))
+ goto out;
+ if (fsp_reg_read(psmouse, FSP_REG_SN0, &v0))
+ goto out;
+ if (fsp_reg_read(psmouse, FSP_REG_SN1, &v1))
+ goto out;
+ if (fsp_reg_read(psmouse, FSP_REG_SN2, &v2))
+ goto out;
+ *sn = (v0 << 16) | (v1 << 8) | v2;
+ rc = 0;
+out:
+ fsp_page_reg_write(psmouse, FSP_PAGE_DEFAULT);
+ return rc;
+}
+
static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
{
static const int buttons[] = {
@@ -1000,16 +1021,21 @@ static int fsp_reconnect(struct psmouse *psmouse)
int fsp_init(struct psmouse *psmouse)
{
struct fsp_data *priv;
- int ver, rev;
+ int ver, rev, sn = 0;
int error;
if (fsp_get_version(psmouse, &ver) ||
fsp_get_revision(psmouse, &rev)) {
return -ENODEV;
}
+ if (ver >= FSP_VER_STL3888_C0) {
+ /* firmware information is only available since C0 */
+ fsp_get_sn(psmouse, &sn);
+ }
- psmouse_info(psmouse, "Finger Sensing Pad, hw: %d.%d.%d, sw: %s\n",
- ver >> 4, ver & 0x0F, rev, fsp_drv_ver);
+ psmouse_info(psmouse,
+ "Finger Sensing Pad, hw: %d.%d.%d, sn: %x, sw: %s\n",
+ ver >> 4, ver & 0x0F, rev, sn, fsp_drv_ver);
psmouse->private = priv = kzalloc(sizeof(struct fsp_data), GFP_KERNEL);
if (!priv)
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index 334de19e5ddb..aa697ece405b 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -65,6 +65,14 @@
#define FSP_BIT_SWC1_GST_GRP1 BIT(6)
#define FSP_BIT_SWC1_BX_COMPAT BIT(7)
+#define FSP_PAGE_0B (0x0b)
+#define FSP_PAGE_82 (0x82)
+#define FSP_PAGE_DEFAULT FSP_PAGE_82
+
+#define FSP_REG_SN0 (0x40)
+#define FSP_REG_SN1 (0x41)
+#define FSP_REG_SN2 (0x42)
+
/* Finger-sensing Pad packet formating related definitions */
/* absolute packet type */
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index 17ff137b9bd5..d5928fd0c914 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -355,15 +355,4 @@ static struct serio_driver sermouse_drv = {
.disconnect = sermouse_disconnect,
};
-static int __init sermouse_init(void)
-{
- return serio_register_driver(&sermouse_drv);
-}
-
-static void __exit sermouse_exit(void)
-{
- serio_unregister_driver(&sermouse_drv);
-}
-
-module_init(sermouse_init);
-module_exit(sermouse_exit);
+module_serio_driver(sermouse_drv);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index a4b14a41cbf4..c703d53be3a0 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -45,16 +45,6 @@
#define YMIN_NOMINAL 1408
#define YMAX_NOMINAL 4448
-/*
- * Synaptics touchpads report the y coordinate from bottom to top, which is
- * opposite from what userspace expects.
- * This function is used to invert y before reporting.
- */
-static int synaptics_invert_y(int y)
-{
- return YMAX_NOMINAL + YMIN_NOMINAL - y;
-}
-
/*****************************************************************************
* Stuff we need even when we do not want native Synaptics support
@@ -112,6 +102,16 @@ void synaptics_reset(struct psmouse *psmouse)
****************************************************************************/
/*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+ * opposite from what userspace expects.
+ * This function is used to invert y before reporting.
+ */
+static int synaptics_invert_y(int y)
+{
+ return YMAX_NOMINAL + YMIN_NOMINAL - y;
+}
+
+/*
 * Send a command to the synaptics touchpad by special commands
*/
static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c, unsigned char *param)
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index eb9a3cfbeefa..e900d465aaf6 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -548,16 +548,4 @@ static struct serio_driver vsxxxaa_drv = {
.disconnect = vsxxxaa_disconnect,
};
-static int __init vsxxxaa_init(void)
-{
- return serio_register_driver(&vsxxxaa_drv);
-}
-
-static void __exit vsxxxaa_exit(void)
-{
- serio_unregister_driver(&vsxxxaa_drv);
-}
-
-module_init(vsxxxaa_init);
-module_exit(vsxxxaa_exit);
-
+module_serio_driver(vsxxxaa_drv);
diff --git a/drivers/input/of_keymap.c b/drivers/input/of_keymap.c
deleted file mode 100644
index 061493d57682..000000000000
--- a/drivers/input/of_keymap.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Helpers for open firmware matrix keyboard bindings
- *
- * Copyright (C) 2012 Google, Inc
- *
- * Author:
- * Olof Johansson <olof@lixom.net>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/input.h>
-#include <linux/of.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/slab.h>
-
-struct matrix_keymap_data *
-matrix_keyboard_of_fill_keymap(struct device_node *np,
- const char *propname)
-{
- struct matrix_keymap_data *kd;
- u32 *keymap;
- int proplen, i;
- const __be32 *prop;
-
- if (!np)
- return NULL;
-
- if (!propname)
- propname = "linux,keymap";
-
- prop = of_get_property(np, propname, &proplen);
- if (!prop)
- return NULL;
-
- if (proplen % sizeof(u32)) {
- pr_warn("Malformed keymap property %s in %s\n",
- propname, np->full_name);
- return NULL;
- }
-
- kd = kzalloc(sizeof(*kd), GFP_KERNEL);
- if (!kd)
- return NULL;
-
- kd->keymap = keymap = kzalloc(proplen, GFP_KERNEL);
- if (!kd->keymap) {
- kfree(kd);
- return NULL;
- }
-
- kd->keymap_size = proplen / sizeof(u32);
-
- for (i = 0; i < kd->keymap_size; i++) {
- u32 tmp = be32_to_cpup(prop + i);
- int key_code, row, col;
-
- row = (tmp >> 24) & 0xff;
- col = (tmp >> 16) & 0xff;
- key_code = tmp & 0xffff;
- keymap[i] = KEY(row, col, key_code);
- }
-
- return kd;
-}
-EXPORT_SYMBOL_GPL(matrix_keyboard_of_fill_keymap);
-
-void matrix_keyboard_of_free_keymap(const struct matrix_keymap_data *kd)
-{
- if (kd) {
- kfree(kd->keymap);
- kfree(kd);
- }
-}
-EXPORT_SYMBOL_GPL(matrix_keyboard_of_free_keymap);
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index 43494742541c..0c42497aaaf4 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -206,6 +206,7 @@ static const struct pci_device_id pcips2_ids[] = {
},
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, pcips2_ids);
static struct pci_driver pcips2_driver = {
.name = "pcips2",
@@ -214,20 +215,8 @@ static struct pci_driver pcips2_driver = {
.remove = __devexit_p(pcips2_remove),
};
-static int __init pcips2_init(void)
-{
- return pci_register_driver(&pcips2_driver);
-}
-
-static void __exit pcips2_exit(void)
-{
- pci_unregister_driver(&pcips2_driver);
-}
-
-module_init(pcips2_init);
-module_exit(pcips2_exit);
+module_pci_driver(pcips2_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("PCI PS/2 keyboard/mouse driver");
-MODULE_DEVICE_TABLE(pci, pcips2_ids);
diff --git a/drivers/input/serio/ps2mult.c b/drivers/input/serio/ps2mult.c
index 15aa81c9f1fb..a76fb64f03db 100644
--- a/drivers/input/serio/ps2mult.c
+++ b/drivers/input/serio/ps2mult.c
@@ -304,15 +304,4 @@ static struct serio_driver ps2mult_drv = {
.reconnect = ps2mult_reconnect,
};
-static int __init ps2mult_init(void)
-{
- return serio_register_driver(&ps2mult_drv);
-}
-
-static void __exit ps2mult_exit(void)
-{
- serio_unregister_driver(&ps2mult_drv);
-}
-
-module_init(ps2mult_init);
-module_exit(ps2mult_exit);
+module_serio_driver(ps2mult_drv);
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 4494233d331a..59df2e7317a3 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -165,31 +165,38 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
struct serio_raw *serio_raw = client->serio_raw;
char uninitialized_var(c);
ssize_t read = 0;
- int retval;
+ int error;
- if (serio_raw->dead)
- return -ENODEV;
+ for (;;) {
+ if (serio_raw->dead)
+ return -ENODEV;
- if (serio_raw->head == serio_raw->tail && (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
+ if (serio_raw->head == serio_raw->tail &&
+ (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
- retval = wait_event_interruptible(serio_raw->wait,
- serio_raw->head != serio_raw->tail || serio_raw->dead);
- if (retval)
- return retval;
+ if (count == 0)
+ break;
- if (serio_raw->dead)
- return -ENODEV;
+ while (read < count && serio_raw_fetch_byte(serio_raw, &c)) {
+ if (put_user(c, buffer++))
+ return -EFAULT;
+ read++;
+ }
- while (read < count && serio_raw_fetch_byte(serio_raw, &c)) {
- if (put_user(c, buffer++)) {
- retval = -EFAULT;
+ if (read)
break;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ error = wait_event_interruptible(serio_raw->wait,
+ serio_raw->head != serio_raw->tail ||
+ serio_raw->dead);
+ if (error)
+ return error;
}
- read++;
}
- return read ?: retval;
+ return read;
}
static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
@@ -197,8 +204,7 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
{
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
- ssize_t written = 0;
- int retval;
+ int retval = 0;
unsigned char c;
retval = mutex_lock_interruptible(&serio_raw_mutex);
@@ -218,16 +224,20 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
retval = -EFAULT;
goto out;
}
+
if (serio_write(serio_raw->serio, c)) {
- retval = -EIO;
+ /* Either signal error or partial write */
+ if (retval == 0)
+ retval = -EIO;
goto out;
}
- written++;
+
+ retval++;
}
out:
mutex_unlock(&serio_raw_mutex);
- return written ?: retval;
+ return retval;
}
static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
@@ -432,15 +442,4 @@ static struct serio_driver serio_raw_drv = {
.manual_bind = true,
};
-static int __init serio_raw_init(void)
-{
- return serio_register_driver(&serio_raw_drv);
-}
-
-static void __exit serio_raw_exit(void)
-{
- serio_unregister_driver(&serio_raw_drv);
-}
-
-module_init(serio_raw_init);
-module_exit(serio_raw_exit);
+module_serio_driver(serio_raw_drv);
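The serio_raw read/write rework above changes the user-visible semantics slightly: read() now drains whatever is already queued before it ever sleeps (or fails with EAGAIN under O_NONBLOCK), and write() returns the number of bytes that made it out, reporting EIO only when the very first byte fails. A small user-space sketch, assuming the usual /dev/serio_raw0 node created for the port:

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	unsigned char reset = 0xff;	/* PS/2 reset command, as an example */
	unsigned char buf[8];
	ssize_t n;
	int fd;

	fd = open("/dev/serio_raw0", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* a short count means some bytes were sent; -1/EIO means none were */
	if (write(fd, &reset, sizeof(reset)) != sizeof(reset))
		perror("write");

	/* returns queued bytes immediately; -1/EAGAIN only when queue is empty */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("got %zd byte(s), first 0x%02x\n", n, buf[0]);

	close(fd);
	return 0;
}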
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index d96d4c2a76a9..1e983bec7d86 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -73,7 +73,8 @@ struct xps2data {
spinlock_t lock;
void __iomem *base_address; /* virt. address of control registers */
unsigned int flags;
- struct serio serio; /* serio */
+ struct serio *serio; /* serio */
+ struct device *dev;
};
/************************************/
@@ -119,7 +120,7 @@ static irqreturn_t xps2_interrupt(int irq, void *dev_id)
/* Check which interrupt is active */
if (intr_sr & XPS2_IPIXR_RX_OVF)
- dev_warn(drvdata->serio.dev.parent, "receive overrun error\n");
+ dev_warn(drvdata->dev, "receive overrun error\n");
if (intr_sr & XPS2_IPIXR_RX_ERR)
drvdata->flags |= SERIO_PARITY;
@@ -132,10 +133,10 @@ static irqreturn_t xps2_interrupt(int irq, void *dev_id)
/* Error, if a byte is not received */
if (status) {
- dev_err(drvdata->serio.dev.parent,
+ dev_err(drvdata->dev,
"wrong rcvd byte count (%d)\n", status);
} else {
- serio_interrupt(&drvdata->serio, c, drvdata->flags);
+ serio_interrupt(drvdata->serio, c, drvdata->flags);
drvdata->flags = 0;
}
}
@@ -193,7 +194,7 @@ static int sxps2_open(struct serio *pserio)
error = request_irq(drvdata->irq, &xps2_interrupt, 0,
DRIVER_NAME, drvdata);
if (error) {
- dev_err(drvdata->serio.dev.parent,
+ dev_err(drvdata->dev,
"Couldn't allocate interrupt %d\n", drvdata->irq);
return error;
}
@@ -259,15 +260,16 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
}
drvdata = kzalloc(sizeof(struct xps2data), GFP_KERNEL);
- if (!drvdata) {
- dev_err(dev, "Couldn't allocate device private record\n");
- return -ENOMEM;
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!drvdata || !serio) {
+ error = -ENOMEM;
+ goto failed1;
}
- dev_set_drvdata(dev, drvdata);
-
spin_lock_init(&drvdata->lock);
drvdata->irq = r_irq.start;
+ drvdata->serio = serio;
+ drvdata->dev = dev;
phys_addr = r_mem.start;
remap_size = resource_size(&r_mem);
@@ -298,7 +300,6 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
(unsigned long long)phys_addr, drvdata->base_address,
drvdata->irq);
- serio = &drvdata->serio;
serio->id.type = SERIO_8042;
serio->write = sxps2_write;
serio->open = sxps2_open;
@@ -312,13 +313,14 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
serio_register_port(serio);
+ platform_set_drvdata(ofdev, drvdata);
return 0; /* success */
failed2:
release_mem_region(phys_addr, remap_size);
failed1:
+ kfree(serio);
kfree(drvdata);
- dev_set_drvdata(dev, NULL);
return error;
}
@@ -333,22 +335,21 @@ failed1:
*/
static int __devexit xps2_of_remove(struct platform_device *of_dev)
{
- struct device *dev = &of_dev->dev;
- struct xps2data *drvdata = dev_get_drvdata(dev);
+ struct xps2data *drvdata = platform_get_drvdata(of_dev);
struct resource r_mem; /* IO mem resources */
- serio_unregister_port(&drvdata->serio);
+ serio_unregister_port(drvdata->serio);
iounmap(drvdata->base_address);
/* Get iospace of the device */
if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem))
- dev_err(dev, "invalid address\n");
+ dev_err(drvdata->dev, "invalid address\n");
else
release_mem_region(r_mem.start, resource_size(&r_mem));
kfree(drvdata);
- dev_set_drvdata(dev, NULL);
+ platform_set_drvdata(of_dev, NULL);
return 0;
}
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 755a39e4c9e9..ee83c3904ee8 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1862,7 +1862,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (i == ARRAY_SIZE(speeds)) {
dev_info(&intf->dev,
"Aiptek tried all speeds, no sane response\n");
- goto fail2;
+ goto fail3;
}
/* Associate this driver's struct with the usb interface.
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index b4842d0e61dd..b79d45198d82 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -135,6 +135,6 @@ extern const struct usb_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
void wacom_setup_device_quirks(struct wacom_features *features);
-void wacom_setup_input_capabilities(struct input_dev *input_dev,
- struct wacom_wac *wacom_wac);
+int wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac);
#endif
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 79a0509882d4..8b31473a81fe 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -28,6 +28,7 @@
#define HID_USAGE_Y_TILT 0x3e
#define HID_USAGE_FINGER 0x22
#define HID_USAGE_STYLUS 0x20
+#define HID_USAGE_CONTACTMAX 0x55
#define HID_COLLECTION 0xa1
#define HID_COLLECTION_LOGICAL 0x02
#define HID_COLLECTION_END 0xc0
@@ -204,6 +205,27 @@ static int wacom_parse_logical_collection(unsigned char *report,
return length;
}
+static void wacom_retrieve_report_data(struct usb_interface *intf,
+ struct wacom_features *features)
+{
+ int result = 0;
+ unsigned char *rep_data;
+
+ rep_data = kmalloc(2, GFP_KERNEL);
+ if (rep_data) {
+
+ rep_data[0] = 12;
+ result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
+ rep_data[0], rep_data, 2,
+ WAC_MSG_RETRIES);
+
+ if (result >= 0 && rep_data[1] > 2)
+ features->touch_max = rep_data[1];
+
+ kfree(rep_data);
+ }
+}
+
/*
* Interface Descriptor of wacom devices can be incomplete and
* inconsistent so wacom_features table is used to store stylus
@@ -236,6 +258,9 @@ static int wacom_parse_logical_collection(unsigned char *report,
 * 3rd gen Bamboo Touch no longer defines a Digitizer-Finger Physical
* Collection. Instead they define a Logical Collection with a single
* Logical Maximum for both X and Y.
+ *
+ * Intuos5 touch interface does not contain useful data. We deal with
+ * this after returning from this function.
*/
static int wacom_parse_hid(struct usb_interface *intf,
struct hid_descriptor *hid_desc,
@@ -295,6 +320,10 @@ static int wacom_parse_hid(struct usb_interface *intf,
/* need to reset back */
features->pktlen = WACOM_PKGLEN_TPC2FG;
}
+
+ if (features->type == MTSCREEN)
+ features->pktlen = WACOM_PKGLEN_MTOUCH;
+
if (features->type == BAMBOO_PT) {
/* need to reset back */
features->pktlen = WACOM_PKGLEN_BBTOUCH;
@@ -327,18 +356,15 @@ static int wacom_parse_hid(struct usb_interface *intf,
case HID_USAGE_Y:
if (usage == WCM_DESKTOP) {
if (finger) {
- features->device_type = BTN_TOOL_FINGER;
- if (features->type == TABLETPC2FG) {
- /* need to reset back */
- features->pktlen = WACOM_PKGLEN_TPC2FG;
+ int type = features->type;
+
+ if (type == TABLETPC2FG || type == MTSCREEN) {
features->y_max =
get_unaligned_le16(&report[i + 3]);
features->y_phy =
get_unaligned_le16(&report[i + 6]);
i += 7;
- } else if (features->type == BAMBOO_PT) {
- /* need to reset back */
- features->pktlen = WACOM_PKGLEN_BBTOUCH;
+ } else if (type == BAMBOO_PT) {
features->y_phy =
get_unaligned_le16(&report[i + 3]);
features->y_max =
@@ -352,10 +378,6 @@ static int wacom_parse_hid(struct usb_interface *intf,
i += 4;
}
} else if (pen) {
- /* penabled only accepts exact bytes of data */
- if (features->type == TABLETPC2FG)
- features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- features->device_type = BTN_TOOL_PEN;
features->y_max =
get_unaligned_le16(&report[i + 3]);
i += 4;
@@ -377,6 +399,13 @@ static int wacom_parse_hid(struct usb_interface *intf,
pen = 1;
i++;
break;
+
+ case HID_USAGE_CONTACTMAX:
+ /* leave touch_max as is if predefined */
+ if (!features->touch_max)
+ wacom_retrieve_report_data(intf, features);
+ i++;
+ break;
}
break;
@@ -413,22 +442,29 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
if (!rep_data)
return error;
- /* ask to report tablet data if it is MT Tablet PC or
- * not a Tablet PC */
- if (features->type == TABLETPC2FG) {
- do {
- rep_data[0] = 3;
- rep_data[1] = 4;
- rep_data[2] = 0;
- rep_data[3] = 0;
- report_id = 3;
- error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
- report_id, rep_data, 4, 1);
- if (error >= 0)
- error = wacom_get_report(intf,
- WAC_HID_FEATURE_REPORT,
- report_id, rep_data, 4, 1);
- } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
+ /* ask to report Wacom data */
+ if (features->device_type == BTN_TOOL_FINGER) {
+ /* if it is an MT Tablet PC touch */
+ if (features->type == TABLETPC2FG ||
+ features->type == MTSCREEN) {
+ do {
+ rep_data[0] = 3;
+ rep_data[1] = 4;
+ rep_data[2] = 0;
+ rep_data[3] = 0;
+ report_id = 3;
+ error = wacom_set_report(intf,
+ WAC_HID_FEATURE_REPORT,
+ report_id,
+ rep_data, 4, 1);
+ if (error >= 0)
+ error = wacom_get_report(intf,
+ WAC_HID_FEATURE_REPORT,
+ report_id,
+ rep_data, 4, 1);
+ } while ((error < 0 || rep_data[1] != 4) &&
+ limit++ < WAC_MSG_RETRIES);
+ }
} else if (features->type != TABLETPC &&
features->type != WIRELESS &&
features->device_type == BTN_TOOL_PEN) {
@@ -450,7 +486,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
}
static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
- struct wacom_features *features)
+ struct wacom_features *features)
{
int error = 0;
struct usb_host_interface *interface = intf->cur_altsetting;
@@ -478,16 +514,21 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
}
}
- /* only Tablet PCs and Bamboo P&T need to retrieve the info */
- if ((features->type != TABLETPC) && (features->type != TABLETPC2FG) &&
- (features->type != BAMBOO_PT))
+ /* only devices that support touch need to retrieve the info */
+ if (features->type != TABLETPC &&
+ features->type != TABLETPC2FG &&
+ features->type != BAMBOO_PT &&
+ features->type != MTSCREEN) {
goto out;
+ }
- if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
- if (usb_get_extra_descriptor(&interface->endpoint[0],
- HID_DEVICET_REPORT, &hid_desc)) {
- printk("wacom: can not retrieve extra class descriptor\n");
- error = 1;
+ error = usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc);
+ if (error) {
+ error = usb_get_extra_descriptor(&interface->endpoint[0],
+ HID_DEVICET_REPORT, &hid_desc);
+ if (error) {
+ dev_err(&intf->dev,
+ "can not retrieve extra class descriptor\n");
goto out;
}
}
@@ -577,23 +618,39 @@ static void wacom_remove_shared_data(struct wacom_wac *wacom)
static int wacom_led_control(struct wacom *wacom)
{
unsigned char *buf;
- int retval, led = 0;
+ int retval;
buf = kzalloc(9, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
- wacom->wacom_wac.features.type == WACOM_24HD)
- led = (wacom->led.select[1] << 4) | 0x40;
-
- led |= wacom->led.select[0] | 0x4;
-
- buf[0] = WAC_CMD_LED_CONTROL;
- buf[1] = led;
- buf[2] = wacom->led.llv;
- buf[3] = wacom->led.hlv;
- buf[4] = wacom->led.img_lum;
+ if (wacom->wacom_wac.features.type >= INTUOS5S &&
+ wacom->wacom_wac.features.type <= INTUOS5L) {
+ /*
+ * Touch Ring and crop mark LED luminance may take on
+ * one of four values:
+ * 0 = Low; 1 = Medium; 2 = High; 3 = Off
+ */
+ int ring_led = wacom->led.select[0] & 0x03;
+ int ring_lum = (((wacom->led.llv & 0x60) >> 5) - 1) & 0x03;
+ int crop_lum = 0;
+
+ buf[0] = WAC_CMD_LED_CONTROL;
+ buf[1] = (crop_lum << 4) | (ring_lum << 2) | (ring_led);
+ }
+ else {
+ int led = wacom->led.select[0] | 0x4;
+
+ if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
+ wacom->wacom_wac.features.type == WACOM_24HD)
+ led |= (wacom->led.select[1] << 4) | 0x40;
+
+ buf[0] = WAC_CMD_LED_CONTROL;
+ buf[1] = led;
+ buf[2] = wacom->led.llv;
+ buf[3] = wacom->led.hlv;
+ buf[4] = wacom->led.img_lum;
+ }
retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_LED_CONTROL,
buf, 9, WAC_CMD_RETRIES);
@@ -786,6 +843,17 @@ static struct attribute_group intuos4_led_attr_group = {
.attrs = intuos4_led_attrs,
};
+static struct attribute *intuos5_led_attrs[] = {
+ &dev_attr_status0_luminance.attr,
+ &dev_attr_status_led0_select.attr,
+ NULL
+};
+
+static struct attribute_group intuos5_led_attr_group = {
+ .name = "wacom_led",
+ .attrs = intuos5_led_attrs,
+};
+
static int wacom_initialize_leds(struct wacom *wacom)
{
int error;
@@ -815,6 +883,19 @@ static int wacom_initialize_leds(struct wacom *wacom)
&cintiq_led_attr_group);
break;
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
+ wacom->led.select[0] = 0;
+ wacom->led.select[1] = 0;
+ wacom->led.llv = 32;
+ wacom->led.hlv = 0;
+ wacom->led.img_lum = 0;
+
+ error = sysfs_create_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
+ break;
+
default:
return 0;
}
@@ -843,6 +924,13 @@ static void wacom_destroy_leds(struct wacom *wacom)
sysfs_remove_group(&wacom->intf->dev.kobj,
&cintiq_led_attr_group);
break;
+
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
+ sysfs_remove_group(&wacom->intf->dev.kobj,
+ &intuos5_led_attr_group);
+ break;
}
}
@@ -904,8 +992,10 @@ static int wacom_register_input(struct wacom *wacom)
int error;
input_dev = input_allocate_device();
- if (!input_dev)
- return -ENOMEM;
+ if (!input_dev) {
+ error = -ENOMEM;
+ goto fail1;
+ }
input_dev->name = wacom_wac->name;
input_dev->dev.parent = &intf->dev;
@@ -915,14 +1005,20 @@ static int wacom_register_input(struct wacom *wacom)
input_set_drvdata(input_dev, wacom);
wacom_wac->input = input_dev;
- wacom_setup_input_capabilities(input_dev, wacom_wac);
+ error = wacom_setup_input_capabilities(input_dev, wacom_wac);
+ if (error)
+ goto fail1;
error = input_register_device(input_dev);
- if (error) {
- input_free_device(input_dev);
- wacom_wac->input = NULL;
- }
+ if (error)
+ goto fail2;
+ return 0;
+
+fail2:
+ input_free_device(input_dev);
+ wacom_wac->input = NULL;
+fail1:
return error;
}
@@ -941,22 +1037,22 @@ static void wacom_wireless_work(struct work_struct *work)
wacom = usb_get_intfdata(usbdev->config->interface[1]);
if (wacom->wacom_wac.input)
input_unregister_device(wacom->wacom_wac.input);
- wacom->wacom_wac.input = 0;
+ wacom->wacom_wac.input = NULL;
/* Touch interface */
wacom = usb_get_intfdata(usbdev->config->interface[2]);
if (wacom->wacom_wac.input)
input_unregister_device(wacom->wacom_wac.input);
- wacom->wacom_wac.input = 0;
+ wacom->wacom_wac.input = NULL;
if (wacom_wac->pid == 0) {
- printk(KERN_INFO "wacom: wireless tablet disconnected\n");
+ dev_info(&wacom->intf->dev, "wireless tablet disconnected\n");
} else {
const struct usb_device_id *id = wacom_ids;
- printk(KERN_INFO
- "wacom: wireless tablet connected with PID %x\n",
- wacom_wac->pid);
+ dev_info(&wacom->intf->dev,
+ "wireless tablet connected with PID %x\n",
+ wacom_wac->pid);
while (id->match_flags) {
if (id->idVendor == USB_VENDOR_ID_WACOM &&
@@ -966,8 +1062,8 @@ static void wacom_wireless_work(struct work_struct *work)
}
if (!id->match_flags) {
- printk(KERN_INFO
- "wacom: ignorning unknown PID.\n");
+ dev_info(&wacom->intf->dev,
+ "ignoring unknown PID.\n");
return;
}
@@ -1038,11 +1134,33 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
endpoint = &intf->cur_altsetting->endpoint[0].desc;
- /* Retrieve the physical and logical size for OEM devices */
+ /* Retrieve the physical and logical size for touch devices */
error = wacom_retrieve_hid_descriptor(intf, features);
if (error)
goto fail3;
+ /*
+ * Intuos5 has no useful data about its touch interface in its
+ * HID descriptor. If this is the touch interface (wMaxPacketSize
+ * of WACOM_PKGLEN_BBTOUCH3), override the table values.
+ */
+ if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
+ features->device_type = BTN_TOOL_FINGER;
+ features->pktlen = WACOM_PKGLEN_BBTOUCH3;
+
+ features->x_phy =
+ (features->x_max * 100) / features->x_resolution;
+ features->y_phy =
+ (features->y_max * 100) / features->y_resolution;
+
+ features->x_max = 4096;
+ features->y_max = 4096;
+ } else {
+ features->device_type = BTN_TOOL_PEN;
+ }
+ }
+
wacom_setup_device_quirks(features);
strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
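As a quick standalone illustration of the Intuos5 LED packing introduced in wacom_led_control() above (crop-mark luminance in bits 5:4, ring luminance in bits 3:2, ring LED select in bits 1:0), here is a minimal sketch outside the driver; the function name and input values are hypothetical:

#include <stdio.h>

/* Pack the Intuos5 LED control byte the way the hunk above builds buf[1]:
 * crop luminance << 4 | ring luminance << 2 | ring LED select.
 * Luminance values: 0 = Low, 1 = Medium, 2 = High, 3 = Off. */
static unsigned char pack_intuos5_led(int ring_led, int ring_lum, int crop_lum)
{
	return (unsigned char)(((crop_lum & 0x03) << 4) |
			       ((ring_lum & 0x03) << 2) |
			       (ring_led & 0x03));
}

int main(void)
{
	/* hypothetical example: ring LED 2 selected, medium luminance, crop off */
	printf("buf[1] = 0x%02x\n", pack_intuos5_led(2, 1, 0));
	return 0;
}

With ring_led=2, ring_lum=1 and crop_lum=0 this prints buf[1] = 0x06, i.e. ring LED 2 lit at medium luminance with the crop-mark LEDs dark.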
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b327790e9a0c..004bc1bb1544 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -61,7 +61,8 @@ static int wacom_penpartner_irq(struct wacom_wac *wacom)
break;
default:
- printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
return 0;
}
@@ -76,8 +77,8 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
int prox, pressure;
if (data[0] != WACOM_REPORT_PENABLED) {
- dev_dbg(&input->dev,
- "wacom_pl_irq: received unknown report #%d\n", data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
return 0;
}
@@ -147,7 +148,8 @@ static int wacom_ptu_irq(struct wacom_wac *wacom)
struct input_dev *input = wacom->input;
if (data[0] != WACOM_REPORT_PENABLED) {
- printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
return 0;
}
@@ -176,7 +178,8 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
struct input_dev *input = wacom->input;
int prox = data[1] & 0x20, pressure;
- dev_dbg(&input->dev, "wacom_dtu_irq: received report #%d\n", data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received report #%d", __func__, data[0]);
if (prox) {
/* Going into proximity select tool */
@@ -212,9 +215,8 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
int retval = 0;
if (data[0] != WACOM_REPORT_PENABLED) {
- dev_dbg(&input->dev,
- "wacom_graphire_irq: received unknown report #%d\n",
- data[0]);
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
goto exit;
}
@@ -324,6 +326,9 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
+ if (features->type >= INTUOS5S && features->type <= INTUOS5L)
+ wacom->shared->stylus_in_proximity = true;
+
/* serial number of the tool */
wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
(data[4] << 20) + (data[5] << 12) +
@@ -409,6 +414,9 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
/* Exit report */
if ((data[1] & 0xfe) == 0x80) {
+ if (features->type >= INTUOS5S && features->type <= INTUOS5L)
+ wacom->shared->stylus_in_proximity = false;
+
/*
* Reset all states otherwise we lose the initial states
* when in-prox next time
@@ -455,6 +463,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
+ (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
features->type == WACOM_21UX2 || features->type == WACOM_24HD) {
t = (t << 1) | (data[1] & 1);
}
@@ -485,11 +494,13 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
unsigned int t;
int idx = 0, result;
- if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_INTUOSREAD
- && data[0] != WACOM_REPORT_INTUOSWRITE && data[0] != WACOM_REPORT_INTUOSPAD) {
- dev_dbg(&input->dev,
- "wacom_intuos_irq: received unknown report #%d\n",
- data[0]);
+ if (data[0] != WACOM_REPORT_PENABLED &&
+ data[0] != WACOM_REPORT_INTUOSREAD &&
+ data[0] != WACOM_REPORT_INTUOSWRITE &&
+ data[0] != WACOM_REPORT_INTUOSPAD &&
+ data[0] != WACOM_REPORT_INTUOS5PAD) {
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__, data[0]);
return 0;
}
@@ -498,7 +509,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
idx = data[1] & 0x01;
/* pad packets. Works as a second tool and is always in prox */
- if (data[0] == WACOM_REPORT_INTUOSPAD) {
+ if (data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD) {
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
input_report_key(input, BTN_0, (data[2] & 0x01));
input_report_key(input, BTN_1, (data[3] & 0x01));
@@ -574,6 +585,34 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
+ } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
+ int i;
+
+ /* Touch ring mode switch has no capacitive sensor */
+ input_report_key(input, BTN_0, (data[3] & 0x01));
+
+ /*
+ * ExpressKeys on Intuos5 have a capacitive sensor in
+ * addition to the mechanical switch. Switch data is
+ * stored in data[4], capacitive data in data[5].
+ */
+ for (i = 0; i < 8; i++)
+ input_report_key(input, BTN_1 + i, data[4] & (1 << i));
+
+ if (data[2] & 0x80) {
+ input_report_abs(input, ABS_WHEEL, (data[2] & 0x7f));
+ } else {
+ /* Out of proximity, clear wheel value. */
+ input_report_abs(input, ABS_WHEEL, 0);
+ }
+
+ if (data[2] | (data[3] & 0x01) | data[4]) {
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
+ }
} else {
if (features->type == WACOM_21UX2) {
input_report_key(input, BTN_0, (data[5] & 0x01));
@@ -637,7 +676,9 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
(features->type == INTUOS3 ||
features->type == INTUOS3S ||
features->type == INTUOS4 ||
- features->type == INTUOS4S)) {
+ features->type == INTUOS4S ||
+ features->type == INTUOS5 ||
+ features->type == INTUOS5S)) {
return 0;
}
@@ -690,7 +731,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
/* I4 mouse */
- if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
+ if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
+ (features->type >= INTUOS5S && features->type <= INTUOS5L)) {
input_report_key(input, BTN_LEFT, data[6] & 0x01);
input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
input_report_key(input, BTN_RIGHT, data[6] & 0x04);
@@ -717,7 +759,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
}
}
} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
- features->type == INTUOS4L) &&
+ features->type == INTUOS4L || features->type == INTUOS5L) &&
wacom->tool[idx] == BTN_TOOL_LENS) {
/* Lens cursor packets */
input_report_key(input, BTN_LEFT, data[8] & 0x01);
@@ -734,6 +776,72 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
return 1;
}
+static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
+{
+ int touch_max = wacom->features.touch_max;
+ int i;
+
+ if (!wacom->slots)
+ return -1;
+
+ for (i = 0; i < touch_max; ++i) {
+ if (wacom->slots[i] == contactid)
+ return i;
+ }
+ for (i = 0; i < touch_max; ++i) {
+ if (wacom->slots[i] == -1)
+ return i;
+ }
+ return -1;
+}
+
+static int wacom_mt_touch(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->input;
+ char *data = wacom->data;
+ int i;
+ int current_num_contacts = data[2];
+ int contacts_to_send = 0;
+
+ /*
+ * First packet resets the counter since only the first
+	 * packet in the series will have non-zero current_num_contacts.
+ */
+ if (current_num_contacts)
+ wacom->num_contacts_left = current_num_contacts;
+
+ /* There are at most 5 contacts per packet */
+ contacts_to_send = min(5, wacom->num_contacts_left);
+
+ for (i = 0; i < contacts_to_send; i++) {
+ int offset = (WACOM_BYTES_PER_MT_PACKET * i) + 3;
+ bool touch = data[offset] & 0x1;
+ int id = le16_to_cpup((__le16 *)&data[offset + 1]);
+ int slot = find_slot_from_contactid(wacom, id);
+
+ if (slot < 0)
+ continue;
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+ if (touch) {
+ int x = le16_to_cpup((__le16 *)&data[offset + 7]);
+ int y = le16_to_cpup((__le16 *)&data[offset + 9]);
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ }
+ wacom->slots[slot] = touch ? id : -1;
+ }
+
+ input_mt_report_pointer_emulation(input, true);
+
+ wacom->num_contacts_left -= contacts_to_send;
+ if (wacom->num_contacts_left < 0)
+ wacom->num_contacts_left = 0;
+
+ return 1;
+}
+
static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
@@ -772,6 +880,9 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
bool prox;
int x = 0, y = 0;
+ if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG)
+ return 0;
+
if (!wacom->shared->stylus_in_proximity) {
if (len == WACOM_PKGLEN_TPC1FG) {
prox = data[0] & 0x01;
@@ -835,15 +946,15 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
char *data = wacom->data;
- dev_dbg(&wacom->input->dev, "wacom_tpc_irq: received report #%d\n",
- data[0]);
+ dev_dbg(wacom->input->dev.parent,
+ "%s: received report #%d\n", __func__, data[0]);
switch (len) {
case WACOM_PKGLEN_TPC1FG:
- return wacom_tpc_single_touch(wacom, len);
+ return wacom_tpc_single_touch(wacom, len);
case WACOM_PKGLEN_TPC2FG:
- return wacom_tpc_mt_touch(wacom);
+ return wacom_tpc_mt_touch(wacom);
default:
switch (data[0]) {
@@ -852,6 +963,9 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
case WACOM_REPORT_TPCST:
return wacom_tpc_single_touch(wacom, len);
+ case WACOM_REPORT_TPCMT:
+ return wacom_mt_touch(wacom);
+
case WACOM_REPORT_PENABLED:
return wacom_tpc_pen(wacom);
}
@@ -1120,8 +1234,18 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_intuos_irq(wacom_wac);
break;
+ case INTUOS5S:
+ case INTUOS5:
+ case INTUOS5L:
+ if (len == WACOM_PKGLEN_BBTOUCH3)
+ sync = wacom_bpt3_touch(wacom_wac);
+ else
+ sync = wacom_intuos_irq(wacom_wac);
+ break;
+
case TABLETPC:
case TABLETPC2FG:
+ case MTSCREEN:
sync = wacom_tpc_irq(wacom_wac, len);
break;
@@ -1194,7 +1318,9 @@ void wacom_setup_device_quirks(struct wacom_features *features)
/* these device have multiple inputs */
if (features->type == TABLETPC || features->type == TABLETPC2FG ||
- features->type == BAMBOO_PT || features->type == WIRELESS)
+ features->type == BAMBOO_PT || features->type == WIRELESS ||
+ (features->type >= INTUOS5S && features->type <= INTUOS5L) ||
+ features->type == MTSCREEN)
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
/* quirk for bamboo touch with 2 low res touches */
@@ -1225,8 +1351,8 @@ static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
return (logical_max * 100) / physical_max;
}
-void wacom_setup_input_capabilities(struct input_dev *input_dev,
- struct wacom_wac *wacom_wac)
+int wacom_setup_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
{
struct wacom_features *features = &wacom_wac->features;
int i;
@@ -1361,6 +1487,50 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
wacom_setup_intuos(wacom_wac);
break;
+ case INTUOS5:
+ case INTUOS5L:
+ if (features->device_type == BTN_TOOL_PEN) {
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ }
+ /* fall through */
+
+ case INTUOS5S:
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ if (features->device_type == BTN_TOOL_PEN) {
+ for (i = 0; i < 7; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_DISTANCE, 0,
+ features->distance_max,
+ 0, 0);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+
+ wacom_setup_intuos(wacom_wac);
+ } else if (features->device_type == BTN_TOOL_FINGER) {
+ __clear_bit(ABS_MISC, input_dev->absbit);
+
+ __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+ __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
+
+ input_mt_init_slots(input_dev, features->touch_max);
+
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, 255, 0, 0);
+
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, features->x_max,
+ features->x_fuzz, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, features->y_max,
+ features->y_fuzz, 0);
+ }
+ break;
+
case INTUOS4:
case INTUOS4L:
__set_bit(BTN_7, input_dev->keybit);
@@ -1378,9 +1548,19 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
break;
case TABLETPC2FG:
+ case MTSCREEN:
if (features->device_type == BTN_TOOL_FINGER) {
- input_mt_init_slots(input_dev, 2);
+ wacom_wac->slots = kmalloc(features->touch_max *
+ sizeof(int),
+ GFP_KERNEL);
+ if (!wacom_wac->slots)
+ return -ENOMEM;
+
+ for (i = 0; i < features->touch_max; i++)
+ wacom_wac->slots[i] = -1;
+
+ input_mt_init_slots(input_dev, features->touch_max);
input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE,
0, MT_TOOL_MAX, 0, 0);
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
@@ -1435,6 +1615,7 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+ input_mt_init_slots(input_dev, features->touch_max);
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
__set_bit(BTN_TOOL_TRIPLETAP,
@@ -1442,13 +1623,9 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_QUADTAP,
input_dev->keybit);
- input_mt_init_slots(input_dev, 16);
-
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
0, 255, 0, 0);
- } else {
- input_mt_init_slots(input_dev, 2);
}
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
@@ -1468,6 +1645,7 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
}
break;
}
+ return 0;
}
static const struct wacom_features wacom_features_0x00 =
@@ -1635,6 +1813,24 @@ static const struct wacom_features wacom_features_0xBB =
static const struct wacom_features wacom_features_0xBC =
{ "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x26 =
+ { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
+ 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x27 =
+ { "Wacom Intuos5 touch M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
+ 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x28 =
+ { "Wacom Intuos5 touch L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
+ 63, INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+ .touch_max = 16 };
+static const struct wacom_features wacom_features_0x29 =
+ { "Wacom Intuos5 S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
+ 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0x2A =
+ { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
+ 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xF4 =
{ "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -1676,13 +1872,19 @@ static const struct wacom_features wacom_features_0x9F =
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xE2 =
{ "Wacom ISDv4 E2", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xE3 =
{ "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
+static const struct wacom_features wacom_features_0xE5 =
+ { "Wacom ISDv4 E5", WACOM_PKGLEN_MTOUCH, 26202, 16325, 255,
+ 0, MTSCREEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xEC =
{ "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1691,19 +1893,22 @@ static const struct wacom_features wacom_features_0x47 =
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x84 =
{ "Wacom Wireless Receiver", WACOM_PKGLEN_WIRELESS, 0, 0, 0,
- 0, WIRELESS, 0, 0 };
+ 0, WIRELESS, 0, 0, .touch_max = 16 };
static const struct wacom_features wacom_features_0xD0 =
{ "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD1 =
{ "Wacom Bamboo 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD2 =
{ "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD3 =
{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD4 =
{ "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1712,28 +1917,35 @@ static const struct wacom_features wacom_features_0xD5 =
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD6 =
{ "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD7 =
{ "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xD8 =
{ "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xDA =
{ "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static struct wacom_features wacom_features_0xDB =
{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 2 };
static const struct wacom_features wacom_features_0xDD =
{ "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xDE =
{ "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 16 };
static const struct wacom_features wacom_features_0xDF =
{ "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .touch_max = 16 };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1807,6 +2019,11 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xBA) },
{ USB_DEVICE_WACOM(0xBB) },
{ USB_DEVICE_WACOM(0xBC) },
+ { USB_DEVICE_WACOM(0x26) },
+ { USB_DEVICE_WACOM(0x27) },
+ { USB_DEVICE_WACOM(0x28) },
+ { USB_DEVICE_WACOM(0x29) },
+ { USB_DEVICE_WACOM(0x2A) },
{ USB_DEVICE_WACOM(0x3F) },
{ USB_DEVICE_WACOM(0xC5) },
{ USB_DEVICE_WACOM(0xC6) },
@@ -1842,6 +2059,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x9F) },
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
+ { USB_DEVICE_WACOM(0xE5) },
{ USB_DEVICE_WACOM(0xE6) },
{ USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index ba5a334e54d6..78fbd3f42009 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -25,6 +25,10 @@
#define WACOM_PKGLEN_BBTOUCH3 64
#define WACOM_PKGLEN_BBPEN 10
#define WACOM_PKGLEN_WIRELESS 32
+#define WACOM_PKGLEN_MTOUCH 62
+
+/* wacom data size per MT contact */
+#define WACOM_BYTES_PER_MT_PACKET 11
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
@@ -38,8 +42,10 @@
#define WACOM_REPORT_INTUOSREAD 5
#define WACOM_REPORT_INTUOSWRITE 6
#define WACOM_REPORT_INTUOSPAD 12
+#define WACOM_REPORT_INTUOS5PAD 3
#define WACOM_REPORT_TPC1FG 6
#define WACOM_REPORT_TPC2FG 13
+#define WACOM_REPORT_TPCMT 13
#define WACOM_REPORT_TPCHID 15
#define WACOM_REPORT_TPCST 16
@@ -65,6 +71,9 @@ enum {
INTUOS4S,
INTUOS4,
INTUOS4L,
+ INTUOS5S,
+ INTUOS5,
+ INTUOS5L,
WACOM_24HD,
WACOM_21UX2,
CINTIQ,
@@ -72,6 +81,7 @@ enum {
WACOM_MO,
TABLETPC,
TABLETPC2FG,
+ MTSCREEN,
MAX_TYPE
};
@@ -95,6 +105,7 @@ struct wacom_features {
int pressure_fuzz;
int distance_fuzz;
unsigned quirks;
+ unsigned touch_max;
};
struct wacom_shared {
@@ -113,6 +124,8 @@ struct wacom_wac {
struct input_dev *input;
int pid;
int battery_capacity;
+ int num_contacts_left;
+ int *slots;
};
#endif
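The multi-touch path added to wacom_wac.c keeps a per-device slots[] array, initialised to -1, and maps each hardware contact ID to a stable MT slot: reuse the slot that already tracks the ID, otherwise claim the first free one. A minimal userspace sketch of the same search follows; TOUCH_MAX and the contact IDs are hypothetical:

#include <stdio.h>

#define TOUCH_MAX 5			/* hypothetical touch_max */

static int slots[TOUCH_MAX] = { -1, -1, -1, -1, -1 };

/* Same search order as find_slot_from_contactid() above: first the slot
 * already tracking this contact, then the first free slot. */
static int find_slot(int contactid)
{
	int i;

	for (i = 0; i < TOUCH_MAX; i++)
		if (slots[i] == contactid)
			return i;
	for (i = 0; i < TOUCH_MAX; i++)
		if (slots[i] == -1)
			return i;
	return -1;			/* no free slot */
}

int main(void)
{
	int events[] = { 7, 9, 7 };	/* contact IDs as they arrive */
	int i, slot;

	for (i = 0; i < 3; i++) {
		slot = find_slot(events[i]);
		if (slot >= 0)
			slots[slot] = events[i];	/* mark slot in use */
		printf("contact %d -> slot %d\n", events[i], slot);
	}
	return 0;
}

Contact 7 lands in slot 0 both times, which is what lets input_mt_report_slot_state() keep reporting the same finger in the same slot across packets.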
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 75838d7710ce..98d263504eea 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -187,6 +187,23 @@ config TOUCHSCREEN_DA9034
Say Y here to enable the support for the touchscreen found
on Dialog Semiconductor DA9034 PMIC.
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da9034-ts.
+
+config TOUCHSCREEN_DA9052
+ tristate "Dialog DA9052/DA9053 TSI"
+ depends on PMIC_DA9052
+ help
+ Say Y here to support the touchscreen found on Dialog Semiconductor
+ DA9052-BC and DA9053-AA/Bx PMICs.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called da9052_tsi.
+
config TOUCHSCREEN_DYNAPRO
tristate "Dynapro serial touchscreen"
select SERIO
@@ -306,6 +323,18 @@ config TOUCHSCREEN_WACOM_W8001
To compile this driver as a module, choose M here: the
module will be called wacom_w8001.
+config TOUCHSCREEN_WACOM_I2C
+ tristate "Wacom Tablet support (I2C)"
+ depends on I2C
+ help
+ Say Y here if you want to use the I2C version of the Wacom
+ Pen Tablet.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called wacom_i2c.
+
config TOUCHSCREEN_LPC32XX
tristate "LPC32XX touchscreen controller"
depends on ARCH_LPC32XX
@@ -635,6 +664,7 @@ config TOUCHSCREEN_USB_COMPOSITE
- Zytronic controllers
- Elo TouchSystems 2700 IntelliTouch
- EasyTouch USB Touch Controller from Data Modul
+ - e2i (Mimo monitors)
Have a look at <http://linux.chapter7.ch/touchkit/> for
a usage description and the required user-space stuff.
@@ -721,7 +751,7 @@ config TOUCHSCREEN_USB_ELO
config TOUCHSCREEN_USB_E2I
default y
- bool "e2i Touchscreen controller (e.g. from Mimo 740)"
+ bool "e2i Touchscreen controller (e.g. from Mimo 740)" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
config TOUCHSCREEN_USB_ZYTRONIC
@@ -744,7 +774,7 @@ config TOUCHSCREEN_USB_EASYTOUCH
bool "EasyTouch USB Touch controller device support" if EMBEDDED
depends on TOUCHSCREEN_USB_COMPOSITE
help
- Say Y here if you have a EasyTouch USB Touch controller device support.
+ Say Y here if you have an EasyTouch USB Touch controller.
If unsure, say N.
config TOUCHSCREEN_TOUCHIT213
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 3d5cf8cbf89c..eb8bfe1c1a46 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o
obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
+obj-$(CONFIG_TOUCHSCREEN_DA9052) += da9052_tsi.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_TOUCHSCREEN_TSC2005) += tsc2005.o
obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
+obj-$(CONFIG_TOUCHSCREEN_WACOM_I2C) += wacom_i2c.o
obj-$(CONFIG_TOUCHSCREEN_WM831X) += wm831x-ts.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX) += wm97xx-ts.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index e2482b40da51..bd4eb4277697 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -597,7 +597,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
AD7879_TMR(ts->pen_down_acc_interval);
err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), ts);
if (err) {
dev_err(dev, "irq %d busy?\n", ts->irq);
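This and the following conversions add IRQF_ONESHOT wherever request_threaded_irq() is called with a NULL primary handler: the genirq core keeps the line masked until the thread finishes, and it rejects such a request when the flag is missing. The call pattern looks like the sketch below; the handler, device and cookie names (my_irq_thread, dev, priv) are hypothetical:

	err = request_threaded_irq(irq, NULL, my_irq_thread,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   "my-device", priv);
	if (err)
		dev_err(dev, "failed to request irq %d: %d\n", irq, err);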
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 19d4ea65ea01..25fd0561a17d 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -236,7 +236,6 @@ struct mxt_object {
struct mxt_message {
u8 reportid;
u8 message[7];
- u8 checksum;
};
struct mxt_finger {
@@ -326,17 +325,12 @@ static bool mxt_object_writable(unsigned int type)
}
static void mxt_dump_message(struct device *dev,
- struct mxt_message *message)
+ struct mxt_message *message)
{
- dev_dbg(dev, "reportid:\t0x%x\n", message->reportid);
- dev_dbg(dev, "message1:\t0x%x\n", message->message[0]);
- dev_dbg(dev, "message2:\t0x%x\n", message->message[1]);
- dev_dbg(dev, "message3:\t0x%x\n", message->message[2]);
- dev_dbg(dev, "message4:\t0x%x\n", message->message[3]);
- dev_dbg(dev, "message5:\t0x%x\n", message->message[4]);
- dev_dbg(dev, "message6:\t0x%x\n", message->message[5]);
- dev_dbg(dev, "message7:\t0x%x\n", message->message[6]);
- dev_dbg(dev, "checksum:\t0x%x\n", message->checksum);
+ dev_dbg(dev, "reportid: %u\tmessage: %02x %02x %02x %02x %02x %02x %02x\n",
+ message->reportid, message->message[0], message->message[1],
+ message->message[2], message->message[3], message->message[4],
+ message->message[5], message->message[6]);
}
static int mxt_check_bootloader(struct i2c_client *client,
@@ -506,7 +500,7 @@ static int mxt_write_object(struct mxt_data *data,
u16 reg;
object = mxt_get_object(data, type);
- if (!object)
+ if (!object || offset >= object->size + 1)
return -EINVAL;
reg = object->start_address;
@@ -1049,8 +1043,8 @@ static ssize_t mxt_update_fw_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(object, 0444, mxt_object_show, NULL);
-static DEVICE_ATTR(update_fw, 0664, NULL, mxt_update_fw_store);
+static DEVICE_ATTR(object, S_IRUGO, mxt_object_show, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, mxt_update_fw_store);
static struct attribute *mxt_attrs[] = {
&dev_attr_object.attr,
@@ -1155,7 +1149,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
goto err_free_object;
error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
- pdata->irqflags, client->dev.driver->name, data);
+ pdata->irqflags | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_object;
@@ -1201,7 +1196,7 @@ static int __devexit mxt_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int mxt_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1239,13 +1234,10 @@ static int mxt_resume(struct device *dev)
return 0;
}
-
-static const struct dev_pm_ops mxt_pm_ops = {
- .suspend = mxt_suspend,
- .resume = mxt_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
+
static const struct i2c_device_id mxt_id[] = {
{ "qt602240_ts", 0 },
{ "atmel_mxt_ts", 0 },
@@ -1258,9 +1250,7 @@ static struct i2c_driver mxt_driver = {
.driver = {
.name = "atmel_mxt_ts",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &mxt_pm_ops,
-#endif
},
.probe = mxt_probe,
.remove = __devexit_p(mxt_remove),
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index f2d03c06c2da..5c487d23f11c 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -509,7 +509,8 @@ static int __devinit bu21013_probe(struct i2c_client *client,
input_set_drvdata(in_dev, bu21013_data);
error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
- IRQF_TRIGGER_FALLING | IRQF_SHARED,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED |
+ IRQF_ONESHOT,
DRIVER_TP, bu21013_data);
if (error) {
dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 237753ad1031..464f1bf4b61d 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -251,7 +251,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
}
err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
- IRQF_TRIGGER_RISING, "touch_reset_key", ts);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "touch_reset_key", ts);
if (err < 0) {
dev_err(&client->dev,
"irq %d busy? error %d\n", client->irq, err);
diff --git a/drivers/input/touchscreen/da9052_tsi.c b/drivers/input/touchscreen/da9052_tsi.c
new file mode 100644
index 000000000000..e8df341090c0
--- /dev/null
+++ b/drivers/input/touchscreen/da9052_tsi.c
@@ -0,0 +1,370 @@
+/*
+ * TSI driver for Dialog DA9052
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
+#define TSI_PEN_DOWN_STATUS 0x40
+
+struct da9052_tsi {
+ struct da9052 *da9052;
+ struct input_dev *dev;
+ struct delayed_work ts_pen_work;
+ struct mutex mutex;
+ unsigned int irq_pendwn;
+ unsigned int irq_datardy;
+ bool stopped;
+ bool adc_on;
+};
+
+static void da9052_ts_adc_toggle(struct da9052_tsi *tsi, bool on)
+{
+ da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 0, on);
+ tsi->adc_on = on;
+}
+
+static irqreturn_t da9052_ts_pendwn_irq(int irq, void *data)
+{
+ struct da9052_tsi *tsi = data;
+
+ if (!tsi->stopped) {
+ /* Mask PEN_DOWN event and unmask TSI_READY event */
+ disable_irq_nosync(tsi->irq_pendwn);
+ enable_irq(tsi->irq_datardy);
+
+ da9052_ts_adc_toggle(tsi, true);
+
+ schedule_delayed_work(&tsi->ts_pen_work, HZ / 50);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void da9052_ts_read(struct da9052_tsi *tsi)
+{
+ struct input_dev *input = tsi->dev;
+ int ret;
+ u16 x, y, z;
+ u8 v;
+
+ ret = da9052_reg_read(tsi->da9052, DA9052_TSI_X_MSB_REG);
+ if (ret < 0)
+ return;
+
+ x = (u16) ret;
+
+ ret = da9052_reg_read(tsi->da9052, DA9052_TSI_Y_MSB_REG);
+ if (ret < 0)
+ return;
+
+ y = (u16) ret;
+
+ ret = da9052_reg_read(tsi->da9052, DA9052_TSI_Z_MSB_REG);
+ if (ret < 0)
+ return;
+
+ z = (u16) ret;
+
+ ret = da9052_reg_read(tsi->da9052, DA9052_TSI_LSB_REG);
+ if (ret < 0)
+ return;
+
+ v = (u8) ret;
+
+ x = ((x << 2) & 0x3fc) | (v & 0x3);
+ y = ((y << 2) & 0x3fc) | ((v & 0xc) >> 2);
+ z = ((z << 2) & 0x3fc) | ((v & 0x30) >> 4);
+
+ input_report_key(input, BTN_TOUCH, 1);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_PRESSURE, z);
+ input_sync(input);
+}
+
+static irqreturn_t da9052_ts_datardy_irq(int irq, void *data)
+{
+ struct da9052_tsi *tsi = data;
+
+ da9052_ts_read(tsi);
+
+ return IRQ_HANDLED;
+}
+
+static void da9052_ts_pen_work(struct work_struct *work)
+{
+ struct da9052_tsi *tsi = container_of(work, struct da9052_tsi,
+ ts_pen_work.work);
+ if (!tsi->stopped) {
+ int ret = da9052_reg_read(tsi->da9052, DA9052_TSI_LSB_REG);
+ if (ret < 0 || (ret & TSI_PEN_DOWN_STATUS)) {
+ /* Pen is still DOWN (or read error) */
+ schedule_delayed_work(&tsi->ts_pen_work, HZ / 50);
+ } else {
+ struct input_dev *input = tsi->dev;
+
+ /* Pen UP */
+ da9052_ts_adc_toggle(tsi, false);
+
+ /* Report Pen UP */
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_sync(input);
+
+ /*
+			 * FIXME: Fixes the unhandled IRQ issue when quick
+			 * pen-down and pen-up events occur
+ */
+ ret = da9052_reg_update(tsi->da9052,
+ DA9052_EVENT_B_REG, 0xC0, 0xC0);
+ if (ret < 0)
+ return;
+
+ /* Mask TSI_READY event and unmask PEN_DOWN event */
+ disable_irq(tsi->irq_datardy);
+ enable_irq(tsi->irq_pendwn);
+ }
+ }
+}
+
+static int __devinit da9052_ts_configure_gpio(struct da9052 *da9052)
+{
+ int error;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_2_3_REG, 0x30, 0);
+ if (error < 0)
+ return error;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_4_5_REG, 0x33, 0);
+ if (error < 0)
+ return error;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_6_7_REG, 0x33, 0);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static int __devinit da9052_configure_tsi(struct da9052_tsi *tsi)
+{
+ int error;
+
+ error = da9052_ts_configure_gpio(tsi->da9052);
+ if (error)
+ return error;
+
+ /* Measure TSI sample every 1ms */
+ error = da9052_reg_update(tsi->da9052, DA9052_ADC_CONT_REG,
+ 1 << 6, 1 << 6);
+ if (error < 0)
+ return error;
+
+ /* TSI_DELAY: 3 slots, TSI_SKIP: 0 slots, TSI_MODE: XYZP */
+ error = da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 0xFC, 0xC0);
+ if (error < 0)
+ return error;
+
+ /* Supply TSIRef through LD09 */
+ error = da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x59);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static int da9052_ts_input_open(struct input_dev *input_dev)
+{
+ struct da9052_tsi *tsi = input_get_drvdata(input_dev);
+
+ tsi->stopped = false;
+ mb();
+
+ /* Unmask PEN_DOWN event */
+ enable_irq(tsi->irq_pendwn);
+
+ /* Enable Pen Detect Circuit */
+ return da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG,
+ 1 << 1, 1 << 1);
+}
+
+static void da9052_ts_input_close(struct input_dev *input_dev)
+{
+ struct da9052_tsi *tsi = input_get_drvdata(input_dev);
+
+ tsi->stopped = true;
+ mb();
+ disable_irq(tsi->irq_pendwn);
+ cancel_delayed_work_sync(&tsi->ts_pen_work);
+
+ if (tsi->adc_on) {
+ disable_irq(tsi->irq_datardy);
+ da9052_ts_adc_toggle(tsi, false);
+
+ /*
+ * If ADC was on that means that pendwn IRQ was disabled
+ * twice and we need to enable it to keep enable/disable
+ * counter balanced. IRQ is still off though.
+ */
+ enable_irq(tsi->irq_pendwn);
+ }
+
+ /* Disable Pen Detect Circuit */
+ da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);
+}
+
+static int __devinit da9052_ts_probe(struct platform_device *pdev)
+{
+ struct da9052 *da9052;
+ struct da9052_tsi *tsi;
+ struct input_dev *input_dev;
+ int irq_pendwn;
+ int irq_datardy;
+ int error;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ if (!da9052)
+ return -EINVAL;
+
+ irq_pendwn = platform_get_irq_byname(pdev, "PENDWN");
+ irq_datardy = platform_get_irq_byname(pdev, "TSIRDY");
+ if (irq_pendwn < 0 || irq_datardy < 0) {
+ dev_err(da9052->dev, "Unable to determine device interrupts\n");
+ return -ENXIO;
+ }
+
+ tsi = kzalloc(sizeof(struct da9052_tsi), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!tsi || !input_dev) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tsi->da9052 = da9052;
+ tsi->dev = input_dev;
+ tsi->irq_pendwn = da9052->irq_base + irq_pendwn;
+ tsi->irq_datardy = da9052->irq_base + irq_datardy;
+ tsi->stopped = true;
+ INIT_DELAYED_WORK(&tsi->ts_pen_work, da9052_ts_pen_work);
+
+ input_dev->id.version = 0x0101;
+ input_dev->id.vendor = 0x15B6;
+ input_dev->id.product = 0x9052;
+ input_dev->name = "Dialog DA9052 TouchScreen Driver";
+ input_dev->dev.parent = &pdev->dev;
+ input_dev->open = da9052_ts_input_open;
+ input_dev->close = da9052_ts_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, 1023, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 1023, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 1023, 0, 0);
+
+ input_set_drvdata(input_dev, tsi);
+
+ /* Disable Pen Detect Circuit */
+ da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG, 1 << 1, 0);
+
+ /* Disable ADC */
+ da9052_ts_adc_toggle(tsi, false);
+
+ error = request_threaded_irq(tsi->irq_pendwn,
+ NULL, da9052_ts_pendwn_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "PENDWN", tsi);
+ if (error) {
+ dev_err(tsi->da9052->dev,
+ "Failed to register PENDWN IRQ %d, error = %d\n",
+ tsi->irq_pendwn, error);
+ goto err_free_mem;
+ }
+
+ error = request_threaded_irq(tsi->irq_datardy,
+ NULL, da9052_ts_datardy_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "TSIRDY", tsi);
+ if (error) {
+ dev_err(tsi->da9052->dev,
+ "Failed to register TSIRDY IRQ %d, error = %d\n",
+ tsi->irq_datardy, error);
+ goto err_free_pendwn_irq;
+ }
+
+ /* Mask PEN_DOWN and TSI_READY events */
+ disable_irq(tsi->irq_pendwn);
+ disable_irq(tsi->irq_datardy);
+
+ error = da9052_configure_tsi(tsi);
+ if (error)
+ goto err_free_datardy_irq;
+
+ error = input_register_device(tsi->dev);
+ if (error)
+ goto err_free_datardy_irq;
+
+ platform_set_drvdata(pdev, tsi);
+
+ return 0;
+
+err_free_datardy_irq:
+ free_irq(tsi->irq_datardy, tsi);
+err_free_pendwn_irq:
+ free_irq(tsi->irq_pendwn, tsi);
+err_free_mem:
+ kfree(tsi);
+ input_free_device(input_dev);
+
+ return error;
+}
+
+static int __devexit da9052_ts_remove(struct platform_device *pdev)
+{
+ struct da9052_tsi *tsi = platform_get_drvdata(pdev);
+
+ da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x19);
+
+ free_irq(tsi->irq_pendwn, tsi);
+ free_irq(tsi->irq_datardy, tsi);
+
+ input_unregister_device(tsi->dev);
+ kfree(tsi);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver da9052_tsi_driver = {
+ .probe = da9052_ts_probe,
+ .remove = __devexit_p(da9052_ts_remove),
+ .driver = {
+ .name = "da9052-tsi",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(da9052_tsi_driver);
+
+MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9052");
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-tsi");
diff --git a/drivers/input/touchscreen/dynapro.c b/drivers/input/touchscreen/dynapro.c
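da9052_ts_read() above assembles 10-bit X/Y/Z samples from one 8-bit MSB register per axis plus a shared LSB register that carries the two low bits of each axis. A small sketch of that bit assembly, using made-up register values:

#include <stdio.h>

/* Combine an 8-bit MSB with 2 low bits from the shared LSB register,
 * as da9052_ts_read() does above.  lsb_shift selects which bit pair of
 * the LSB register belongs to this axis (0 for X, 2 for Y, 4 for Z). */
static unsigned int da9052_sample(unsigned char msb, unsigned char lsb,
				  int lsb_shift)
{
	return (((unsigned int)msb << 2) & 0x3fc) | ((lsb >> lsb_shift) & 0x3);
}

int main(void)
{
	/* hypothetical register reads */
	unsigned char x_msb = 0xA5, y_msb = 0x3C, z_msb = 0x12, lsb = 0x27;

	printf("x=%u y=%u z=%u\n",
	       da9052_sample(x_msb, lsb, 0),
	       da9052_sample(y_msb, lsb, 2),
	       da9052_sample(z_msb, lsb, 4));
	return 0;
}

The shift of 0, 2 or 4 picks out the same bit pairs as the (v & 0x3), (v & 0xc) >> 2 and (v & 0x30) >> 4 terms in the driver.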
index 455353908bdf..1809677a6513 100644
--- a/drivers/input/touchscreen/dynapro.c
+++ b/drivers/input/touchscreen/dynapro.c
@@ -188,19 +188,4 @@ static struct serio_driver dynapro_drv = {
.disconnect = dynapro_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init dynapro_init(void)
-{
- return serio_register_driver(&dynapro_drv);
-}
-
-static void __exit dynapro_exit(void)
-{
- serio_unregister_driver(&dynapro_drv);
-}
-
-module_init(dynapro_init);
-module_exit(dynapro_exit);
+module_serio_driver(dynapro_drv);
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 486d31ba9c09..957423d1471d 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -405,19 +405,4 @@ static struct serio_driver elo_drv = {
.disconnect = elo_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init elo_init(void)
-{
- return serio_register_driver(&elo_drv);
-}
-
-static void __exit elo_exit(void)
-{
- serio_unregister_driver(&elo_drv);
-}
-
-module_init(elo_init);
-module_exit(elo_exit);
+module_serio_driver(elo_drv);
diff --git a/drivers/input/touchscreen/fujitsu_ts.c b/drivers/input/touchscreen/fujitsu_ts.c
index 80b21800355f..10794ddbdf58 100644
--- a/drivers/input/touchscreen/fujitsu_ts.c
+++ b/drivers/input/touchscreen/fujitsu_ts.c
@@ -175,15 +175,4 @@ static struct serio_driver fujitsu_drv = {
.disconnect = fujitsu_disconnect,
};
-static int __init fujitsu_init(void)
-{
- return serio_register_driver(&fujitsu_drv);
-}
-
-static void __exit fujitsu_exit(void)
-{
- serio_unregister_driver(&fujitsu_drv);
-}
-
-module_init(fujitsu_init);
-module_exit(fujitsu_exit);
+module_serio_driver(fujitsu_drv);
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index a54f90e02ab6..41c71766bf18 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -186,19 +186,4 @@ static struct serio_driver gunze_drv = {
.disconnect = gunze_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init gunze_init(void)
-{
- return serio_register_driver(&gunze_drv);
-}
-
-static void __exit gunze_exit(void)
-{
- serio_unregister_driver(&gunze_drv);
-}
-
-module_init(gunze_init);
-module_exit(gunze_exit);
+module_serio_driver(gunze_drv);
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index 6107e563e681..b9e8686a6f1c 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -476,19 +476,4 @@ static struct serio_driver h3600ts_drv = {
.disconnect = h3600ts_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init h3600ts_init(void)
-{
- return serio_register_driver(&h3600ts_drv);
-}
-
-static void __exit h3600ts_exit(void)
-{
- serio_unregister_driver(&h3600ts_drv);
-}
-
-module_init(h3600ts_init);
-module_exit(h3600ts_exit);
+module_serio_driver(h3600ts_drv);
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c
index 2da6cc31bb21..0cc47ea98acf 100644
--- a/drivers/input/touchscreen/hampshire.c
+++ b/drivers/input/touchscreen/hampshire.c
@@ -187,19 +187,4 @@ static struct serio_driver hampshire_drv = {
.disconnect = hampshire_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init hampshire_init(void)
-{
- return serio_register_driver(&hampshire_drv);
-}
-
-static void __exit hampshire_exit(void)
-{
- serio_unregister_driver(&hampshire_drv);
-}
-
-module_init(hampshire_init);
-module_exit(hampshire_exit);
+module_serio_driver(hampshire_drv);
diff --git a/drivers/input/touchscreen/inexio.c b/drivers/input/touchscreen/inexio.c
index 192ade0a0fb9..a29c99c32245 100644
--- a/drivers/input/touchscreen/inexio.c
+++ b/drivers/input/touchscreen/inexio.c
@@ -189,19 +189,4 @@ static struct serio_driver inexio_drv = {
.disconnect = inexio_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init inexio_init(void)
-{
- return serio_register_driver(&inexio_drv);
-}
-
-static void __exit inexio_exit(void)
-{
- serio_unregister_driver(&inexio_drv);
-}
-
-module_init(inexio_init);
-module_exit(inexio_exit);
+module_serio_driver(inexio_drv);
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 3cd7a837f82b..cf299377fc49 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -620,7 +620,7 @@ static int __devinit mrstouch_probe(struct platform_device *pdev)
MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
- 0, "mrstouch", tsdev);
+ IRQF_ONESHOT, "mrstouch", tsdev);
if (err) {
dev_err(tsdev->dev, "unable to allocate irq\n");
goto err_free_mem;
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index afcd0691ec67..4c2b8ed3bf16 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -22,6 +22,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
/*
* Touchscreen controller register offsets
@@ -383,6 +384,14 @@ static const struct dev_pm_ops lpc32xx_ts_pm_ops = {
#define LPC32XX_TS_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static struct of_device_id lpc32xx_tsc_of_match[] = {
+ { .compatible = "nxp,lpc3220-tsc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_tsc_of_match);
+#endif
+
static struct platform_driver lpc32xx_ts_driver = {
.probe = lpc32xx_ts_probe,
.remove = __devexit_p(lpc32xx_ts_remove),
@@ -390,6 +399,7 @@ static struct platform_driver lpc32xx_ts_driver = {
.name = MOD_NAME,
.owner = THIS_MODULE,
.pm = LPC32XX_TS_PM_OPS,
+ .of_match_table = of_match_ptr(lpc32xx_tsc_of_match),
},
};
module_platform_driver(lpc32xx_ts_driver);
diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c
index 9077228418b7..eb66b7c37c2f 100644
--- a/drivers/input/touchscreen/mtouch.c
+++ b/drivers/input/touchscreen/mtouch.c
@@ -202,19 +202,4 @@ static struct serio_driver mtouch_drv = {
.disconnect = mtouch_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init mtouch_init(void)
-{
- return serio_register_driver(&mtouch_drv);
-}
-
-static void __exit mtouch_exit(void)
-{
- serio_unregister_driver(&mtouch_drv);
-}
-
-module_init(mtouch_init);
-module_exit(mtouch_exit);
+module_serio_driver(mtouch_drv);
diff --git a/drivers/input/touchscreen/penmount.c b/drivers/input/touchscreen/penmount.c
index 4c012fb2b01e..4ccde45b9da2 100644
--- a/drivers/input/touchscreen/penmount.c
+++ b/drivers/input/touchscreen/penmount.c
@@ -317,19 +317,4 @@ static struct serio_driver pm_drv = {
.disconnect = pm_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init pm_init(void)
-{
- return serio_register_driver(&pm_drv);
-}
-
-static void __exit pm_exit(void)
-{
- serio_unregister_driver(&pm_drv);
-}
-
-module_init(pm_init);
-module_exit(pm_exit);
+module_serio_driver(pm_drv);
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 72f6ba3a4709..953b4c105cad 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -165,7 +165,7 @@ static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
input_set_drvdata(input, tsdata);
error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, tsdata);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index cbbf71b22696..6cb68a1981bf 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -218,7 +218,7 @@ static int __devexit st1232_ts_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int st1232_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -243,18 +243,25 @@ static int st1232_ts_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops st1232_ts_pm_ops = {
- .suspend = st1232_ts_suspend,
- .resume = st1232_ts_resume,
-};
#endif
+static SIMPLE_DEV_PM_OPS(st1232_ts_pm_ops,
+ st1232_ts_suspend, st1232_ts_resume);
+
static const struct i2c_device_id st1232_ts_id[] = {
{ ST1232_TS_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
+#ifdef CONFIG_OF
+static const struct of_device_id st1232_ts_dt_ids[] __devinitconst = {
+ { .compatible = "sitronix,st1232", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
+#endif
+
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
.remove = __devexit_p(st1232_ts_remove),
@@ -262,9 +269,8 @@ static struct i2c_driver st1232_ts_driver = {
.driver = {
.name = ST1232_TS_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+ .of_match_table = of_match_ptr(st1232_ts_dt_ids),
.pm = &st1232_ts_pm_ops,
-#endif
},
};
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 7e7488097359..368d2c6cf780 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -297,7 +297,7 @@ static int __devinit tsc_probe(struct platform_device *pdev)
goto error_clk;
}
- error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+ error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
dev_name(dev), ts);
if (error < 0) {
dev_err(ts->dev, "Could not allocate ts irq\n");
diff --git a/drivers/input/touchscreen/touchit213.c b/drivers/input/touchscreen/touchit213.c
index d1297ba19daf..5f29e5b8e1c1 100644
--- a/drivers/input/touchscreen/touchit213.c
+++ b/drivers/input/touchscreen/touchit213.c
@@ -216,19 +216,4 @@ static struct serio_driver touchit213_drv = {
.disconnect = touchit213_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init touchit213_init(void)
-{
- return serio_register_driver(&touchit213_drv);
-}
-
-static void __exit touchit213_exit(void)
-{
- serio_unregister_driver(&touchit213_drv);
-}
-
-module_init(touchit213_init);
-module_exit(touchit213_exit);
+module_serio_driver(touchit213_drv);
diff --git a/drivers/input/touchscreen/touchright.c b/drivers/input/touchscreen/touchright.c
index 3a5c142c2a78..8a2887daf194 100644
--- a/drivers/input/touchscreen/touchright.c
+++ b/drivers/input/touchscreen/touchright.c
@@ -176,19 +176,4 @@ static struct serio_driver tr_drv = {
.disconnect = tr_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init tr_init(void)
-{
- return serio_register_driver(&tr_drv);
-}
-
-static void __exit tr_exit(void)
-{
- serio_unregister_driver(&tr_drv);
-}
-
-module_init(tr_init);
-module_exit(tr_exit);
+module_serio_driver(tr_drv);
diff --git a/drivers/input/touchscreen/touchwin.c b/drivers/input/touchscreen/touchwin.c
index 763a656a59f8..588cdcb839dd 100644
--- a/drivers/input/touchscreen/touchwin.c
+++ b/drivers/input/touchscreen/touchwin.c
@@ -183,19 +183,4 @@ static struct serio_driver tw_drv = {
.disconnect = tw_disconnect,
};
-/*
- * The functions for inserting/removing us as a module.
- */
-
-static int __init tw_init(void)
-{
- return serio_register_driver(&tw_drv);
-}
-
-static void __exit tw_exit(void)
-{
- serio_unregister_driver(&tw_drv);
-}
-
-module_init(tw_init);
-module_exit(tw_exit);
+module_serio_driver(tw_drv);
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b6adeaee9cc5..5ce3fa8ce646 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -650,7 +650,8 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
tsc2005_stop_scan(ts);
error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
- IRQF_TRIGGER_RISING, "tsc2005", ts);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "tsc2005", ts);
if (error) {
dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
goto err_free_mem;
diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
index 29d5ed4dd31c..63209aaa55f0 100644
--- a/drivers/input/touchscreen/tsc40.c
+++ b/drivers/input/touchscreen/tsc40.c
@@ -167,17 +167,7 @@ static struct serio_driver tsc_drv = {
.disconnect = tsc_disconnect,
};
-static int __init tsc_ser_init(void)
-{
- return serio_register_driver(&tsc_drv);
-}
-module_init(tsc_ser_init);
-
-static void __exit tsc_exit(void)
-{
- serio_unregister_driver(&tsc_drv);
-}
-module_exit(tsc_exit);
+module_serio_driver(tsc_drv);
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
new file mode 100644
index 000000000000..35572575d34a
--- /dev/null
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -0,0 +1,282 @@
+/*
+ * Wacom Penabled Driver for I2C
+ *
+ * Copyright (c) 2011 Tatsunosuke Tobita, Wacom.
+ * <tobita.tatsunosuke@wacom.co.jp>
+ *
+ * This program is free software; you can redistribute it
+ * and/or modify it under the terms of the GNU General
+ * Public License as published by the Free Software
+ * Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <asm/unaligned.h>
+
+#define WACOM_CMD_QUERY0 0x04
+#define WACOM_CMD_QUERY1 0x00
+#define WACOM_CMD_QUERY2 0x33
+#define WACOM_CMD_QUERY3 0x02
+#define WACOM_CMD_THROW0 0x05
+#define WACOM_CMD_THROW1 0x00
+#define WACOM_QUERY_SIZE 19
+#define WACOM_RETRY_CNT 100
+
+struct wacom_features {
+ int x_max;
+ int y_max;
+ int pressure_max;
+ char fw_version;
+};
+
+struct wacom_i2c {
+ struct i2c_client *client;
+ struct input_dev *input;
+ u8 data[WACOM_QUERY_SIZE];
+};
+
+static int wacom_query_device(struct i2c_client *client,
+ struct wacom_features *features)
+{
+ int ret;
+ u8 cmd1[] = { WACOM_CMD_QUERY0, WACOM_CMD_QUERY1,
+ WACOM_CMD_QUERY2, WACOM_CMD_QUERY3 };
+ u8 cmd2[] = { WACOM_CMD_THROW0, WACOM_CMD_THROW1 };
+ u8 data[WACOM_QUERY_SIZE];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmd1),
+ .buf = cmd1,
+ },
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmd2),
+ .buf = cmd2,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(data),
+ .buf = data,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ features->x_max = get_unaligned_le16(&data[3]);
+ features->y_max = get_unaligned_le16(&data[5]);
+ features->pressure_max = get_unaligned_le16(&data[11]);
+ features->fw_version = get_unaligned_le16(&data[13]);
+
+ dev_dbg(&client->dev,
+ "x_max:%d, y_max:%d, pressure:%d, fw:%d\n",
+ features->x_max, features->y_max,
+ features->pressure_max, features->fw_version);
+
+ return 0;
+}
+
+static irqreturn_t wacom_i2c_irq(int irq, void *dev_id)
+{
+ struct wacom_i2c *wac_i2c = dev_id;
+ struct input_dev *input = wac_i2c->input;
+ u8 *data = wac_i2c->data;
+ unsigned int x, y, pressure;
+ unsigned char tsw, f1, f2, ers;
+ int error;
+
+ error = i2c_master_recv(wac_i2c->client,
+ wac_i2c->data, sizeof(wac_i2c->data));
+ if (error < 0)
+ goto out;
+
+ tsw = data[3] & 0x01;
+ ers = data[3] & 0x04;
+ f1 = data[3] & 0x02;
+ f2 = data[3] & 0x10;
+ x = le16_to_cpup((__le16 *)&data[4]);
+ y = le16_to_cpup((__le16 *)&data[6]);
+ pressure = le16_to_cpup((__le16 *)&data[8]);
+
+ input_report_key(input, BTN_TOUCH, tsw || ers);
+ input_report_key(input, BTN_TOOL_PEN, tsw);
+ input_report_key(input, BTN_TOOL_RUBBER, ers);
+ input_report_key(input, BTN_STYLUS, f1);
+ input_report_key(input, BTN_STYLUS2, f2);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_sync(input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int wacom_i2c_open(struct input_dev *dev)
+{
+ struct wacom_i2c *wac_i2c = input_get_drvdata(dev);
+ struct i2c_client *client = wac_i2c->client;
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static void wacom_i2c_close(struct input_dev *dev)
+{
+ struct wacom_i2c *wac_i2c = input_get_drvdata(dev);
+ struct i2c_client *client = wac_i2c->client;
+
+ disable_irq(client->irq);
+}
+
+static int __devinit wacom_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct wacom_i2c *wac_i2c;
+ struct input_dev *input;
+ struct wacom_features features;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c_check_functionality error\n");
+ return -EIO;
+ }
+
+ error = wacom_query_device(client, &features);
+ if (error)
+ return error;
+
+ wac_i2c = kzalloc(sizeof(*wac_i2c), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!wac_i2c || !input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ wac_i2c->client = client;
+ wac_i2c->input = input;
+
+ input->name = "Wacom I2C Digitizer";
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x56a;
+ input->id.version = features.fw_version;
+ input->dev.parent = &client->dev;
+ input->open = wacom_i2c_open;
+ input->close = wacom_i2c_close;
+
+ input->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ __set_bit(BTN_TOOL_PEN, input->keybit);
+ __set_bit(BTN_TOOL_RUBBER, input->keybit);
+ __set_bit(BTN_STYLUS, input->keybit);
+ __set_bit(BTN_STYLUS2, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
+
+ input_set_abs_params(input, ABS_X, 0, features.x_max, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, features.y_max, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE,
+ 0, features.pressure_max, 0, 0);
+
+ input_set_drvdata(input, wac_i2c);
+
+ error = request_threaded_irq(client->irq, NULL, wacom_i2c_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "wacom_i2c", wac_i2c);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enable IRQ, error: %d\n", error);
+ goto err_free_mem;
+ }
+
+ /* Disable the IRQ, we'll enable it in wacom_i2c_open() */
+ disable_irq(client->irq);
+
+ error = input_register_device(wac_i2c->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device, error: %d\n", error);
+ goto err_free_irq;
+ }
+
+ i2c_set_clientdata(client, wac_i2c);
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, wac_i2c);
+err_free_mem:
+ input_free_device(input);
+ kfree(wac_i2c);
+
+ return error;
+}
+
+static int __devexit wacom_i2c_remove(struct i2c_client *client)
+{
+ struct wacom_i2c *wac_i2c = i2c_get_clientdata(client);
+
+ free_irq(client->irq, wac_i2c);
+ input_unregister_device(wac_i2c->input);
+ kfree(wac_i2c);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wacom_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int wacom_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(wacom_i2c_pm, wacom_i2c_suspend, wacom_i2c_resume);
+
+static const struct i2c_device_id wacom_i2c_id[] = {
+ { "WAC_I2C_EMR", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, wacom_i2c_id);
+
+static struct i2c_driver wacom_i2c_driver = {
+ .driver = {
+ .name = "wacom_i2c",
+ .owner = THIS_MODULE,
+ .pm = &wacom_i2c_pm,
+ },
+
+ .probe = wacom_i2c_probe,
+ .remove = __devexit_p(wacom_i2c_remove),
+ .id_table = wacom_i2c_id,
+};
+module_i2c_driver(wacom_i2c_driver);
+
+MODULE_AUTHOR("Tatsunosuke Tobita <tobita.tatsunosuke@wacom.co.jp>");
+MODULE_DESCRIPTION("WACOM EMR I2C Driver");
+MODULE_LICENSE("GPL");
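
Nothing in this file instantiates the device; the driver only matches on the "WAC_I2C_EMR" name. A hedged sketch of how a board file could register it — the bus number, slave address and GPIO below are placeholders, not values from this patch:

#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>

#define EXAMPLE_WACOM_BUS	1	/* placeholder I2C bus number */
#define EXAMPLE_WACOM_ADDR	0x09	/* placeholder slave address */
#define EXAMPLE_WACOM_GPIO	42	/* placeholder pen-interrupt GPIO */

static struct i2c_board_info example_wacom_board_info __initdata = {
	I2C_BOARD_INFO("WAC_I2C_EMR", EXAMPLE_WACOM_ADDR),
};

static int __init example_board_add_wacom(void)
{
	/* the driver requests client->irq as a level-low threaded IRQ */
	example_wacom_board_info.irq = gpio_to_irq(EXAMPLE_WACOM_GPIO);

	return i2c_register_board_info(EXAMPLE_WACOM_BUS,
				       &example_wacom_board_info, 1);
}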
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 1569a3934ab2..8f9ad2f893b8 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -594,15 +594,4 @@ static struct serio_driver w8001_drv = {
.disconnect = w8001_disconnect,
};
-static int __init w8001_init(void)
-{
- return serio_register_driver(&w8001_drv);
-}
-
-static void __exit w8001_exit(void)
-{
- serio_unregister_driver(&w8001_drv);
-}
-
-module_init(w8001_init);
-module_exit(w8001_exit);
+module_serio_driver(w8001_drv);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 4bc851a9dc3d..e83410721e38 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -260,15 +260,16 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
* If we have a direct IRQ use it, otherwise use the interrupt
* from the WM831x IRQ controller.
*/
+ wm831x_ts->data_irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ "TCHDATA"));
if (pdata && pdata->data_irq)
wm831x_ts->data_irq = pdata->data_irq;
- else
- wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");
+ wm831x_ts->pd_irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev, "TCHPD"));
if (pdata && pdata->pd_irq)
wm831x_ts->pd_irq = pdata->pd_irq;
- else
- wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");
if (pdata)
wm831x_ts->pressure = pdata->pressure;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3bd9fff5c589..340893727538 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -43,7 +43,7 @@ config AMD_IOMMU
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
remapping of DMA memory accesses from devices. With an AMD IOMMU you
- can isolate the the DMA memory of different devices and protect the
+ can isolate the DMA memory of different devices and protect the
system from misbehaving device drivers or hardware.
You can find out if your system has an AMD IOMMU if you look into
@@ -67,7 +67,7 @@ config AMD_IOMMU_V2
---help---
This option enables support for the AMD IOMMUv2 features of the IOMMU
hardware. Select this option if you want to use devices that support
- the the PCI PRI and PASID interface.
+ the PCI PRI and PASID interface.
# Intel IOMMU support
config DMAR_TABLE
@@ -162,4 +162,25 @@ config TEGRA_IOMMU_SMMU
space through the SMMU (System Memory Management Unit)
hardware included on Tegra SoCs.
+config EXYNOS_IOMMU
+ bool "Exynos IOMMU Support"
+ depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+ select IOMMU_API
+ help
+ Support for the IOMMU (System MMU) of the Samsung Exynos application
+ processor family. This enables H/W multimedia accelerators to see
+ non-linear physical memory chunks as linear memory in their
+ address spaces.
+
+ If unsure, say N here.
+
+config EXYNOS_IOMMU_DEBUG
+ bool "Debugging log for Exynos IOMMU"
+ depends on EXYNOS_IOMMU
+ help
+ Select this to see the detailed log messages that show what
+ happens in the IOMMU driver.
+
+ Say N unless you need kernel log messages for IOMMU debugging.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 3e5e82ae9f0d..76e54ef796de 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
+obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a5bee8e2dfce..625626391f2d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
+static struct dma_map_ops amd_iommu_dma_ops;
+
/*
* general struct to manage commands send to an IOMMU
*/
@@ -402,7 +404,7 @@ static void amd_iommu_stats_init(void)
return;
de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
- (u32 *)&amd_iommu_unmap_flush);
+ &amd_iommu_unmap_flush);
amd_iommu_stats_add(&compl_wait);
amd_iommu_stats_add(&cnt_map_single);
@@ -450,12 +452,27 @@ static void dump_command(unsigned long phys_addr)
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
- u32 *event = __evt;
- int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
- int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
- int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
- u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+ int type, devid, domid, flags;
+ volatile u32 *event = __evt;
+ int count = 0;
+ u64 address;
+
+retry:
+ type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ address = (u64)(((u64)event[3]) << 32) | event[2];
+
+ if (type == 0) {
+ /* Did we hit the erratum? */
+ if (++count == LOOP_TIMEOUT) {
+ pr_err("AMD-Vi: No event written to event log\n");
+ return;
+ }
+ udelay(1);
+ goto retry;
+ }
printk(KERN_ERR "AMD-Vi: Event logged [");
@@ -508,6 +525,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
default:
printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
}
+
+ memset(__evt, 0, 4 * sizeof(u32));
}
static void iommu_poll_events(struct amd_iommu *iommu)
@@ -530,26 +549,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
struct amd_iommu_fault fault;
- volatile u64 *raw;
- int i;
INC_STATS_COUNTER(pri_requests);
- raw = (u64 *)(iommu->ppr_log + head);
-
- /*
- * Hardware bug: Interrupt may arrive before the entry is written to
- * memory. If this happens we need to wait for the entry to arrive.
- */
- for (i = 0; i < LOOP_TIMEOUT; ++i) {
- if (PPR_REQ_TYPE(raw[0]) != 0)
- break;
- udelay(1);
- }
-
if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
return;
@@ -561,12 +566,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
fault.tag = PPR_TAG(raw[0]);
fault.flags = PPR_FLAGS(raw[0]);
- /*
- * To detect the hardware bug we need to clear the entry
- * to back to zero.
- */
- raw[0] = raw[1] = 0;
-
atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}
@@ -578,25 +577,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
if (iommu->ppr_log == NULL)
return;
+ /* enable ppr interrupts again */
+ writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
spin_lock_irqsave(&iommu->lock, flags);
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
while (head != tail) {
+ volatile u64 *raw;
+ u64 entry[2];
+ int i;
- /* Handle PPR entry */
- iommu_handle_ppr_entry(iommu, head);
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: Interrupt may arrive before the entry is
+ * written to memory. If this happens we need to wait for the
+ * entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* Avoid memcpy function-call overhead */
+ entry[0] = raw[0];
+ entry[1] = raw[1];
+
+ /*
+ * To detect the hardware bug we need to clear the entry
+ * back to zero.
+ */
+ raw[0] = raw[1] = 0UL;
- /* Update and refresh ring-buffer state*/
+ /* Update head pointer of hardware ring-buffer */
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+ /*
+ * Release iommu->lock because ppr-handling might need to
+ * re-acquire it
+ */
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ /* Handle PPR entry */
+ iommu_handle_ppr_entry(iommu, entry);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ /* Refresh ring-buffer information */
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}
- /* enable ppr interrupts again */
- writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -2035,20 +2071,20 @@ out_err:
}
/* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF (1 << 2)
+#define PCI_PRI_TLP_OFF (1 << 15)
bool pci_pri_tlp_required(struct pci_dev *pdev)
{
- u16 control;
+ u16 status;
int pos;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- return (control & PCI_PRI_TLP_OFF) ? true : false;
+ return (status & PCI_PRI_TLP_OFF) ? true : false;
}
/*
@@ -2233,6 +2269,13 @@ static int device_change_notifier(struct notifier_block *nb,
list_add_tail(&dma_domain->list, &iommu_pd_list);
spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+ dev_data = get_dev_data(dev);
+
+ if (!dev_data->passthrough)
+ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+ else
+ dev->archdata.dma_ops = &nommu_dma_ops;
+
break;
case BUS_NOTIFY_DEL_DEVICE:
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index c56790375e0f..a33612f3206f 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -129,7 +129,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
to handle */
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
-bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->dev)
return 1;
+ iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+ PCI_DEVFN(0, 0));
+
iommu->cap_ptr = h->cap_ptr;
iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
int i, j;
u32 ioc_feature_control;
- struct pci_dev *pdev = NULL;
+ struct pci_dev *pdev = iommu->root_pdev;
/* RD890 BIOSes may not have completely reconfigured the iommu */
- if (!is_rd890_iommu(iommu->dev))
+ if (!is_rd890_iommu(iommu->dev) || !pdev)
return;
/*
* First, we need to ensure that the iommu is enabled. This is
* controlled by a register in the northbridge
*/
- pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
- if (!pdev)
- return;
/* Select Northbridge indirect register 0x75 and enable writing */
pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
if (!(ioc_feature_control & 0x1))
pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
- pci_dev_put(pdev);
-
/* Restore the iommu BAR */
pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
iommu->stored_addr_lo);
@@ -1644,6 +1641,8 @@ static int __init amd_iommu_init(void)
amd_iommu_init_api();
+ x86_platform.iommu_shutdown = disable_iommus;
+
if (iommu_pass_through)
goto out;
@@ -1652,8 +1651,6 @@ static int __init amd_iommu_init(void)
else
printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
- x86_platform.iommu_shutdown = disable_iommus;
-
out:
return ret;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 2452f3b71736..c1b1d489817e 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -481,6 +481,9 @@ struct amd_iommu {
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
+ /* Cache pdev to root device for resume quirks */
+ struct pci_dev *root_pdev;
+
/* physical address of MMIO space */
u64 mmio_phys;
/* virtual address of MMIO space */
@@ -649,7 +652,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
* If true, the addresses will be flushed on unmap time, not when
* they are reused
*/
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;
/* Smallest number of PASIDs supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasids;
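
The bool-to-u32 switch matches the debugfs_create_bool() hunk in amd_iommu.c above: in this kernel series the helper takes a u32 *, so handing it a casted bool pointer aliased a one-byte object as four bytes. A short sketch of the API as assumed here:

#include <linux/debugfs.h>

static u32 example_fullflush;	/* u32, not bool, to match the debugfs helper */

static void example_expose_flag(struct dentry *stats_dir)
{
	/* debugfs_create_bool(name, mode, parent, u32 *value) in this series */
	debugfs_create_bool("fullflush", 0444, stats_dir, &example_fullflush);
}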
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
new file mode 100644
index 000000000000..9a114b9ff170
--- /dev/null
+++ b/drivers/iommu/exynos-iommu.c
@@ -0,0 +1,1076 @@
+/* linux/drivers/iommu/exynos_iommu.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
+#include <mach/sysmmu.h>
+
+/* We do not consider super section mapping (16MB) */
+#define SECT_ORDER 20
+#define LPAGE_ORDER 16
+#define SPAGE_ORDER 12
+
+#define SECT_SIZE (1 << SECT_ORDER)
+#define LPAGE_SIZE (1 << LPAGE_ORDER)
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+#define SECT_MASK (~(SECT_SIZE - 1))
+#define LPAGE_MASK (~(LPAGE_SIZE - 1))
+#define SPAGE_MASK (~(SPAGE_SIZE - 1))
+
+#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_section(sent) ((*(sent) & 3) == 2)
+
+#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
+#define lv2ent_small(pent) ((*(pent) & 2) == 2)
+#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+
+#define section_phys(sent) (*(sent) & SECT_MASK)
+#define section_offs(iova) ((iova) & 0xFFFFF)
+#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
+#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define spage_phys(pent) (*(pent) & SPAGE_MASK)
+#define spage_offs(iova) ((iova) & 0xFFF)
+
+#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
+#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+
+#define NUM_LV1ENTRIES 4096
+#define NUM_LV2ENTRIES 256
+
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+
+#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
+
+#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
+
+#define mk_lv1ent_sect(pa) ((pa) | 2)
+#define mk_lv1ent_page(pa) ((pa) | 1)
+#define mk_lv2ent_lpage(pa) ((pa) | 1)
+#define mk_lv2ent_spage(pa) ((pa) | 2)
+
+#define CTRL_ENABLE 0x5
+#define CTRL_BLOCK 0x7
+#define CTRL_DISABLE 0x0
+
+#define REG_MMU_CTRL 0x000
+#define REG_MMU_CFG 0x004
+#define REG_MMU_STATUS 0x008
+#define REG_MMU_FLUSH 0x00C
+#define REG_MMU_FLUSH_ENTRY 0x010
+#define REG_PT_BASE_ADDR 0x014
+#define REG_INT_STATUS 0x018
+#define REG_INT_CLEAR 0x01C
+
+#define REG_PAGE_FAULT_ADDR 0x024
+#define REG_AW_FAULT_ADDR 0x028
+#define REG_AR_FAULT_ADDR 0x02C
+#define REG_DEFAULT_SLAVE_ADDR 0x030
+
+#define REG_MMU_VERSION 0x034
+
+#define REG_PB0_SADDR 0x04C
+#define REG_PB0_EADDR 0x050
+#define REG_PB1_SADDR 0x054
+#define REG_PB1_EADDR 0x058
+
+static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+{
+ return pgtable + lv1ent_offset(iova);
+}
+
+static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+{
+ return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+}
+
+enum exynos_sysmmu_inttype {
+ SYSMMU_PAGEFAULT,
+ SYSMMU_AR_MULTIHIT,
+ SYSMMU_AW_MULTIHIT,
+ SYSMMU_BUSERROR,
+ SYSMMU_AR_SECURITY,
+ SYSMMU_AR_ACCESS,
+ SYSMMU_AW_SECURITY,
+ SYSMMU_AW_PROTECTION, /* 7 */
+ SYSMMU_FAULT_UNKNOWN,
+ SYSMMU_FAULTS_NUM
+};
+
+/*
+ * @itype: type of fault.
+ * @pgtable_base: the physical address of page table base. This is 0 if @itype
+ * is SYSMMU_BUSERROR.
+ * @fault_addr: the device (virtual) address that the System MMU tried to
+ * translate. This is 0 if @itype is SYSMMU_BUSERROR.
+ */
+typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
+ unsigned long pgtable_base, unsigned long fault_addr);
+
+static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
+ REG_PAGE_FAULT_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AW_FAULT_ADDR,
+ REG_DEFAULT_SLAVE_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AW_FAULT_ADDR,
+ REG_AW_FAULT_ADDR
+};
+
+static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
+ "PAGE FAULT",
+ "AR MULTI-HIT FAULT",
+ "AW MULTI-HIT FAULT",
+ "BUS ERROR",
+ "AR SECURITY PROTECTION FAULT",
+ "AR ACCESS PROTECTION FAULT",
+ "AW SECURITY PROTECTION FAULT",
+ "AW ACCESS PROTECTION FAULT",
+ "UNKNOWN FAULT"
+};
+
+struct exynos_iommu_domain {
+ struct list_head clients; /* list of sysmmu_drvdata.node */
+ unsigned long *pgtable; /* lv1 page table, 16KB */
+ short *lv2entcnt; /* free lv2 entry counter for each section */
+ spinlock_t lock; /* lock for this structure */
+ spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+};
+
+struct sysmmu_drvdata {
+ struct list_head node; /* entry of exynos_iommu_domain.clients */
+ struct device *sysmmu; /* System MMU's device descriptor */
+ struct device *dev; /* Owner of system MMU */
+ char *dbgname;
+ int nsfrs;
+ void __iomem **sfrbases;
+ struct clk *clk[2];
+ int activations;
+ rwlock_t lock;
+ struct iommu_domain *domain;
+ sysmmu_fault_handler_t fault_handler;
+ unsigned long pgtable;
+};
+
+static bool set_sysmmu_active(struct sysmmu_drvdata *data)
+{
+ /* return true if the System MMU was not active previously
+ and it needs to be initialized */
+ return ++data->activations == 1;
+}
+
+static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
+{
+ /* return true if the System MMU is needed to be disabled */
+ BUG_ON(data->activations < 1);
+ return --data->activations == 0;
+}
+
+static bool is_sysmmu_active(struct sysmmu_drvdata *data)
+{
+ return data->activations > 0;
+}
+
+static void sysmmu_unblock(void __iomem *sfrbase)
+{
+ __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+}
+
+static bool sysmmu_block(void __iomem *sfrbase)
+{
+ int i = 120;
+
+ __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
+ while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+ --i;
+
+ if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
+ sysmmu_unblock(sfrbase);
+ return false;
+ }
+
+ return true;
+}
+
+static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
+{
+ __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
+}
+
+static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
+ unsigned long iova)
+{
+ __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void __sysmmu_set_ptbase(void __iomem *sfrbase,
+ unsigned long pgd)
+{
+ __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
+ __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
+
+ __sysmmu_tlb_invalidate(sfrbase);
+}
+
+static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
+ unsigned long size, int idx)
+{
+ __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
+ __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
+}
+
+void exynos_sysmmu_set_prefbuf(struct device *dev,
+ unsigned long base0, unsigned long size0,
+ unsigned long base1, unsigned long size1)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ unsigned long flags;
+ int i;
+
+ BUG_ON((base0 + size0) <= base0);
+ BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
+
+ read_lock_irqsave(&data->lock, flags);
+ if (!is_sysmmu_active(data))
+ goto finish;
+
+ for (i = 0; i < data->nsfrs; i++) {
+ if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+ if (!sysmmu_block(data->sfrbases[i]))
+ continue;
+
+ if (size1 == 0) {
+ if (size0 <= SZ_128K) {
+ base1 = base0;
+ size1 = size0;
+ } else {
+ size1 = size0 -
+ ALIGN(size0 / 2, SZ_64K);
+ size0 = size0 - size1;
+ base1 = base0 + size0;
+ }
+ }
+
+ __sysmmu_set_prefbuf(
+ data->sfrbases[i], base0, size0, 0);
+ __sysmmu_set_prefbuf(
+ data->sfrbases[i], base1, size1, 1);
+
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+finish:
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+static void __set_fault_handler(struct sysmmu_drvdata *data,
+ sysmmu_fault_handler_t handler)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&data->lock, flags);
+ data->fault_handler = handler;
+ write_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_set_fault_handler(struct device *dev,
+ sysmmu_fault_handler_t handler)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ __set_fault_handler(data, handler);
+}
+
+static int default_fault_handler(enum exynos_sysmmu_inttype itype,
+ unsigned long pgtable_base, unsigned long fault_addr)
+{
+ unsigned long *ent;
+
+ if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
+ itype = SYSMMU_FAULT_UNKNOWN;
+
+ pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",
+ sysmmu_fault_name[itype], fault_addr, pgtable_base);
+
+ ent = section_entry(__va(pgtable_base), fault_addr);
+ pr_err("\tLv1 entry: 0x%lx\n", *ent);
+
+ if (lv1ent_page(ent)) {
+ ent = page_entry(ent, fault_addr);
+ pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+ }
+
+ pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
+
+ BUG();
+
+ return 0;
+}
+
+static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
+{
+ /* The System MMU is in a blocked state when a fault interrupt occurs. */
+ struct sysmmu_drvdata *data = dev_id;
+ struct resource *irqres;
+ struct platform_device *pdev;
+ enum exynos_sysmmu_inttype itype;
+ unsigned long addr = -1;
+
+ int i, ret = -ENOSYS;
+
+ read_lock(&data->lock);
+
+ WARN_ON(!is_sysmmu_active(data));
+
+ pdev = to_platform_device(data->sysmmu);
+ for (i = 0; i < (pdev->num_resources / 2); i++) {
+ irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (irqres && ((int)irqres->start == irq))
+ break;
+ }
+
+ if (i == pdev->num_resources / 2) {
+ itype = SYSMMU_FAULT_UNKNOWN;
+ } else {
+ itype = (enum exynos_sysmmu_inttype)
+ __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
+ if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
+ itype = SYSMMU_FAULT_UNKNOWN;
+ else
+ addr = __raw_readl(
+ data->sfrbases[i] + fault_reg_offset[itype]);
+ }
+
+ if (data->domain)
+ ret = report_iommu_fault(data->domain, data->dev,
+ addr, itype);
+
+ if ((ret == -ENOSYS) && data->fault_handler) {
+ unsigned long base = data->pgtable;
+ if (itype != SYSMMU_FAULT_UNKNOWN)
+ base = __raw_readl(
+ data->sfrbases[i] + REG_PT_BASE_ADDR);
+ ret = data->fault_handler(itype, base, addr);
+ }
+
+ if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
+ __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
+ else
+ dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
+ data->dbgname, sysmmu_fault_name[itype]);
+
+ if (itype != SYSMMU_FAULT_UNKNOWN)
+ sysmmu_unblock(data->sfrbases[i]);
+
+ read_unlock(&data->lock);
+
+ return IRQ_HANDLED;
+}
+
+static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+{
+ unsigned long flags;
+ bool disabled = false;
+ int i;
+
+ write_lock_irqsave(&data->lock, flags);
+
+ if (!set_sysmmu_inactive(data))
+ goto finish;
+
+ for (i = 0; i < data->nsfrs; i++)
+ __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+
+ if (data->clk[1])
+ clk_disable(data->clk[1]);
+ if (data->clk[0])
+ clk_disable(data->clk[0]);
+
+ disabled = true;
+ data->pgtable = 0;
+ data->domain = NULL;
+finish:
+ write_unlock_irqrestore(&data->lock, flags);
+
+ if (disabled)
+ dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
+ else
+ dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
+ data->dbgname, data->activations);
+
+ return disabled;
+}
+
+/* __exynos_sysmmu_enable: Enables System MMU
+ *
+ * Returns a negative error value if an error occurred and the System MMU is
+ * not enabled, 0 if the System MMU has just been enabled, and 1 if it was
+ * already enabled.
+ */
+static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
+ unsigned long pgtable, struct iommu_domain *domain)
+{
+ int i, ret = 0;
+ unsigned long flags;
+
+ write_lock_irqsave(&data->lock, flags);
+
+ if (!set_sysmmu_active(data)) {
+ if (WARN_ON(pgtable != data->pgtable)) {
+ ret = -EBUSY;
+ set_sysmmu_inactive(data);
+ } else {
+ ret = 1;
+ }
+
+ dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
+ goto finish;
+ }
+
+ if (data->clk[0])
+ clk_enable(data->clk[0]);
+ if (data->clk[1])
+ clk_enable(data->clk[1]);
+
+ data->pgtable = pgtable;
+
+ for (i = 0; i < data->nsfrs; i++) {
+ __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+
+ if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+ /* System MMU version is 3.x */
+ __raw_writel((1 << 12) | (2 << 28),
+ data->sfrbases[i] + REG_MMU_CFG);
+ __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
+ __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
+ }
+
+ __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+ }
+
+ data->domain = domain;
+
+ dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
+finish:
+ write_unlock_irqrestore(&data->lock, flags);
+
+ return ret;
+}
+
+int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ int ret;
+
+ BUG_ON(!memblock_is_memory(pgtable));
+
+ ret = pm_runtime_get_sync(data->sysmmu);
+ if (ret < 0) {
+ dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
+ return ret;
+ }
+
+ ret = __exynos_sysmmu_enable(data, pgtable, NULL);
+ if (WARN_ON(ret < 0)) {
+ pm_runtime_put(data->sysmmu);
+ dev_err(data->sysmmu,
+ "(%s) Already enabled with page table %#lx\n",
+ data->dbgname, data->pgtable);
+ } else {
+ data->dev = dev;
+ }
+
+ return ret;
+}
+
+bool exynos_sysmmu_disable(struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ bool disabled;
+
+ disabled = __exynos_sysmmu_disable(data);
+ pm_runtime_put(data->sysmmu);
+
+ return disabled;
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+{
+ unsigned long flags;
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ read_lock_irqsave(&data->lock, flags);
+
+ if (is_sysmmu_active(data)) {
+ int i;
+ for (i = 0; i < data->nsfrs; i++) {
+ if (sysmmu_block(data->sfrbases[i])) {
+ __sysmmu_tlb_invalidate_entry(
+ data->sfrbases[i], iova);
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+ } else {
+ dev_dbg(data->sysmmu,
+ "(%s) Disabled. Skipping invalidating TLB.\n",
+ data->dbgname);
+ }
+
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_tlb_invalidate(struct device *dev)
+{
+ unsigned long flags;
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ read_lock_irqsave(&data->lock, flags);
+
+ if (is_sysmmu_active(data)) {
+ int i;
+ for (i = 0; i < data->nsfrs; i++) {
+ if (sysmmu_block(data->sfrbases[i])) {
+ __sysmmu_tlb_invalidate(data->sfrbases[i]);
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+ } else {
+ dev_dbg(data->sysmmu,
+ "(%s) Disabled. Skipping invalidating TLB.\n",
+ data->dbgname);
+ }
+
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+static int exynos_sysmmu_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct device *dev;
+ struct sysmmu_drvdata *data;
+
+ dev = &pdev->dev;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_dbg(dev, "Not enough memory\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ ret = dev_set_drvdata(dev, data);
+ if (ret) {
+ dev_dbg(dev, "Unabled to initialize driver data\n");
+ goto err_init;
+ }
+
+ data->nsfrs = pdev->num_resources / 2;
+ data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
+ GFP_KERNEL);
+ if (data->sfrbases == NULL) {
+ dev_dbg(dev, "Not enough memory\n");
+ ret = -ENOMEM;
+ goto err_init;
+ }
+
+ for (i = 0; i < data->nsfrs; i++) {
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res) {
+ dev_dbg(dev, "Unable to find IOMEM region\n");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ data->sfrbases[i] = ioremap(res->start, resource_size(res));
+ if (!data->sfrbases[i]) {
+ dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
+ res->start);
+ ret = -ENOENT;
+ goto err_res;
+ }
+ }
+
+ for (i = 0; i < data->nsfrs; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret <= 0) {
+ dev_dbg(dev, "Unable to find IRQ resource\n");
+ goto err_irq;
+ }
+
+ ret = request_irq(ret, exynos_sysmmu_irq, 0,
+ dev_name(dev), data);
+ if (ret) {
+ dev_dbg(dev, "Unabled to register interrupt handler\n");
+ goto err_irq;
+ }
+ }
+
+ if (dev_get_platdata(dev)) {
+ char *deli, *beg;
+ struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
+
+ beg = platdata->clockname;
+
+ for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
+ /* NOTHING */;
+
+ if (*deli == '\0')
+ deli = NULL;
+ else
+ *deli = '\0';
+
+ data->clk[0] = clk_get(dev, beg);
+ if (IS_ERR(data->clk[0])) {
+ data->clk[0] = NULL;
+ dev_dbg(dev, "No clock descriptor registered\n");
+ }
+
+ if (data->clk[0] && deli) {
+ *deli = ',';
+ data->clk[1] = clk_get(dev, deli + 1);
+ if (IS_ERR(data->clk[1]))
+ data->clk[1] = NULL;
+ }
+
+ data->dbgname = platdata->dbgname;
+ }
+
+ data->sysmmu = dev;
+ rwlock_init(&data->lock);
+ INIT_LIST_HEAD(&data->node);
+
+ __set_fault_handler(data, &default_fault_handler);
+
+ if (dev->parent)
+ pm_runtime_enable(dev);
+
+ dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
+ return 0;
+err_irq:
+ while (i-- > 0) {
+ int irq;
+
+ irq = platform_get_irq(pdev, i);
+ free_irq(irq, data);
+ }
+err_res:
+ while (data->nsfrs-- > 0)
+ iounmap(data->sfrbases[data->nsfrs]);
+ kfree(data->sfrbases);
+err_init:
+ kfree(data);
+err_alloc:
+ dev_err(dev, "Failed to initialize\n");
+ return ret;
+}
+
+static struct platform_driver exynos_sysmmu_driver = {
+ .probe = exynos_sysmmu_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "exynos-sysmmu",
+ }
+};
+
+static inline void pgtable_flush(void *vastart, void *vaend)
+{
+ dmac_flush_range(vastart, vaend);
+ outer_flush_range(virt_to_phys(vastart),
+ virt_to_phys(vaend));
+}
+
+static int exynos_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct exynos_iommu_domain *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pgtable = (unsigned long *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO, 2);
+ if (!priv->pgtable)
+ goto err_pgtable;
+
+ priv->lv2entcnt = (short *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO, 1);
+ if (!priv->lv2entcnt)
+ goto err_counter;
+
+ pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->pgtablelock);
+ INIT_LIST_HEAD(&priv->clients);
+
+ domain->priv = priv;
+ return 0;
+
+err_counter:
+ free_pages((unsigned long)priv->pgtable, 2);
+err_pgtable:
+ kfree(priv);
+ return -ENOMEM;
+}
+
+static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct sysmmu_drvdata *data;
+ unsigned long flags;
+ int i;
+
+ WARN_ON(!list_empty(&priv->clients));
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry(data, &priv->clients, node) {
+ while (!exynos_sysmmu_disable(data->dev))
+ ; /* until System MMU is actually disabled */
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ for (i = 0; i < NUM_LV1ENTRIES; i++)
+ if (lv1ent_page(priv->pgtable + i))
+ kfree(__va(lv2table_base(priv->pgtable + i)));
+
+ free_pages((unsigned long)priv->pgtable, 2);
+ free_pages((unsigned long)priv->lv2entcnt, 1);
+ kfree(domain->priv);
+ domain->priv = NULL;
+}
+
+static int exynos_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_get_sync(data->sysmmu);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
+
+ if (ret == 0) {
+ /* 'data->node' must not already be in priv->clients */
+ BUG_ON(!list_empty(&data->node));
+ data->dev = dev;
+ list_add_tail(&data->node, &priv->clients);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (ret < 0) {
+ dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
+ __func__, __pa(priv->pgtable));
+ pm_runtime_put(data->sysmmu);
+ } else if (ret > 0) {
+ dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
+ __func__, __pa(priv->pgtable));
+ } else {
+ dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
+ __func__, __pa(priv->pgtable));
+ }
+
+ return ret;
+}
+
+static void exynos_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct list_head *pos;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each(pos, &priv->clients) {
+ if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto finish;
+
+ if (__exynos_sysmmu_disable(data)) {
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
+ __func__, __pa(priv->pgtable));
+ list_del(&data->node);
+ INIT_LIST_HEAD(&data->node);
+
+ } else {
+ dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
+ __func__, __pa(priv->pgtable));
+ }
+
+finish:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (found)
+ pm_runtime_put(data->sysmmu);
+}
+
+static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
+ short *pgcounter)
+{
+ if (lv1ent_fault(sent)) {
+ unsigned long *pent;
+
+ pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
+ BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+ if (!pent)
+ return NULL;
+
+ *sent = mk_lv1ent_page(__pa(pent));
+ *pgcounter = NUM_LV2ENTRIES;
+ pgtable_flush(pent, pent + NUM_LV2ENTRIES);
+ pgtable_flush(sent, sent + 1);
+ }
+
+ return page_entry(sent, iova);
+}
+
+static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+{
+ if (lv1ent_section(sent))
+ return -EADDRINUSE;
+
+ if (lv1ent_page(sent)) {
+ if (*pgcnt != NUM_LV2ENTRIES)
+ return -EADDRINUSE;
+
+ kfree(page_entry(sent, 0));
+
+ *pgcnt = 0;
+ }
+
+ *sent = mk_lv1ent_sect(paddr);
+
+ pgtable_flush(sent, sent + 1);
+
+ return 0;
+}
+
+static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+ short *pgcnt)
+{
+ if (size == SPAGE_SIZE) {
+ if (!lv2ent_fault(pent))
+ return -EADDRINUSE;
+
+ *pent = mk_lv2ent_spage(paddr);
+ pgtable_flush(pent, pent + 1);
+ *pgcnt -= 1;
+ } else { /* size == LPAGE_SIZE */
+ int i;
+ for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
+ if (!lv2ent_fault(pent)) {
+ memset(pent, 0, sizeof(*pent) * i);
+ return -EADDRINUSE;
+ }
+
+ *pent = mk_lv2ent_lpage(paddr);
+ }
+ pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+ *pgcnt -= SPAGES_PER_LPAGE;
+ }
+
+ return 0;
+}
+
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long *entry;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ BUG_ON(priv->pgtable == NULL);
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ entry = section_entry(priv->pgtable, iova);
+
+ if (size == SECT_SIZE) {
+ ret = lv1set_section(entry, paddr,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+ } else {
+ unsigned long *pent;
+
+ pent = alloc_lv2entry(entry, iova,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+
+ if (!pent)
+ ret = -ENOMEM;
+ else
+ ret = lv2set_page(pent, paddr, size,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+ }
+
+ if (ret) {
+ pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
+ __func__, iova, size);
+ }
+
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ return ret;
+}
+
+static size_t exynos_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct sysmmu_drvdata *data;
+ unsigned long flags;
+ unsigned long *ent;
+
+ BUG_ON(priv->pgtable == NULL);
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ ent = section_entry(priv->pgtable, iova);
+
+ if (lv1ent_section(ent)) {
+ BUG_ON(size < SECT_SIZE);
+
+ *ent = 0;
+ pgtable_flush(ent, ent + 1);
+ size = SECT_SIZE;
+ goto done;
+ }
+
+ if (unlikely(lv1ent_fault(ent))) {
+ if (size > SECT_SIZE)
+ size = SECT_SIZE;
+ goto done;
+ }
+
+ /* lv1ent_page(ent) == true here */
+
+ ent = page_entry(ent, iova);
+
+ if (unlikely(lv2ent_fault(ent))) {
+ size = SPAGE_SIZE;
+ goto done;
+ }
+
+ if (lv2ent_small(ent)) {
+ *ent = 0;
+ size = SPAGE_SIZE;
+ priv->lv2entcnt[lv1ent_offset(iova)] += 1;
+ goto done;
+ }
+
+ /* lv2ent_large(ent) == true here */
+ BUG_ON(size < LPAGE_SIZE);
+
+ memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+
+ size = LPAGE_SIZE;
+ priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
+done:
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(data, &priv->clients, node)
+ sysmmu_tlb_invalidate_entry(data->dev, iova);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+
+ return size;
+}
+
+static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
+ unsigned long iova)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long *entry;
+ unsigned long flags;
+ phys_addr_t phys = 0;
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ entry = section_entry(priv->pgtable, iova);
+
+ if (lv1ent_section(entry)) {
+ phys = section_phys(entry) + section_offs(iova);
+ } else if (lv1ent_page(entry)) {
+ entry = page_entry(entry, iova);
+
+ if (lv2ent_large(entry))
+ phys = lpage_phys(entry) + lpage_offs(iova);
+ else if (lv2ent_small(entry))
+ phys = spage_phys(entry) + spage_offs(iova);
+ }
+
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ return phys;
+}
+
+static struct iommu_ops exynos_iommu_ops = {
+ .domain_init = &exynos_iommu_domain_init,
+ .domain_destroy = &exynos_iommu_domain_destroy,
+ .attach_dev = &exynos_iommu_attach_device,
+ .detach_dev = &exynos_iommu_detach_device,
+ .map = &exynos_iommu_map,
+ .unmap = &exynos_iommu_unmap,
+ .iova_to_phys = &exynos_iommu_iova_to_phys,
+ .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
+};
+
+static int __init exynos_iommu_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&exynos_sysmmu_driver);
+
+ if (ret == 0)
+ bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+
+ return ret;
+}
+subsys_initcall(exynos_iommu_init);
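
Because the driver registers exynos_iommu_ops for the platform bus, client code normally goes through the generic IOMMU API rather than the exynos_sysmmu_* helpers. A hedged usage sketch; the device pointer, I/O virtual address and physical address are placeholders:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

static int example_attach_and_map(struct device *dev, unsigned long iova,
				  phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	/* ends up in exynos_iommu_attach_device() and enables the System MMU */
	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto err_free;

	/* map one small (4KiB) page; SECT_SIZE/LPAGE_SIZE mappings work alike */
	ret = iommu_map(domain, iova, paddr, 4096, 0);
	if (ret)
		goto err_detach;

	return 0;

err_detach:
	iommu_detach_device(domain, dev);
err_free:
	iommu_domain_free(domain);
	return ret;
}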
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bf2fbaad5e22..b12af2ff8c54 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1907,6 +1907,15 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
+static inline void unlink_domain_info(struct device_domain_info *info)
+{
+ assert_spin_locked(&device_domain_lock);
+ list_del(&info->link);
+ list_del(&info->global);
+ if (info->dev)
+ info->dev->dev.archdata.iommu = NULL;
+}
+
static void domain_remove_dev_info(struct dmar_domain *domain)
{
struct device_domain_info *info;
@@ -1917,10 +1926,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
while (!list_empty(&domain->devices)) {
info = list_entry(domain->devices.next,
struct device_domain_info, link);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
@@ -2287,12 +2293,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
if (!info)
return -ENOMEM;
- ret = domain_context_mapping(domain, pdev, translation);
- if (ret) {
- free_devinfo_mem(info);
- return ret;
- }
-
info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
@@ -2305,6 +2305,15 @@ static int domain_add_dev_info(struct dmar_domain *domain,
pdev->dev.archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ ret = domain_context_mapping(domain, pdev, translation);
+ if (ret) {
+ spin_lock_irqsave(&device_domain_lock, flags);
+ unlink_domain_info(info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ free_devinfo_mem(info);
+ return ret;
+ }
+
return 0;
}
@@ -3728,10 +3737,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
if (info->segment == pci_domain_nr(pdev->bus) &&
info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
@@ -3786,11 +3792,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
while (!list_empty(&domain->devices)) {
info = list_entry(domain->devices.next,
struct device_domain_info, link);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
-
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags1);
iommu_disable_dev_iotlb(info);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2198b2dbbcd3..8b9ded88e6f5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -119,6 +119,7 @@ EXPORT_SYMBOL_GPL(iommu_present);
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
* @handler: fault handler
+ * @token: user data, will be passed back to the fault handler
*
* This function should be used by IOMMU users which want to be notified
* whenever an IOMMU fault happens.
@@ -127,11 +128,13 @@ EXPORT_SYMBOL_GPL(iommu_present);
* error code otherwise.
*/
void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler)
+ iommu_fault_handler_t handler,
+ void *token)
{
BUG_ON(!domain);
domain->handler = handler;
+ domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
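
With the extra token argument, per-device context can be handed back to the fault handler instead of living in globals. A hedged sketch, assuming the callback receives the registered token as its trailing argument (which is what the new handler_token field is stored for):

#include <linux/device.h>
#include <linux/iommu.h>

struct example_ctx {
	struct device *dev;
	/* whatever per-device state the handler needs */
};

static int example_iommu_fault(struct iommu_domain *domain, struct device *dev,
			       unsigned long iova, int flags, void *token)
{
	struct example_ctx *ctx = token;

	dev_err(ctx->dev, "unhandled fault at iova %#lx (flags %#x)\n",
		iova, flags);
	return -ENOSYS;	/* let the driver's own default handling run */
}

static void example_register_handler(struct iommu_domain *domain,
				     struct example_ctx *ctx)
{
	iommu_set_fault_handler(domain, example_iommu_fault, ctx);
}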
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6899dcd02dfa..e70ee2b59df9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -41,11 +41,13 @@
* @pgtable: the page table
* @iommu_dev: an omap iommu device attached to this domain. only a single
* iommu device can be attached for now.
+ * @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
*/
struct omap_iommu_domain {
u32 *pgtable;
struct omap_iommu *iommu_dev;
+ struct device *dev;
spinlock_t lock;
};
@@ -1081,6 +1083,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+ omap_domain->dev = dev;
oiommu->domain = domain;
out:
@@ -1088,19 +1091,16 @@ out:
return ret;
}
-static void omap_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
+ struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
-
- spin_lock(&omap_domain->lock);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
dev_err(dev, "invalid iommu device\n");
- goto out;
+ return;
}
iopgtable_clear_entry_all(oiommu);
@@ -1108,8 +1108,16 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
omap_iommu_detach(oiommu);
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+ omap_domain->dev = NULL;
+}
-out:
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+
+ spin_lock(&omap_domain->lock);
+ _omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
@@ -1148,13 +1156,19 @@ out:
return -ENOMEM;
}
-/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain = domain->priv;
domain->priv = NULL;
+ /*
+ * An iommu device may still be attached (currently only a single
+ * device can be attached); detach it before destroying the domain.
+ */
+ if (omap_domain->iommu_dev)
+ _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
+
kfree(omap_domain->pgtable);
kfree(omap_domain);
}
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 779306ee7b16..0c0a37792218 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -29,15 +29,17 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
+#include <linux/of.h>
#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES (SZ_4K)
-#define GART_CONFIG 0x24
-#define GART_ENTRY_ADDR 0x28
-#define GART_ENTRY_DATA 0x2c
+#define GART_REG_BASE 0x24
+#define GART_CONFIG (0x24 - GART_REG_BASE)
+#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE)
+#define GART_ENTRY_DATA (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID (1 << 31)
#define GART_PAGE_SHIFT 12
@@ -158,7 +160,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct gart_client *client, *c;
int err = 0;
- gart = dev_get_drvdata(dev->parent);
+ gart = gart_handle;
if (!gart)
return -EINVAL;
domain->priv = gart;
@@ -422,6 +424,14 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
.resume = tegra_gart_resume,
};
+#ifdef CONFIG_OF
+static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-gart", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
+#endif
+
static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
.remove = tegra_gart_remove,
@@ -429,6 +439,7 @@ static struct platform_driver tegra_gart_driver = {
.owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
+ .of_match_table = of_match_ptr(tegra_gart_of_match),
},
};
@@ -448,4 +459,5 @@ module_exit(tegra_gart_exit);
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index eb93c821f592..3f3d09d560ea 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -550,13 +550,13 @@ static int alloc_pdir(struct smmu_as *as)
return 0;
as->pte_count = devm_kzalloc(smmu->dev,
- sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+ sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
if (!as->pte_count) {
dev_err(smmu->dev,
"failed to allocate smmu_device PTE cunters\n");
return -ENOMEM;
}
- as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+ as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
if (!as->pdir_page) {
dev_err(smmu->dev,
"failed to allocate smmu_device page directory\n");
@@ -733,7 +733,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
}
- dev_dbg(smmu->dev, "%s is attached\n", dev_name(c->dev));
+ dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
return 0;
err_client:
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
index cb1231b08f78..4157311d569d 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.h
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.h
@@ -410,6 +410,12 @@ static struct usb_device_id hfcsusb_idtab[] = {
{LED_SCHEME1, {0x88, -64, -32, -16},
"ZyXEL OMNI.NET USB II"}),
},
+ {
+ USB_DEVICE(0x1ae7, 0x0525),
+ .driver_info = (unsigned long) &((struct hfcsusb_vdata)
+ {LED_SCHEME1, {0x88, -64, -32, -16},
+ "X-Tensions USB ISDN TA XC-525"}),
+ },
{ }
};
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 1a0ae4445ff2..5f21f629b7ae 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
skb = NULL;
else if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
- "%s ch%d mgr prim(%x) addr(%x) err %d\n",
- __func__, ch->nr, hh->prim, ch->addr, ret);
+ "%s mgr prim(%x) err %d\n",
+ __func__, hh->prim, ret);
}
out:
mutex_unlock(&st->lmutex);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff4b8cfda585..12b2b55c519e 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,19 @@ config LEDS_LM3530
controlled manually or using PWM input or using ambient
light automatically.
+config LEDS_LM3533
+ tristate "LED support for LM3533"
+ depends on LEDS_CLASS
+ depends on MFD_LM3533
+ help
+ This option enables support for the LEDs on National Semiconductor /
+ TI LM3533 Lighting Power chips.
+
+ The LEDs can be controlled directly, through PWM input, or by the
+ ambient-light-sensor interface. The chip supports
+ hardware-accelerated blinking with maximum on and off periods of 9.8
+ and 77 seconds respectively.
+
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
depends on LEDS_CLASS
@@ -259,6 +272,14 @@ config LEDS_DA903X
This option enables support for on-chip LED drivers found
on Dialog Semiconductor DA9030/DA9034 PMICs.
+config LEDS_DA9052
+ tristate "Dialog DA9052/DA9053 LEDS"
+ depends on LEDS_CLASS
+ depends on PMIC_DA9052
+ help
+ This option enables support for on-chip LED drivers found
+ on Dialog Semiconductor DA9052-BC and DA9053-AA/Bx PMICs.
+
config LEDS_DAC124S085
tristate "LED Support for DAC124S085 SPI DAC"
depends on LEDS_CLASS
@@ -358,7 +379,7 @@ config LEDS_NETXBIG
config LEDS_ASIC3
bool "LED support for the HTC ASIC3"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS=y
depends on MFD_ASIC3
default y
help
@@ -369,7 +390,7 @@ config LEDS_ASIC3
config LEDS_RENESAS_TPU
bool "LED support for Renesas TPU"
- depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO
+ depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
help
This option enables build of the LED TPU platform driver,
suitable to drive any TPU channel on newer Renesas SoCs.
@@ -471,4 +492,12 @@ config LEDS_TRIGGER_DEFAULT_ON
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
+config LEDS_TRIGGER_TRANSIENT
+ tristate "LED Transient Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows one-time activation of a transient state on
+ GPIO/PWM-based hardware.
+ If unsure, say Y.
+
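The trigger added by this option is driven entirely through sysfs. A minimal userspace sketch is shown below; the LED name "myled" and its sysfs path are hypothetical, the standard "trigger" attribute comes from the LED class core, and the activate/duration/state attribute names match the files created in ledtrig-transient.c later in this patch (note that duration is added directly to jiffies in this version of the code):

#include <stdio.h>

/* Write one value to a sysfs attribute of the (hypothetical) LED "myled". */
static int write_led_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/leds/myled/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_led_attr("trigger", "transient");	/* select the transient trigger */
	write_led_attr("state", "1");		/* drive LED_FULL while the timer runs */
	write_led_attr("duration", "250");	/* timer length, added directly to jiffies */
	write_led_attr("activate", "1");	/* start the one-shot timer */
	return 0;
}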
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 890481cb09f6..f8958cd6cf6e 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
+obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA9633) += leds-pca9633.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
+obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
@@ -56,3 +58,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 5bff8439dc68..e663e6f413e9 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
led_cdev->brightness = led_cdev->brightness_get(led_cdev);
}
-static ssize_t led_brightness_show(struct device *dev,
+static ssize_t led_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -44,23 +44,18 @@ static ssize_t led_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned long state;
ssize_t ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
- if (isspace(*after))
- count++;
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
- if (count == size) {
- ret = count;
+ if (state == LED_OFF)
+ led_trigger_remove(led_cdev);
+ led_set_brightness(led_cdev, state);
- if (state == LED_OFF)
- led_trigger_remove(led_cdev);
- led_set_brightness(led_cdev, state);
- }
-
- return ret;
+ return size;
}
static ssize_t led_max_brightness_show(struct device *dev,
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index d6860043f6f9..d65353d8d3fc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
if (!led_cdev->blink_brightness)
led_cdev->blink_brightness = led_cdev->max_brightness;
- if (led_get_trigger_data(led_cdev) &&
- delay_on == led_cdev->blink_delay_on &&
- delay_off == led_cdev->blink_delay_off)
- return;
-
- led_stop_software_blink(led_cdev);
-
led_cdev->blink_delay_on = delay_on;
led_cdev->blink_delay_off = delay_off;
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
new file mode 100644
index 000000000000..58a5244c437e
--- /dev/null
+++ b/drivers/leds/leds-da9052.c
@@ -0,0 +1,214 @@
+/*
+ * LED Driver for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+
+#define DA9052_OPENDRAIN_OUTPUT 2
+#define DA9052_SET_HIGH_LVL_OUTPUT (1 << 3)
+#define DA9052_MASK_UPPER_NIBBLE 0xF0
+#define DA9052_MASK_LOWER_NIBBLE 0x0F
+#define DA9052_NIBBLE_SHIFT 4
+#define DA9052_MAX_BRIGHTNESS 0x5f
+
+struct da9052_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct da9052 *da9052;
+ unsigned char led_index;
+ unsigned char id;
+ int brightness;
+};
+
+static unsigned char led_reg[] = {
+ DA9052_LED_CONT_4_REG,
+ DA9052_LED_CONT_5_REG,
+};
+
+static int da9052_set_led_brightness(struct da9052_led *led)
+{
+ u8 val;
+ int error;
+
+ val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM;
+
+ error = da9052_reg_write(led->da9052, led_reg[led->led_index], val);
+ if (error < 0)
+ dev_err(led->da9052->dev, "Failed to set led brightness, %d\n",
+ error);
+ return error;
+}
+
+static void da9052_led_work(struct work_struct *work)
+{
+ struct da9052_led *led = container_of(work, struct da9052_led, work);
+
+ da9052_set_led_brightness(led);
+}
+
+static void da9052_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct da9052_led *led;
+
+ led = container_of(led_cdev, struct da9052_led, cdev);
+ led->brightness = value;
+ schedule_work(&led->work);
+}
+
+static int da9052_configure_leds(struct da9052 *da9052)
+{
+ int error;
+ unsigned char register_value = DA9052_OPENDRAIN_OUTPUT
+ | DA9052_SET_HIGH_LVL_OUTPUT;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+ DA9052_MASK_LOWER_NIBBLE,
+ register_value);
+
+ if (error < 0) {
+ dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+ error);
+ return error;
+ }
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+ DA9052_MASK_UPPER_NIBBLE,
+ register_value << DA9052_NIBBLE_SHIFT);
+ if (error < 0)
+ dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+ error);
+
+ return error;
+}
+
+static int __devinit da9052_led_probe(struct platform_device *pdev)
+{
+ struct da9052_pdata *pdata;
+ struct da9052 *da9052;
+ struct led_platform_data *pled;
+ struct da9052_led *led = NULL;
+ int error = -ENODEV;
+ int i;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data\n");
+ goto err;
+ }
+
+ pled = pdata->pled;
+ if (pled == NULL) {
+ dev_err(&pdev->dev, "No platform data for LED\n");
+ goto err;
+ }
+
+ led = devm_kzalloc(&pdev->dev,
+ sizeof(struct da9052_led) * pled->num_leds,
+ GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "Failed to alloc memory\n");
+ error = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < pled->num_leds; i++) {
+ led[i].cdev.name = pled->leds[i].name;
+ led[i].cdev.brightness_set = da9052_led_set;
+ led[i].cdev.brightness = LED_OFF;
+ led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS;
+ led[i].brightness = LED_OFF;
+ led[i].led_index = pled->leds[i].flags;
+ led[i].da9052 = dev_get_drvdata(pdev->dev.parent);
+ INIT_WORK(&led[i].work, da9052_led_work);
+
+ error = led_classdev_register(pdev->dev.parent, &led[i].cdev);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to register led %d\n",
+ led[i].led_index);
+ goto err_register;
+ }
+
+ error = da9052_set_led_brightness(&led[i]);
+ if (error) {
+ dev_err(&pdev->dev, "Unable to init led %d\n",
+ led[i].led_index);
+ continue;
+ }
+ }
+ error = da9052_configure_leds(led->da9052);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to configure GPIO LED%d\n", error);
+ goto err_register;
+ }
+
+ platform_set_drvdata(pdev, led);
+
+ return 0;
+
+err_register:
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+err:
+ return error;
+}
+
+static int __devexit da9052_led_remove(struct platform_device *pdev)
+{
+ struct da9052_led *led = platform_get_drvdata(pdev);
+ struct da9052_pdata *pdata;
+ struct da9052 *da9052;
+ struct led_platform_data *pled;
+ int i;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ pled = pdata->pled;
+
+ for (i = 0; i < pled->num_leds; i++) {
+ led[i].brightness = 0;
+ da9052_set_led_brightness(&led[i]);
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ return 0;
+}
+
+static struct platform_driver da9052_led_driver = {
+ .driver = {
+ .name = "da9052-leds",
+ .owner = THIS_MODULE,
+ },
+ .probe = da9052_led_probe,
+ .remove = __devexit_p(da9052_led_remove),
+};
+
+module_platform_driver(da9052_led_driver);
+
+MODULE_AUTHOR("Dialog Semiconductor Ltd <dchen@diasemi.com>");
+MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 968fd5fef4fc..84ba6de8039c 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -113,6 +113,18 @@ struct lm3530_data {
bool enable;
};
+/*
+ * struct lm3530_als_data
+ * @config : value of ALS configuration register
+ * @imp_sel : value of ALS resistor select register
+ * @zone : values of ALS ZB(Zone Boundary) registers
+ */
+struct lm3530_als_data {
+ u8 config;
+ u8 imp_sel;
+ u8 zones[LM3530_ALS_ZB_MAX];
+};
+
static const u8 lm3530_reg[LM3530_REG_MAX] = {
LM3530_GEN_CONFIG,
LM3530_ALS_CONFIG,
@@ -141,29 +153,65 @@ static int lm3530_get_mode_from_str(const char *str)
return -1;
}
+static void lm3530_als_configure(struct lm3530_platform_data *pdata,
+ struct lm3530_als_data *als)
+{
+ int i;
+ u32 als_vmin, als_vmax, als_vstep;
+
+ if (pdata->als_vmax == 0) {
+ pdata->als_vmin = 0;
+ pdata->als_vmax = LM3530_ALS_WINDOW_mV;
+ }
+
+ als_vmin = pdata->als_vmin;
+ als_vmax = pdata->als_vmax;
+
+ if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
+ pdata->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;
+
+ /* n zone boundaries make n+1 zones */
+ als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
+
+ for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
+ als->zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
+ als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
+
+ als->config =
+ (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
+ (LM3530_ENABLE_ALS) |
+ (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
+
+ als->imp_sel =
+ (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
+ (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
+}
+
static int lm3530_init_registers(struct lm3530_data *drvdata)
{
int ret = 0;
int i;
u8 gen_config;
- u8 als_config = 0;
u8 brt_ramp;
- u8 als_imp_sel = 0;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
- u8 zones[LM3530_ALS_ZB_MAX];
- u32 als_vmin, als_vmax, als_vstep;
struct lm3530_platform_data *pdata = drvdata->pdata;
struct i2c_client *client = drvdata->client;
struct lm3530_pwm_data *pwm = &pdata->pwm_data;
+ struct lm3530_als_data als;
+
+ memset(&als, 0, sizeof(struct lm3530_als_data));
gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
switch (drvdata->mode) {
case LM3530_BL_MODE_MANUAL:
+ gen_config |= LM3530_ENABLE_I2C;
+ break;
case LM3530_BL_MODE_ALS:
gen_config |= LM3530_ENABLE_I2C;
+ lm3530_als_configure(pdata, &als);
break;
case LM3530_BL_MODE_PWM:
gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
@@ -171,38 +219,6 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
break;
}
- if (drvdata->mode == LM3530_BL_MODE_ALS) {
- if (pdata->als_vmax == 0) {
- pdata->als_vmin = 0;
- pdata->als_vmax = LM3530_ALS_WINDOW_mV;
- }
-
- als_vmin = pdata->als_vmin;
- als_vmax = pdata->als_vmax;
-
- if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
- pdata->als_vmax = als_vmax =
- als_vmin + LM3530_ALS_WINDOW_mV;
-
- /* n zone boundary makes n+1 zones */
- als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
-
- for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
- zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
- als_vstep + (i * als_vstep)) * LED_FULL)
- / 1000;
-
- als_config =
- (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
- (LM3530_ENABLE_ALS) |
- (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
-
- als_imp_sel =
- (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
- (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
-
- }
-
brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
(pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
@@ -215,14 +231,14 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
brightness = drvdata->led_dev.max_brightness;
reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
- reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
+ reg_val[1] = als.config; /* LM3530_ALS_CONFIG */
reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
- reg_val[3] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
+ reg_val[3] = als.imp_sel; /* LM3530_ALS_IMP_SELECT */
reg_val[4] = brightness; /* LM3530_BRT_CTRL_REG */
- reg_val[5] = zones[0]; /* LM3530_ALS_ZB0_REG */
- reg_val[6] = zones[1]; /* LM3530_ALS_ZB1_REG */
- reg_val[7] = zones[2]; /* LM3530_ALS_ZB2_REG */
- reg_val[8] = zones[3]; /* LM3530_ALS_ZB3_REG */
+ reg_val[5] = als.zones[0]; /* LM3530_ALS_ZB0_REG */
+ reg_val[6] = als.zones[1]; /* LM3530_ALS_ZB1_REG */
+ reg_val[7] = als.zones[2]; /* LM3530_ALS_ZB2_REG */
+ reg_val[8] = als.zones[3]; /* LM3530_ALS_ZB3_REG */
reg_val[9] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
reg_val[10] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
reg_val[11] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
new file mode 100644
index 000000000000..f56b6e7ffdac
--- /dev/null
+++ b/drivers/leds/leds-lm3533.c
@@ -0,0 +1,785 @@
+/*
+ * leds-lm3533.c -- LM3533 LED driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_LVCTRLBANK_MIN 2
+#define LM3533_LVCTRLBANK_MAX 5
+#define LM3533_LVCTRLBANK_COUNT 4
+#define LM3533_RISEFALLTIME_MAX 7
+#define LM3533_ALS_CHANNEL_LV_MIN 1
+#define LM3533_ALS_CHANNEL_LV_MAX 2
+
+#define LM3533_REG_CTRLBANK_BCONF_BASE 0x1b
+#define LM3533_REG_PATTERN_ENABLE 0x28
+#define LM3533_REG_PATTERN_LOW_TIME_BASE 0x71
+#define LM3533_REG_PATTERN_HIGH_TIME_BASE 0x72
+#define LM3533_REG_PATTERN_RISETIME_BASE 0x74
+#define LM3533_REG_PATTERN_FALLTIME_BASE 0x75
+
+#define LM3533_REG_PATTERN_STEP 0x10
+
+#define LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK 0x04
+#define LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK 0x02
+#define LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK 0x01
+
+#define LM3533_LED_FLAG_PATTERN_ENABLE 1
+
+
+struct lm3533_led {
+ struct lm3533 *lm3533;
+ struct lm3533_ctrlbank cb;
+ struct led_classdev cdev;
+ int id;
+
+ struct mutex mutex;
+ unsigned long flags;
+
+ struct work_struct work;
+ u8 new_brightness;
+};
+
+
+static inline struct lm3533_led *to_lm3533_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct lm3533_led, cdev);
+}
+
+static inline int lm3533_led_get_ctrlbank_id(struct lm3533_led *led)
+{
+ return led->id + 2;
+}
+
+static inline u8 lm3533_led_get_lv_reg(struct lm3533_led *led, u8 base)
+{
+ return base + led->id;
+}
+
+static inline u8 lm3533_led_get_pattern(struct lm3533_led *led)
+{
+ return led->id;
+}
+
+static inline u8 lm3533_led_get_pattern_reg(struct lm3533_led *led,
+ u8 base)
+{
+ return base + lm3533_led_get_pattern(led) * LM3533_REG_PATTERN_STEP;
+}
+
+static int lm3533_led_pattern_enable(struct lm3533_led *led, int enable)
+{
+ u8 mask;
+ u8 val;
+ int pattern;
+ int state;
+ int ret = 0;
+
+ dev_dbg(led->cdev.dev, "%s - %d\n", __func__, enable);
+
+ mutex_lock(&led->mutex);
+
+ state = test_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+ if ((enable && state) || (!enable && !state))
+ goto out;
+
+ pattern = lm3533_led_get_pattern(led);
+ mask = 1 << (2 * pattern);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, LM3533_REG_PATTERN_ENABLE, val, mask);
+ if (ret) {
+ dev_err(led->cdev.dev, "failed to enable pattern %d (%d)\n",
+ pattern, enable);
+ goto out;
+ }
+
+ __change_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+out:
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+static void lm3533_led_work(struct work_struct *work)
+{
+ struct lm3533_led *led = container_of(work, struct lm3533_led, work);
+
+ dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness);
+
+ if (led->new_brightness == 0)
+ lm3533_led_pattern_enable(led, 0); /* disable blink */
+
+ lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness);
+}
+
+static void lm3533_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+
+ dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value);
+
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static enum led_brightness lm3533_led_get(struct led_classdev *cdev)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_brightness(&led->cb, &val);
+ if (ret)
+ return ret;
+
+ dev_dbg(led->cdev.dev, "%s - %u\n", __func__, val);
+
+ return val;
+}
+
+/* Pattern generator defines (delays in us). */
+#define LM3533_LED_DELAY1_VMIN 0x00
+#define LM3533_LED_DELAY2_VMIN 0x3d
+#define LM3533_LED_DELAY3_VMIN 0x80
+
+#define LM3533_LED_DELAY1_VMAX (LM3533_LED_DELAY2_VMIN - 1)
+#define LM3533_LED_DELAY2_VMAX (LM3533_LED_DELAY3_VMIN - 1)
+#define LM3533_LED_DELAY3_VMAX 0xff
+
+#define LM3533_LED_DELAY1_TMIN 16384U
+#define LM3533_LED_DELAY2_TMIN 1130496U
+#define LM3533_LED_DELAY3_TMIN 10305536U
+
+#define LM3533_LED_DELAY1_TMAX 999424U
+#define LM3533_LED_DELAY2_TMAX 9781248U
+#define LM3533_LED_DELAY3_TMAX 76890112U
+
+/* t_step = (t_max - t_min) / (v_max - v_min) */
+#define LM3533_LED_DELAY1_TSTEP 16384
+#define LM3533_LED_DELAY2_TSTEP 131072
+#define LM3533_LED_DELAY3_TSTEP 524288
+
+/* Delay limits for hardware accelerated blinking (in ms). */
+#define LM3533_LED_DELAY_ON_MAX \
+ ((LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY2_TSTEP / 2) / 1000)
+#define LM3533_LED_DELAY_OFF_MAX \
+ ((LM3533_LED_DELAY3_TMAX + LM3533_LED_DELAY3_TSTEP / 2) / 1000)
+
+/*
+ * Returns linear map of *t from [t_min,t_max] to [v_min,v_max] with a step
+ * size of t_step, where
+ *
+ * t_step = (t_max - t_min) / (v_max - v_min)
+ *
+ * and updates *t to reflect the mapped value.
+ */
+static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step,
+ u8 v_min, u8 v_max)
+{
+ unsigned val;
+
+ val = (*t + t_step / 2 - t_min) / t_step + v_min;
+
+ *t = t_step * (val - v_min) + t_min;
+
+ return (u8)val;
+}
+
+/*
+ * Returns time code corresponding to *delay (in ms) and updates *delay to
+ * reflect actual hardware delay.
+ *
+ * Hardware supports 256 discrete delay times, divided into three groups with
+ * the following ranges and step-sizes:
+ *
+ * [ 16, 999] [0x00, 0x3e] step 16 ms
+ * [ 1130, 9781] [0x3d, 0x7f] step 131 ms
+ * [10306, 76890] [0x80, 0xff] step 524 ms
+ *
+ * Note that delay group 3 is only available for delay_off.
+ */
+static u8 lm3533_led_get_hw_delay(unsigned *delay)
+{
+ unsigned t;
+ u8 val;
+
+ t = *delay * 1000;
+
+ if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) {
+ t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY3_TMIN,
+ LM3533_LED_DELAY3_TSTEP,
+ LM3533_LED_DELAY3_VMIN,
+ LM3533_LED_DELAY3_VMAX);
+ } else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) {
+ t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY2_TMIN,
+ LM3533_LED_DELAY2_TSTEP,
+ LM3533_LED_DELAY2_VMIN,
+ LM3533_LED_DELAY2_VMAX);
+ } else {
+ t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY1_TMIN,
+ LM3533_LED_DELAY1_TSTEP,
+ LM3533_LED_DELAY1_VMIN,
+ LM3533_LED_DELAY1_VMAX);
+ }
+
+ *delay = (t + 500) / 1000;
+
+ return val;
+}
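As a quick check of the mapping above, using only the constants defined in this file: the 500 ms default on-time chosen by lm3533_led_blink_set (further below) falls in delay group 1 and rounds to the nearest 16384 us step:

    t      = 500 * 1000 = 500000 us
    val    = (500000 + 16384 / 2 - 16384) / 16384 + 0x00 = 30 = 0x1e
    t      = 16384 * 30 + 16384 = 507904 us
    *delay = (507904 + 500) / 1000 = 508 ms reported back as the actual delay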
+
+/*
+ * Set delay register base to *delay (in ms) and update *delay to reflect
+ * actual hardware delay used.
+ */
+static int lm3533_led_delay_set(struct lm3533_led *led, u8 base,
+ unsigned long *delay)
+{
+ unsigned t;
+ u8 val;
+ u8 reg;
+ int ret;
+
+ t = (unsigned)*delay;
+
+ /* Delay group 3 is only available for low time (delay off). */
+ if (base != LM3533_REG_PATTERN_LOW_TIME_BASE)
+ t = min(t, LM3533_LED_DELAY2_TMAX / 1000);
+
+ val = lm3533_led_get_hw_delay(&t);
+
+ dev_dbg(led->cdev.dev, "%s - %lu: %u (0x%02x)\n", __func__,
+ *delay, t, val);
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_write(led->lm3533, reg, val);
+ if (ret)
+ dev_err(led->cdev.dev, "failed to set delay (%02x)\n", reg);
+
+ *delay = t;
+
+ return ret;
+}
+
+static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t)
+{
+ return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t);
+}
+
+static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t)
+{
+ return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t);
+}
+
+static int lm3533_led_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+ int ret;
+
+ dev_dbg(led->cdev.dev, "%s - on = %lu, off = %lu\n", __func__,
+ *delay_on, *delay_off);
+
+ if (*delay_on > LM3533_LED_DELAY_ON_MAX ||
+ *delay_off > LM3533_LED_DELAY_OFF_MAX)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ ret = lm3533_led_delay_on_set(led, delay_on);
+ if (ret)
+ return ret;
+
+ ret = lm3533_led_delay_off_set(led, delay_off);
+ if (ret)
+ return ret;
+
+ return lm3533_led_pattern_enable(led, 1);
+}
+
+static ssize_t show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", led->id);
+}
+
+/*
+ * Pattern generator rise/fall times:
+ *
+ * 0 - 2048 us (default)
+ * 1 - 262 ms
+ * 2 - 524 ms
+ * 3 - 1.049 s
+ * 4 - 2.097 s
+ * 5 - 4.194 s
+ * 6 - 8.389 s
+ * 7 - 16.78 s
+ */
+static ssize_t show_risefalltime(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, u8 base)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ ssize_t ret;
+ u8 reg;
+ u8 val;
+
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t show_risetime(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_risefalltime(dev, attr, buf,
+ LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t show_falltime(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_risefalltime(dev, attr, buf,
+ LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t store_risefalltime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, u8 base)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ u8 reg;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val) || val > LM3533_RISEFALLTIME_MAX)
+ return -EINVAL;
+
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_write(led->lm3533, reg, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t store_risetime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_risefalltime(dev, attr, buf, len,
+ LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t store_falltime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_risefalltime(dev, attr, buf, len,
+ LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned channel;
+ u8 reg;
+ u8 val;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t store_als_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned channel;
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int ret;
+
+ if (kstrtouint(buf, 0, &channel))
+ return -EINVAL;
+
+ if (channel < LM3533_ALS_CHANNEL_LV_MIN ||
+ channel > LM3533_ALS_CHANNEL_LV_MAX)
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK;
+ val = channel - 1;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_als_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ bool enable;
+ u8 reg;
+ u8 val;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned enable;
+ u8 reg;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtouint(buf, 0, &enable))
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 reg;
+ u8 val;
+ int linear;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ if (val & LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK)
+ linear = 1;
+ else
+ linear = 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned long linear;
+ u8 reg;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &linear))
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK;
+
+ if (linear)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static LM3533_ATTR_RW(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RW(falltime);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+static LM3533_ATTR_RW(risetime);
+
+static struct attribute *lm3533_led_attributes[] = {
+ &dev_attr_als_channel.attr,
+ &dev_attr_als_en.attr,
+ &dev_attr_falltime.attr,
+ &dev_attr_id.attr,
+ &dev_attr_linear.attr,
+ &dev_attr_pwm.attr,
+ &dev_attr_risetime.attr,
+ NULL,
+};
+
+static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_als_channel.attr ||
+ attr == &dev_attr_als_en.attr) {
+ if (!led->lm3533->have_als)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute_group lm3533_led_attribute_group = {
+ .is_visible = lm3533_led_attr_is_visible,
+ .attrs = lm3533_led_attributes
+};
+
+static int __devinit lm3533_led_setup(struct lm3533_led *led,
+ struct lm3533_led_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current);
+ if (ret)
+ return ret;
+
+ return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_led_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_led_platform_data *pdata;
+ struct lm3533_led *led;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdev->id < 0 || pdev->id >= LM3533_LVCTRLBANK_COUNT) {
+ dev_err(&pdev->dev, "illegal LED id %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->lm3533 = lm3533;
+ led->cdev.name = pdata->name;
+ led->cdev.default_trigger = pdata->default_trigger;
+ led->cdev.brightness_set = lm3533_led_set;
+ led->cdev.brightness_get = lm3533_led_get;
+ led->cdev.blink_set = lm3533_led_blink_set;
+ led->cdev.brightness = LED_OFF;
+ led->id = pdev->id;
+
+ mutex_init(&led->mutex);
+ INIT_WORK(&led->work, lm3533_led_work);
+
+ /* The class framework makes a callback to get brightness during
+ * registration so use parent device (for error reporting) until
+ * registered.
+ */
+ led->cb.lm3533 = lm3533;
+ led->cb.id = lm3533_led_get_ctrlbank_id(led);
+ led->cb.dev = lm3533->dev;
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
+ return ret;
+ }
+
+ led->cb.dev = led->cdev.dev;
+
+ ret = sysfs_create_group(&led->cdev.dev->kobj,
+ &lm3533_led_attribute_group);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ ret = lm3533_led_setup(led, pdata);
+ if (ret)
+ goto err_sysfs_remove;
+
+ ret = lm3533_ctrlbank_enable(&led->cb);
+ if (ret)
+ goto err_sysfs_remove;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+err_unregister:
+ led_classdev_unregister(&led->cdev);
+ flush_work_sync(&led->work);
+
+ return ret;
+}
+
+static int __devexit lm3533_led_remove(struct platform_device *pdev)
+{
+ struct lm3533_led *led = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
+ sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+ led_classdev_unregister(&led->cdev);
+ flush_work_sync(&led->work);
+
+ return 0;
+}
+
+static void lm3533_led_shutdown(struct platform_device *pdev)
+{
+
+ struct lm3533_led *led = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
+ lm3533_led_set(&led->cdev, LED_OFF); /* disable blink */
+ flush_work_sync(&led->work);
+}
+
+static struct platform_driver lm3533_led_driver = {
+ .driver = {
+ .name = "lm3533-leds",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm3533_led_probe,
+ .remove = __devexit_p(lm3533_led_remove),
+ .shutdown = lm3533_led_shutdown,
+};
+module_platform_driver(lm3533_led_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-leds");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 410a723b8691..23815624f35e 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -193,9 +193,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
/* move current engine to direct mode and remember the state */
ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+ if (ret)
+ return ret;
+
/* Mode change requires min 500 us delay. 1 - 2 ms with margin */
usleep_range(1000, 2000);
- ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+ ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+ if (ret)
+ return ret;
/* For loading, all the engines to load mode */
lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
@@ -211,8 +216,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
LP5521_PROG_MEM_SIZE,
pattern);
- ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
- return ret;
+ return lp5521_write(client, LP5521_REG_OP_MODE, mode);
}
static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
@@ -785,7 +789,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
* LP5521_REG_ENABLE register will not have any effect - strange!
*/
ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (buf != LP5521_REG_R_CURR_DEFAULT) {
+ if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
dev_err(&client->dev, "error in resetting chip\n");
goto fail2;
}
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 8bc491541550..4cc6a2e3df34 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
dev_err(&pdev->dev, "failed to alloc memory\n");
return -ENOMEM;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index dcc3bc3d38db..5f462dbf0dbb 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -101,11 +101,16 @@ static const struct i2c_device_id pca955x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
-struct pca955x_led {
+struct pca955x {
+ struct mutex lock;
+ struct pca955x_led *leds;
struct pca955x_chipdef *chipdef;
struct i2c_client *client;
+};
+
+struct pca955x_led {
+ struct pca955x *pca955x;
struct work_struct work;
- spinlock_t lock;
enum led_brightness brightness;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
@@ -140,7 +145,7 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
*/
static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
@@ -156,7 +161,7 @@ static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
*/
static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
@@ -169,7 +174,7 @@ static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
*/
static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
@@ -182,7 +187,7 @@ static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
*/
static u8 pca955x_read_ls(struct i2c_client *client, int n)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
return (u8) i2c_smbus_read_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
@@ -190,18 +195,23 @@ static u8 pca955x_read_ls(struct i2c_client *client, int n)
static void pca955x_led_work(struct work_struct *work)
{
- struct pca955x_led *pca955x;
+ struct pca955x_led *pca955x_led;
+ struct pca955x *pca955x;
u8 ls;
int chip_ls; /* which LSx to use (0-3 potentially) */
int ls_led; /* which set of bits within LSx to use (0-3) */
- pca955x = container_of(work, struct pca955x_led, work);
- chip_ls = pca955x->led_num / 4;
- ls_led = pca955x->led_num % 4;
+ pca955x_led = container_of(work, struct pca955x_led, work);
+ pca955x = pca955x_led->pca955x;
+
+ chip_ls = pca955x_led->led_num / 4;
+ ls_led = pca955x_led->led_num % 4;
+
+ mutex_lock(&pca955x->lock);
ls = pca955x_read_ls(pca955x->client, chip_ls);
- switch (pca955x->brightness) {
+ switch (pca955x_led->brightness) {
case LED_FULL:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
break;
@@ -219,12 +229,15 @@ static void pca955x_led_work(struct work_struct *work)
* OFF, HALF, or FULL. But, this is probably better than
* just turning off for all other values.
*/
- pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
+ pca955x_write_pwm(pca955x->client, 1,
+ 255 - pca955x_led->brightness);
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
break;
}
pca955x_write_ls(pca955x->client, chip_ls, ls);
+
+ mutex_unlock(&pca955x->lock);
}
static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
@@ -233,7 +246,6 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
- spin_lock(&pca955x->lock);
pca955x->brightness = value;
/*
@@ -241,14 +253,13 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
* can sleep.
*/
schedule_work(&pca955x->work);
-
- spin_unlock(&pca955x->lock);
}
static int __devinit pca955x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pca955x_led *pca955x;
+ struct pca955x *pca955x;
+ struct pca955x_led *pca955x_led;
struct pca955x_chipdef *chip;
struct i2c_adapter *adapter;
struct led_platform_data *pdata;
@@ -282,39 +293,48 @@ static int __devinit pca955x_probe(struct i2c_client *client,
}
}
- pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
+ pca955x = kzalloc(sizeof(*pca955x), GFP_KERNEL);
if (!pca955x)
return -ENOMEM;
+ pca955x->leds = kzalloc(sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
+ if (!pca955x->leds) {
+ err = -ENOMEM;
+ goto exit_nomem;
+ }
+
i2c_set_clientdata(client, pca955x);
+ mutex_init(&pca955x->lock);
+ pca955x->client = client;
+ pca955x->chipdef = chip;
+
for (i = 0; i < chip->bits; i++) {
- pca955x[i].chipdef = chip;
- pca955x[i].client = client;
- pca955x[i].led_num = i;
+ pca955x_led = &pca955x->leds[i];
+ pca955x_led->led_num = i;
+ pca955x_led->pca955x = pca955x;
/* Platform data can specify LED names and default triggers */
if (pdata) {
if (pdata->leds[i].name)
- snprintf(pca955x[i].name,
- sizeof(pca955x[i].name), "pca955x:%s",
- pdata->leds[i].name);
+ snprintf(pca955x_led->name,
+ sizeof(pca955x_led->name), "pca955x:%s",
+ pdata->leds[i].name);
if (pdata->leds[i].default_trigger)
- pca955x[i].led_cdev.default_trigger =
+ pca955x_led->led_cdev.default_trigger =
pdata->leds[i].default_trigger;
} else {
- snprintf(pca955x[i].name, sizeof(pca955x[i].name),
+ snprintf(pca955x_led->name, sizeof(pca955x_led->name),
"pca955x:%d", i);
}
- spin_lock_init(&pca955x[i].lock);
-
- pca955x[i].led_cdev.name = pca955x[i].name;
- pca955x[i].led_cdev.brightness_set = pca955x_led_set;
+ pca955x_led->led_cdev.name = pca955x_led->name;
+ pca955x_led->led_cdev.brightness_set = pca955x_led_set;
- INIT_WORK(&pca955x[i].work, pca955x_led_work);
+ INIT_WORK(&pca955x_led->work, pca955x_led_work);
- err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
+ err = led_classdev_register(&client->dev,
+ &pca955x_led->led_cdev);
if (err < 0)
goto exit;
}
@@ -337,10 +357,12 @@ static int __devinit pca955x_probe(struct i2c_client *client,
exit:
while (i--) {
- led_classdev_unregister(&pca955x[i].led_cdev);
- cancel_work_sync(&pca955x[i].work);
+ led_classdev_unregister(&pca955x->leds[i].led_cdev);
+ cancel_work_sync(&pca955x->leds[i].work);
}
+ kfree(pca955x->leds);
+exit_nomem:
kfree(pca955x);
return err;
@@ -348,14 +370,15 @@ exit:
static int __devexit pca955x_remove(struct i2c_client *client)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
int i;
for (i = 0; i < pca955x->chipdef->bits; i++) {
- led_classdev_unregister(&pca955x[i].led_cdev);
- cancel_work_sync(&pca955x[i].work);
+ led_classdev_unregister(&pca955x->leds[i].led_cdev);
+ cancel_work_sync(&pca955x->leds[i].work);
}
+ kfree(pca955x->leds);
kfree(pca955x);
return 0;
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index 2b513a2ad7de..e2726867c5d4 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -120,6 +120,7 @@ static void bl_trig_activate(struct led_classdev *led)
ret = fb_register_client(&n->notifier);
if (ret)
dev_err(led->dev, "unable to register backlight trigger\n");
+ led->activated = true;
return;
@@ -133,10 +134,11 @@ static void bl_trig_deactivate(struct led_classdev *led)
struct bl_trig_notifier *n =
(struct bl_trig_notifier *) led->trigger_data;
- if (n) {
+ if (led->activated) {
device_remove_file(led->dev, &dev_attr_inverted);
fb_unregister_client(&n->notifier);
kfree(n);
+ led->activated = false;
}
}
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ecc4bf3f37a9..f057c101b896 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -200,6 +200,7 @@ static void gpio_trig_activate(struct led_classdev *led)
gpio_data->led = led;
led->trigger_data = gpio_data;
INIT_WORK(&gpio_data->work, gpio_trig_work);
+ led->activated = true;
return;
@@ -217,7 +218,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data = led->trigger_data;
- if (gpio_data) {
+ if (led->activated) {
device_remove_file(led->dev, &dev_attr_gpio);
device_remove_file(led->dev, &dev_attr_inverted);
device_remove_file(led->dev, &dev_attr_desired_brightness);
@@ -225,6 +226,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
+ led->activated = false;
}
}
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
index 759c0bba4a8f..a019fbb70880 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -18,8 +18,11 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/leds.h>
+#include <linux/reboot.h>
#include "leds.h"
+static int panic_heartbeats;
+
struct heartbeat_trig_data {
unsigned int phase;
unsigned int period;
@@ -33,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
unsigned long brightness = LED_OFF;
unsigned long delay = 0;
+ if (unlikely(panic_heartbeats)) {
+ led_set_brightness(led_cdev, LED_OFF);
+ return;
+ }
+
/* acts like an actual heart beat -- ie thump-thump-pause... */
switch (heartbeat_data->phase) {
case 0:
@@ -83,15 +91,17 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
led_heartbeat_function, (unsigned long) led_cdev);
heartbeat_data->phase = 0;
led_heartbeat_function(heartbeat_data->timer.data);
+ led_cdev->activated = true;
}
static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
{
struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
- if (heartbeat_data) {
+ if (led_cdev->activated) {
del_timer_sync(&heartbeat_data->timer);
kfree(heartbeat_data);
+ led_cdev->activated = false;
}
}
@@ -101,13 +111,45 @@ static struct led_trigger heartbeat_led_trigger = {
.deactivate = heartbeat_trig_deactivate,
};
+static int heartbeat_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ led_trigger_unregister(&heartbeat_led_trigger);
+ return NOTIFY_DONE;
+}
+
+static int heartbeat_panic_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ panic_heartbeats = 1;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block heartbeat_reboot_nb = {
+ .notifier_call = heartbeat_reboot_notifier,
+};
+
+static struct notifier_block heartbeat_panic_nb = {
+ .notifier_call = heartbeat_panic_notifier,
+};
+
static int __init heartbeat_trig_init(void)
{
- return led_trigger_register(&heartbeat_led_trigger);
+ int rc = led_trigger_register(&heartbeat_led_trigger);
+
+ if (!rc) {
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &heartbeat_panic_nb);
+ register_reboot_notifier(&heartbeat_reboot_nb);
+ }
+ return rc;
}
static void __exit heartbeat_trig_exit(void)
{
+ unregister_reboot_notifier(&heartbeat_reboot_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &heartbeat_panic_nb);
led_trigger_unregister(&heartbeat_led_trigger);
}
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 328c64c0841c..9010f7abaf2c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -31,21 +31,17 @@ static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- int ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
-
- if (isspace(*after))
- count++;
-
- if (count == size) {
- led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
- led_cdev->blink_delay_on = state;
- ret = count;
- }
+ unsigned long state;
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
- return ret;
+ led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+ led_cdev->blink_delay_on = state;
+
+ return size;
}
static ssize_t led_delay_off_show(struct device *dev,
@@ -60,21 +56,17 @@ static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- int ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
-
- if (isspace(*after))
- count++;
-
- if (count == size) {
- led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
- led_cdev->blink_delay_off = state;
- ret = count;
- }
+ unsigned long state;
+ ssize_t ret = -EINVAL;
- return ret;
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+ led_cdev->blink_delay_off = state;
+
+ return size;
}
static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
@@ -95,8 +87,7 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
-
- led_cdev->trigger_data = (void *)1;
+ led_cdev->activated = true;
return;
@@ -106,9 +97,10 @@ err_out_delayon:
static void timer_trig_deactivate(struct led_classdev *led_cdev)
{
- if (led_cdev->trigger_data) {
+ if (led_cdev->activated) {
device_remove_file(led_cdev->dev, &dev_attr_delay_on);
device_remove_file(led_cdev->dev, &dev_attr_delay_off);
+ led_cdev->activated = false;
}
/* Stop blinking */
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/ledtrig-transient.c
new file mode 100644
index 000000000000..83179f435e1e
--- /dev/null
+++ b/drivers/leds/ledtrig-transient.c
@@ -0,0 +1,237 @@
+/*
+ * LED Kernel Transient Trigger
+ *
+ * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+ *
+ * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+ * ledtrig-heartbeat.c
+ * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+ * Neil Brown <neilb@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+/*
+ * Transient trigger allows one-shot timer activation. Please refer to
+ * Documentation/leds/ledtrig-transient.txt for details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+struct transient_trig_data {
+ int activate;
+ int state;
+ int restore_state;
+ unsigned long duration;
+ struct timer_list timer;
+};
+
+static void transient_timer_function(unsigned long data)
+{
+ struct led_classdev *led_cdev = (struct led_classdev *) data;
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ transient_data->activate = 0;
+ led_set_brightness(led_cdev, transient_data->restore_state);
+}
+
+static ssize_t transient_activate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%d\n", transient_data->activate);
+}
+
+static ssize_t transient_activate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (state != 1 && state != 0)
+ return -EINVAL;
+
+ /* cancel the running timer */
+ if (state == 0 && transient_data->activate == 1) {
+ del_timer(&transient_data->timer);
+ transient_data->activate = state;
+ led_set_brightness(led_cdev, transient_data->restore_state);
+ return size;
+ }
+
+ /* start timer if there is no active timer */
+ if (state == 1 && transient_data->activate == 0 &&
+ transient_data->duration != 0) {
+ transient_data->activate = state;
+ led_set_brightness(led_cdev, transient_data->state);
+ transient_data->restore_state =
+ (transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
+ mod_timer(&transient_data->timer,
+ jiffies + transient_data->duration);
+ }
+
+ /*
+ * state == 0 && transient_data->activate == 0:
+ * timer is not active - just return.
+ * state == 1 && transient_data->activate == 1:
+ * timer is already active - just return.
+ */
+
+ return size;
+}
+
+static ssize_t transient_duration_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%lu\n", transient_data->duration);
+}
+
+static ssize_t transient_duration_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ transient_data->duration = state;
+ return size;
+}
+
+static ssize_t transient_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ int state;
+
+ state = (transient_data->state == LED_FULL) ? 1 : 0;
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t transient_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (state != 1 && state != 0)
+ return -EINVAL;
+
+ transient_data->state = (state == 1) ? LED_FULL : LED_OFF;
+ return size;
+}
+
+static DEVICE_ATTR(activate, 0644, transient_activate_show,
+ transient_activate_store);
+static DEVICE_ATTR(duration, 0644, transient_duration_show,
+ transient_duration_store);
+static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
+
+static void transient_trig_activate(struct led_classdev *led_cdev)
+{
+ int rc;
+ struct transient_trig_data *tdata;
+
+ tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
+ if (!tdata) {
+ dev_err(led_cdev->dev,
+ "unable to allocate transient trigger\n");
+ return;
+ }
+ led_cdev->trigger_data = tdata;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_activate);
+ if (rc)
+ goto err_out;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_duration);
+ if (rc)
+ goto err_out_duration;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_state);
+ if (rc)
+ goto err_out_state;
+
+ setup_timer(&tdata->timer, transient_timer_function,
+ (unsigned long) led_cdev);
+ led_cdev->activated = true;
+
+ return;
+
+err_out_state:
+ device_remove_file(led_cdev->dev, &dev_attr_duration);
+err_out_duration:
+ device_remove_file(led_cdev->dev, &dev_attr_activate);
+err_out:
+ dev_err(led_cdev->dev, "unable to register transient trigger\n");
+ led_cdev->trigger_data = NULL;
+ kfree(tdata);
+}
+
+static void transient_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ if (led_cdev->activated) {
+ del_timer_sync(&transient_data->timer);
+ led_set_brightness(led_cdev, transient_data->restore_state);
+ device_remove_file(led_cdev->dev, &dev_attr_activate);
+ device_remove_file(led_cdev->dev, &dev_attr_duration);
+ device_remove_file(led_cdev->dev, &dev_attr_state);
+ led_cdev->trigger_data = NULL;
+ led_cdev->activated = false;
+ kfree(transient_data);
+ }
+}
+
+static struct led_trigger transient_trigger = {
+ .name = "transient",
+ .activate = transient_trig_activate,
+ .deactivate = transient_trig_deactivate,
+};
+
+static int __init transient_trig_init(void)
+{
+ return led_trigger_register(&transient_trigger);
+}
+
+static void __exit transient_trig_exit(void)
+{
+ led_trigger_unregister(&transient_trigger);
+}
+
+module_init(transient_trig_init);
+module_exit(transient_trig_exit);
+
+MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
+MODULE_DESCRIPTION("Transient LED trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index fa51af11c6f1..a555da64224e 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -204,11 +204,14 @@ config THERM_ADT746X
better fan behaviour by default, and some manual control.
config THERM_PM72
- tristate "Support for thermal management on PowerMac G5"
+ tristate "Support for thermal management on PowerMac G5 (AGP)"
depends on I2C && I2C_POWERMAC && PPC_PMAC64
+ default n
help
This driver provides thermostat and fan control for the desktop
- G5 machines.
+ G5 machines.
+
+ This is deprecated, use windfarm instead.
config WINDFARM
tristate "New PowerMac thermal control infrastructure"
@@ -221,6 +224,22 @@ config WINDFARM_PM81
help
This driver provides thermal control for the iMacG5
+config WINDFARM_PM72
+ tristate "Support for thermal management on PowerMac G5 (AGP)"
+ depends on WINDFARM && I2C && CPU_FREQ_PMAC64 && ADB_PMU
+ select I2C_POWERMAC
+ help
+ This driver provides thermal control for the PowerMac G5
+ "AGP" variants (PowerMac 7,2 and 7,3)
+
+config WINDFARM_RM31
+ tristate "Support for thermal management on Xserve G5"
+ depends on WINDFARM && I2C && CPU_FREQ_PMAC64 && ADB_PMU
+ select I2C_POWERMAC
+ help
+ This driver provides thermal control for the Xserve G5
+ (RackMac3,1)
+
config WINDFARM_PM91
tristate "Support for thermal management on PowerMac9,1"
depends on WINDFARM && I2C && CPU_FREQ_PMAC64 && PMAC_SMU
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 6652a6ebb6fa..6753b65f8ede 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -29,6 +29,20 @@ obj-$(CONFIG_THERM_PM72) += therm_pm72.o
obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o
obj-$(CONFIG_THERM_ADT746X) += therm_adt746x.o
obj-$(CONFIG_WINDFARM) += windfarm_core.o
+obj-$(CONFIG_WINDFARM_PM72) += windfarm_fcu_controls.o \
+ windfarm_ad7417_sensor.o \
+ windfarm_lm75_sensor.o \
+ windfarm_max6690_sensor.o \
+ windfarm_pid.o \
+ windfarm_cpufreq_clamp.o \
+ windfarm_pm72.o
+obj-$(CONFIG_WINDFARM_RM31) += windfarm_fcu_controls.o \
+ windfarm_ad7417_sensor.o \
+ windfarm_lm75_sensor.o \
+ windfarm_lm87_sensor.o \
+ windfarm_pid.o \
+ windfarm_cpufreq_clamp.o \
+ windfarm_rm31.o
obj-$(CONFIG_WINDFARM_PM81) += windfarm_smu_controls.o \
windfarm_smu_sensors.o \
windfarm_lm75_sensor.o windfarm_pid.o \
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index abeecd27b484..978eda8d6678 100644
--- a/drivers/macintosh/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
@@ -65,7 +65,7 @@ static int ams_i2c_probe(struct i2c_client *client,
static int ams_i2c_remove(struct i2c_client *client);
static const struct i2c_device_id ams_id[] = {
- { "ams", 0 },
+ { "MAC,accelerometer_1", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ams_id);
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index fc71723cbc48..f433521a6f9d 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -47,7 +47,7 @@ static u8 FAN_SPD_SET[2] = {0x30, 0x31};
static u8 default_limits_local[3] = {70, 50, 70}; /* local, sensor1, sensor2 */
static u8 default_limits_chip[3] = {80, 65, 80}; /* local, sensor1, sensor2 */
-static const char *sensor_location[3];
+static const char *sensor_location[3] = { "?", "?", "?" };
static int limit_adjust;
static int fan_speed = -1;
@@ -79,18 +79,16 @@ struct thermostat {
int last_speed[2];
int last_var[2];
int pwm_inv[2];
+ struct task_struct *thread;
+ struct platform_device *pdev;
+ enum {
+ ADT7460,
+ ADT7467
+ } type;
};
-static enum {ADT7460, ADT7467} therm_type;
-static int therm_bus, therm_address;
-static struct platform_device * of_dev;
-static struct thermostat* thermostat;
-static struct task_struct *thread_therm = NULL;
-
static void write_both_fan_speed(struct thermostat *th, int speed);
static void write_fan_speed(struct thermostat *th, int speed, int fan);
-static void thermostat_create_files(void);
-static void thermostat_remove_files(void);
static int
write_reg(struct thermostat* th, int reg, u8 data)
@@ -126,66 +124,6 @@ read_reg(struct thermostat* th, int reg)
return data;
}
-static struct i2c_driver thermostat_driver;
-
-static int
-attach_thermostat(struct i2c_adapter *adapter)
-{
- unsigned long bus_no;
- struct i2c_board_info info;
- struct i2c_client *client;
-
- if (strncmp(adapter->name, "uni-n", 5))
- return -ENODEV;
- bus_no = simple_strtoul(adapter->name + 6, NULL, 10);
- if (bus_no != therm_bus)
- return -ENODEV;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "therm_adt746x", I2C_NAME_SIZE);
- info.addr = therm_address;
- client = i2c_new_device(adapter, &info);
- if (!client)
- return -ENODEV;
-
- /*
- * Let i2c-core delete that device on driver removal.
- * This is safe because i2c-core holds the core_lock mutex for us.
- */
- list_add_tail(&client->detected, &thermostat_driver.clients);
- return 0;
-}
-
-static int
-remove_thermostat(struct i2c_client *client)
-{
- struct thermostat *th = i2c_get_clientdata(client);
- int i;
-
- thermostat_remove_files();
-
- if (thread_therm != NULL) {
- kthread_stop(thread_therm);
- }
-
- printk(KERN_INFO "adt746x: Putting max temperatures back from "
- "%d, %d, %d to %d, %d, %d\n",
- th->limits[0], th->limits[1], th->limits[2],
- th->initial_limits[0], th->initial_limits[1],
- th->initial_limits[2]);
-
- for (i = 0; i < 3; i++)
- write_reg(th, LIMIT_REG[i], th->initial_limits[i]);
-
- write_both_fan_speed(th, -1);
-
- thermostat = NULL;
-
- kfree(th);
-
- return 0;
-}
-
static int read_fan_speed(struct thermostat *th, u8 addr)
{
u8 tmp[2];
@@ -203,7 +141,7 @@ static int read_fan_speed(struct thermostat *th, u8 addr)
static void write_both_fan_speed(struct thermostat *th, int speed)
{
write_fan_speed(th, speed, 0);
- if (therm_type == ADT7460)
+ if (th->type == ADT7460)
write_fan_speed(th, speed, 1);
}
@@ -216,7 +154,7 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
else if (speed < -1)
speed = 0;
- if (therm_type == ADT7467 && fan == 1)
+ if (th->type == ADT7467 && fan == 1)
return;
if (th->last_speed[fan] != speed) {
@@ -239,7 +177,7 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
write_reg(th, FAN_SPD_SET[fan], speed);
} else {
/* back to automatic */
- if(therm_type == ADT7460) {
+ if(th->type == ADT7460) {
manual = read_reg(th,
MANUAL_MODE[fan]) & (~MANUAL_MASK);
manual &= ~INVERT_MASK;
@@ -293,7 +231,7 @@ static void update_fans_speed (struct thermostat *th)
/* we don't care about local sensor, so we start at sensor 1 */
for (i = 1; i < 3; i++) {
int started = 0;
- int fan_number = (therm_type == ADT7460 && i == 2);
+ int fan_number = (th->type == ADT7460 && i == 2);
int var = th->temps[i] - th->limits[i];
if (var > -1) {
@@ -370,116 +308,22 @@ static int monitor_task(void *arg)
static void set_limit(struct thermostat *th, int i)
{
- /* Set sensor1 limit higher to avoid powerdowns */
- th->limits[i] = default_limits_chip[i] + limit_adjust;
- write_reg(th, LIMIT_REG[i], th->limits[i]);
+ /* Set sensor1 limit higher to avoid powerdowns */
+ th->limits[i] = default_limits_chip[i] + limit_adjust;
+ write_reg(th, LIMIT_REG[i], th->limits[i]);
- /* set our limits to normal */
- th->limits[i] = default_limits_local[i] + limit_adjust;
+ /* set our limits to normal */
+ th->limits[i] = default_limits_local[i] + limit_adjust;
}
-static int probe_thermostat(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct thermostat* th;
- int rc;
- int i;
-
- if (thermostat)
- return 0;
-
- th = kzalloc(sizeof(struct thermostat), GFP_KERNEL);
- if (!th)
- return -ENOMEM;
-
- i2c_set_clientdata(client, th);
- th->clt = client;
-
- rc = read_reg(th, CONFIG_REG);
- if (rc < 0) {
- dev_err(&client->dev, "Thermostat failed to read config!\n");
- kfree(th);
- return -ENODEV;
- }
-
- /* force manual control to start the fan quieter */
- if (fan_speed == -1)
- fan_speed = 64;
-
- if(therm_type == ADT7460) {
- printk(KERN_INFO "adt746x: ADT7460 initializing\n");
- /* The 7460 needs to be started explicitly */
- write_reg(th, CONFIG_REG, 1);
- } else
- printk(KERN_INFO "adt746x: ADT7467 initializing\n");
-
- for (i = 0; i < 3; i++) {
- th->initial_limits[i] = read_reg(th, LIMIT_REG[i]);
- set_limit(th, i);
- }
-
- printk(KERN_INFO "adt746x: Lowering max temperatures from %d, %d, %d"
- " to %d, %d, %d\n",
- th->initial_limits[0], th->initial_limits[1],
- th->initial_limits[2], th->limits[0], th->limits[1],
- th->limits[2]);
-
- thermostat = th;
-
- /* record invert bit status because fw can corrupt it after suspend */
- th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK;
- th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK;
-
- /* be sure to really write fan speed the first time */
- th->last_speed[0] = -2;
- th->last_speed[1] = -2;
- th->last_var[0] = -80;
- th->last_var[1] = -80;
-
- if (fan_speed != -1) {
- /* manual mode, stop fans */
- write_both_fan_speed(th, 0);
- } else {
- /* automatic mode */
- write_both_fan_speed(th, -1);
- }
-
- thread_therm = kthread_run(monitor_task, th, "kfand");
-
- if (thread_therm == ERR_PTR(-ENOMEM)) {
- printk(KERN_INFO "adt746x: Kthread creation failed\n");
- thread_therm = NULL;
- return -ENOMEM;
- }
-
- thermostat_create_files();
-
- return 0;
+#define BUILD_SHOW_FUNC_INT(name, data) \
+static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct thermostat *th = dev_get_drvdata(dev); \
+ return sprintf(buf, "%d\n", data); \
}
-static const struct i2c_device_id therm_adt746x_id[] = {
- { "therm_adt746x", 0 },
- { }
-};
-
-static struct i2c_driver thermostat_driver = {
- .driver = {
- .name = "therm_adt746x",
- },
- .attach_adapter = attach_thermostat,
- .probe = probe_thermostat,
- .remove = remove_thermostat,
- .id_table = therm_adt746x_id,
-};
-
-/*
- * Now, unfortunately, sysfs doesn't give us a nice void * we could
- * pass around to the attribute functions, so we don't really have
- * choice but implement a bunch of them...
- *
- * FIXME, it does now...
- */
-#define BUILD_SHOW_FUNC_INT(name, data) \
+#define BUILD_SHOW_FUNC_INT_LITE(name, data) \
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%d\n", data); \
@@ -494,22 +338,24 @@ static ssize_t show_##name(struct device *dev, struct device_attribute *attr, ch
#define BUILD_SHOW_FUNC_FAN(name, data) \
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
+ struct thermostat *th = dev_get_drvdata(dev); \
return sprintf(buf, "%d (%d rpm)\n", \
- thermostat->last_speed[data], \
- read_fan_speed(thermostat, FAN_SPEED[data]) \
+ th->last_speed[data], \
+ read_fan_speed(th, FAN_SPEED[data]) \
); \
}
#define BUILD_STORE_FUNC_DEG(name, data) \
static ssize_t store_##name(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) \
{ \
+ struct thermostat *th = dev_get_drvdata(dev); \
int val; \
int i; \
val = simple_strtol(buf, NULL, 10); \
printk(KERN_INFO "Adjusting limits by %d degrees\n", val); \
limit_adjust = val; \
for (i=0; i < 3; i++) \
- set_limit(thermostat, i); \
+ set_limit(th, i); \
return n; \
}
@@ -525,20 +371,21 @@ static ssize_t store_##name(struct device *dev, struct device_attribute *attr, c
return n; \
}
-BUILD_SHOW_FUNC_INT(sensor1_temperature, (read_reg(thermostat, TEMP_REG[1])))
-BUILD_SHOW_FUNC_INT(sensor2_temperature, (read_reg(thermostat, TEMP_REG[2])))
-BUILD_SHOW_FUNC_INT(sensor1_limit, thermostat->limits[1])
-BUILD_SHOW_FUNC_INT(sensor2_limit, thermostat->limits[2])
+BUILD_SHOW_FUNC_INT(sensor1_temperature, (read_reg(th, TEMP_REG[1])))
+BUILD_SHOW_FUNC_INT(sensor2_temperature, (read_reg(th, TEMP_REG[2])))
+BUILD_SHOW_FUNC_INT(sensor1_limit, th->limits[1])
+BUILD_SHOW_FUNC_INT(sensor2_limit, th->limits[2])
BUILD_SHOW_FUNC_STR(sensor1_location, sensor_location[1])
BUILD_SHOW_FUNC_STR(sensor2_location, sensor_location[2])
-BUILD_SHOW_FUNC_INT(specified_fan_speed, fan_speed)
+BUILD_SHOW_FUNC_INT_LITE(specified_fan_speed, fan_speed)
+BUILD_STORE_FUNC_INT(specified_fan_speed,fan_speed)
+
BUILD_SHOW_FUNC_FAN(sensor1_fan_speed, 0)
BUILD_SHOW_FUNC_FAN(sensor2_fan_speed, 1)
-BUILD_STORE_FUNC_INT(specified_fan_speed,fan_speed)
-BUILD_SHOW_FUNC_INT(limit_adjust, limit_adjust)
-BUILD_STORE_FUNC_DEG(limit_adjust, thermostat)
+BUILD_SHOW_FUNC_INT_LITE(limit_adjust, limit_adjust)
+BUILD_STORE_FUNC_DEG(limit_adjust, th)
static DEVICE_ATTR(sensor1_temperature, S_IRUGO,
show_sensor1_temperature,NULL);
@@ -564,53 +411,77 @@ static DEVICE_ATTR(sensor2_fan_speed, S_IRUGO,
static DEVICE_ATTR(limit_adjust, S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH,
show_limit_adjust, store_limit_adjust);
-
-static int __init
-thermostat_init(void)
+static void thermostat_create_files(struct thermostat *th)
{
- struct device_node* np;
- const u32 *prop;
- int i = 0, offset = 0;
+ struct device_node *np = th->clt->dev.of_node;
+ struct device *dev;
+ int err;
- np = of_find_node_by_name(NULL, "fan");
- if (!np)
- return -ENODEV;
- if (of_device_is_compatible(np, "adt7460"))
- therm_type = ADT7460;
- else if (of_device_is_compatible(np, "adt7467"))
- therm_type = ADT7467;
- else {
- of_node_put(np);
- return -ENODEV;
- }
+ /* To maintain ABI compatibility with userspace, create
+ * the old style platform driver and attach the attributes
+ * to it here
+ */
+ th->pdev = of_platform_device_create(np, "temperatures", NULL);
+ if (!th->pdev)
+ return;
+ dev = &th->pdev->dev;
+ dev_set_drvdata(dev, th);
+ err = device_create_file(dev, &dev_attr_sensor1_temperature);
+ err |= device_create_file(dev, &dev_attr_sensor2_temperature);
+ err |= device_create_file(dev, &dev_attr_sensor1_limit);
+ err |= device_create_file(dev, &dev_attr_sensor2_limit);
+ err |= device_create_file(dev, &dev_attr_sensor1_location);
+ err |= device_create_file(dev, &dev_attr_sensor2_location);
+ err |= device_create_file(dev, &dev_attr_limit_adjust);
+ err |= device_create_file(dev, &dev_attr_specified_fan_speed);
+ err |= device_create_file(dev, &dev_attr_sensor1_fan_speed);
+ if(th->type == ADT7460)
+ err |= device_create_file(dev, &dev_attr_sensor2_fan_speed);
+ if (err)
+ printk(KERN_WARNING
+ "Failed to create temperature attribute file(s).\n");
+}
- prop = of_get_property(np, "hwsensor-params-version", NULL);
- printk(KERN_INFO "adt746x: version %d (%ssupported)\n", *prop,
- (*prop == 1)?"":"un");
- if (*prop != 1) {
- of_node_put(np);
- return -ENODEV;
- }
+static void thermostat_remove_files(struct thermostat *th)
+{
+ struct device *dev;
- prop = of_get_property(np, "reg", NULL);
- if (!prop) {
- of_node_put(np);
- return -ENODEV;
- }
+ if (!th->pdev)
+ return;
+ dev = &th->pdev->dev;
+ device_remove_file(dev, &dev_attr_sensor1_temperature);
+ device_remove_file(dev, &dev_attr_sensor2_temperature);
+ device_remove_file(dev, &dev_attr_sensor1_limit);
+ device_remove_file(dev, &dev_attr_sensor2_limit);
+ device_remove_file(dev, &dev_attr_sensor1_location);
+ device_remove_file(dev, &dev_attr_sensor2_location);
+ device_remove_file(dev, &dev_attr_limit_adjust);
+ device_remove_file(dev, &dev_attr_specified_fan_speed);
+ device_remove_file(dev, &dev_attr_sensor1_fan_speed);
+ if (th->type == ADT7460)
+ device_remove_file(dev, &dev_attr_sensor2_fan_speed);
+ of_device_unregister(th->pdev);
- /* look for bus either by path or using "reg" */
- if (strstr(np->full_name, "/i2c-bus@") != NULL) {
- const char *tmp_bus = (strstr(np->full_name, "/i2c-bus@") + 9);
- therm_bus = tmp_bus[0]-'0';
- } else {
- therm_bus = ((*prop) >> 8) & 0x0f;
- }
+}
- therm_address = ((*prop) & 0xff) >> 1;
+static int probe_thermostat(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device_node *np = client->dev.of_node;
+ struct thermostat* th;
+ const __be32 *prop;
+ int i, rc, vers, offset = 0;
- printk(KERN_INFO "adt746x: Thermostat bus: %d, address: 0x%02x, "
- "limit_adjust: %d, fan_speed: %d\n",
- therm_bus, therm_address, limit_adjust, fan_speed);
+ if (!np)
+ return -ENXIO;
+ prop = of_get_property(np, "hwsensor-params-version", NULL);
+ if (!prop)
+ return -ENXIO;
+ vers = be32_to_cpup(prop);
+ printk(KERN_INFO "adt746x: version %d (%ssupported)\n",
+ vers, vers == 1 ? "" : "un");
+ if (vers != 1)
+ return -ENXIO;
if (of_get_property(np, "hwsensor-location", NULL)) {
for (i = 0; i < 3; i++) {
@@ -623,72 +494,129 @@ thermostat_init(void)
printk(KERN_INFO "sensor %d: %s\n", i, sensor_location[i]);
offset += strlen(sensor_location[i]) + 1;
}
- } else {
- sensor_location[0] = "?";
- sensor_location[1] = "?";
- sensor_location[2] = "?";
}
- of_dev = of_platform_device_create(np, "temperatures", NULL);
- of_node_put(np);
+ th = kzalloc(sizeof(struct thermostat), GFP_KERNEL);
+ if (!th)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, th);
+ th->clt = client;
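+ /* driver_data in the i2c_device_id table below selects the
+ * chip type (ADT7460 vs ADT7467)
+ */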
+ th->type = id->driver_data;
- if (of_dev == NULL) {
- printk(KERN_ERR "Can't register temperatures device !\n");
+ rc = read_reg(th, CONFIG_REG);
+ if (rc < 0) {
+ dev_err(&client->dev, "Thermostat failed to read config!\n");
+ kfree(th);
return -ENODEV;
}
-#ifndef CONFIG_I2C_POWERMAC
- request_module("i2c-powermac");
-#endif
+ /* force manual control to start the fan quieter */
+ if (fan_speed == -1)
+ fan_speed = 64;
+
+ if (th->type == ADT7460) {
+ printk(KERN_INFO "adt746x: ADT7460 initializing\n");
+ /* The 7460 needs to be started explicitly */
+ write_reg(th, CONFIG_REG, 1);
+ } else
+ printk(KERN_INFO "adt746x: ADT7467 initializing\n");
- return i2c_add_driver(&thermostat_driver);
+ for (i = 0; i < 3; i++) {
+ th->initial_limits[i] = read_reg(th, LIMIT_REG[i]);
+ set_limit(th, i);
+ }
+
+ printk(KERN_INFO "adt746x: Lowering max temperatures from %d, %d, %d"
+ " to %d, %d, %d\n",
+ th->initial_limits[0], th->initial_limits[1],
+ th->initial_limits[2], th->limits[0], th->limits[1],
+ th->limits[2]);
+
+ /* record invert bit status because fw can corrupt it after suspend */
+ th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK;
+ th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK;
+
+ /* be sure to really write fan speed the first time */
+ th->last_speed[0] = -2;
+ th->last_speed[1] = -2;
+ th->last_var[0] = -80;
+ th->last_var[1] = -80;
+
+ if (fan_speed != -1) {
+ /* manual mode, stop fans */
+ write_both_fan_speed(th, 0);
+ } else {
+ /* automatic mode */
+ write_both_fan_speed(th, -1);
+ }
+
+ th->thread = kthread_run(monitor_task, th, "kfand");
+ if (th->thread == ERR_PTR(-ENOMEM)) {
+ printk(KERN_INFO "adt746x: Kthread creation failed\n");
+ th->thread = NULL;
+ return -ENOMEM;
+ }
+
+ thermostat_create_files(th);
+
+ return 0;
}
-static void thermostat_create_files(void)
+static int remove_thermostat(struct i2c_client *client)
{
- int err;
+ struct thermostat *th = i2c_get_clientdata(client);
+ int i;
+
+ thermostat_remove_files(th);
- err = device_create_file(&of_dev->dev, &dev_attr_sensor1_temperature);
- err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_temperature);
- err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_limit);
- err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_limit);
- err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_location);
- err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_location);
- err |= device_create_file(&of_dev->dev, &dev_attr_limit_adjust);
- err |= device_create_file(&of_dev->dev, &dev_attr_specified_fan_speed);
- err |= device_create_file(&of_dev->dev, &dev_attr_sensor1_fan_speed);
- if(therm_type == ADT7460)
- err |= device_create_file(&of_dev->dev, &dev_attr_sensor2_fan_speed);
- if (err)
- printk(KERN_WARNING
- "Failed to create temperature attribute file(s).\n");
+ if (th->thread != NULL)
+ kthread_stop(th->thread);
+
+ printk(KERN_INFO "adt746x: Putting max temperatures back from "
+ "%d, %d, %d to %d, %d, %d\n",
+ th->limits[0], th->limits[1], th->limits[2],
+ th->initial_limits[0], th->initial_limits[1],
+ th->initial_limits[2]);
+
+ for (i = 0; i < 3; i++)
+ write_reg(th, LIMIT_REG[i], th->initial_limits[i]);
+
+ write_both_fan_speed(th, -1);
+
+ kfree(th);
+
+ return 0;
}
-static void thermostat_remove_files(void)
+static const struct i2c_device_id therm_adt746x_id[] = {
+ { "MAC,adt7460", ADT7460 },
+ { "MAC,adt7467", ADT7467 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, therm_adt746x_id);
+
+static struct i2c_driver thermostat_driver = {
+ .driver = {
+ .name = "therm_adt746x",
+ },
+ .probe = probe_thermostat,
+ .remove = remove_thermostat,
+ .id_table = therm_adt746x_id,
+};
+
+static int __init thermostat_init(void)
{
- if (of_dev) {
- device_remove_file(&of_dev->dev, &dev_attr_sensor1_temperature);
- device_remove_file(&of_dev->dev, &dev_attr_sensor2_temperature);
- device_remove_file(&of_dev->dev, &dev_attr_sensor1_limit);
- device_remove_file(&of_dev->dev, &dev_attr_sensor2_limit);
- device_remove_file(&of_dev->dev, &dev_attr_sensor1_location);
- device_remove_file(&of_dev->dev, &dev_attr_sensor2_location);
- device_remove_file(&of_dev->dev, &dev_attr_limit_adjust);
- device_remove_file(&of_dev->dev, &dev_attr_specified_fan_speed);
- device_remove_file(&of_dev->dev, &dev_attr_sensor1_fan_speed);
-
- if(therm_type == ADT7460)
- device_remove_file(&of_dev->dev,
- &dev_attr_sensor2_fan_speed);
+#ifndef CONFIG_I2C_POWERMAC
+ request_module("i2c-powermac");
+#endif
- }
+ return i2c_add_driver(&thermostat_driver);
}
-static void __exit
-thermostat_exit(void)
+static void __exit thermostat_exit(void)
{
i2c_del_driver(&thermostat_driver);
- of_device_unregister(of_dev);
}
module_init(thermostat_init);
diff --git a/drivers/macintosh/windfarm.h b/drivers/macintosh/windfarm.h
index 7a2482cc26a7..028cdac2d33d 100644
--- a/drivers/macintosh/windfarm.h
+++ b/drivers/macintosh/windfarm.h
@@ -17,7 +17,7 @@
#include <linux/device.h>
/* Display a 16.16 fixed point value */
-#define FIX32TOPRINT(f) ((f) >> 16),((((f) & 0xffff) * 1000) >> 16)
+#define FIX32TOPRINT(f) (((s32)(f)) >> 16),(((((s32)(f)) & 0xffff) * 1000) >> 16)
/*
* Control objects
@@ -35,12 +35,13 @@ struct wf_control_ops {
};
struct wf_control {
- struct list_head link;
- struct wf_control_ops *ops;
- char *name;
- int type;
- struct kref ref;
- struct device_attribute attr;
+ struct list_head link;
+ const struct wf_control_ops *ops;
+ const char *name;
+ int type;
+ struct kref ref;
+ struct device_attribute attr;
+ void *priv;
};
#define WF_CONTROL_TYPE_GENERIC 0
@@ -72,6 +73,26 @@ static inline int wf_control_set_min(struct wf_control *ct)
return ct->ops->set_value(ct, vmin);
}
+static inline int wf_control_set(struct wf_control *ct, s32 val)
+{
+ return ct->ops->set_value(ct, val);
+}
+
+static inline int wf_control_get(struct wf_control *ct, s32 *val)
+{
+ return ct->ops->get_value(ct, val);
+}
+
+static inline s32 wf_control_get_min(struct wf_control *ct)
+{
+ return ct->ops->get_min(ct);
+}
+
+static inline s32 wf_control_get_max(struct wf_control *ct)
+{
+ return ct->ops->get_max(ct);
+}
+
/*
* Sensor objects
*/
@@ -85,11 +106,12 @@ struct wf_sensor_ops {
};
struct wf_sensor {
- struct list_head link;
- struct wf_sensor_ops *ops;
- char *name;
- struct kref ref;
- struct device_attribute attr;
+ struct list_head link;
+ const struct wf_sensor_ops *ops;
+ const char *name;
+ struct kref ref;
+ struct device_attribute attr;
+ void *priv;
};
/* Same lifetime rules as controls */
@@ -99,6 +121,11 @@ extern struct wf_sensor * wf_find_sensor(const char *name);
extern int wf_get_sensor(struct wf_sensor *sr);
extern void wf_put_sensor(struct wf_sensor *sr);
+static inline int wf_sensor_get(struct wf_sensor *sr, s32 *val)
+{
+ return sr->ops->get_value(sr, val);
+}
+
/* For use by clients. Note that we are a bit racy here since
* notifier_block doesn't have a module owner field. I may fix
* it one day ...
diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c
new file mode 100644
index 000000000000..ac3f243b9c5a
--- /dev/null
+++ b/drivers/macintosh/windfarm_ad7417_sensor.c
@@ -0,0 +1,347 @@
+/*
+ * Windfarm PowerMac thermal control. AD7417 sensors
+ *
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * Released under the term of the GNU GPL v2.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/sections.h>
+
+#include "windfarm.h"
+#include "windfarm_mpu.h"
+
+#define VERSION "1.0"
+
+struct wf_ad7417_priv {
+ struct kref ref;
+ struct i2c_client *i2c;
+ u8 config;
+ u8 cpu;
+ const struct mpu_data *mpu;
+ struct wf_sensor sensors[5];
+ struct mutex lock;
+};
+
+static int wf_ad7417_temp_get(struct wf_sensor *sr, s32 *value)
+{
+ struct wf_ad7417_priv *pv = sr->priv;
+ u8 buf[2];
+ s16 raw;
+ int rc;
+
+ *value = 0;
+ mutex_lock(&pv->lock);
+
+ /* Read temp register */
+ buf[0] = 0;
+ rc = i2c_master_send(pv->i2c, buf, 1);
+ if (rc < 0)
+ goto error;
+ rc = i2c_master_recv(pv->i2c, buf, 2);
+ if (rc < 0)
+ goto error;
+
+ /* Read a 16-bit signed value */
+ raw = be16_to_cpup((__be16 *)buf);
+
+ /* Convert 8.8-bit to 16.16 fixed point */
+ *value = ((s32)raw) << 8;
+
+ mutex_unlock(&pv->lock);
+ return 0;
+
+error:
+ mutex_unlock(&pv->lock);
+ return -1;
+}
+
+/*
+ * Scaling factors for the AD7417 ADC converters (except
+ * for the CPU diode which is obtained from the EEPROM).
+ * Those values are obtained from the property list of
+ * the darwin driver
+ */
+#define ADC_12V_CURRENT_SCALE 0x0320 /* _AD2 */
+#define ADC_CPU_VOLTAGE_SCALE 0x00a0 /* _AD3 */
+#define ADC_CPU_CURRENT_SCALE 0x1f40 /* _AD4 */
+
+static void wf_ad7417_adc_convert(struct wf_ad7417_priv *pv,
+ int chan, s32 raw, s32 *value)
+{
+ switch(chan) {
+ case 1: /* Diode */
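+ /* Apply the diode slope/offset calibration taken from the
+ * MPU EEPROM data; like the other sensors, the result is
+ * presumably in 16.16 fixed point
+ */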
+ *value = (raw * (s32)pv->mpu->mdiode +
+ ((s32)pv->mpu->bdiode << 12)) >> 2;
+ break;
+ case 2: /* 12v current */
+ *value = raw * ADC_12V_CURRENT_SCALE;
+ break;
+ case 3: /* core voltage */
+ *value = raw * ADC_CPU_VOLTAGE_SCALE;
+ break;
+ case 4: /* core current */
+ *value = raw * ADC_CPU_CURRENT_SCALE;
+ break;
+ }
+}
+
+static int wf_ad7417_adc_get(struct wf_sensor *sr, s32 *value)
+{
+ struct wf_ad7417_priv *pv = sr->priv;
+ int chan = sr - pv->sensors;
+ int i, rc;
+ u8 buf[2];
+ u16 raw;
+
+ *value = 0;
+ mutex_lock(&pv->lock);
+ for (i = 0; i < 10; i++) {
+ /* Set channel */
+ buf[0] = 1;
+ buf[1] = (pv->config & 0x1f) | (chan << 5);
+ rc = i2c_master_send(pv->i2c, buf, 2);
+ if (rc < 0)
+ goto error;
+
+ /* Wait for conversion */
+ msleep(1);
+
+ /* Switch to data register */
+ buf[0] = 4;
+ rc = i2c_master_send(pv->i2c, buf, 1);
+ if (rc < 0)
+ goto error;
+
+ /* Read result */
+ rc = i2c_master_recv(pv->i2c, buf, 2);
+ if (rc < 0)
+ goto error;
+
+ /* Read the 10-bit conversion result (left-justified in 16 bits) */
+ raw = be16_to_cpup((__be16 *)buf) >> 6;
+ wf_ad7417_adc_convert(pv, chan, raw, value);
+
+ dev_vdbg(&pv->i2c->dev, "ADC chan %d [%s]"
+ " raw value: 0x%x, conv to: 0x%08x\n",
+ chan, sr->name, raw, *value);
+
+ mutex_unlock(&pv->lock);
+ return 0;
+
+ error:
+ dev_dbg(&pv->i2c->dev,
+ "Error reading ADC, try %d...\n", i);
+ if (i < 9)
+ msleep(10);
+ }
+ mutex_unlock(&pv->lock);
+ return -1;
+}
+
+static void wf_ad7417_release(struct kref *ref)
+{
+ struct wf_ad7417_priv *pv = container_of(ref,
+ struct wf_ad7417_priv, ref);
+ kfree(pv);
+}
+
+static void wf_ad7417_sensor_release(struct wf_sensor *sr)
+{
+ struct wf_ad7417_priv *pv = sr->priv;
+
+ kfree(sr->name);
+ kref_put(&pv->ref, wf_ad7417_release);
+}
+
+static const struct wf_sensor_ops wf_ad7417_temp_ops = {
+ .get_value = wf_ad7417_temp_get,
+ .release = wf_ad7417_sensor_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct wf_sensor_ops wf_ad7417_adc_ops = {
+ .get_value = wf_ad7417_adc_get,
+ .release = wf_ad7417_sensor_release,
+ .owner = THIS_MODULE,
+};
+
+static void __devinit wf_ad7417_add_sensor(struct wf_ad7417_priv *pv,
+ int index, const char *name,
+ const struct wf_sensor_ops *ops)
+{
+ pv->sensors[index].name = kasprintf(GFP_KERNEL, "%s-%d", name, pv->cpu);
+ pv->sensors[index].priv = pv;
+ pv->sensors[index].ops = ops;
+ if (!wf_register_sensor(&pv->sensors[index]))
+ kref_get(&pv->ref);
+}
+
+static void __devinit wf_ad7417_init_chip(struct wf_ad7417_priv *pv)
+{
+ int rc;
+ u8 buf[2];
+ u8 config = 0;
+
+ /*
+ * Read the ADC configuration register and cache it. We
+ * also make sure Config2 contains proper values; I've seen
+ * cases where we got stale garbage in there, thus preventing
+ * proper reading of the conversion values
+ */
+
+ /* Clear Config2 */
+ buf[0] = 5;
+ buf[1] = 0;
+ i2c_master_send(pv->i2c, buf, 2);
+
+ /* Read & cache Config1 */
+ buf[0] = 1;
+ rc = i2c_master_send(pv->i2c, buf, 1);
+ if (rc > 0) {
+ rc = i2c_master_recv(pv->i2c, buf, 1);
+ if (rc > 0) {
+ config = buf[0];
+
+ dev_dbg(&pv->i2c->dev, "ADC config reg: %02x\n",
+ config);
+
+ /* Disable shutdown mode */
+ config &= 0xfe;
+ buf[0] = 1;
+ buf[1] = config;
+ rc = i2c_master_send(pv->i2c, buf, 2);
+ }
+ }
+ if (rc <= 0)
+ dev_err(&pv->i2c->dev, "Error reading ADC config\n");
+
+ pv->config = config;
+}
+
+static int __devinit wf_ad7417_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct wf_ad7417_priv *pv;
+ const struct mpu_data *mpu;
+ const char *loc;
+ int cpu_nr;
+
+ loc = of_get_property(client->dev.of_node, "hwsensor-location", NULL);
+ if (!loc) {
+ dev_warn(&client->dev, "Missing hwsensor-location property!\n");
+ return -ENXIO;
+ }
+
+ /*
+ * Identify which CPU we belong to by looking at the first entry
+ * in the hwsensor-location list
+ */
+ if (!strncmp(loc, "CPU A", 5))
+ cpu_nr = 0;
+ else if (!strncmp(loc, "CPU B", 5))
+ cpu_nr = 1;
+ else {
+ pr_err("wf_ad7417: Can't identify location %s\n", loc);
+ return -ENXIO;
+ }
+ mpu = wf_get_mpu(cpu_nr);
+ if (!mpu) {
+ dev_err(&client->dev, "Failed to retrieve MPU data\n");
+ return -ENXIO;
+ }
+
+ pv = kzalloc(sizeof(struct wf_ad7417_priv), GFP_KERNEL);
+ if (pv == NULL)
+ return -ENODEV;
+
+ kref_init(&pv->ref);
+ mutex_init(&pv->lock);
+ pv->i2c = client;
+ pv->cpu = cpu_nr;
+ pv->mpu = mpu;
+ dev_set_drvdata(&client->dev, pv);
+
+ /* Initialize the chip */
+ wf_ad7417_init_chip(pv);
+
+ /*
+ * We cannot rely on the Apple device-tree giving us child
+ * nodes with the names of the individual sensors, so we
+ * just hard-code what we know about them
+ */
+ wf_ad7417_add_sensor(pv, 0, "cpu-amb-temp", &wf_ad7417_temp_ops);
+ wf_ad7417_add_sensor(pv, 1, "cpu-diode-temp", &wf_ad7417_adc_ops);
+ wf_ad7417_add_sensor(pv, 2, "cpu-12v-current", &wf_ad7417_adc_ops);
+ wf_ad7417_add_sensor(pv, 3, "cpu-voltage", &wf_ad7417_adc_ops);
+ wf_ad7417_add_sensor(pv, 4, "cpu-current", &wf_ad7417_adc_ops);
+
+ return 0;
+}
+
+static int __devexit wf_ad7417_remove(struct i2c_client *client)
+{
+ struct wf_ad7417_priv *pv = dev_get_drvdata(&client->dev);
+ int i;
+
+ /* Mark client detached */
+ pv->i2c = NULL;
+
+ /* Release sensor */
+ for (i = 0; i < 5; i++)
+ wf_unregister_sensor(&pv->sensors[i]);
+
+ kref_put(&pv->ref, wf_ad7417_release);
+
+ return 0;
+}
+
+static const struct i2c_device_id wf_ad7417_id[] = {
+ { "MAC,ad7417", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
+
+static struct i2c_driver wf_ad7417_driver = {
+ .driver = {
+ .name = "wf_ad7417",
+ },
+ .probe = wf_ad7417_probe,
+ .remove = wf_ad7417_remove,
+ .id_table = wf_ad7417_id,
+};
+
+static int __devinit wf_ad7417_init(void)
+{
+ /* This is only supported on these machines */
+ if (!of_machine_is_compatible("PowerMac7,2") &&
+ !of_machine_is_compatible("PowerMac7,3") &&
+ !of_machine_is_compatible("RackMac3,1"))
+ return -ENODEV;
+
+ return i2c_add_driver(&wf_ad7417_driver);
+}
+
+static void __devexit wf_ad7417_exit(void)
+{
+ i2c_del_driver(&wf_ad7417_driver);
+}
+
+module_init(wf_ad7417_init);
+module_exit(wf_ad7417_exit);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("ad7417 sensor driver for PowerMacs");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index ce8897933a84..3ee198b65843 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -164,13 +164,27 @@ static ssize_t wf_show_control(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wf_control *ctrl = container_of(attr, struct wf_control, attr);
+ const char *typestr;
s32 val = 0;
int err;
err = ctrl->ops->get_value(ctrl, &val);
- if (err < 0)
+ if (err < 0) {
+ if (err == -EFAULT)
+ return sprintf(buf, "<HW FAULT>\n");
return err;
- return sprintf(buf, "%d\n", val);
+ }
+ switch(ctrl->type) {
+ case WF_CONTROL_RPM_FAN:
+ typestr = " RPM";
+ break;
+ case WF_CONTROL_PWM_FAN:
+ typestr = " %";
+ break;
+ default:
+ typestr = "";
+ }
+ return sprintf(buf, "%d%s\n", val, typestr);
}
/* This is really only for debugging... */
@@ -470,11 +484,6 @@ static int __init windfarm_core_init(void)
{
DBG("wf: core loaded\n");
- /* Don't register on old machines that use therm_pm72 for now */
- if (of_machine_is_compatible("PowerMac7,2") ||
- of_machine_is_compatible("PowerMac7,3") ||
- of_machine_is_compatible("RackMac3,1"))
- return -ENODEV;
platform_device_register(&wf_platform_device);
return 0;
}
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c
index 1a77a7c97d0e..72d1fdfe02a5 100644
--- a/drivers/macintosh/windfarm_cpufreq_clamp.c
+++ b/drivers/macintosh/windfarm_cpufreq_clamp.c
@@ -75,12 +75,6 @@ static int __init wf_cpufreq_clamp_init(void)
{
struct wf_control *clamp;
- /* Don't register on old machines that use therm_pm72 for now */
- if (of_machine_is_compatible("PowerMac7,2") ||
- of_machine_is_compatible("PowerMac7,3") ||
- of_machine_is_compatible("RackMac3,1"))
- return -ENODEV;
-
clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
if (clamp == NULL)
return -ENOMEM;
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
new file mode 100644
index 000000000000..b3411edb324b
--- /dev/null
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -0,0 +1,613 @@
+/*
+ * Windfarm PowerMac thermal control. FCU fan control
+ *
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * Released under the term of the GNU GPL v2.
+ */
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/sections.h>
+
+#include "windfarm.h"
+#include "windfarm_mpu.h"
+
+#define VERSION "1.0"
+
+#ifdef DEBUG
+#define DBG(args...) printk(args)
+#else
+#define DBG(args...) do { } while(0)
+#endif
+
+/*
+ * This option is "weird" :) Basically, if you define this to 1
+ * the control loop for the RPM fans (not PWMs) will apply the
+ * correction factor obtained from the PID to the actual RPM
+ * speed read from the FCU.
+ *
+ * If you define the below constant to 0, then it will be
+ * applied to the setpoint RPM speed, which is basically the
+ * speed we previously "asked" for.
+ *
+ * I'm using 0 for now which is what therm_pm72 used to do and
+ * what Darwin -apparently- does based on observed behaviour.
+ */
+#define RPM_PID_USE_ACTUAL_SPEED 0
+
+/* Default min/max for pumps */
+#define CPU_PUMP_OUTPUT_MAX 3200
+#define CPU_PUMP_OUTPUT_MIN 1250
+
+#define FCU_FAN_RPM 0
+#define FCU_FAN_PWM 1
+
+struct wf_fcu_priv {
+ struct kref ref;
+ struct i2c_client *i2c;
+ struct mutex lock;
+ struct list_head fan_list;
+ int rpm_shift;
+};
+
+struct wf_fcu_fan {
+ struct list_head link;
+ int id;
+ s32 min, max, target;
+ struct wf_fcu_priv *fcu_priv;
+ struct wf_control ctrl;
+};
+
+static void wf_fcu_release(struct kref *ref)
+{
+ struct wf_fcu_priv *pv = container_of(ref, struct wf_fcu_priv, ref);
+
+ kfree(pv);
+}
+
+static void wf_fcu_fan_release(struct wf_control *ct)
+{
+ struct wf_fcu_fan *fan = ct->priv;
+
+ kref_put(&fan->fcu_priv->ref, wf_fcu_release);
+ kfree(fan);
+}
+
+static int wf_fcu_read_reg(struct wf_fcu_priv *pv, int reg,
+ unsigned char *buf, int nb)
+{
+ int tries, nr, nw;
+
+ mutex_lock(&pv->lock);
+
+ buf[0] = reg;
+ tries = 0;
+ for (;;) {
+ nw = i2c_master_send(pv->i2c, buf, 1);
+ if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
+ break;
+ msleep(10);
+ ++tries;
+ }
+ if (nw <= 0) {
+ pr_err("Failure writing address to FCU: %d", nw);
+ nr = nw;
+ goto bail;
+ }
+ tries = 0;
+ for (;;) {
+ nr = i2c_master_recv(pv->i2c, buf, nb);
+ if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
+ break;
+ msleep(10);
+ ++tries;
+ }
+ if (nr <= 0)
+ pr_err("wf_fcu: Failure reading data from FCU: %d", nw);
+ bail:
+ mutex_unlock(&pv->lock);
+ return nr;
+}
+
+static int wf_fcu_write_reg(struct wf_fcu_priv *pv, int reg,
+ const unsigned char *ptr, int nb)
+{
+ int tries, nw;
+ unsigned char buf[16];
+
+ buf[0] = reg;
+ memcpy(buf+1, ptr, nb);
+ ++nb;
+ tries = 0;
+ for (;;) {
+ nw = i2c_master_send(pv->i2c, buf, nb);
+ if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
+ break;
+ msleep(10);
+ ++tries;
+ }
+ if (nw < 0)
+ pr_err("wf_fcu: Failure writing to FCU: %d", nw);
+ return nw;
+}
+
+static int wf_fcu_fan_set_rpm(struct wf_control *ct, s32 value)
+{
+ struct wf_fcu_fan *fan = ct->priv;
+ struct wf_fcu_priv *pv = fan->fcu_priv;
+ int rc, shift = pv->rpm_shift;
+ unsigned char buf[2];
+
+ if (value < fan->min)
+ value = fan->min;
+ if (value > fan->max)
+ value = fan->max;
+
+ fan->target = value;
+
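+ /* The setpoint is left-justified in the 16-bit fan register;
+ * rpm_shift (2 or 3, detected in wf_fcu_init_chip) appears to
+ * depend on the FCU revision
+ */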
+ buf[0] = value >> (8 - shift);
+ buf[1] = value << shift;
+ rc = wf_fcu_write_reg(pv, 0x10 + (fan->id * 2), buf, 2);
+ if (rc < 0)
+ return -EIO;
+ return 0;
+}
+
+static int wf_fcu_fan_get_rpm(struct wf_control *ct, s32 *value)
+{
+ struct wf_fcu_fan *fan = ct->priv;
+ struct wf_fcu_priv *pv = fan->fcu_priv;
+ int rc, reg_base, shift = pv->rpm_shift;
+ unsigned char failure;
+ unsigned char active;
+ unsigned char buf[2];
+
+ rc = wf_fcu_read_reg(pv, 0xb, &failure, 1);
+ if (rc != 1)
+ return -EIO;
+ if ((failure & (1 << fan->id)) != 0)
+ return -EFAULT;
+ rc = wf_fcu_read_reg(pv, 0xd, &active, 1);
+ if (rc != 1)
+ return -EIO;
+ if ((active & (1 << fan->id)) == 0)
+ return -ENXIO;
+
+ /* Programmed value or real current speed */
+#if RPM_PID_USE_ACTUAL_SPEED
+ reg_base = 0x11;
+#else
+ reg_base = 0x10;
+#endif
+ rc = wf_fcu_read_reg(pv, reg_base + (fan->id * 2), buf, 2);
+ if (rc != 2)
+ return -EIO;
+
+ *value = (buf[0] << (8 - shift)) | buf[1] >> shift;
+
+ return 0;
+}
+
+static int wf_fcu_fan_set_pwm(struct wf_control *ct, s32 value)
+{
+ struct wf_fcu_fan *fan = ct->priv;
+ struct wf_fcu_priv *pv = fan->fcu_priv;
+ unsigned char buf[2];
+ int rc;
+
+ if (value < fan->min)
+ value = fan->min;
+ if (value > fan->max)
+ value = fan->max;
+
+ fan->target = value;
+
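+ /* Scale the 0..100 percent request to the 0..255 range
+ * apparently used by the PWM register
+ */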
+ value = (value * 2559) / 1000;
+ buf[0] = value;
+ rc = wf_fcu_write_reg(pv, 0x30 + (fan->id * 2), buf, 1);
+ if (rc < 0)
+ return -EIO;
+ return 0;
+}
+
+static int wf_fcu_fan_get_pwm(struct wf_control *ct, s32 *value)
+{
+ struct wf_fcu_fan *fan = ct->priv;
+ struct wf_fcu_priv *pv = fan->fcu_priv;
+ unsigned char failure;
+ unsigned char active;
+ unsigned char buf[2];
+ int rc;
+
+ rc = wf_fcu_read_reg(pv, 0x2b, &failure, 1);
+ if (rc != 1)
+ return -EIO;
+ if ((failure & (1 << fan->id)) != 0)
+ return -EFAULT;
+ rc = wf_fcu_read_reg(pv, 0x2d, &active, 1);
+ if (rc != 1)
+ return -EIO;
+ if ((active & (1 << fan->id)) == 0)
+ return -ENXIO;
+
+ rc = wf_fcu_read_reg(pv, 0x30 + (fan->id * 2), buf, 1);
+ if (rc != 1)
+ return -EIO;
+
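+ /* Convert the raw 0..255 PWM register value back to a percentage */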
+ *value = (((s32)buf[0]) * 1000) / 2559;
+
+ return 0;
+}
+
+static s32 wf_fcu_fan_min(struct wf_control *ct)
+{
+ struct wf_fcu_fan *fan = ct->priv;
+
+ return fan->min;
+}
+
+static s32 wf_fcu_fan_max(struct wf_control *ct)
+{
+ struct wf_fcu_fan *fan = ct->priv;
+
+ return fan->max;
+}
+
+static const struct wf_control_ops wf_fcu_fan_rpm_ops = {
+ .set_value = wf_fcu_fan_set_rpm,
+ .get_value = wf_fcu_fan_get_rpm,
+ .get_min = wf_fcu_fan_min,
+ .get_max = wf_fcu_fan_max,
+ .release = wf_fcu_fan_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct wf_control_ops wf_fcu_fan_pwm_ops = {
+ .set_value = wf_fcu_fan_set_pwm,
+ .get_value = wf_fcu_fan_get_pwm,
+ .get_min = wf_fcu_fan_min,
+ .get_max = wf_fcu_fan_max,
+ .release = wf_fcu_fan_release,
+ .owner = THIS_MODULE,
+};
+
+static void __devinit wf_fcu_get_pump_minmax(struct wf_fcu_fan *fan)
+{
+ const struct mpu_data *mpu = wf_get_mpu(0);
+ u16 pump_min = 0, pump_max = 0xffff;
+ u16 tmp[4];
+
+ /* Try to fetch pumps min/max infos from eeprom */
+ if (mpu) {
+ memcpy(&tmp, mpu->processor_part_num, 8);
+ if (tmp[0] != 0xffff && tmp[1] != 0xffff) {
+ pump_min = max(pump_min, tmp[0]);
+ pump_max = min(pump_max, tmp[1]);
+ }
+ if (tmp[2] != 0xffff && tmp[3] != 0xffff) {
+ pump_min = max(pump_min, tmp[2]);
+ pump_max = min(pump_max, tmp[3]);
+ }
+ }
+
+ /* Double check the values, this _IS_ needed as the EEPROM on
+ * some dual 2.5GHz G5s seems, at least, to have both min & max
+ * set to the same value ... (grrrr)
+ */
+ if (pump_min == pump_max || pump_min == 0 || pump_max == 0xffff) {
+ pump_min = CPU_PUMP_OUTPUT_MIN;
+ pump_max = CPU_PUMP_OUTPUT_MAX;
+ }
+
+ fan->min = pump_min;
+ fan->max = pump_max;
+
+ DBG("wf_fcu: pump min/max for %s set to: [%d..%d] RPM\n",
+ fan->ctrl.name, pump_min, pump_max);
+}
+
+static void __devinit wf_fcu_get_rpmfan_minmax(struct wf_fcu_fan *fan)
+{
+ struct wf_fcu_priv *pv = fan->fcu_priv;
+ const struct mpu_data *mpu0 = wf_get_mpu(0);
+ const struct mpu_data *mpu1 = wf_get_mpu(1);
+
+ /* Default */
+ fan->min = 2400 >> pv->rpm_shift;
+ fan->max = 56000 >> pv->rpm_shift;
+
+ /* CPU fans have min/max in MPU */
+ if (mpu0 && !strcmp(fan->ctrl.name, "cpu-front-fan-0")) {
+ fan->min = max(fan->min, (s32)mpu0->rminn_intake_fan);
+ fan->max = min(fan->max, (s32)mpu0->rmaxn_intake_fan);
+ goto bail;
+ }
+ if (mpu1 && !strcmp(fan->ctrl.name, "cpu-front-fan-1")) {
+ fan->min = max(fan->min, (s32)mpu1->rminn_intake_fan);
+ fan->max = min(fan->max, (s32)mpu1->rmaxn_intake_fan);
+ goto bail;
+ }
+ if (mpu0 && !strcmp(fan->ctrl.name, "cpu-rear-fan-0")) {
+ fan->min = max(fan->min, (s32)mpu0->rminn_exhaust_fan);
+ fan->max = min(fan->max, (s32)mpu0->rmaxn_exhaust_fan);
+ goto bail;
+ }
+ if (mpu1 && !strcmp(fan->ctrl.name, "cpu-rear-fan-1")) {
+ fan->min = max(fan->min, (s32)mpu1->rminn_exhaust_fan);
+ fan->max = min(fan->max, (s32)mpu1->rmaxn_exhaust_fan);
+ goto bail;
+ }
+ /* Rackmac variants, we just use mpu0 intake */
+ if (!strncmp(fan->ctrl.name, "cpu-fan", 7)) {
+ fan->min = max(fan->min, (s32)mpu0->rminn_intake_fan);
+ fan->max = min(fan->max, (s32)mpu0->rmaxn_intake_fan);
+ goto bail;
+ }
+ bail:
+ DBG("wf_fcu: fan min/max for %s set to: [%d..%d] RPM\n",
+ fan->ctrl.name, fan->min, fan->max);
+}
+
+static void __devinit wf_fcu_add_fan(struct wf_fcu_priv *pv,
+ const char *name,
+ int type, int id)
+{
+ struct wf_fcu_fan *fan;
+
+ fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+ if (!fan)
+ return;
+ fan->fcu_priv = pv;
+ fan->id = id;
+ fan->ctrl.name = name;
+ fan->ctrl.priv = fan;
+
+ /* The min/max handling is oddball but the code comes from
+ * therm_pm72, which seems to work, so ...
+ */
+ if (type == FCU_FAN_RPM) {
+ if (!strncmp(name, "cpu-pump", strlen("cpu-pump")))
+ wf_fcu_get_pump_minmax(fan);
+ else
+ wf_fcu_get_rpmfan_minmax(fan);
+ fan->ctrl.type = WF_CONTROL_RPM_FAN;
+ fan->ctrl.ops = &wf_fcu_fan_rpm_ops;
+ } else {
+ fan->min = 10;
+ fan->max = 100;
+ fan->ctrl.type = WF_CONTROL_PWM_FAN;
+ fan->ctrl.ops = &wf_fcu_fan_pwm_ops;
+ }
+
+ if (wf_register_control(&fan->ctrl)) {
+ pr_err("wf_fcu: Failed to register fan %s\n", name);
+ kfree(fan);
+ return;
+ }
+ list_add(&fan->link, &pv->fan_list);
+ kref_get(&pv->ref);
+}
+
+static void __devinit wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
+{
+ /* Translation of device-tree location properties to
+ * windfarm fan names
+ */
+ static const struct {
+ const char *dt_name; /* Device-tree name */
+ const char *ct_name; /* Control name */
+ } loc_trans[] = {
+ { "BACKSIDE", "backside-fan", },
+ { "SYS CTRLR FAN", "backside-fan", },
+ { "DRIVE BAY", "drive-bay-fan", },
+ { "SLOT", "slots-fan", },
+ { "PCI FAN", "slots-fan", },
+ { "CPU A INTAKE", "cpu-front-fan-0", },
+ { "CPU A EXHAUST", "cpu-rear-fan-0", },
+ { "CPU B INTAKE", "cpu-front-fan-1", },
+ { "CPU B EXHAUST", "cpu-rear-fan-1", },
+ { "CPU A PUMP", "cpu-pump-0", },
+ { "CPU B PUMP", "cpu-pump-1", },
+ { "CPU A 1", "cpu-fan-a-0", },
+ { "CPU A 2", "cpu-fan-b-0", },
+ { "CPU A 3", "cpu-fan-c-0", },
+ { "CPU B 1", "cpu-fan-a-1", },
+ { "CPU B 2", "cpu-fan-b-1", },
+ { "CPU B 3", "cpu-fan-c-1", },
+ };
+ struct device_node *np = NULL, *fcu = pv->i2c->dev.of_node;
+ int i;
+
+ DBG("Looking up FCU controls in device-tree...\n");
+
+ while ((np = of_get_next_child(fcu, np)) != NULL) {
+ int id, type = -1;
+ const char *loc;
+ const char *name;
+ const u32 *reg;
+
+ DBG(" control: %s, type: %s\n", np->name, np->type);
+
+ /* Detect control type */
+ if (!strcmp(np->type, "fan-rpm-control") ||
+ !strcmp(np->type, "fan-rpm"))
+ type = FCU_FAN_RPM;
+ if (!strcmp(np->type, "fan-pwm-control") ||
+ !strcmp(np->type, "fan-pwm"))
+ type = FCU_FAN_PWM;
+ /* Only care about fans for now */
+ if (type == -1)
+ continue;
+
+ /* Lookup for a matching location */
+ loc = of_get_property(np, "location", NULL);
+ reg = of_get_property(np, "reg", NULL);
+ if (loc == NULL || reg == NULL)
+ continue;
+ DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg);
+
+ for (i = 0; i < ARRAY_SIZE(loc_trans); i++) {
+ if (strncmp(loc, loc_trans[i].dt_name,
+ strlen(loc_trans[i].dt_name)))
+ continue;
+ name = loc_trans[i].ct_name;
+
+ DBG(" location match, name: %s\n", name);
+
+ if (type == FCU_FAN_RPM)
+ id = ((*reg) - 0x10) / 2;
+ else
+ id = ((*reg) - 0x30) / 2;
+ if (id > 7) {
+ pr_warning("wf_fcu: Can't parse "
+ "fan ID in device-tree for %s\n",
+ np->full_name);
+ break;
+ }
+ wf_fcu_add_fan(pv, name, type, id);
+ break;
+ }
+ }
+}
+
+static void __devinit wf_fcu_default_fans(struct wf_fcu_priv *pv)
+{
+ /* We only support the default fans for PowerMac7,2 */
+ if (!of_machine_is_compatible("PowerMac7,2"))
+ return;
+
+ wf_fcu_add_fan(pv, "backside-fan", FCU_FAN_PWM, 1);
+ wf_fcu_add_fan(pv, "drive-bay-fan", FCU_FAN_RPM, 2);
+ wf_fcu_add_fan(pv, "slots-fan", FCU_FAN_PWM, 2);
+ wf_fcu_add_fan(pv, "cpu-front-fan-0", FCU_FAN_RPM, 3);
+ wf_fcu_add_fan(pv, "cpu-rear-fan-0", FCU_FAN_RPM, 4);
+ wf_fcu_add_fan(pv, "cpu-front-fan-1", FCU_FAN_RPM, 5);
+ wf_fcu_add_fan(pv, "cpu-rear-fan-1", FCU_FAN_RPM, 6);
+}
+
+static int __devinit wf_fcu_init_chip(struct wf_fcu_priv *pv)
+{
+ unsigned char buf = 0xff;
+ int rc;
+
+ rc = wf_fcu_write_reg(pv, 0xe, &buf, 1);
+ if (rc < 0)
+ return -EIO;
+ rc = wf_fcu_write_reg(pv, 0x2e, &buf, 1);
+ if (rc < 0)
+ return -EIO;
+ rc = wf_fcu_read_reg(pv, 0, &buf, 1);
+ if (rc < 0)
+ return -EIO;
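+ /* Register 0 appears to hold the FCU revision, which determines
+ * how fan speed values are justified (see the rpm_shift use in
+ * the RPM get/set routines)
+ */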
+ pv->rpm_shift = (buf == 1) ? 2 : 3;
+
+ pr_debug("wf_fcu: FCU Initialized, RPM fan shift is %d\n",
+ pv->rpm_shift);
+
+ return 0;
+}
+
+static int __devinit wf_fcu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct wf_fcu_priv *pv;
+
+ pv = kzalloc(sizeof(*pv), GFP_KERNEL);
+ if (!pv)
+ return -ENOMEM;
+
+ kref_init(&pv->ref);
+ mutex_init(&pv->lock);
+ INIT_LIST_HEAD(&pv->fan_list);
+ pv->i2c = client;
+
+ /*
+ * First we must start the FCU which will query the
+ * shift value to apply to RPMs
+ */
+ if (wf_fcu_init_chip(pv)) {
+ pr_err("wf_fcu: Initialization failed !\n");
+ kfree(pv);
+ return -ENXIO;
+ }
+
+ /* First lookup fans in the device-tree */
+ wf_fcu_lookup_fans(pv);
+
+ /*
+ * Older machines don't have the device-tree entries
+ * we are looking for, so just hard-code the list
+ */
+ if (list_empty(&pv->fan_list))
+ wf_fcu_default_fans(pv);
+
+ /* Still no fans ? FAIL */
+ if (list_empty(&pv->fan_list)) {
+ pr_err("wf_fcu: Failed to find fans for your machine\n");
+ kfree(pv);
+ return -ENODEV;
+ }
+
+ dev_set_drvdata(&client->dev, pv);
+
+ return 0;
+}
+
+static int __devexit wf_fcu_remove(struct i2c_client *client)
+{
+ struct wf_fcu_priv *pv = dev_get_drvdata(&client->dev);
+ struct wf_fcu_fan *fan;
+
+ while (!list_empty(&pv->fan_list)) {
+ fan = list_first_entry(&pv->fan_list, struct wf_fcu_fan, link);
+ list_del(&fan->link);
+ wf_unregister_control(&fan->ctrl);
+ }
+ kref_put(&pv->ref, wf_fcu_release);
+ return 0;
+}
+
+static const struct i2c_device_id wf_fcu_id[] = {
+ { "MAC,fcu", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
+
+static struct i2c_driver wf_fcu_driver = {
+ .driver = {
+ .name = "wf_fcu",
+ },
+ .probe = wf_fcu_probe,
+ .remove = wf_fcu_remove,
+ .id_table = wf_fcu_id,
+};
+
+static int __init wf_fcu_init(void)
+{
+ return i2c_add_driver(&wf_fcu_driver);
+}
+
+static void __exit wf_fcu_exit(void)
+{
+ i2c_del_driver(&wf_fcu_driver);
+}
+
+
+module_init(wf_fcu_init);
+module_exit(wf_fcu_exit);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("FCU control objects for PowerMacs thermal control");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 4d6a90a1372b..b0c2d3695b34 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -23,7 +23,7 @@
#include "windfarm.h"
-#define VERSION "0.2"
+#define VERSION "1.0"
#undef DEBUG
@@ -36,8 +36,8 @@
struct wf_lm75_sensor {
int ds1775 : 1;
int inited : 1;
- struct i2c_client *i2c;
- struct wf_sensor sens;
+ struct i2c_client *i2c;
+ struct wf_sensor sens;
};
#define wf_to_lm75(c) container_of(c, struct wf_lm75_sensor, sens)
@@ -90,40 +90,19 @@ static struct wf_sensor_ops wf_lm75_ops = {
static int wf_lm75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
-{
+{
struct wf_lm75_sensor *lm;
- int rc;
-
- lm = kzalloc(sizeof(struct wf_lm75_sensor), GFP_KERNEL);
- if (lm == NULL)
- return -ENODEV;
-
- lm->inited = 0;
- lm->ds1775 = id->driver_data;
- lm->i2c = client;
- lm->sens.name = client->dev.platform_data;
- lm->sens.ops = &wf_lm75_ops;
- i2c_set_clientdata(client, lm);
-
- rc = wf_register_sensor(&lm->sens);
- if (rc)
- kfree(lm);
-
- return rc;
-}
-
-static struct i2c_driver wf_lm75_driver;
-
-static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
- u8 addr, int ds1775,
- const char *loc)
-{
- struct i2c_board_info info;
- struct i2c_client *client;
- char *name;
+ int rc, ds1775 = id->driver_data;
+ const char *name, *loc;
DBG("wf_lm75: creating %s device at address 0x%02x\n",
- ds1775 ? "ds1775" : "lm75", addr);
+ ds1775 ? "ds1775" : "lm75", client->addr);
+
+ loc = of_get_property(client->dev.of_node, "hwsensor-location", NULL);
+ if (!loc) {
+ dev_warn(&client->dev, "Missing hwsensor-location property!\n");
+ return -ENXIO;
+ }
 /* Usual rant about sensor names not being very consistent in
* the device-tree, oh well ...
@@ -137,68 +116,31 @@ static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
name = "optical-drive-temp";
else if (!strcmp(loc, "HD Temp"))
name = "hard-drive-temp";
+ else if (!strcmp(loc, "PCI SLOTS"))
+ name = "slots-temp";
+ else if (!strcmp(loc, "CPU A INLET"))
+ name = "cpu-inlet-temp-0";
+ else if (!strcmp(loc, "CPU B INLET"))
+ name = "cpu-inlet-temp-1";
else
- goto fail;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = (addr >> 1) & 0x7f;
- info.platform_data = name;
- strlcpy(info.type, ds1775 ? "wf_ds1775" : "wf_lm75", I2C_NAME_SIZE);
-
- client = i2c_new_device(adapter, &info);
- if (client == NULL) {
- printk(KERN_ERR "windfarm: failed to attach %s %s to i2c\n",
- ds1775 ? "ds1775" : "lm75", name);
- goto fail;
- }
-
- /*
- * Let i2c-core delete that device on driver removal.
- * This is safe because i2c-core holds the core_lock mutex for us.
- */
- list_add_tail(&client->detected, &wf_lm75_driver.clients);
- return client;
- fail:
- return NULL;
-}
-
-static int wf_lm75_attach(struct i2c_adapter *adapter)
-{
- struct device_node *busnode, *dev;
- struct pmac_i2c_bus *bus;
+ return -ENXIO;
+
- DBG("wf_lm75: adapter %s detected\n", adapter->name);
-
- bus = pmac_i2c_adapter_to_bus(adapter);
- if (bus == NULL)
+ lm = kzalloc(sizeof(struct wf_lm75_sensor), GFP_KERNEL);
+ if (lm == NULL)
return -ENODEV;
- busnode = pmac_i2c_get_bus_node(bus);
- DBG("wf_lm75: bus found, looking for device...\n");
-
- /* Now look for lm75(s) in there */
- for (dev = NULL;
- (dev = of_get_next_child(busnode, dev)) != NULL;) {
- const char *loc =
- of_get_property(dev, "hwsensor-location", NULL);
- u8 addr;
+ lm->inited = 0;
+ lm->ds1775 = ds1775;
+ lm->i2c = client;
+ lm->sens.name = (char *)name; /* XXX fix constness in structure */
+ lm->sens.ops = &wf_lm75_ops;
+ i2c_set_clientdata(client, lm);
- /* We must re-match the adapter in order to properly check
- * the channel on multibus setups
- */
- if (!pmac_i2c_match_adapter(dev, adapter))
- continue;
- addr = pmac_i2c_get_dev_addr(dev);
- if (loc == NULL || addr == 0)
- continue;
- /* real lm75 */
- if (of_device_is_compatible(dev, "lm75"))
- wf_lm75_create(adapter, addr, 0, loc);
- /* ds1775 (compatible, better resolution */
- else if (of_device_is_compatible(dev, "ds1775"))
- wf_lm75_create(adapter, addr, 1, loc);
- }
- return 0;
+ rc = wf_register_sensor(&lm->sens);
+ if (rc)
+ kfree(lm);
+ return rc;
}
static int wf_lm75_remove(struct i2c_client *client)
@@ -217,16 +159,16 @@ static int wf_lm75_remove(struct i2c_client *client)
}
static const struct i2c_device_id wf_lm75_id[] = {
- { "wf_lm75", 0 },
- { "wf_ds1775", 1 },
+ { "MAC,lm75", 0 },
+ { "MAC,ds1775", 1 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wf_lm75_id);
static struct i2c_driver wf_lm75_driver = {
.driver = {
.name = "wf_lm75",
},
- .attach_adapter = wf_lm75_attach,
.probe = wf_lm75_probe,
.remove = wf_lm75_remove,
.id_table = wf_lm75_id,
@@ -234,11 +176,6 @@ static struct i2c_driver wf_lm75_driver = {
static int __init wf_lm75_sensor_init(void)
{
- /* Don't register on old machines that use therm_pm72 for now */
- if (of_machine_is_compatible("PowerMac7,2") ||
- of_machine_is_compatible("PowerMac7,3") ||
- of_machine_is_compatible("RackMac3,1"))
- return -ENODEV;
return i2c_add_driver(&wf_lm75_driver);
}
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
new file mode 100644
index 000000000000..c071aab79dd1
--- /dev/null
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -0,0 +1,201 @@
+/*
+ * Windfarm PowerMac thermal control. LM87 sensor
+ *
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * Released under the term of the GNU GPL v2.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/sections.h>
+#include <asm/pmac_low_i2c.h>
+
+#include "windfarm.h"
+
+#define VERSION "1.0"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(args...) printk(args)
+#else
+#define DBG(args...) do { } while(0)
+#endif
+
+struct wf_lm87_sensor {
+ struct i2c_client *i2c;
+ struct wf_sensor sens;
+};
+#define wf_to_lm87(c) container_of(c, struct wf_lm87_sensor, sens)
+
+
+static int wf_lm87_read_reg(struct i2c_client *chip, int reg)
+{
+ int rc, tries = 0;
+ u8 buf;
+
+ for (;;) {
+ /* Set address */
+ buf = (u8)reg;
+ rc = i2c_master_send(chip, &buf, 1);
+ if (rc <= 0)
+ goto error;
+ rc = i2c_master_recv(chip, &buf, 1);
+ if (rc <= 0)
+ goto error;
+ return (int)buf;
+ error:
+ DBG("wf_lm87: Error reading LM87, retrying...\n");
+ if (++tries > 10) {
+ printk(KERN_ERR "wf_lm87: Error reading LM87 !\n");
+ return -EIO;
+ }
+ msleep(10);
+ }
+}
+
+static int wf_lm87_get(struct wf_sensor *sr, s32 *value)
+{
+ struct wf_lm87_sensor *lm = sr->priv;
+ s32 temp;
+
+ if (lm->i2c == NULL)
+ return -ENODEV;
+
+#define LM87_INT_TEMP 0x27
+
+ /* Read temperature register */
+ temp = wf_lm87_read_reg(lm->i2c, LM87_INT_TEMP);
+ if (temp < 0)
+ return temp;
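+ /* The register reads whole degrees C; shift into the 16.16
+ * fixed-point format used by windfarm sensors
+ */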
+ *value = temp << 16;
+
+ return 0;
+}
+
+static void wf_lm87_release(struct wf_sensor *sr)
+{
+ struct wf_lm87_sensor *lm = wf_to_lm87(sr);
+
+ kfree(lm);
+}
+
+static struct wf_sensor_ops wf_lm87_ops = {
+ .get_value = wf_lm87_get,
+ .release = wf_lm87_release,
+ .owner = THIS_MODULE,
+};
+
+static int wf_lm87_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct wf_lm87_sensor *lm;
+ const char *name = NULL, *loc;
+ struct device_node *np = NULL;
+ int rc;
+
+ /*
+ * The lm87 contains a whole pile of sensors; additionally,
+ * the Xserve G5 has several lm87's. However, for now we only
+ * care about the internal temperature sensor
+ */
+ while ((np = of_get_next_child(client->dev.of_node, np)) != NULL) {
+ if (strcmp(np->name, "int-temp"))
+ continue;
+ loc = of_get_property(np, "location", NULL);
+ if (!loc)
+ continue;
+ if (strstr(loc, "DIMM"))
+ name = "dimms-temp";
+ else if (strstr(loc, "Processors"))
+ name = "between-cpus-temp";
+ if (name) {
+ of_node_put(np);
+ break;
+ }
+ }
+ if (!name) {
+ pr_warning("wf_lm87: Unsupported sensor %s\n",
+ client->dev.of_node->full_name);
+ return -ENODEV;
+ }
+
+ lm = kzalloc(sizeof(struct wf_lm87_sensor), GFP_KERNEL);
+ if (lm == NULL)
+ return -ENODEV;
+
+ lm->i2c = client;
+ lm->sens.name = name;
+ lm->sens.ops = &wf_lm87_ops;
+ lm->sens.priv = lm;
+ i2c_set_clientdata(client, lm);
+
+ rc = wf_register_sensor(&lm->sens);
+ if (rc)
+ kfree(lm);
+ return rc;
+}
+
+static int wf_lm87_remove(struct i2c_client *client)
+{
+ struct wf_lm87_sensor *lm = i2c_get_clientdata(client);
+
+	DBG("wf_lm87: i2c detach called for %s\n", lm->sens.name);
+
+ /* Mark client detached */
+ lm->i2c = NULL;
+
+ /* release sensor */
+ wf_unregister_sensor(&lm->sens);
+
+ return 0;
+}
+
+static const struct i2c_device_id wf_lm87_id[] = {
+ { "MAC,lm87cimt", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
+
+static struct i2c_driver wf_lm87_driver = {
+ .driver = {
+ .name = "wf_lm87",
+ },
+ .probe = wf_lm87_probe,
+ .remove = wf_lm87_remove,
+ .id_table = wf_lm87_id,
+};
+
+static int __init wf_lm87_sensor_init(void)
+{
+ /* We only support this on the Xserve */
+ if (!of_machine_is_compatible("RackMac3,1"))
+ return -ENODEV;
+
+ return i2c_add_driver(&wf_lm87_driver);
+}
+
+static void __exit wf_lm87_sensor_exit(void)
+{
+ i2c_del_driver(&wf_lm87_driver);
+}
+
+
+module_init(wf_lm87_sensor_init);
+module_exit(wf_lm87_sensor_exit);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("LM87 sensor objects for PowerMacs thermal control");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 8204113268f4..371b058d2f7d 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -16,7 +16,7 @@
#include "windfarm.h"
-#define VERSION "0.2"
+#define VERSION "1.0"
/* This currently only exports the external temperature sensor,
since that's all the control loops need. */
@@ -64,9 +64,29 @@ static struct wf_sensor_ops wf_max6690_ops = {
static int wf_max6690_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const char *name, *loc;
struct wf_6690_sensor *max;
int rc;
+ loc = of_get_property(client->dev.of_node, "hwsensor-location", NULL);
+ if (!loc) {
+ dev_warn(&client->dev, "Missing hwsensor-location property!\n");
+ return -ENXIO;
+ }
+
+ /*
+ * We only expose the external temperature register for
+ * now as this is all we need for our control loops
+ */
+ if (!strcmp(loc, "BACKSIDE") || !strcmp(loc, "SYS CTRLR AMBIENT"))
+ name = "backside-temp";
+ else if (!strcmp(loc, "NB Ambient"))
+ name = "north-bridge-temp";
+ else if (!strcmp(loc, "GPU Ambient"))
+ name = "gpu-temp";
+ else
+ return -ENXIO;
+
max = kzalloc(sizeof(struct wf_6690_sensor), GFP_KERNEL);
if (max == NULL) {
printk(KERN_ERR "windfarm: Couldn't create MAX6690 sensor: "
@@ -75,90 +95,16 @@ static int wf_max6690_probe(struct i2c_client *client,
}
max->i2c = client;
- max->sens.name = client->dev.platform_data;
+ max->sens.name = (char *)name; /* XXX fix constness in structure */
max->sens.ops = &wf_max6690_ops;
i2c_set_clientdata(client, max);
rc = wf_register_sensor(&max->sens);
- if (rc) {
+ if (rc)
kfree(max);
- }
-
return rc;
}
-static struct i2c_driver wf_max6690_driver;
-
-static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
- u8 addr, const char *loc)
-{
- struct i2c_board_info info;
- struct i2c_client *client;
- char *name;
-
- if (!strcmp(loc, "BACKSIDE"))
- name = "backside-temp";
- else if (!strcmp(loc, "NB Ambient"))
- name = "north-bridge-temp";
- else if (!strcmp(loc, "GPU Ambient"))
- name = "gpu-temp";
- else
- goto fail;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = addr >> 1;
- info.platform_data = name;
- strlcpy(info.type, "wf_max6690", I2C_NAME_SIZE);
-
- client = i2c_new_device(adapter, &info);
- if (client == NULL) {
- printk(KERN_ERR "windfarm: failed to attach MAX6690 sensor\n");
- goto fail;
- }
-
- /*
- * Let i2c-core delete that device on driver removal.
- * This is safe because i2c-core holds the core_lock mutex for us.
- */
- list_add_tail(&client->detected, &wf_max6690_driver.clients);
- return client;
-
- fail:
- return NULL;
-}
-
-static int wf_max6690_attach(struct i2c_adapter *adapter)
-{
- struct device_node *busnode, *dev = NULL;
- struct pmac_i2c_bus *bus;
- const char *loc;
-
- bus = pmac_i2c_adapter_to_bus(adapter);
- if (bus == NULL)
- return -ENODEV;
- busnode = pmac_i2c_get_bus_node(bus);
-
- while ((dev = of_get_next_child(busnode, dev)) != NULL) {
- u8 addr;
-
- /* We must re-match the adapter in order to properly check
- * the channel on multibus setups
- */
- if (!pmac_i2c_match_adapter(dev, adapter))
- continue;
- if (!of_device_is_compatible(dev, "max6690"))
- continue;
- addr = pmac_i2c_get_dev_addr(dev);
- loc = of_get_property(dev, "hwsensor-location", NULL);
- if (loc == NULL || addr == 0)
- continue;
- printk("found max6690, loc=%s addr=0x%02x\n", loc, addr);
- wf_max6690_create(adapter, addr, loc);
- }
-
- return 0;
-}
-
static int wf_max6690_remove(struct i2c_client *client)
{
struct wf_6690_sensor *max = i2c_get_clientdata(client);
@@ -170,15 +116,15 @@ static int wf_max6690_remove(struct i2c_client *client)
}
static const struct i2c_device_id wf_max6690_id[] = {
- { "wf_max6690", 0 },
+ { "MAC,max6690", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
static struct i2c_driver wf_max6690_driver = {
.driver = {
.name = "wf_max6690",
},
- .attach_adapter = wf_max6690_attach,
.probe = wf_max6690_probe,
.remove = wf_max6690_remove,
.id_table = wf_max6690_id,
@@ -186,11 +132,6 @@ static struct i2c_driver wf_max6690_driver = {
static int __init wf_max6690_sensor_init(void)
{
- /* Don't register on old machines that use therm_pm72 for now */
- if (of_machine_is_compatible("PowerMac7,2") ||
- of_machine_is_compatible("PowerMac7,3") ||
- of_machine_is_compatible("RackMac3,1"))
- return -ENODEV;
return i2c_add_driver(&wf_max6690_driver);
}
diff --git a/drivers/macintosh/windfarm_mpu.h b/drivers/macintosh/windfarm_mpu.h
new file mode 100644
index 000000000000..046edc8c2ec5
--- /dev/null
+++ b/drivers/macintosh/windfarm_mpu.h
@@ -0,0 +1,105 @@
+/*
+ * Windfarm PowerMac thermal control
+ *
+ * Copyright 2012 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * Released under the term of the GNU GPL v2.
+ */
+
+#ifndef __WINDFARM_MPU_H
+#define __WINDFARM_MPU_H
+
+typedef unsigned short fu16;
+typedef int fs32;
+typedef short fs16;
+
+/* Definition of the MPU data structure which contains per CPU
+ * calibration information (among others) for the G5 machines
+ */
+struct mpu_data
+{
+ u8 signature; /* 0x00 - EEPROM sig. */
+ u8 bytes_used; /* 0x01 - Bytes used in eeprom (160 ?) */
+ u8 size; /* 0x02 - EEPROM size (256 ?) */
+ u8 version; /* 0x03 - EEPROM version */
+ u32 data_revision; /* 0x04 - Dataset revision */
+ u8 processor_bin_code[3]; /* 0x08 - Processor BIN code */
+ u8 bin_code_expansion; /* 0x0b - ??? (padding ?) */
+ u8 processor_num; /* 0x0c - Number of CPUs on this MPU */
+ u8 input_mul_bus_div; /* 0x0d - Clock input multiplier/bus divider */
+ u8 reserved1[2]; /* 0x0e - */
+ u32 input_clk_freq_high; /* 0x10 - Input clock frequency high */
+ u8 cpu_nb_target_cycles; /* 0x14 - ??? */
+ u8 cpu_statlat; /* 0x15 - ??? */
+ u8 cpu_snooplat; /* 0x16 - ??? */
+ u8 cpu_snoopacc; /* 0x17 - ??? */
+ u8 nb_paamwin; /* 0x18 - ??? */
+ u8 nb_statlat; /* 0x19 - ??? */
+ u8 nb_snooplat; /* 0x1a - ??? */
+ u8 nb_snoopwin; /* 0x1b - ??? */
+ u8 api_bus_mode; /* 0x1c - ??? */
+ u8 reserved2[3]; /* 0x1d - */
+ u32 input_clk_freq_low; /* 0x20 - Input clock frequency low */
+ u8 processor_card_slot; /* 0x24 - Processor card slot number */
+ u8 reserved3[2]; /* 0x25 - */
+ u8 padjmax; /* 0x27 - Max power adjustment (Not in OF!) */
+ u8 ttarget; /* 0x28 - Target temperature */
+ u8 tmax; /* 0x29 - Max temperature */
+ u8 pmaxh; /* 0x2a - Max power */
+ u8 tguardband; /* 0x2b - Guardband temp ??? Hist. len in OSX */
+ fs32 pid_gp; /* 0x2c - PID proportional gain */
+ fs32 pid_gr; /* 0x30 - PID reset gain */
+ fs32 pid_gd; /* 0x34 - PID derivative gain */
+ fu16 voph; /* 0x38 - Vop High */
+ fu16 vopl; /* 0x3a - Vop Low */
+ fs16 nactual_die; /* 0x3c - nActual Die */
+ fs16 nactual_heatsink; /* 0x3e - nActual Heatsink */
+ fs16 nactual_system; /* 0x40 - nActual System */
+ u16 calibration_flags; /* 0x42 - Calibration flags */
+ fu16 mdiode; /* 0x44 - Diode M value (scaling factor) */
+ fs16 bdiode; /* 0x46 - Diode B value (offset) */
+ fs32 theta_heat_sink; /* 0x48 - Theta heat sink */
+ u16 rminn_intake_fan; /* 0x4c - Intake fan min RPM */
+ u16 rmaxn_intake_fan; /* 0x4e - Intake fan max RPM */
+ u16 rminn_exhaust_fan; /* 0x50 - Exhaust fan min RPM */
+ u16 rmaxn_exhaust_fan; /* 0x52 - Exhaust fan max RPM */
+ u8 processor_part_num[8]; /* 0x54 - Processor part number XX pumps min/max */
+ u32 processor_lot_num; /* 0x5c - Processor lot number */
+ u8 orig_card_sernum[0x10]; /* 0x60 - Card original serial number */
+ u8 curr_card_sernum[0x10]; /* 0x70 - Card current serial number */
+ u8 mlb_sernum[0x18]; /* 0x80 - MLB serial number */
+ u32 checksum1; /* 0x98 - */
+ u32 checksum2; /* 0x9c - */
+}; /* Total size = 0xa0 */
+
+static inline const struct mpu_data *wf_get_mpu(int cpu)
+{
+ struct device_node *np;
+ char nodename[64];
+ const void *data;
+ int len;
+
+ /*
+ * prom.c routine for finding a node by path is a bit brain dead
+ * and requires exact @xxx unit numbers. This is a bit ugly but
+ * will work for these machines
+ */
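+	/* The resulting path is /u3@0,f8000000/i2c@f8001000/cpuid@a0 for
+	 * CPU 0 and /u3@0,f8000000/i2c@f8001000/cpuid@a2 for CPU 1.
+	 */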
+ sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
+ np = of_find_node_by_path(nodename);
+ if (!np)
+ return NULL;
+ data = of_get_property(np, "cpuid", &len);
+ of_node_put(np);
+ if (!data)
+ return NULL;
+
+ /*
+	 * We are naughty: we have dropped the reference to the device
+	 * node yet still return a pointer to its content. We can get
+	 * away with it because this is only ever called on PowerMacs,
+	 * which cannot remove those nodes.
+ */
+ return data;
+}
+
+#endif /* __WINDFARM_MPU_H */
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
new file mode 100644
index 000000000000..84ac913d7e3a
--- /dev/null
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -0,0 +1,847 @@
+/*
+ * Windfarm PowerMac thermal control.
+ * Control loops for PowerMac7,2 and 7,3
+ *
+ * Copyright (C) 2012 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * Use and redistribute under the terms of the GNU GPL v2.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <asm/prom.h>
+#include <asm/smu.h>
+
+#include "windfarm.h"
+#include "windfarm_pid.h"
+#include "windfarm_mpu.h"
+
+#define VERSION "1.0"
+
+#undef DEBUG
+#undef LOTSA_DEBUG
+
+#ifdef DEBUG
+#define DBG(args...) printk(args)
+#else
+#define DBG(args...) do { } while(0)
+#endif
+
+#ifdef LOTSA_DEBUG
+#define DBG_LOTS(args...) printk(args)
+#else
+#define DBG_LOTS(args...) do { } while(0)
+#endif
+
+/* define this to force CPU overtemp to 60 degrees, useful for testing
+ * the overtemp code
+ */
+#undef HACKED_OVERTEMP
+
+/* We currently only handle 2 chips */
+#define NR_CHIPS 2
+#define NR_CPU_FANS	(3 * NR_CHIPS)
+
+/* Controls and sensors */
+static struct wf_sensor *sens_cpu_temp[NR_CHIPS];
+static struct wf_sensor *sens_cpu_volts[NR_CHIPS];
+static struct wf_sensor *sens_cpu_amps[NR_CHIPS];
+static struct wf_sensor *backside_temp;
+static struct wf_sensor *drives_temp;
+
+static struct wf_control *cpu_front_fans[NR_CHIPS];
+static struct wf_control *cpu_rear_fans[NR_CHIPS];
+static struct wf_control *cpu_pumps[NR_CHIPS];
+static struct wf_control *backside_fan;
+static struct wf_control *drives_fan;
+static struct wf_control *slots_fan;
+static struct wf_control *cpufreq_clamp;
+
+/* We keep a temperature history for average calculation of 180s */
+#define CPU_TEMP_HIST_SIZE 180
+
+/* Fixed speed for slot fan */
+#define SLOTS_FAN_DEFAULT_PWM 40
+
+/* Scale value for CPU intake fans */
+#define CPU_INTAKE_SCALE 0x0000f852
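+/* (16.16 fixed point: 0xf852 / 0x10000 ~= 0.97, i.e. the intake fans
+ * are driven at roughly 97% of the exhaust fan target)
+ */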
+
+/* PID loop state */
+static const struct mpu_data *cpu_mpu_data[NR_CHIPS];
+static struct wf_cpu_pid_state cpu_pid[NR_CHIPS];
+static bool cpu_pid_combined;
+static u32 cpu_thist[CPU_TEMP_HIST_SIZE];
+static int cpu_thist_pt;
+static s64 cpu_thist_total;
+static s32 cpu_all_tmax = 100 << 16;
+static struct wf_pid_state backside_pid;
+static int backside_tick;
+static struct wf_pid_state drives_pid;
+static int drives_tick;
+
+static int nr_chips;
+static bool have_all_controls;
+static bool have_all_sensors;
+static bool started;
+
+static int failure_state;
+#define FAILURE_SENSOR 1
+#define FAILURE_FAN 2
+#define FAILURE_PERM 4
+#define FAILURE_LOW_OVERTEMP 8
+#define FAILURE_HIGH_OVERTEMP 16
+
+/* Overtemp values */
+#define LOW_OVER_AVERAGE 0
+#define LOW_OVER_IMMEDIATE (10 << 16)
+#define LOW_OVER_CLEAR ((-10) << 16)
+#define HIGH_OVER_IMMEDIATE (14 << 16)
+#define HIGH_OVER_AVERAGE (10 << 16)
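+/* (all of the above are 16.16 fixed point offsets in degrees, added
+ * to cpu_all_tmax when checking for overtemp)
+ */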
+
+
+static void cpu_max_all_fans(void)
+{
+ int i;
+
+	/* We max all CPU fans in case of a sensor error. We also do the
+	 * cpufreq clamping now; even though the generic code supposedly
+	 * does it later anyway, doing it here lets us react faster.
+ */
+ if (cpufreq_clamp)
+ wf_control_set_max(cpufreq_clamp);
+ for (i = 0; i < nr_chips; i++) {
+ if (cpu_front_fans[i])
+ wf_control_set_max(cpu_front_fans[i]);
+ if (cpu_rear_fans[i])
+ wf_control_set_max(cpu_rear_fans[i]);
+ if (cpu_pumps[i])
+ wf_control_set_max(cpu_pumps[i]);
+ }
+}
+
+static int cpu_check_overtemp(s32 temp)
+{
+ int new_state = 0;
+ s32 t_avg, t_old;
+ static bool first = true;
+
+ /* First check for immediate overtemps */
+ if (temp >= (cpu_all_tmax + LOW_OVER_IMMEDIATE)) {
+ new_state |= FAILURE_LOW_OVERTEMP;
+ if ((failure_state & FAILURE_LOW_OVERTEMP) == 0)
+ printk(KERN_ERR "windfarm: Overtemp due to immediate CPU"
+ " temperature !\n");
+ }
+ if (temp >= (cpu_all_tmax + HIGH_OVER_IMMEDIATE)) {
+ new_state |= FAILURE_HIGH_OVERTEMP;
+ if ((failure_state & FAILURE_HIGH_OVERTEMP) == 0)
+ printk(KERN_ERR "windfarm: Critical overtemp due to"
+ " immediate CPU temperature !\n");
+ }
+
+ /*
+ * The first time around, initialize the array with the first
+ * temperature reading
+ */
+ if (first) {
+ int i;
+
+ cpu_thist_total = 0;
+ for (i = 0; i < CPU_TEMP_HIST_SIZE; i++) {
+ cpu_thist[i] = temp;
+ cpu_thist_total += temp;
+ }
+ first = false;
+ }
+
+ /*
+ * We calculate a history of max temperatures and use that for the
+ * overtemp management
+ */
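+	/* cpu_thist is a ring buffer of the last CPU_TEMP_HIST_SIZE (180)
+	 * max-temperature samples; keeping a running total lets the
+	 * average be updated in O(1): drop the oldest sample, add the
+	 * new one.
+	 */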
+ t_old = cpu_thist[cpu_thist_pt];
+ cpu_thist[cpu_thist_pt] = temp;
+ cpu_thist_pt = (cpu_thist_pt + 1) % CPU_TEMP_HIST_SIZE;
+ cpu_thist_total -= t_old;
+ cpu_thist_total += temp;
+ t_avg = cpu_thist_total / CPU_TEMP_HIST_SIZE;
+
+ DBG_LOTS(" t_avg = %d.%03d (out: %d.%03d, in: %d.%03d)\n",
+ FIX32TOPRINT(t_avg), FIX32TOPRINT(t_old), FIX32TOPRINT(temp));
+
+ /* Now check for average overtemps */
+ if (t_avg >= (cpu_all_tmax + LOW_OVER_AVERAGE)) {
+ new_state |= FAILURE_LOW_OVERTEMP;
+ if ((failure_state & FAILURE_LOW_OVERTEMP) == 0)
+ printk(KERN_ERR "windfarm: Overtemp due to average CPU"
+ " temperature !\n");
+ }
+ if (t_avg >= (cpu_all_tmax + HIGH_OVER_AVERAGE)) {
+ new_state |= FAILURE_HIGH_OVERTEMP;
+ if ((failure_state & FAILURE_HIGH_OVERTEMP) == 0)
+ printk(KERN_ERR "windfarm: Critical overtemp due to"
+ " average CPU temperature !\n");
+ }
+
+ /* Now handle overtemp conditions. We don't currently use the windfarm
+	 * overtemp handling core as it's not fully suited to the needs of these
+	 * new machines. This will be fixed later.
+ */
+ if (new_state) {
+ /* High overtemp -> immediate shutdown */
+ if (new_state & FAILURE_HIGH_OVERTEMP)
+ machine_power_off();
+ if ((failure_state & new_state) != new_state)
+ cpu_max_all_fans();
+ failure_state |= new_state;
+ } else if ((failure_state & FAILURE_LOW_OVERTEMP) &&
+ (temp < (cpu_all_tmax + LOW_OVER_CLEAR))) {
+ printk(KERN_ERR "windfarm: Overtemp condition cleared !\n");
+ failure_state &= ~FAILURE_LOW_OVERTEMP;
+ }
+
+ return failure_state & (FAILURE_LOW_OVERTEMP | FAILURE_HIGH_OVERTEMP);
+}
+
+static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power)
+{
+ s32 dtemp, volts, amps;
+ int rc;
+
+ /* Get diode temperature */
+ rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp);
+ if (rc) {
+ DBG(" CPU%d: temp reading error !\n", cpu);
+ return -EIO;
+ }
+ DBG_LOTS(" CPU%d: temp = %d.%03d\n", cpu, FIX32TOPRINT((dtemp)));
+ *temp = dtemp;
+
+ /* Get voltage */
+ rc = wf_sensor_get(sens_cpu_volts[cpu], &volts);
+ if (rc) {
+ DBG(" CPU%d, volts reading error !\n", cpu);
+ return -EIO;
+ }
+ DBG_LOTS(" CPU%d: volts = %d.%03d\n", cpu, FIX32TOPRINT((volts)));
+
+ /* Get current */
+ rc = wf_sensor_get(sens_cpu_amps[cpu], &amps);
+ if (rc) {
+ DBG(" CPU%d, current reading error !\n", cpu);
+ return -EIO;
+ }
+ DBG_LOTS(" CPU%d: amps = %d.%03d\n", cpu, FIX32TOPRINT((amps)));
+
+ /* Calculate power */
+
+ /* Scale voltage and current raw sensor values according to fixed scales
+ * obtained in Darwin and calculate power from I and V
+ */
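+	/* volts and amps are 16.16 fixed point values, so the 64-bit
+	 * product is 32.32; shifting right by 16 yields the power (V * A)
+	 * back in 16.16 format, e.g. 1.5 (0x00018000) * 20 (0x00140000)
+	 * >> 16 = 30 (0x001e0000).
+	 */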
+ *power = (((u64)volts) * ((u64)amps)) >> 16;
+
+ DBG_LOTS(" CPU%d: power = %d.%03d\n", cpu, FIX32TOPRINT((*power)));
+
+ return 0;
+
+}
+
+static void cpu_fans_tick_split(void)
+{
+ int err, cpu;
+ s32 intake, temp, power, t_max = 0;
+
+ DBG_LOTS("* cpu fans_tick_split()\n");
+
+ for (cpu = 0; cpu < nr_chips; ++cpu) {
+ struct wf_cpu_pid_state *sp = &cpu_pid[cpu];
+
+ /* Read current speed */
+ wf_control_get(cpu_rear_fans[cpu], &sp->target);
+
+ DBG_LOTS(" CPU%d: cur_target = %d RPM\n", cpu, sp->target);
+
+ err = read_one_cpu_vals(cpu, &temp, &power);
+ if (err) {
+ failure_state |= FAILURE_SENSOR;
+ cpu_max_all_fans();
+ return;
+ }
+
+ /* Keep track of highest temp */
+ t_max = max(t_max, temp);
+
+ /* Handle possible overtemps */
+ if (cpu_check_overtemp(t_max))
+ return;
+
+ /* Run PID */
+ wf_cpu_pid_run(sp, power, temp);
+
+ DBG_LOTS(" CPU%d: target = %d RPM\n", cpu, sp->target);
+
+ /* Apply result directly to exhaust fan */
+ err = wf_control_set(cpu_rear_fans[cpu], sp->target);
+ if (err) {
+ pr_warning("wf_pm72: Fan %s reports error %d\n",
+ cpu_rear_fans[cpu]->name, err);
+ failure_state |= FAILURE_FAN;
+ break;
+ }
+
+ /* Scale result for intake fan */
+ intake = (sp->target * CPU_INTAKE_SCALE) >> 16;
+ DBG_LOTS(" CPU%d: intake = %d RPM\n", cpu, intake);
+ err = wf_control_set(cpu_front_fans[cpu], intake);
+ if (err) {
+ pr_warning("wf_pm72: Fan %s reports error %d\n",
+ cpu_front_fans[cpu]->name, err);
+ failure_state |= FAILURE_FAN;
+ break;
+ }
+ }
+}
+
+static void cpu_fans_tick_combined(void)
+{
+ s32 temp0, power0, temp1, power1, t_max = 0;
+ s32 temp, power, intake, pump;
+ struct wf_control *pump0, *pump1;
+ struct wf_cpu_pid_state *sp = &cpu_pid[0];
+ int err, cpu;
+
+ DBG_LOTS("* cpu fans_tick_combined()\n");
+
+ /* Read current speed from cpu 0 */
+ wf_control_get(cpu_rear_fans[0], &sp->target);
+
+ DBG_LOTS(" CPUs: cur_target = %d RPM\n", sp->target);
+
+ /* Read values for both CPUs */
+ err = read_one_cpu_vals(0, &temp0, &power0);
+ if (err) {
+ failure_state |= FAILURE_SENSOR;
+ cpu_max_all_fans();
+ return;
+ }
+ err = read_one_cpu_vals(1, &temp1, &power1);
+ if (err) {
+ failure_state |= FAILURE_SENSOR;
+ cpu_max_all_fans();
+ return;
+ }
+
+ /* Keep track of highest temp */
+ t_max = max(t_max, max(temp0, temp1));
+
+ /* Handle possible overtemps */
+ if (cpu_check_overtemp(t_max))
+ return;
+
+ /* Use the max temp & power of both */
+ temp = max(temp0, temp1);
+ power = max(power0, power1);
+
+ /* Run PID */
+ wf_cpu_pid_run(sp, power, temp);
+
+ /* Scale result for intake fan */
+ intake = (sp->target * CPU_INTAKE_SCALE) >> 16;
+
+ /* Same deal with pump speed */
+ pump0 = cpu_pumps[0];
+ pump1 = cpu_pumps[1];
+ if (!pump0) {
+ pump0 = pump1;
+ pump1 = NULL;
+ }
+ pump = (sp->target * wf_control_get_max(pump0)) /
+ cpu_mpu_data[0]->rmaxn_exhaust_fan;
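+	/* i.e. the pumps track the exhaust fans: the PID target is rescaled
+	 * from the MPU's max exhaust RPM onto the pump's own RPM range.
+	 */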
+
+ DBG_LOTS(" CPUs: target = %d RPM\n", sp->target);
+ DBG_LOTS(" CPUs: intake = %d RPM\n", intake);
+ DBG_LOTS(" CPUs: pump = %d RPM\n", pump);
+
+ for (cpu = 0; cpu < nr_chips; cpu++) {
+ err = wf_control_set(cpu_rear_fans[cpu], sp->target);
+ if (err) {
+ pr_warning("wf_pm72: Fan %s reports error %d\n",
+ cpu_rear_fans[cpu]->name, err);
+ failure_state |= FAILURE_FAN;
+ }
+ err = wf_control_set(cpu_front_fans[cpu], intake);
+ if (err) {
+ pr_warning("wf_pm72: Fan %s reports error %d\n",
+ cpu_front_fans[cpu]->name, err);
+ failure_state |= FAILURE_FAN;
+ }
+ err = 0;
+ if (cpu_pumps[cpu])
+ err = wf_control_set(cpu_pumps[cpu], pump);
+ if (err) {
+ pr_warning("wf_pm72: Pump %s reports error %d\n",
+ cpu_pumps[cpu]->name, err);
+ failure_state |= FAILURE_FAN;
+ }
+ }
+}
+
+/* Implementation... */
+static int cpu_setup_pid(int cpu)
+{
+ struct wf_cpu_pid_param pid;
+ const struct mpu_data *mpu = cpu_mpu_data[cpu];
+ s32 tmax, ttarget, ptarget;
+ int fmin, fmax, hsize;
+
+ /* Get PID params from the appropriate MPU EEPROM */
+ tmax = mpu->tmax << 16;
+ ttarget = mpu->ttarget << 16;
+ ptarget = ((s32)(mpu->pmaxh - mpu->padjmax)) << 16;
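+	/* The MPU EEPROM stores these as plain integers; shifting left
+	 * by 16 converts them to the 16.16 fixed point format the PID
+	 * code works with.
+	 */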
+
+ DBG("wf_72: CPU%d ttarget = %d.%03d, tmax = %d.%03d\n",
+ cpu, FIX32TOPRINT(ttarget), FIX32TOPRINT(tmax));
+
+ /* We keep a global tmax for overtemp calculations */
+ if (tmax < cpu_all_tmax)
+ cpu_all_tmax = tmax;
+
+ /* Set PID min/max by using the rear fan min/max */
+ fmin = wf_control_get_min(cpu_rear_fans[cpu]);
+ fmax = wf_control_get_max(cpu_rear_fans[cpu]);
+ DBG("wf_72: CPU%d max RPM range = [%d..%d]\n", cpu, fmin, fmax);
+
+ /* History size */
+ hsize = min_t(int, mpu->tguardband, WF_PID_MAX_HISTORY);
+ DBG("wf_72: CPU%d history size = %d\n", cpu, hsize);
+
+ /* Initialize PID loop */
+ pid.interval = 1; /* seconds */
+ pid.history_len = hsize;
+ pid.gd = mpu->pid_gd;
+ pid.gp = mpu->pid_gp;
+ pid.gr = mpu->pid_gr;
+ pid.tmax = tmax;
+ pid.ttarget = ttarget;
+ pid.pmaxadj = ptarget;
+ pid.min = fmin;
+ pid.max = fmax;
+
+ wf_cpu_pid_init(&cpu_pid[cpu], &pid);
+ cpu_pid[cpu].target = 1000;
+
+ return 0;
+}
+
+/* Backside/U3 fan */
+static struct wf_pid_param backside_u3_param = {
+ .interval = 5,
+ .history_len = 2,
+ .gd = 40 << 20,
+ .gp = 5 << 20,
+ .gr = 0,
+ .itarget = 65 << 16,
+ .additive = 1,
+ .min = 20,
+ .max = 100,
+};
+
+static struct wf_pid_param backside_u3h_param = {
+ .interval = 5,
+ .history_len = 2,
+ .gd = 20 << 20,
+ .gp = 5 << 20,
+ .gr = 0,
+ .itarget = 75 << 16,
+ .additive = 1,
+ .min = 20,
+ .max = 100,
+};
+
+static void backside_fan_tick(void)
+{
+ s32 temp;
+ int speed;
+ int err;
+
+ if (!backside_fan || !backside_temp || !backside_tick)
+ return;
+ if (--backside_tick > 0)
+ return;
+ backside_tick = backside_pid.param.interval;
+
+ DBG_LOTS("* backside fans tick\n");
+
+ /* Update fan speed from actual fans */
+ err = wf_control_get(backside_fan, &speed);
+ if (!err)
+ backside_pid.target = speed;
+
+ err = wf_sensor_get(backside_temp, &temp);
+ if (err) {
+		printk(KERN_WARNING "windfarm: U3 temp sensor error %d\n",
+ err);
+ failure_state |= FAILURE_SENSOR;
+ wf_control_set_max(backside_fan);
+ return;
+ }
+ speed = wf_pid_run(&backside_pid, temp);
+
+ DBG_LOTS("backside PID temp=%d.%.3d speed=%d\n",
+ FIX32TOPRINT(temp), speed);
+
+ err = wf_control_set(backside_fan, speed);
+ if (err) {
+ printk(KERN_WARNING "windfarm: backside fan error %d\n", err);
+ failure_state |= FAILURE_FAN;
+ }
+}
+
+static void backside_setup_pid(void)
+{
+ /* first time initialize things */
+ s32 fmin = wf_control_get_min(backside_fan);
+ s32 fmax = wf_control_get_max(backside_fan);
+ struct wf_pid_param param;
+ struct device_node *u3;
+ int u3h = 1; /* conservative by default */
+
+ u3 = of_find_node_by_path("/u3@0,f8000000");
+ if (u3 != NULL) {
+ const u32 *vers = of_get_property(u3, "device-rev", NULL);
+ if (vers)
+ if (((*vers) & 0x3f) < 0x34)
+ u3h = 0;
+ of_node_put(u3);
+ }
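+	/* Only device revisions below 0x34 are plain U3 chips; newer or
+	 * unknown revisions default to the U3H parameter set, which is
+	 * the conservative choice.
+	 */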
+
+ param = u3h ? backside_u3h_param : backside_u3_param;
+
+ param.min = max(param.min, fmin);
+ param.max = min(param.max, fmax);
+ wf_pid_init(&backside_pid, &param);
+ backside_tick = 1;
+
+ pr_info("wf_pm72: Backside control loop started.\n");
+}
+
+/* Drive bay fan */
+static const struct wf_pid_param drives_param = {
+ .interval = 5,
+ .history_len = 2,
+ .gd = 30 << 20,
+ .gp = 5 << 20,
+ .gr = 0,
+ .itarget = 40 << 16,
+ .additive = 1,
+ .min = 300,
+ .max = 4000,
+};
+
+static void drives_fan_tick(void)
+{
+ s32 temp;
+ int speed;
+ int err;
+
+ if (!drives_fan || !drives_temp || !drives_tick)
+ return;
+ if (--drives_tick > 0)
+ return;
+ drives_tick = drives_pid.param.interval;
+
+ DBG_LOTS("* drives fans tick\n");
+
+ /* Update fan speed from actual fans */
+ err = wf_control_get(drives_fan, &speed);
+ if (!err)
+ drives_pid.target = speed;
+
+ err = wf_sensor_get(drives_temp, &temp);
+ if (err) {
+ pr_warning("wf_pm72: drive bay temp sensor error %d\n", err);
+ failure_state |= FAILURE_SENSOR;
+ wf_control_set_max(drives_fan);
+ return;
+ }
+ speed = wf_pid_run(&drives_pid, temp);
+
+ DBG_LOTS("drives PID temp=%d.%.3d speed=%d\n",
+ FIX32TOPRINT(temp), speed);
+
+ err = wf_control_set(drives_fan, speed);
+ if (err) {
+ printk(KERN_WARNING "windfarm: drive bay fan error %d\n", err);
+ failure_state |= FAILURE_FAN;
+ }
+}
+
+static void drives_setup_pid(void)
+{
+ /* first time initialize things */
+ s32 fmin = wf_control_get_min(drives_fan);
+ s32 fmax = wf_control_get_max(drives_fan);
+ struct wf_pid_param param = drives_param;
+
+ param.min = max(param.min, fmin);
+ param.max = min(param.max, fmax);
+ wf_pid_init(&drives_pid, &param);
+ drives_tick = 1;
+
+ pr_info("wf_pm72: Drive bay control loop started.\n");
+}
+
+static void set_fail_state(void)
+{
+ cpu_max_all_fans();
+
+ if (backside_fan)
+ wf_control_set_max(backside_fan);
+ if (slots_fan)
+ wf_control_set_max(slots_fan);
+ if (drives_fan)
+ wf_control_set_max(drives_fan);
+}
+
+static void pm72_tick(void)
+{
+ int i, last_failure;
+
+ if (!started) {
+ started = 1;
+ printk(KERN_INFO "windfarm: CPUs control loops started.\n");
+ for (i = 0; i < nr_chips; ++i) {
+ if (cpu_setup_pid(i) < 0) {
+ failure_state = FAILURE_PERM;
+ set_fail_state();
+ break;
+ }
+ }
+ DBG_LOTS("cpu_all_tmax=%d.%03d\n", FIX32TOPRINT(cpu_all_tmax));
+
+ backside_setup_pid();
+ drives_setup_pid();
+
+ /*
+ * We don't have the right stuff to drive the PCI fan
+ * so we fix it to a default value
+ */
+ wf_control_set(slots_fan, SLOTS_FAN_DEFAULT_PWM);
+
+#ifdef HACKED_OVERTEMP
+ cpu_all_tmax = 60 << 16;
+#endif
+ }
+
+ /* Permanent failure, bail out */
+ if (failure_state & FAILURE_PERM)
+ return;
+
+ /*
+ * Clear all failure bits except low overtemp which will be eventually
+ * cleared by the control loop itself
+ */
+ last_failure = failure_state;
+ failure_state &= FAILURE_LOW_OVERTEMP;
+ if (cpu_pid_combined)
+ cpu_fans_tick_combined();
+ else
+ cpu_fans_tick_split();
+ backside_fan_tick();
+ drives_fan_tick();
+
+ DBG_LOTS(" last_failure: 0x%x, failure_state: %x\n",
+ last_failure, failure_state);
+
+ /* Check for failures. Any failure causes cpufreq clamping */
+ if (failure_state && last_failure == 0 && cpufreq_clamp)
+ wf_control_set_max(cpufreq_clamp);
+ if (failure_state == 0 && last_failure && cpufreq_clamp)
+ wf_control_set_min(cpufreq_clamp);
+
+ /* That's it for now, we might want to deal with other failures
+ * differently in the future though
+ */
+}
+
+static void pm72_new_control(struct wf_control *ct)
+{
+ bool all_controls;
+ bool had_pump = cpu_pumps[0] || cpu_pumps[1];
+
+ if (!strcmp(ct->name, "cpu-front-fan-0"))
+ cpu_front_fans[0] = ct;
+ else if (!strcmp(ct->name, "cpu-front-fan-1"))
+ cpu_front_fans[1] = ct;
+ else if (!strcmp(ct->name, "cpu-rear-fan-0"))
+ cpu_rear_fans[0] = ct;
+ else if (!strcmp(ct->name, "cpu-rear-fan-1"))
+ cpu_rear_fans[1] = ct;
+ else if (!strcmp(ct->name, "cpu-pump-0"))
+ cpu_pumps[0] = ct;
+ else if (!strcmp(ct->name, "cpu-pump-1"))
+ cpu_pumps[1] = ct;
+ else if (!strcmp(ct->name, "backside-fan"))
+ backside_fan = ct;
+ else if (!strcmp(ct->name, "slots-fan"))
+ slots_fan = ct;
+ else if (!strcmp(ct->name, "drive-bay-fan"))
+ drives_fan = ct;
+ else if (!strcmp(ct->name, "cpufreq-clamp"))
+ cpufreq_clamp = ct;
+
+ all_controls =
+ cpu_front_fans[0] &&
+ cpu_rear_fans[0] &&
+ backside_fan &&
+ slots_fan &&
+ drives_fan;
+ if (nr_chips > 1)
+ all_controls &=
+ cpu_front_fans[1] &&
+ cpu_rear_fans[1];
+ have_all_controls = all_controls;
+
+ if ((cpu_pumps[0] || cpu_pumps[1]) && !had_pump) {
+ pr_info("wf_pm72: Liquid cooling pump(s) detected,"
+ " using new algorithm !\n");
+ cpu_pid_combined = true;
+ }
+}
+
+
+static void pm72_new_sensor(struct wf_sensor *sr)
+{
+ bool all_sensors;
+
+ if (!strcmp(sr->name, "cpu-diode-temp-0"))
+ sens_cpu_temp[0] = sr;
+ else if (!strcmp(sr->name, "cpu-diode-temp-1"))
+ sens_cpu_temp[1] = sr;
+ else if (!strcmp(sr->name, "cpu-voltage-0"))
+ sens_cpu_volts[0] = sr;
+ else if (!strcmp(sr->name, "cpu-voltage-1"))
+ sens_cpu_volts[1] = sr;
+ else if (!strcmp(sr->name, "cpu-current-0"))
+ sens_cpu_amps[0] = sr;
+ else if (!strcmp(sr->name, "cpu-current-1"))
+ sens_cpu_amps[1] = sr;
+ else if (!strcmp(sr->name, "backside-temp"))
+ backside_temp = sr;
+ else if (!strcmp(sr->name, "hd-temp"))
+ drives_temp = sr;
+
+ all_sensors =
+ sens_cpu_temp[0] &&
+ sens_cpu_volts[0] &&
+ sens_cpu_amps[0] &&
+ backside_temp &&
+ drives_temp;
+ if (nr_chips > 1)
+ all_sensors &=
+ sens_cpu_temp[1] &&
+ sens_cpu_volts[1] &&
+ sens_cpu_amps[1];
+
+ have_all_sensors = all_sensors;
+}
+
+static int pm72_wf_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case WF_EVENT_NEW_SENSOR:
+ pm72_new_sensor(data);
+ break;
+ case WF_EVENT_NEW_CONTROL:
+ pm72_new_control(data);
+ break;
+ case WF_EVENT_TICK:
+ if (have_all_controls && have_all_sensors)
+ pm72_tick();
+ }
+ return 0;
+}
+
+static struct notifier_block pm72_events = {
+ .notifier_call = pm72_wf_notify,
+};
+
+static int wf_pm72_probe(struct platform_device *dev)
+{
+ wf_register_client(&pm72_events);
+ return 0;
+}
+
+static int __devexit wf_pm72_remove(struct platform_device *dev)
+{
+ wf_unregister_client(&pm72_events);
+
+ /* should release all sensors and controls */
+ return 0;
+}
+
+static struct platform_driver wf_pm72_driver = {
+ .probe = wf_pm72_probe,
+ .remove = wf_pm72_remove,
+ .driver = {
+ .name = "windfarm",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init wf_pm72_init(void)
+{
+ struct device_node *cpu;
+ int i;
+
+ if (!of_machine_is_compatible("PowerMac7,2") &&
+ !of_machine_is_compatible("PowerMac7,3"))
+ return -ENODEV;
+
+ /* Count the number of CPU cores */
+ nr_chips = 0;
+ for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ ++nr_chips;
+ if (nr_chips > NR_CHIPS)
+ nr_chips = NR_CHIPS;
+
+ pr_info("windfarm: Initializing for desktop G5 with %d chips\n",
+ nr_chips);
+
+ /* Get MPU data for each CPU */
+ for (i = 0; i < nr_chips; i++) {
+ cpu_mpu_data[i] = wf_get_mpu(i);
+ if (!cpu_mpu_data[i]) {
+ pr_err("wf_pm72: Failed to find MPU data for CPU %d\n", i);
+ return -ENXIO;
+ }
+ }
+
+#ifdef MODULE
+ request_module("windfarm_fcu_controls");
+ request_module("windfarm_lm75_sensor");
+ request_module("windfarm_ad7417_sensor");
+ request_module("windfarm_max6690_sensor");
+ request_module("windfarm_cpufreq_clamp");
+#endif /* MODULE */
+
+ platform_driver_register(&wf_pm72_driver);
+ return 0;
+}
+
+static void __exit wf_pm72_exit(void)
+{
+ platform_driver_unregister(&wf_pm72_driver);
+}
+
+module_init(wf_pm72_init);
+module_exit(wf_pm72_exit);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("Thermal control for AGP PowerMac G5s");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:windfarm");
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index fc13d0f2663b..990c87606be9 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -302,13 +302,13 @@ static void wf_smu_create_sys_fans(void)
pid_param.interval = WF_SMU_SYS_FANS_INTERVAL;
pid_param.history_len = WF_SMU_SYS_FANS_HISTORY_SIZE;
pid_param.itarget = param->itarget;
- pid_param.min = fan_system->ops->get_min(fan_system);
- pid_param.max = fan_system->ops->get_max(fan_system);
+ pid_param.min = wf_control_get_min(fan_system);
+ pid_param.max = wf_control_get_max(fan_system);
if (fan_hd) {
pid_param.min =
- max(pid_param.min,fan_hd->ops->get_min(fan_hd));
+ max(pid_param.min, wf_control_get_min(fan_hd));
pid_param.max =
- min(pid_param.max,fan_hd->ops->get_max(fan_hd));
+ min(pid_param.max, wf_control_get_max(fan_hd));
}
wf_pid_init(&wf_smu_sys_fans->pid, &pid_param);
@@ -337,7 +337,7 @@ static void wf_smu_sys_fans_tick(struct wf_smu_sys_fans_state *st)
}
st->ticks = WF_SMU_SYS_FANS_INTERVAL;
- rc = sensor_hd_temp->ops->get_value(sensor_hd_temp, &temp);
+ rc = wf_sensor_get(sensor_hd_temp, &temp);
if (rc) {
printk(KERN_WARNING "windfarm: HD temp sensor error %d\n",
rc);
@@ -373,7 +373,7 @@ static void wf_smu_sys_fans_tick(struct wf_smu_sys_fans_state *st)
st->hd_setpoint = new_setpoint;
readjust:
if (fan_system && wf_smu_failure_state == 0) {
- rc = fan_system->ops->set_value(fan_system, st->sys_setpoint);
+ rc = wf_control_set(fan_system, st->sys_setpoint);
if (rc) {
printk(KERN_WARNING "windfarm: Sys fan error %d\n",
rc);
@@ -381,7 +381,7 @@ static void wf_smu_sys_fans_tick(struct wf_smu_sys_fans_state *st)
}
}
if (fan_hd && wf_smu_failure_state == 0) {
- rc = fan_hd->ops->set_value(fan_hd, st->hd_setpoint);
+ rc = wf_control_set(fan_hd, st->hd_setpoint);
if (rc) {
printk(KERN_WARNING "windfarm: HD fan error %d\n",
rc);
@@ -447,8 +447,8 @@ static void wf_smu_create_cpu_fans(void)
pid_param.ttarget = tmax - tdelta;
pid_param.pmaxadj = maxpow - powadj;
- pid_param.min = fan_cpu_main->ops->get_min(fan_cpu_main);
- pid_param.max = fan_cpu_main->ops->get_max(fan_cpu_main);
+ pid_param.min = wf_control_get_min(fan_cpu_main);
+ pid_param.max = wf_control_get_max(fan_cpu_main);
wf_cpu_pid_init(&wf_smu_cpu_fans->pid, &pid_param);
@@ -481,7 +481,7 @@ static void wf_smu_cpu_fans_tick(struct wf_smu_cpu_fans_state *st)
}
st->ticks = WF_SMU_CPU_FANS_INTERVAL;
- rc = sensor_cpu_temp->ops->get_value(sensor_cpu_temp, &temp);
+ rc = wf_sensor_get(sensor_cpu_temp, &temp);
if (rc) {
printk(KERN_WARNING "windfarm: CPU temp sensor error %d\n",
rc);
@@ -489,7 +489,7 @@ static void wf_smu_cpu_fans_tick(struct wf_smu_cpu_fans_state *st)
return;
}
- rc = sensor_cpu_power->ops->get_value(sensor_cpu_power, &power);
+ rc = wf_sensor_get(sensor_cpu_power, &power);
if (rc) {
printk(KERN_WARNING "windfarm: CPU power sensor error %d\n",
rc);
@@ -525,8 +525,7 @@ static void wf_smu_cpu_fans_tick(struct wf_smu_cpu_fans_state *st)
st->cpu_setpoint = new_setpoint;
readjust:
if (fan_cpu_main && wf_smu_failure_state == 0) {
- rc = fan_cpu_main->ops->set_value(fan_cpu_main,
- st->cpu_setpoint);
+ rc = wf_control_set(fan_cpu_main, st->cpu_setpoint);
if (rc) {
printk(KERN_WARNING "windfarm: CPU main fan"
" error %d\n", rc);
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index a9430ed4f36c..7653603cb00e 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -192,8 +192,8 @@ static void wf_smu_create_cpu_fans(void)
pid_param.ttarget = tmax - tdelta;
pid_param.pmaxadj = maxpow - powadj;
- pid_param.min = fan_cpu_main->ops->get_min(fan_cpu_main);
- pid_param.max = fan_cpu_main->ops->get_max(fan_cpu_main);
+ pid_param.min = wf_control_get_min(fan_cpu_main);
+ pid_param.max = wf_control_get_max(fan_cpu_main);
wf_cpu_pid_init(&wf_smu_cpu_fans->pid, &pid_param);
@@ -226,7 +226,7 @@ static void wf_smu_cpu_fans_tick(struct wf_smu_cpu_fans_state *st)
}
st->ticks = WF_SMU_CPU_FANS_INTERVAL;
- rc = sensor_cpu_temp->ops->get_value(sensor_cpu_temp, &temp);
+ rc = wf_sensor_get(sensor_cpu_temp, &temp);
if (rc) {
printk(KERN_WARNING "windfarm: CPU temp sensor error %d\n",
rc);
@@ -234,7 +234,7 @@ static void wf_smu_cpu_fans_tick(struct wf_smu_cpu_fans_state *st)
return;
}
- rc = sensor_cpu_power->ops->get_value(sensor_cpu_power, &power);
+ rc = wf_sensor_get(sensor_cpu_power, &power);
if (rc) {
printk(KERN_WARNING "windfarm: CPU power sensor error %d\n",
rc);
@@ -261,8 +261,7 @@ static void wf_smu_cpu_fans_tick(struct wf_smu_cpu_fans_state *st)
st->cpu_setpoint = new_setpoint;
readjust:
if (fan_cpu_main && wf_smu_failure_state == 0) {
- rc = fan_cpu_main->ops->set_value(fan_cpu_main,
- st->cpu_setpoint);
+ rc = wf_control_set(fan_cpu_main, st->cpu_setpoint);
if (rc) {
printk(KERN_WARNING "windfarm: CPU main fan"
" error %d\n", rc);
@@ -270,8 +269,7 @@ static void wf_smu_cpu_fans_tick(struct wf_smu_cpu_fans_state *st)
}
}
if (fan_cpu_second && wf_smu_failure_state == 0) {
- rc = fan_cpu_second->ops->set_value(fan_cpu_second,
- st->cpu_setpoint);
+ rc = wf_control_set(fan_cpu_second, st->cpu_setpoint);
if (rc) {
printk(KERN_WARNING "windfarm: CPU second fan"
" error %d\n", rc);
@@ -279,8 +277,7 @@ static void wf_smu_cpu_fans_tick(struct wf_smu_cpu_fans_state *st)
}
}
if (fan_cpu_third && wf_smu_failure_state == 0) {
- rc = fan_cpu_main->ops->set_value(fan_cpu_third,
- st->cpu_setpoint);
+ rc = wf_control_set(fan_cpu_third, st->cpu_setpoint);
if (rc) {
printk(KERN_WARNING "windfarm: CPU third fan"
" error %d\n", rc);
@@ -312,8 +309,8 @@ static void wf_smu_create_drive_fans(void)
/* Fill PID params */
param.additive = (fan_hd->type == WF_CONTROL_RPM_FAN);
- param.min = fan_hd->ops->get_min(fan_hd);
- param.max = fan_hd->ops->get_max(fan_hd);
+ param.min = wf_control_get_min(fan_hd);
+ param.max = wf_control_get_max(fan_hd);
wf_pid_init(&wf_smu_drive_fans->pid, &param);
DBG("wf: Drive Fan control initialized.\n");
@@ -338,7 +335,7 @@ static void wf_smu_drive_fans_tick(struct wf_smu_drive_fans_state *st)
}
st->ticks = st->pid.param.interval;
- rc = sensor_hd_temp->ops->get_value(sensor_hd_temp, &temp);
+ rc = wf_sensor_get(sensor_hd_temp, &temp);
if (rc) {
printk(KERN_WARNING "windfarm: HD temp sensor error %d\n",
rc);
@@ -361,7 +358,7 @@ static void wf_smu_drive_fans_tick(struct wf_smu_drive_fans_state *st)
st->setpoint = new_setpoint;
readjust:
if (fan_hd && wf_smu_failure_state == 0) {
- rc = fan_hd->ops->set_value(fan_hd, st->setpoint);
+ rc = wf_control_set(fan_hd, st->setpoint);
if (rc) {
printk(KERN_WARNING "windfarm: HD fan error %d\n",
rc);
@@ -393,8 +390,8 @@ static void wf_smu_create_slots_fans(void)
/* Fill PID params */
param.additive = (fan_slots->type == WF_CONTROL_RPM_FAN);
- param.min = fan_slots->ops->get_min(fan_slots);
- param.max = fan_slots->ops->get_max(fan_slots);
+ param.min = wf_control_get_min(fan_slots);
+ param.max = wf_control_get_max(fan_slots);
wf_pid_init(&wf_smu_slots_fans->pid, &param);
DBG("wf: Slots Fan control initialized.\n");
@@ -419,7 +416,7 @@ static void wf_smu_slots_fans_tick(struct wf_smu_slots_fans_state *st)
}
st->ticks = st->pid.param.interval;
- rc = sensor_slots_power->ops->get_value(sensor_slots_power, &power);
+ rc = wf_sensor_get(sensor_slots_power, &power);
if (rc) {
printk(KERN_WARNING "windfarm: Slots power sensor error %d\n",
rc);
@@ -444,7 +441,7 @@ static void wf_smu_slots_fans_tick(struct wf_smu_slots_fans_state *st)
st->setpoint = new_setpoint;
readjust:
if (fan_slots && wf_smu_failure_state == 0) {
- rc = fan_slots->ops->set_value(fan_slots, st->setpoint);
+ rc = wf_control_set(fan_slots, st->setpoint);
if (rc) {
printk(KERN_WARNING "windfarm: Slots fan error %d\n",
rc);
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
new file mode 100644
index 000000000000..3eca6d4b52fc
--- /dev/null
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -0,0 +1,740 @@
+/*
+ * Windfarm PowerMac thermal control.
+ * Control loops for RackMac3,1 (Xserve G5)
+ *
+ * Copyright (C) 2012 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * Use and redistribute under the terms of the GNU GPL v2.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <asm/prom.h>
+#include <asm/smu.h>
+
+#include "windfarm.h"
+#include "windfarm_pid.h"
+#include "windfarm_mpu.h"
+
+#define VERSION "1.0"
+
+#undef DEBUG
+#undef LOTSA_DEBUG
+
+#ifdef DEBUG
+#define DBG(args...) printk(args)
+#else
+#define DBG(args...) do { } while(0)
+#endif
+
+#ifdef LOTSA_DEBUG
+#define DBG_LOTS(args...) printk(args)
+#else
+#define DBG_LOTS(args...) do { } while(0)
+#endif
+
+/* define this to force CPU overtemp to 60 degrees, useful for testing
+ * the overtemp code
+ */
+#undef HACKED_OVERTEMP
+
+/* We currently only handle 2 chips */
+#define NR_CHIPS 2
+#define NR_CPU_FANS	(3 * NR_CHIPS)
+
+/* Controls and sensors */
+static struct wf_sensor *sens_cpu_temp[NR_CHIPS];
+static struct wf_sensor *sens_cpu_volts[NR_CHIPS];
+static struct wf_sensor *sens_cpu_amps[NR_CHIPS];
+static struct wf_sensor *backside_temp;
+static struct wf_sensor *slots_temp;
+static struct wf_sensor *dimms_temp;
+
+static struct wf_control *cpu_fans[NR_CHIPS][3];
+static struct wf_control *backside_fan;
+static struct wf_control *slots_fan;
+static struct wf_control *cpufreq_clamp;
+
+/* We keep a temperature history for average calculation of 180s */
+#define CPU_TEMP_HIST_SIZE 180
+
+/* PID loop state */
+static const struct mpu_data *cpu_mpu_data[NR_CHIPS];
+static struct wf_cpu_pid_state cpu_pid[NR_CHIPS];
+static u32 cpu_thist[CPU_TEMP_HIST_SIZE];
+static int cpu_thist_pt;
+static s64 cpu_thist_total;
+static s32 cpu_all_tmax = 100 << 16;
+static struct wf_pid_state backside_pid;
+static int backside_tick;
+static struct wf_pid_state slots_pid;
+static int slots_tick;
+static int slots_speed;
+static struct wf_pid_state dimms_pid;
+static int dimms_output_clamp;
+
+static int nr_chips;
+static bool have_all_controls;
+static bool have_all_sensors;
+static bool started;
+
+static int failure_state;
+#define FAILURE_SENSOR 1
+#define FAILURE_FAN 2
+#define FAILURE_PERM 4
+#define FAILURE_LOW_OVERTEMP 8
+#define FAILURE_HIGH_OVERTEMP 16
+
+/* Overtemp values */
+#define LOW_OVER_AVERAGE 0
+#define LOW_OVER_IMMEDIATE (10 << 16)
+#define LOW_OVER_CLEAR ((-10) << 16)
+#define HIGH_OVER_IMMEDIATE (14 << 16)
+#define HIGH_OVER_AVERAGE (10 << 16)
+
+
+static void cpu_max_all_fans(void)
+{
+ int i;
+
+	/* We max all CPU fans in case of a sensor error. We also do the
+	 * cpufreq clamping now; even though the generic code supposedly
+	 * does it later anyway, doing it here lets us react faster.
+ */
+ if (cpufreq_clamp)
+ wf_control_set_max(cpufreq_clamp);
+ for (i = 0; i < nr_chips; i++) {
+ if (cpu_fans[i][0])
+ wf_control_set_max(cpu_fans[i][0]);
+ if (cpu_fans[i][1])
+ wf_control_set_max(cpu_fans[i][1]);
+ if (cpu_fans[i][2])
+ wf_control_set_max(cpu_fans[i][2]);
+ }
+}
+
+static int cpu_check_overtemp(s32 temp)
+{
+ int new_state = 0;
+ s32 t_avg, t_old;
+ static bool first = true;
+
+ /* First check for immediate overtemps */
+ if (temp >= (cpu_all_tmax + LOW_OVER_IMMEDIATE)) {
+ new_state |= FAILURE_LOW_OVERTEMP;
+ if ((failure_state & FAILURE_LOW_OVERTEMP) == 0)
+ printk(KERN_ERR "windfarm: Overtemp due to immediate CPU"
+ " temperature !\n");
+ }
+ if (temp >= (cpu_all_tmax + HIGH_OVER_IMMEDIATE)) {
+ new_state |= FAILURE_HIGH_OVERTEMP;
+ if ((failure_state & FAILURE_HIGH_OVERTEMP) == 0)
+ printk(KERN_ERR "windfarm: Critical overtemp due to"
+ " immediate CPU temperature !\n");
+ }
+
+ /*
+ * The first time around, initialize the array with the first
+ * temperature reading
+ */
+ if (first) {
+ int i;
+
+ cpu_thist_total = 0;
+ for (i = 0; i < CPU_TEMP_HIST_SIZE; i++) {
+ cpu_thist[i] = temp;
+ cpu_thist_total += temp;
+ }
+ first = false;
+ }
+
+ /*
+ * We calculate a history of max temperatures and use that for the
+ * overtemp management
+ */
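+	/* cpu_thist is a ring buffer of the last CPU_TEMP_HIST_SIZE (180)
+	 * max-temperature samples; keeping a running total lets the
+	 * average be updated in O(1): drop the oldest sample, add the
+	 * new one.
+	 */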
+ t_old = cpu_thist[cpu_thist_pt];
+ cpu_thist[cpu_thist_pt] = temp;
+ cpu_thist_pt = (cpu_thist_pt + 1) % CPU_TEMP_HIST_SIZE;
+ cpu_thist_total -= t_old;
+ cpu_thist_total += temp;
+ t_avg = cpu_thist_total / CPU_TEMP_HIST_SIZE;
+
+ DBG_LOTS(" t_avg = %d.%03d (out: %d.%03d, in: %d.%03d)\n",
+ FIX32TOPRINT(t_avg), FIX32TOPRINT(t_old), FIX32TOPRINT(temp));
+
+ /* Now check for average overtemps */
+ if (t_avg >= (cpu_all_tmax + LOW_OVER_AVERAGE)) {
+ new_state |= FAILURE_LOW_OVERTEMP;
+ if ((failure_state & FAILURE_LOW_OVERTEMP) == 0)
+ printk(KERN_ERR "windfarm: Overtemp due to average CPU"
+ " temperature !\n");
+ }
+ if (t_avg >= (cpu_all_tmax + HIGH_OVER_AVERAGE)) {
+ new_state |= FAILURE_HIGH_OVERTEMP;
+ if ((failure_state & FAILURE_HIGH_OVERTEMP) == 0)
+ printk(KERN_ERR "windfarm: Critical overtemp due to"
+ " average CPU temperature !\n");
+ }
+
+ /* Now handle overtemp conditions. We don't currently use the windfarm
+	 * overtemp handling core as it's not fully suited to the needs of these
+	 * new machines. This will be fixed later.
+ */
+ if (new_state) {
+ /* High overtemp -> immediate shutdown */
+ if (new_state & FAILURE_HIGH_OVERTEMP)
+ machine_power_off();
+ if ((failure_state & new_state) != new_state)
+ cpu_max_all_fans();
+ failure_state |= new_state;
+ } else if ((failure_state & FAILURE_LOW_OVERTEMP) &&
+ (temp < (cpu_all_tmax + LOW_OVER_CLEAR))) {
+ printk(KERN_ERR "windfarm: Overtemp condition cleared !\n");
+ failure_state &= ~FAILURE_LOW_OVERTEMP;
+ }
+
+ return failure_state & (FAILURE_LOW_OVERTEMP | FAILURE_HIGH_OVERTEMP);
+}
+
+static int read_one_cpu_vals(int cpu, s32 *temp, s32 *power)
+{
+ s32 dtemp, volts, amps;
+ int rc;
+
+ /* Get diode temperature */
+ rc = wf_sensor_get(sens_cpu_temp[cpu], &dtemp);
+ if (rc) {
+ DBG(" CPU%d: temp reading error !\n", cpu);
+ return -EIO;
+ }
+ DBG_LOTS(" CPU%d: temp = %d.%03d\n", cpu, FIX32TOPRINT((dtemp)));
+ *temp = dtemp;
+
+ /* Get voltage */
+ rc = wf_sensor_get(sens_cpu_volts[cpu], &volts);
+ if (rc) {
+ DBG(" CPU%d, volts reading error !\n", cpu);
+ return -EIO;
+ }
+ DBG_LOTS(" CPU%d: volts = %d.%03d\n", cpu, FIX32TOPRINT((volts)));
+
+ /* Get current */
+ rc = wf_sensor_get(sens_cpu_amps[cpu], &amps);
+ if (rc) {
+ DBG(" CPU%d, current reading error !\n", cpu);
+ return -EIO;
+ }
+ DBG_LOTS(" CPU%d: amps = %d.%03d\n", cpu, FIX32TOPRINT((amps)));
+
+ /* Calculate power */
+
+ /* Scale voltage and current raw sensor values according to fixed scales
+ * obtained in Darwin and calculate power from I and V
+ */
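+	/* volts and amps are 16.16 fixed point values, so the 64-bit
+	 * product is 32.32; shifting right by 16 yields the power (V * A)
+	 * back in 16.16 format, e.g. 1.5 (0x00018000) * 20 (0x00140000)
+	 * >> 16 = 30 (0x001e0000).
+	 */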
+ *power = (((u64)volts) * ((u64)amps)) >> 16;
+
+ DBG_LOTS(" CPU%d: power = %d.%03d\n", cpu, FIX32TOPRINT((*power)));
+
+ return 0;
+
+}
+
+static void cpu_fans_tick(void)
+{
+ int err, cpu, i;
+ s32 speed, temp, power, t_max = 0;
+
+	DBG_LOTS("* cpu fans_tick()\n");
+
+ for (cpu = 0; cpu < nr_chips; ++cpu) {
+ struct wf_cpu_pid_state *sp = &cpu_pid[cpu];
+
+ /* Read current speed */
+ wf_control_get(cpu_fans[cpu][0], &sp->target);
+
+ err = read_one_cpu_vals(cpu, &temp, &power);
+ if (err) {
+ failure_state |= FAILURE_SENSOR;
+ cpu_max_all_fans();
+ return;
+ }
+
+ /* Keep track of highest temp */
+ t_max = max(t_max, temp);
+
+ /* Handle possible overtemps */
+ if (cpu_check_overtemp(t_max))
+ return;
+
+ /* Run PID */
+ wf_cpu_pid_run(sp, power, temp);
+
+ DBG_LOTS(" CPU%d: target = %d RPM\n", cpu, sp->target);
+
+ /* Apply DIMMs clamp */
+ speed = max(sp->target, dimms_output_clamp);
+
+ /* Apply result to all cpu fans */
+ for (i = 0; i < 3; i++) {
+ err = wf_control_set(cpu_fans[cpu][i], speed);
+ if (err) {
+ pr_warning("wf_rm31: Fan %s reports error %d\n",
+ cpu_fans[cpu][i]->name, err);
+ failure_state |= FAILURE_FAN;
+ }
+ }
+ }
+}
+
+/* Implementation... */
+static int cpu_setup_pid(int cpu)
+{
+ struct wf_cpu_pid_param pid;
+ const struct mpu_data *mpu = cpu_mpu_data[cpu];
+ s32 tmax, ttarget, ptarget;
+ int fmin, fmax, hsize;
+
+ /* Get PID params from the appropriate MPU EEPROM */
+ tmax = mpu->tmax << 16;
+ ttarget = mpu->ttarget << 16;
+ ptarget = ((s32)(mpu->pmaxh - mpu->padjmax)) << 16;
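+	/* The MPU EEPROM stores these as plain integers; shifting left
+	 * by 16 converts them to the 16.16 fixed point format the PID
+	 * code works with.
+	 */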
+
+	DBG("wf_rm31: CPU%d ttarget = %d.%03d, tmax = %d.%03d\n",
+ cpu, FIX32TOPRINT(ttarget), FIX32TOPRINT(tmax));
+
+ /* We keep a global tmax for overtemp calculations */
+ if (tmax < cpu_all_tmax)
+ cpu_all_tmax = tmax;
+
+ /* Set PID min/max by using the rear fan min/max */
+ fmin = wf_control_get_min(cpu_fans[cpu][0]);
+ fmax = wf_control_get_max(cpu_fans[cpu][0]);
+	DBG("wf_rm31: CPU%d max RPM range = [%d..%d]\n", cpu, fmin, fmax);
+
+ /* History size */
+ hsize = min_t(int, mpu->tguardband, WF_PID_MAX_HISTORY);
+	DBG("wf_rm31: CPU%d history size = %d\n", cpu, hsize);
+
+ /* Initialize PID loop */
+ pid.interval = 1; /* seconds */
+ pid.history_len = hsize;
+ pid.gd = mpu->pid_gd;
+ pid.gp = mpu->pid_gp;
+ pid.gr = mpu->pid_gr;
+ pid.tmax = tmax;
+ pid.ttarget = ttarget;
+ pid.pmaxadj = ptarget;
+ pid.min = fmin;
+ pid.max = fmax;
+
+ wf_cpu_pid_init(&cpu_pid[cpu], &pid);
+ cpu_pid[cpu].target = 4000;
+
+ return 0;
+}
+
+/* Backside/U3 fan */
+static struct wf_pid_param backside_param = {
+ .interval = 1,
+ .history_len = 2,
+ .gd = 0x00500000,
+ .gp = 0x0004cccc,
+ .gr = 0,
+ .itarget = 70 << 16,
+ .additive = 0,
+ .min = 20,
+ .max = 100,
+};
+
+/* DIMMs temperature (clamp the backside fan) */
+static struct wf_pid_param dimms_param = {
+ .interval = 1,
+ .history_len = 20,
+ .gd = 0,
+ .gp = 0,
+ .gr = 0x06553600,
+ .itarget = 50 << 16,
+ .additive = 0,
+ .min = 4000,
+ .max = 14000,
+};
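+/* (gp and gd are zero, so this loop is purely integral: the clamp
+ * responds to sustained DIMM temperature error rather than to
+ * instantaneous changes)
+ */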
+
+static void backside_fan_tick(void)
+{
+ s32 temp, dtemp;
+ int speed, dspeed, fan_min;
+ int err;
+
+ if (!backside_fan || !backside_temp || !dimms_temp || !backside_tick)
+ return;
+ if (--backside_tick > 0)
+ return;
+ backside_tick = backside_pid.param.interval;
+
+ DBG_LOTS("* backside fans tick\n");
+
+ /* Update fan speed from actual fans */
+ err = wf_control_get(backside_fan, &speed);
+ if (!err)
+ backside_pid.target = speed;
+
+ err = wf_sensor_get(backside_temp, &temp);
+ if (err) {
+ printk(KERN_WARNING "windfarm: U3 temp sensor error %d\n",
+ err);
+ failure_state |= FAILURE_SENSOR;
+ wf_control_set_max(backside_fan);
+ return;
+ }
+ speed = wf_pid_run(&backside_pid, temp);
+
+ DBG_LOTS("backside PID temp=%d.%.3d speed=%d\n",
+ FIX32TOPRINT(temp), speed);
+
+ err = wf_sensor_get(dimms_temp, &dtemp);
+ if (err) {
+ printk(KERN_WARNING "windfarm: DIMMs temp sensor error %d\n",
+ err);
+ failure_state |= FAILURE_SENSOR;
+ wf_control_set_max(backside_fan);
+ return;
+ }
+ dspeed = wf_pid_run(&dimms_pid, dtemp);
+ dimms_output_clamp = dspeed;
+
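+	/* The DIMM loop output spans dimms_param.min..max (4000..14000);
+	 * map it onto the backside fan's percentage range and use the
+	 * result as a floor for the backside fan speed.
+	 */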
+ fan_min = (dspeed * 100) / 14000;
+ fan_min = max(fan_min, backside_param.min);
+ speed = max(speed, fan_min);
+
+ err = wf_control_set(backside_fan, speed);
+ if (err) {
+ printk(KERN_WARNING "windfarm: backside fan error %d\n", err);
+ failure_state |= FAILURE_FAN;
+ }
+}
+
+static void backside_setup_pid(void)
+{
+ /* first time initialize things */
+ s32 fmin = wf_control_get_min(backside_fan);
+ s32 fmax = wf_control_get_max(backside_fan);
+ struct wf_pid_param param;
+
+ param = backside_param;
+ param.min = max(param.min, fmin);
+ param.max = min(param.max, fmax);
+ wf_pid_init(&backside_pid, &param);
+
+ param = dimms_param;
+ wf_pid_init(&dimms_pid, &param);
+
+ backside_tick = 1;
+
+ pr_info("wf_rm31: Backside control loop started.\n");
+}
+
+/* Slots fan */
+static const struct wf_pid_param slots_param = {
+ .interval = 5,
+ .history_len = 2,
+ .gd = 30 << 20,
+ .gp = 5 << 20,
+ .gr = 0,
+ .itarget = 40 << 16,
+ .additive = 1,
+ .min = 300,
+ .max = 4000,
+};
+
+static void slots_fan_tick(void)
+{
+ s32 temp;
+ int speed;
+ int err;
+
+ if (!slots_fan || !slots_temp || !slots_tick)
+ return;
+ if (--slots_tick > 0)
+ return;
+ slots_tick = slots_pid.param.interval;
+
+ DBG_LOTS("* slots fans tick\n");
+
+ err = wf_sensor_get(slots_temp, &temp);
+ if (err) {
+ pr_warning("wf_rm31: slots temp sensor error %d\n", err);
+ failure_state |= FAILURE_SENSOR;
+ wf_control_set_max(slots_fan);
+ return;
+ }
+ speed = wf_pid_run(&slots_pid, temp);
+
+ DBG_LOTS("slots PID temp=%d.%.3d speed=%d\n",
+ FIX32TOPRINT(temp), speed);
+
+ slots_speed = speed;
+ err = wf_control_set(slots_fan, speed);
+ if (err) {
+ printk(KERN_WARNING "windfarm: slots bay fan error %d\n", err);
+ failure_state |= FAILURE_FAN;
+ }
+}
+
+static void slots_setup_pid(void)
+{
+ /* first time initialize things */
+ s32 fmin = wf_control_get_min(slots_fan);
+ s32 fmax = wf_control_get_max(slots_fan);
+ struct wf_pid_param param = slots_param;
+
+ param.min = max(param.min, fmin);
+ param.max = min(param.max, fmax);
+ wf_pid_init(&slots_pid, &param);
+ slots_tick = 1;
+
+ pr_info("wf_rm31: Slots control loop started.\n");
+}
+
+static void set_fail_state(void)
+{
+ cpu_max_all_fans();
+
+ if (backside_fan)
+ wf_control_set_max(backside_fan);
+ if (slots_fan)
+ wf_control_set_max(slots_fan);
+}
+
+static void rm31_tick(void)
+{
+ int i, last_failure;
+
+ if (!started) {
+ started = 1;
+ printk(KERN_INFO "windfarm: CPUs control loops started.\n");
+ for (i = 0; i < nr_chips; ++i) {
+ if (cpu_setup_pid(i) < 0) {
+ failure_state = FAILURE_PERM;
+ set_fail_state();
+ break;
+ }
+ }
+ DBG_LOTS("cpu_all_tmax=%d.%03d\n", FIX32TOPRINT(cpu_all_tmax));
+
+ backside_setup_pid();
+ slots_setup_pid();
+
+#ifdef HACKED_OVERTEMP
+ cpu_all_tmax = 60 << 16;
+#endif
+ }
+
+ /* Permanent failure, bail out */
+ if (failure_state & FAILURE_PERM)
+ return;
+
+ /*
+ * Clear all failure bits except low overtemp which will be eventually
+ * cleared by the control loop itself
+ */
+ last_failure = failure_state;
+ failure_state &= FAILURE_LOW_OVERTEMP;
+ backside_fan_tick();
+ slots_fan_tick();
+
+ /* We do CPUs last because they can be clamped high by
+ * DIMM temperature
+ */
+ cpu_fans_tick();
+
+ DBG_LOTS(" last_failure: 0x%x, failure_state: %x\n",
+ last_failure, failure_state);
+
+ /* Check for failures. Any failure causes cpufreq clamping */
+ if (failure_state && last_failure == 0 && cpufreq_clamp)
+ wf_control_set_max(cpufreq_clamp);
+ if (failure_state == 0 && last_failure && cpufreq_clamp)
+ wf_control_set_min(cpufreq_clamp);
+
+ /* That's it for now, we might want to deal with other failures
+ * differently in the future though
+ */
+}
+
+static void rm31_new_control(struct wf_control *ct)
+{
+ bool all_controls;
+
+ if (!strcmp(ct->name, "cpu-fan-a-0"))
+ cpu_fans[0][0] = ct;
+ else if (!strcmp(ct->name, "cpu-fan-b-0"))
+ cpu_fans[0][1] = ct;
+ else if (!strcmp(ct->name, "cpu-fan-c-0"))
+ cpu_fans[0][2] = ct;
+ else if (!strcmp(ct->name, "cpu-fan-a-1"))
+ cpu_fans[1][0] = ct;
+ else if (!strcmp(ct->name, "cpu-fan-b-1"))
+ cpu_fans[1][1] = ct;
+ else if (!strcmp(ct->name, "cpu-fan-c-1"))
+ cpu_fans[1][2] = ct;
+ else if (!strcmp(ct->name, "backside-fan"))
+ backside_fan = ct;
+ else if (!strcmp(ct->name, "slots-fan"))
+ slots_fan = ct;
+ else if (!strcmp(ct->name, "cpufreq-clamp"))
+ cpufreq_clamp = ct;
+
+ all_controls =
+ cpu_fans[0][0] &&
+ cpu_fans[0][1] &&
+ cpu_fans[0][2] &&
+ backside_fan &&
+ slots_fan;
+ if (nr_chips > 1)
+ all_controls &=
+ cpu_fans[1][0] &&
+ cpu_fans[1][1] &&
+ cpu_fans[1][2];
+ have_all_controls = all_controls;
+}
+
+
+static void rm31_new_sensor(struct wf_sensor *sr)
+{
+ bool all_sensors;
+
+ if (!strcmp(sr->name, "cpu-diode-temp-0"))
+ sens_cpu_temp[0] = sr;
+ else if (!strcmp(sr->name, "cpu-diode-temp-1"))
+ sens_cpu_temp[1] = sr;
+ else if (!strcmp(sr->name, "cpu-voltage-0"))
+ sens_cpu_volts[0] = sr;
+ else if (!strcmp(sr->name, "cpu-voltage-1"))
+ sens_cpu_volts[1] = sr;
+ else if (!strcmp(sr->name, "cpu-current-0"))
+ sens_cpu_amps[0] = sr;
+ else if (!strcmp(sr->name, "cpu-current-1"))
+ sens_cpu_amps[1] = sr;
+ else if (!strcmp(sr->name, "backside-temp"))
+ backside_temp = sr;
+ else if (!strcmp(sr->name, "slots-temp"))
+ slots_temp = sr;
+ else if (!strcmp(sr->name, "dimms-temp"))
+ dimms_temp = sr;
+
+ all_sensors =
+ sens_cpu_temp[0] &&
+ sens_cpu_volts[0] &&
+ sens_cpu_amps[0] &&
+ backside_temp &&
+ slots_temp &&
+ dimms_temp;
+ if (nr_chips > 1)
+ all_sensors &=
+ sens_cpu_temp[1] &&
+ sens_cpu_volts[1] &&
+ sens_cpu_amps[1];
+
+ have_all_sensors = all_sensors;
+}
+
+static int rm31_wf_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case WF_EVENT_NEW_SENSOR:
+ rm31_new_sensor(data);
+ break;
+ case WF_EVENT_NEW_CONTROL:
+ rm31_new_control(data);
+ break;
+ case WF_EVENT_TICK:
+ if (have_all_controls && have_all_sensors)
+ rm31_tick();
+ }
+ return 0;
+}
+
+static struct notifier_block rm31_events = {
+ .notifier_call = rm31_wf_notify,
+};
+
+static int wf_rm31_probe(struct platform_device *dev)
+{
+ wf_register_client(&rm31_events);
+ return 0;
+}
+
+static int __devexit wf_rm31_remove(struct platform_device *dev)
+{
+ wf_unregister_client(&rm31_events);
+
+ /* should release all sensors and controls */
+ return 0;
+}
+
+static struct platform_driver wf_rm31_driver = {
+ .probe = wf_rm31_probe,
+ .remove = wf_rm31_remove,
+ .driver = {
+ .name = "windfarm",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init wf_rm31_init(void)
+{
+ struct device_node *cpu;
+ int i;
+
+ if (!of_machine_is_compatible("RackMac3,1"))
+ return -ENODEV;
+
+ /* Count the number of CPU cores */
+ nr_chips = 0;
+ for (cpu = NULL; (cpu = of_find_node_by_type(cpu, "cpu")) != NULL; )
+ ++nr_chips;
+ if (nr_chips > NR_CHIPS)
+ nr_chips = NR_CHIPS;
+
+	pr_info("windfarm: Initializing for Xserve G5 with %d chips\n",
+ nr_chips);
+
+ /* Get MPU data for each CPU */
+ for (i = 0; i < nr_chips; i++) {
+ cpu_mpu_data[i] = wf_get_mpu(i);
+ if (!cpu_mpu_data[i]) {
+ pr_err("wf_rm31: Failed to find MPU data for CPU %d\n", i);
+ return -ENXIO;
+ }
+ }
+
+#ifdef MODULE
+ request_module("windfarm_fcu_controls");
+ request_module("windfarm_lm75_sensor");
+ request_module("windfarm_lm87_sensor");
+ request_module("windfarm_ad7417_sensor");
+ request_module("windfarm_max6690_sensor");
+ request_module("windfarm_cpufreq_clamp");
+#endif /* MODULE */
+
+ platform_driver_register(&wf_rm31_driver);
+ return 0;
+}
+
+static void __exit wf_rm31_exit(void)
+{
+ platform_driver_unregister(&wf_rm31_driver);
+}
+
+module_init(wf_rm31_init);
+module_exit(wf_rm31_exit);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("Thermal control for Xserve G5");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:windfarm");
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 3c2be5193fd5..c155a54e8638 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -172,7 +172,6 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
fct->fan_type = pwm_fan;
fct->ctrl.type = pwm_fan ? WF_CONTROL_PWM_FAN : WF_CONTROL_RPM_FAN;
- sysfs_attr_init(&fct->ctrl.attr.attr);
/* We use the name & location here the same way we do for SMU sensors,
* see the comment in windfarm_smu_sensors.c. The locations are a bit
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 65a8ff3e1f8e..426e810233d7 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -20,7 +20,7 @@
#include "windfarm.h"
-#define VERSION "0.2"
+#define VERSION "1.0"
#define DEBUG
@@ -34,11 +34,12 @@
#define MAX_AGE msecs_to_jiffies(800)
struct wf_sat {
+ struct kref ref;
int nr;
- atomic_t refcnt;
struct mutex mutex;
unsigned long last_read; /* jiffies when cache last updated */
u8 cache[16];
+ struct list_head sensors;
struct i2c_client *i2c;
struct device_node *node;
};
@@ -46,11 +47,12 @@ struct wf_sat {
static struct wf_sat *sats[2];
struct wf_sat_sensor {
- int index;
- int index2; /* used for power sensors */
- int shift;
- struct wf_sat *sat;
- struct wf_sensor sens;
+ struct list_head link;
+ int index;
+ int index2; /* used for power sensors */
+ int shift;
+ struct wf_sat *sat;
+ struct wf_sensor sens;
};
#define wf_to_sat(c) container_of(c, struct wf_sat_sensor, sens)
@@ -142,7 +144,7 @@ static int wf_sat_read_cache(struct wf_sat *sat)
return 0;
}
-static int wf_sat_get(struct wf_sensor *sr, s32 *value)
+static int wf_sat_sensor_get(struct wf_sensor *sr, s32 *value)
{
struct wf_sat_sensor *sens = wf_to_sat(sr);
struct wf_sat *sat = sens->sat;
@@ -175,62 +177,34 @@ static int wf_sat_get(struct wf_sensor *sr, s32 *value)
return err;
}
-static void wf_sat_release(struct wf_sensor *sr)
+static void wf_sat_release(struct kref *ref)
+{
+ struct wf_sat *sat = container_of(ref, struct wf_sat, ref);
+
+ if (sat->nr >= 0)
+ sats[sat->nr] = NULL;
+ kfree(sat);
+}
+
+static void wf_sat_sensor_release(struct wf_sensor *sr)
{
struct wf_sat_sensor *sens = wf_to_sat(sr);
struct wf_sat *sat = sens->sat;
- if (atomic_dec_and_test(&sat->refcnt)) {
- if (sat->nr >= 0)
- sats[sat->nr] = NULL;
- kfree(sat);
- }
kfree(sens);
+ kref_put(&sat->ref, wf_sat_release);
}
static struct wf_sensor_ops wf_sat_ops = {
- .get_value = wf_sat_get,
- .release = wf_sat_release,
+ .get_value = wf_sat_sensor_get,
+ .release = wf_sat_sensor_release,
.owner = THIS_MODULE,
};
-static struct i2c_driver wf_sat_driver;
-
-static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
-{
- struct i2c_board_info info;
- struct i2c_client *client;
- const u32 *reg;
- u8 addr;
-
- reg = of_get_property(dev, "reg", NULL);
- if (reg == NULL)
- return;
- addr = *reg;
- DBG(KERN_DEBUG "wf_sat: creating sat at address %x\n", addr);
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = (addr >> 1) & 0x7f;
- info.platform_data = dev;
- strlcpy(info.type, "wf_sat", I2C_NAME_SIZE);
-
- client = i2c_new_device(adapter, &info);
- if (client == NULL) {
- printk(KERN_ERR "windfarm: failed to attach smu-sat to i2c\n");
- return;
- }
-
- /*
- * Let i2c-core delete that device on driver removal.
- * This is safe because i2c-core holds the core_lock mutex for us.
- */
- list_add_tail(&client->detected, &wf_sat_driver.clients);
-}
-
static int wf_sat_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct device_node *dev = client->dev.platform_data;
+ struct device_node *dev = client->dev.of_node;
struct wf_sat *sat;
struct wf_sat_sensor *sens;
const u32 *reg;
@@ -246,9 +220,10 @@ static int wf_sat_probe(struct i2c_client *client,
return -ENOMEM;
sat->nr = -1;
sat->node = of_node_get(dev);
- atomic_set(&sat->refcnt, 0);
+ kref_init(&sat->ref);
mutex_init(&sat->mutex);
sat->i2c = client;
+ INIT_LIST_HEAD(&sat->sensors);
i2c_set_clientdata(client, sat);
vsens[0] = vsens[1] = -1;
@@ -310,14 +285,15 @@ static int wf_sat_probe(struct i2c_client *client,
sens->index2 = -1;
sens->shift = shift;
sens->sat = sat;
- atomic_inc(&sat->refcnt);
sens->sens.ops = &wf_sat_ops;
sens->sens.name = (char *) (sens + 1);
- snprintf(sens->sens.name, 16, "%s-%d", name, cpu);
+ snprintf((char *)sens->sens.name, 16, "%s-%d", name, cpu);
- if (wf_register_sensor(&sens->sens)) {
- atomic_dec(&sat->refcnt);
+ if (wf_register_sensor(&sens->sens))
kfree(sens);
+ else {
+ list_add(&sens->link, &sat->sensors);
+ kref_get(&sat->ref);
}
}
@@ -336,14 +312,15 @@ static int wf_sat_probe(struct i2c_client *client,
sens->index2 = isens[core];
sens->shift = 0;
sens->sat = sat;
- atomic_inc(&sat->refcnt);
sens->sens.ops = &wf_sat_ops;
sens->sens.name = (char *) (sens + 1);
- snprintf(sens->sens.name, 16, "cpu-power-%d", cpu);
+ snprintf((char *)sens->sens.name, 16, "cpu-power-%d", cpu);
- if (wf_register_sensor(&sens->sens)) {
- atomic_dec(&sat->refcnt);
+ if (wf_register_sensor(&sens->sens))
kfree(sens);
+ else {
+ list_add(&sens->link, &sat->sensors);
+ kref_get(&sat->ref);
}
}
@@ -353,42 +330,35 @@ static int wf_sat_probe(struct i2c_client *client,
return 0;
}
-static int wf_sat_attach(struct i2c_adapter *adapter)
-{
- struct device_node *busnode, *dev = NULL;
- struct pmac_i2c_bus *bus;
-
- bus = pmac_i2c_adapter_to_bus(adapter);
- if (bus == NULL)
- return -ENODEV;
- busnode = pmac_i2c_get_bus_node(bus);
-
- while ((dev = of_get_next_child(busnode, dev)) != NULL)
- if (of_device_is_compatible(dev, "smu-sat"))
- wf_sat_create(adapter, dev);
- return 0;
-}
-
static int wf_sat_remove(struct i2c_client *client)
{
struct wf_sat *sat = i2c_get_clientdata(client);
+ struct wf_sat_sensor *sens;
- /* XXX TODO */
-
+ /* release sensors */
+ while (!list_empty(&sat->sensors)) {
+ sens = list_first_entry(&sat->sensors,
+ struct wf_sat_sensor, link);
+ list_del(&sens->link);
+ wf_unregister_sensor(&sens->sens);
+ }
sat->i2c = NULL;
+ i2c_set_clientdata(client, NULL);
+ kref_put(&sat->ref, wf_sat_release);
+
return 0;
}
static const struct i2c_device_id wf_sat_id[] = {
- { "wf_sat", 0 },
+ { "MAC,smu-sat", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, wf_sat_id);
static struct i2c_driver wf_sat_driver = {
.driver = {
.name = "wf_smu_sat",
},
- .attach_adapter = wf_sat_attach,
.probe = wf_sat_probe,
.remove = wf_sat_remove,
.id_table = wf_sat_id,
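With the legacy attach_adapter scan removed, the driver now relies on the i2c core to instantiate "MAC,smu-sat" devices (typically from the device tree), match them against the id table, and hand the firmware node to probe via client->dev.of_node. A minimal sketch of that probe-based style under the i2c driver model of this kernel generation (all example names here are illustrative, not from the patch):

	#include <linux/i2c.h>
	#include <linux/module.h>
	#include <linux/of.h>

	static int example_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		struct device_node *np = client->dev.of_node;	/* firmware node, if any */

		if (!np)
			return -ENODEV;
		/* parse properties with of_get_property(), allocate state, ... */
		return 0;
	}

	static int example_remove(struct i2c_client *client)
	{
		/* undo whatever probe set up */
		return 0;
	}

	static const struct i2c_device_id example_id[] = {
		{ "example-sensor", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, example_id);

	static struct i2c_driver example_driver = {
		.driver = {
			.name = "example_sensor",
		},
		.probe = example_probe,
		.remove = example_remove,
		.id_table = example_id,
	};
	module_i2c_driver(example_driver);

	MODULE_LICENSE("GPL");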
@@ -399,15 +369,13 @@ static int __init sat_sensors_init(void)
return i2c_add_driver(&wf_sat_driver);
}
-#if 0 /* uncomment when module_exit() below is uncommented */
static void __exit sat_sensors_exit(void)
{
i2c_del_driver(&wf_sat_driver);
}
-#endif
module_init(sat_sensors_init);
-/*module_exit(sat_sensors_exit); Uncomment when cleanup is implemented */
+module_exit(sat_sensors_exit);
MODULE_AUTHOR("Paul Mackerras <paulus@samba.org>");
MODULE_DESCRIPTION("SMU satellite sensors for PowerMac thermal control");
diff --git a/drivers/mca/Kconfig b/drivers/mca/Kconfig
deleted file mode 100644
index a7a0220ab4bd..000000000000
--- a/drivers/mca/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-config MCA_LEGACY
- bool "Legacy MCA API Support"
- depends on MCA
- help
- This compiles in support for the old slot based MCA API. If you
- have an unconverted MCA driver, you will need to say Y here. It
- is safe to say Y anyway.
-
-config MCA_PROC_FS
- bool "Support for the mca entry in /proc"
- depends on MCA_LEGACY && PROC_FS
- help
- If you want the old style /proc/mca directory in addition to the
- new style sysfs say Y here.
diff --git a/drivers/mca/Makefile b/drivers/mca/Makefile
deleted file mode 100644
index 0794b122520e..000000000000
--- a/drivers/mca/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# Makefile for the Linux MCA bus support
-
-obj-y := mca-bus.o mca-device.o mca-driver.o
-
-obj-$(CONFIG_MCA_PROC_FS) += mca-proc.o
-obj-$(CONFIG_MCA_LEGACY) += mca-legacy.o
-
diff --git a/drivers/mca/mca-bus.c b/drivers/mca/mca-bus.c
deleted file mode 100644
index ada5ebbaa255..000000000000
--- a/drivers/mca/mca-bus.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/*
- * MCA bus support functions for sysfs.
- *
- * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
- *
-**-----------------------------------------------------------------------------
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/mca.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-/* Very few machines have more than one MCA bus. However, there are
- * those that do (Voyager 35xx/5xxx), so we do it this way for future
- * expansion. None that I know have more than 2 */
-static struct mca_bus *mca_root_busses[MAX_MCA_BUSSES];
-
-#define MCA_DEVINFO(i,s) { .pos = i, .name = s }
-
-struct mca_device_info {
- short pos_id; /* the 2 byte pos id for this card */
- char name[50];
-};
-
-static int mca_bus_match (struct device *dev, struct device_driver *drv)
-{
- struct mca_device *mca_dev = to_mca_device (dev);
- struct mca_driver *mca_drv = to_mca_driver (drv);
- const unsigned short *mca_ids = mca_drv->id_table;
- int i = 0;
-
- if (mca_ids) {
- for(i = 0; mca_ids[i]; i++) {
- if (mca_ids[i] == mca_dev->pos_id) {
- mca_dev->index = i;
- return 1;
- }
- }
- }
- /* If the integrated id is present, treat it as though it were an
- * additional id in the id_table (it can't be because by definition,
- * integrated id's overflow a short */
- if (mca_drv->integrated_id && mca_dev->pos_id ==
- mca_drv->integrated_id) {
- mca_dev->index = i;
- return 1;
- }
- return 0;
-}
-
-struct bus_type mca_bus_type = {
- .name = "MCA",
- .match = mca_bus_match,
-};
-EXPORT_SYMBOL (mca_bus_type);
-
-static ssize_t mca_show_pos_id(struct device *dev, struct device_attribute *attr, char *buf)
-{
- /* four digits, \n and trailing \0 */
- struct mca_device *mca_dev = to_mca_device(dev);
- int len;
-
- if(mca_dev->pos_id < MCA_DUMMY_POS_START)
- len = sprintf(buf, "%04x\n", mca_dev->pos_id);
- else
- len = sprintf(buf, "none\n");
- return len;
-}
-static ssize_t mca_show_pos(struct device *dev, struct device_attribute *attr, char *buf)
-{
- /* enough for 8 two byte hex chars plus space and new line */
- int j, len=0;
- struct mca_device *mca_dev = to_mca_device(dev);
-
- for(j=0; j<8; j++)
- len += sprintf(buf+len, "%02x ", mca_dev->pos[j]);
- /* change last trailing space to new line */
- buf[len-1] = '\n';
- return len;
-}
-
-static DEVICE_ATTR(id, S_IRUGO, mca_show_pos_id, NULL);
-static DEVICE_ATTR(pos, S_IRUGO, mca_show_pos, NULL);
-
-int __init mca_register_device(int bus, struct mca_device *mca_dev)
-{
- struct mca_bus *mca_bus = mca_root_busses[bus];
- int rc;
-
- mca_dev->dev.parent = &mca_bus->dev;
- mca_dev->dev.bus = &mca_bus_type;
- dev_set_name(&mca_dev->dev, "%02d:%02X", bus, mca_dev->slot);
- mca_dev->dma_mask = mca_bus->default_dma_mask;
- mca_dev->dev.dma_mask = &mca_dev->dma_mask;
- mca_dev->dev.coherent_dma_mask = mca_dev->dma_mask;
-
- rc = device_register(&mca_dev->dev);
- if (rc)
- goto err_out;
-
- rc = device_create_file(&mca_dev->dev, &dev_attr_id);
- if (rc) goto err_out_devreg;
- rc = device_create_file(&mca_dev->dev, &dev_attr_pos);
- if (rc) goto err_out_id;
-
- return 1;
-
-err_out_id:
- device_remove_file(&mca_dev->dev, &dev_attr_id);
-err_out_devreg:
- device_unregister(&mca_dev->dev);
-err_out:
- return 0;
-}
-
-/* */
-struct mca_bus * __devinit mca_attach_bus(int bus)
-{
- struct mca_bus *mca_bus;
-
- if (unlikely(mca_root_busses[bus] != NULL)) {
- /* This should never happen, but just in case */
- printk(KERN_EMERG "MCA tried to add already existing bus %d\n",
- bus);
- dump_stack();
- return NULL;
- }
-
- mca_bus = kzalloc(sizeof(struct mca_bus), GFP_KERNEL);
- if (!mca_bus)
- return NULL;
-
- dev_set_name(&mca_bus->dev, "mca%d", bus);
- sprintf(mca_bus->name,"Host %s MCA Bridge", bus ? "Secondary" : "Primary");
- if (device_register(&mca_bus->dev)) {
- kfree(mca_bus);
- return NULL;
- }
-
- mca_root_busses[bus] = mca_bus;
-
- return mca_bus;
-}
-
-int __init mca_system_init (void)
-{
- return bus_register(&mca_bus_type);
-}
diff --git a/drivers/mca/mca-device.c b/drivers/mca/mca-device.c
deleted file mode 100644
index e7adf89fae41..000000000000
--- a/drivers/mca/mca-device.c
+++ /dev/null
@@ -1,218 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/*
- * MCA device support functions
- *
- * These functions support the ongoing device access API.
- *
- * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
- *
-**-----------------------------------------------------------------------------
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mca.h>
-#include <linux/string.h>
-
-/**
- * mca_device_read_stored_pos - read POS register from stored data
- * @mca_dev: device to read from
- * @reg: register to read from
- *
- * Fetch a POS value that was stored at boot time by the kernel
- * when it scanned the MCA space. The register value is returned.
- * Missing or invalid registers report 0.
- */
-unsigned char mca_device_read_stored_pos(struct mca_device *mca_dev, int reg)
-{
- if(reg < 0 || reg >= 8)
- return 0;
-
- return mca_dev->pos[reg];
-}
-EXPORT_SYMBOL(mca_device_read_stored_pos);
-
-/**
- * mca_device_read_pos - read POS register from card
- * @mca_dev: device to read from
- * @reg: register to read from
- *
- * Fetch a POS value directly from the hardware to obtain the
- * current value. This is much slower than
- * mca_device_read_stored_pos and may not be invoked from
- * interrupt context. It handles the deep magic required for
- * onboard devices transparently.
- */
-unsigned char mca_device_read_pos(struct mca_device *mca_dev, int reg)
-{
- struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
-
- return mca_bus->f.mca_read_pos(mca_dev, reg);
-
- return mca_dev->pos[reg];
-}
-EXPORT_SYMBOL(mca_device_read_pos);
-
-
-/**
- * mca_device_write_pos - read POS register from card
- * @mca_dev: device to write pos register to
- * @reg: register to write to
- * @byte: byte to write to the POS registers
- *
- * Store a POS value directly to the hardware. You should not
- * normally need to use this function and should have a very good
- * knowledge of MCA bus before you do so. Doing this wrongly can
- * damage the hardware.
- *
- * This function may not be used from interrupt context.
- *
- */
-void mca_device_write_pos(struct mca_device *mca_dev, int reg,
- unsigned char byte)
-{
- struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
-
- mca_bus->f.mca_write_pos(mca_dev, reg, byte);
-}
-EXPORT_SYMBOL(mca_device_write_pos);
-
-/**
- * mca_device_transform_irq - transform the ADF obtained IRQ
- * @mca_device: device whose irq needs transforming
- * @irq: input irq from ADF
- *
- * MCA Adapter Definition Files (ADF) contain irq, ioport, memory
- * etc. definitions. In systems with more than one bus, these need
- * to be transformed through bus mapping functions to get the real
- * system global quantities.
- *
- * This function transforms the interrupt number and returns the
- * transformed system global interrupt
- */
-int mca_device_transform_irq(struct mca_device *mca_dev, int irq)
-{
- struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
-
- return mca_bus->f.mca_transform_irq(mca_dev, irq);
-}
-EXPORT_SYMBOL(mca_device_transform_irq);
-
-/**
- * mca_device_transform_ioport - transform the ADF obtained I/O port
- * @mca_device: device whose port needs transforming
- * @ioport: input I/O port from ADF
- *
- * MCA Adapter Definition Files (ADF) contain irq, ioport, memory
- * etc. definitions. In systems with more than one bus, these need
- * to be transformed through bus mapping functions to get the real
- * system global quantities.
- *
- * This function transforms the I/O port number and returns the
- * transformed system global port number.
- *
- * This transformation can be assumed to be linear for port ranges.
- */
-int mca_device_transform_ioport(struct mca_device *mca_dev, int port)
-{
- struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
-
- return mca_bus->f.mca_transform_ioport(mca_dev, port);
-}
-EXPORT_SYMBOL(mca_device_transform_ioport);
-
-/**
- * mca_device_transform_memory - transform the ADF obtained memory
- * @mca_device: device whose memory region needs transforming
- * @mem: memory region start from ADF
- *
- * MCA Adapter Definition Files (ADF) contain irq, ioport, memory
- * etc. definitions. In systems with more than one bus, these need
- * to be transformed through bus mapping functions to get the real
- * system global quantities.
- *
- * This function transforms the memory region start and returns the
- * transformed system global memory region (physical).
- *
- * This transformation can be assumed to be linear for region ranges.
- */
-void *mca_device_transform_memory(struct mca_device *mca_dev, void *mem)
-{
- struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
-
- return mca_bus->f.mca_transform_memory(mca_dev, mem);
-}
-EXPORT_SYMBOL(mca_device_transform_memory);
-
-
-/**
- * mca_device_claimed - check if claimed by driver
- * @mca_dev: device to check
- *
- * Returns 1 if the slot has been claimed by a driver
- */
-
-int mca_device_claimed(struct mca_device *mca_dev)
-{
- return mca_dev->driver_loaded;
-}
-EXPORT_SYMBOL(mca_device_claimed);
-
-/**
- * mca_device_set_claim - set the claim value of the driver
- * @mca_dev: device to set value for
- * @val: claim value to set (1 claimed, 0 unclaimed)
- */
-void mca_device_set_claim(struct mca_device *mca_dev, int val)
-{
- mca_dev->driver_loaded = val;
-}
-EXPORT_SYMBOL(mca_device_set_claim);
-
-/**
- * mca_device_status - get the status of the device
- * @mca_device: device to get
- *
- * returns an enumeration of the device status:
- *
- * MCA_ADAPTER_NORMAL adapter is OK.
- * MCA_ADAPTER_NONE no adapter at device (should never happen).
- * MCA_ADAPTER_DISABLED adapter is disabled.
- * MCA_ADAPTER_ERROR adapter cannot be initialised.
- */
-enum MCA_AdapterStatus mca_device_status(struct mca_device *mca_dev)
-{
- return mca_dev->status;
-}
-EXPORT_SYMBOL(mca_device_status);
-
-/**
- * mca_device_set_name - set the name of the device
- * @mca_device: device to set the name of
- * @name: name to set
- */
-void mca_device_set_name(struct mca_device *mca_dev, const char *name)
-{
- if(!mca_dev)
- return;
-
- strlcpy(mca_dev->name, name, sizeof(mca_dev->name));
-}
-EXPORT_SYMBOL(mca_device_set_name);
diff --git a/drivers/mca/mca-driver.c b/drivers/mca/mca-driver.c
deleted file mode 100644
index 32cd39bcc715..000000000000
--- a/drivers/mca/mca-driver.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/*
- * MCA driver support functions for sysfs.
- *
- * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
- *
-**-----------------------------------------------------------------------------
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
- */
-
-#include <linux/device.h>
-#include <linux/mca.h>
-#include <linux/module.h>
-
-int mca_register_driver(struct mca_driver *mca_drv)
-{
- int r;
-
- if (MCA_bus) {
- mca_drv->driver.bus = &mca_bus_type;
- if ((r = driver_register(&mca_drv->driver)) < 0)
- return r;
- mca_drv->integrated_id = 0;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mca_register_driver);
-
-int mca_register_driver_integrated(struct mca_driver *mca_driver,
- int integrated_id)
-{
- int r = mca_register_driver(mca_driver);
-
- if (!r)
- mca_driver->integrated_id = integrated_id;
-
- return r;
-}
-EXPORT_SYMBOL(mca_register_driver_integrated);
-
-void mca_unregister_driver(struct mca_driver *mca_drv)
-{
- if (MCA_bus)
- driver_unregister(&mca_drv->driver);
-}
-EXPORT_SYMBOL(mca_unregister_driver);
diff --git a/drivers/mca/mca-legacy.c b/drivers/mca/mca-legacy.c
deleted file mode 100644
index 494f0c2001f5..000000000000
--- a/drivers/mca/mca-legacy.c
+++ /dev/null
@@ -1,329 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/*
- * MCA bus support functions for legacy (2.4) API.
- *
- * Legacy API means the API that operates in terms of MCA slot number
- *
- * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
- *
-**-----------------------------------------------------------------------------
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mca-legacy.h>
-#include <asm/io.h>
-
-/* NOTE: This structure is stack allocated */
-struct mca_find_adapter_info {
- int id;
- int slot;
- struct mca_device *mca_dev;
-};
-
-/* The purpose of this iterator is to loop over all the devices and
- * find the one with the smallest slot number that's just greater than
- * or equal to the required slot with a matching id */
-static int mca_find_adapter_callback(struct device *dev, void *data)
-{
- struct mca_find_adapter_info *info = data;
- struct mca_device *mca_dev = to_mca_device(dev);
-
- if(mca_dev->pos_id != info->id)
- return 0;
-
- if(mca_dev->slot < info->slot)
- return 0;
-
- if(!info->mca_dev || info->mca_dev->slot >= mca_dev->slot)
- info->mca_dev = mca_dev;
-
- return 0;
-}
-
-/**
- * mca_find_adapter - scan for adapters
- * @id: MCA identification to search for
- * @start: starting slot
- *
- * Search the MCA configuration for adapters matching the 16bit
- * ID given. The first time it should be called with start as zero
- * and then further calls made passing the return value of the
- * previous call until %MCA_NOTFOUND is returned.
- *
- * Disabled adapters are not reported.
- */
-
-int mca_find_adapter(int id, int start)
-{
- struct mca_find_adapter_info info;
-
- if(id == 0xffff)
- return MCA_NOTFOUND;
-
- info.slot = start;
- info.id = id;
- info.mca_dev = NULL;
-
- for(;;) {
- bus_for_each_dev(&mca_bus_type, NULL, &info, mca_find_adapter_callback);
-
- if(info.mca_dev == NULL)
- return MCA_NOTFOUND;
-
- if(info.mca_dev->status != MCA_ADAPTER_DISABLED)
- break;
-
- /* OK, found adapter but it was disabled. Go around
- * again, excluding the slot we just found */
-
- info.slot = info.mca_dev->slot + 1;
- info.mca_dev = NULL;
- }
-
- return info.mca_dev->slot;
-}
-EXPORT_SYMBOL(mca_find_adapter);
-
-/*--------------------------------------------------------------------*/
-
-/**
- * mca_find_unused_adapter - scan for unused adapters
- * @id: MCA identification to search for
- * @start: starting slot
- *
- * Search the MCA configuration for adapters matching the 16bit
- * ID given. The first time it should be called with start as zero
- * and then further calls made passing the return value of the
- * previous call until %MCA_NOTFOUND is returned.
- *
- * Adapters that have been claimed by drivers and those that
- * are disabled are not reported. This function thus allows a driver
- * to scan for further cards when some may already be driven.
- */
-
-int mca_find_unused_adapter(int id, int start)
-{
- struct mca_find_adapter_info info = { 0 };
-
- if (!MCA_bus || id == 0xffff)
- return MCA_NOTFOUND;
-
- info.slot = start;
- info.id = id;
- info.mca_dev = NULL;
-
- for(;;) {
- bus_for_each_dev(&mca_bus_type, NULL, &info, mca_find_adapter_callback);
-
- if(info.mca_dev == NULL)
- return MCA_NOTFOUND;
-
- if(info.mca_dev->status != MCA_ADAPTER_DISABLED
- && !info.mca_dev->driver_loaded)
- break;
-
- /* OK, found adapter but it was disabled or already in
- * use. Go around again, excluding the slot we just
- * found */
-
- info.slot = info.mca_dev->slot + 1;
- info.mca_dev = NULL;
- }
-
- return info.mca_dev->slot;
-}
-EXPORT_SYMBOL(mca_find_unused_adapter);
-
-/* NOTE: stack allocated structure */
-struct mca_find_device_by_slot_info {
- int slot;
- struct mca_device *mca_dev;
-};
-
-static int mca_find_device_by_slot_callback(struct device *dev, void *data)
-{
- struct mca_find_device_by_slot_info *info = data;
- struct mca_device *mca_dev = to_mca_device(dev);
-
- if(mca_dev->slot == info->slot)
- info->mca_dev = mca_dev;
-
- return 0;
-}
-
-struct mca_device *mca_find_device_by_slot(int slot)
-{
- struct mca_find_device_by_slot_info info;
-
- info.slot = slot;
- info.mca_dev = NULL;
-
- bus_for_each_dev(&mca_bus_type, NULL, &info, mca_find_device_by_slot_callback);
-
- return info.mca_dev;
-}
-
-/**
- * mca_read_stored_pos - read POS register from boot data
- * @slot: slot number to read from
- * @reg: register to read from
- *
- * Fetch a POS value that was stored at boot time by the kernel
- * when it scanned the MCA space. The register value is returned.
- * Missing or invalid registers report 0.
- */
-unsigned char mca_read_stored_pos(int slot, int reg)
-{
- struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
- if(!mca_dev)
- return 0;
-
- return mca_device_read_stored_pos(mca_dev, reg);
-}
-EXPORT_SYMBOL(mca_read_stored_pos);
-
-
-/**
- * mca_read_pos - read POS register from card
- * @slot: slot number to read from
- * @reg: register to read from
- *
- * Fetch a POS value directly from the hardware to obtain the
- * current value. This is much slower than mca_read_stored_pos and
- * may not be invoked from interrupt context. It handles the
- * deep magic required for onboard devices transparently.
- */
-
-unsigned char mca_read_pos(int slot, int reg)
-{
- struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
- if(!mca_dev)
- return 0;
-
- return mca_device_read_pos(mca_dev, reg);
-}
-EXPORT_SYMBOL(mca_read_pos);
-
-
-/**
- * mca_write_pos - read POS register from card
- * @slot: slot number to read from
- * @reg: register to read from
- * @byte: byte to write to the POS registers
- *
- * Store a POS value directly from the hardware. You should not
- * normally need to use this function and should have a very good
- * knowledge of MCA bus before you do so. Doing this wrongly can
- * damage the hardware.
- *
- * This function may not be used from interrupt context.
- *
- * Note that this a technically a Bad Thing, as IBM tech stuff says
- * you should only set POS values through their utilities.
- * However, some devices such as the 3c523 recommend that you write
- * back some data to make sure the configuration is consistent.
- * I'd say that IBM is right, but I like my drivers to work.
- *
- * This function can't do checks to see if multiple devices end up
- * with the same resources, so you might see magic smoke if someone
- * screws up.
- */
-
-void mca_write_pos(int slot, int reg, unsigned char byte)
-{
- struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
- if(!mca_dev)
- return;
-
- mca_device_write_pos(mca_dev, reg, byte);
-}
-EXPORT_SYMBOL(mca_write_pos);
-
-/**
- * mca_set_adapter_name - Set the description of the card
- * @slot: slot to name
- * @name: text string for the namen
- *
- * This function sets the name reported via /proc for this
- * adapter slot. This is for user information only. Setting a
- * name deletes any previous name.
- */
-
-void mca_set_adapter_name(int slot, char* name)
-{
- struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
- if(!mca_dev)
- return;
-
- mca_device_set_name(mca_dev, name);
-}
-EXPORT_SYMBOL(mca_set_adapter_name);
-
-/**
- * mca_mark_as_used - claim an MCA device
- * @slot: slot to claim
- * FIXME: should we make this threadsafe
- *
- * Claim an MCA slot for a device driver. If the
- * slot is already taken the function returns 1,
- * if it is not taken it is claimed and 0 is
- * returned.
- */
-
-int mca_mark_as_used(int slot)
-{
- struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
- if(!mca_dev)
- /* FIXME: this is actually a severe error */
- return 1;
-
- if(mca_device_claimed(mca_dev))
- return 1;
-
- mca_device_set_claim(mca_dev, 1);
-
- return 0;
-}
-EXPORT_SYMBOL(mca_mark_as_used);
-
-/**
- * mca_mark_as_unused - release an MCA device
- * @slot: slot to claim
- *
- * Release the slot for other drives to use.
- */
-
-void mca_mark_as_unused(int slot)
-{
- struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
- if(!mca_dev)
- return;
-
- mca_device_set_claim(mca_dev, 0);
-}
-EXPORT_SYMBOL(mca_mark_as_unused);
-
diff --git a/drivers/mca/mca-proc.c b/drivers/mca/mca-proc.c
deleted file mode 100644
index 81ea0d377bf4..000000000000
--- a/drivers/mca/mca-proc.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/*
- * MCA bus support functions for the proc fs.
- *
- * NOTE: this code *requires* the legacy MCA api.
- *
- * Legacy API means the API that operates in terms of MCA slot number
- *
- * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
- *
-**-----------------------------------------------------------------------------
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-**-----------------------------------------------------------------------------
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/mca.h>
-
-static int get_mca_info_helper(struct mca_device *mca_dev, char *page, int len)
-{
- int j;
-
- for(j=0; j<8; j++)
- len += sprintf(page+len, "%02x ",
- mca_dev ? mca_dev->pos[j] : 0xff);
- len += sprintf(page+len, " %s\n", mca_dev ? mca_dev->name : "");
- return len;
-}
-
-static int get_mca_info(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int i, len = 0;
-
- if(MCA_bus) {
- struct mca_device *mca_dev;
- /* Format POS registers of eight MCA slots */
-
- for(i=0; i<MCA_MAX_SLOT_NR; i++) {
- mca_dev = mca_find_device_by_slot(i);
-
- len += sprintf(page+len, "Slot %d: ", i+1);
- len = get_mca_info_helper(mca_dev, page, len);
- }
-
- /* Format POS registers of integrated video subsystem */
-
- mca_dev = mca_find_device_by_slot(MCA_INTEGVIDEO);
- len += sprintf(page+len, "Video : ");
- len = get_mca_info_helper(mca_dev, page, len);
-
- /* Format POS registers of integrated SCSI subsystem */
-
- mca_dev = mca_find_device_by_slot(MCA_INTEGSCSI);
- len += sprintf(page+len, "SCSI : ");
- len = get_mca_info_helper(mca_dev, page, len);
-
- /* Format POS registers of motherboard */
-
- mca_dev = mca_find_device_by_slot(MCA_MOTHERBOARD);
- len += sprintf(page+len, "Planar: ");
- len = get_mca_info_helper(mca_dev, page, len);
- } else {
- /* Leave it empty if MCA not detected - this should *never*
- * happen!
- */
- }
-
- if (len <= off+count) *eof = 1;
- *start = page + off;
- len -= off;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
-}
-
-/*--------------------------------------------------------------------*/
-
-static int mca_default_procfn(char* buf, struct mca_device *mca_dev)
-{
- int len = 0, i;
- int slot = mca_dev->slot;
-
- /* Print out the basic information */
-
- if(slot < MCA_MAX_SLOT_NR) {
- len += sprintf(buf+len, "Slot: %d\n", slot+1);
- } else if(slot == MCA_INTEGSCSI) {
- len += sprintf(buf+len, "Integrated SCSI Adapter\n");
- } else if(slot == MCA_INTEGVIDEO) {
- len += sprintf(buf+len, "Integrated Video Adapter\n");
- } else if(slot == MCA_MOTHERBOARD) {
- len += sprintf(buf+len, "Motherboard\n");
- }
- if (mca_dev->name[0]) {
-
- /* Drivers might register a name without /proc handler... */
-
- len += sprintf(buf+len, "Adapter Name: %s\n",
- mca_dev->name);
- } else {
- len += sprintf(buf+len, "Adapter Name: Unknown\n");
- }
- len += sprintf(buf+len, "Id: %02x%02x\n",
- mca_dev->pos[1], mca_dev->pos[0]);
- len += sprintf(buf+len, "Enabled: %s\nPOS: ",
- mca_device_status(mca_dev) == MCA_ADAPTER_NORMAL ?
- "Yes" : "No");
- for(i=0; i<8; i++) {
- len += sprintf(buf+len, "%02x ", mca_dev->pos[i]);
- }
- len += sprintf(buf+len, "\nDriver Installed: %s",
- mca_device_claimed(mca_dev) ? "Yes" : "No");
- buf[len++] = '\n';
- buf[len] = 0;
-
- return len;
-} /* mca_default_procfn() */
-
-static int get_mca_machine_info(char* page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len = 0;
-
- len += sprintf(page+len, "Model Id: 0x%x\n", machine_id);
- len += sprintf(page+len, "Submodel Id: 0x%x\n", machine_submodel_id);
- len += sprintf(page+len, "BIOS Revision: 0x%x\n", BIOS_revision);
-
- if (len <= off+count) *eof = 1;
- *start = page + off;
- len -= off;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
-}
-
-static int mca_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- struct mca_device *mca_dev = (struct mca_device *)data;
- int len = 0;
-
- /* Get the standard info */
-
- len = mca_default_procfn(page, mca_dev);
-
- /* Do any device-specific processing, if there is any */
-
- if(mca_dev->procfn) {
- len += mca_dev->procfn(page+len, mca_dev->slot,
- mca_dev->proc_dev);
- }
- if (len <= off+count) *eof = 1;
- *start = page + off;
- len -= off;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
-} /* mca_read_proc() */
-
-/*--------------------------------------------------------------------*/
-
-void __init mca_do_proc_init(void)
-{
- int i;
- struct proc_dir_entry *proc_mca;
- struct proc_dir_entry* node = NULL;
- struct mca_device *mca_dev;
-
- proc_mca = proc_mkdir("mca", NULL);
- create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL);
- create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL);
-
- /* Initialize /proc/mca entries for existing adapters */
-
- for(i = 0; i < MCA_NUMADAPTERS; i++) {
- enum MCA_AdapterStatus status;
- mca_dev = mca_find_device_by_slot(i);
- if(!mca_dev)
- continue;
-
- mca_dev->procfn = NULL;
-
- if(i < MCA_MAX_SLOT_NR) sprintf(mca_dev->procname,"slot%d", i+1);
- else if(i == MCA_INTEGVIDEO) sprintf(mca_dev->procname,"video");
- else if(i == MCA_INTEGSCSI) sprintf(mca_dev->procname,"scsi");
- else if(i == MCA_MOTHERBOARD) sprintf(mca_dev->procname,"planar");
-
- status = mca_device_status(mca_dev);
- if (status != MCA_ADAPTER_NORMAL &&
- status != MCA_ADAPTER_DISABLED)
- continue;
-
- node = create_proc_read_entry(mca_dev->procname, 0, proc_mca,
- mca_read_proc, (void *)mca_dev);
-
- if(node == NULL) {
- printk("Failed to allocate memory for MCA proc-entries!");
- return;
- }
- }
-
-} /* mca_do_proc_init() */
-
-/**
- * mca_set_adapter_procfn - Set the /proc callback
- * @slot: slot to configure
- * @procfn: callback function to call for /proc
- * @dev: device information passed to the callback
- *
- * This sets up an information callback for /proc/mca/slot?. The
- * function is called with the buffer, slot, and device pointer (or
- * some equally informative context information, or nothing, if you
- * prefer), and is expected to put useful information into the
- * buffer. The adapter name, ID, and POS registers get printed
- * before this is called though, so don't do it again.
- *
- * This should be called with a %NULL @procfn when a module
- * unregisters, thus preventing kernel crashes and other such
- * nastiness.
- */
-
-void mca_set_adapter_procfn(int slot, MCA_ProcFn procfn, void* proc_dev)
-{
- struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
- if(!mca_dev)
- return;
-
- mca_dev->procfn = procfn;
- mca_dev->proc_dev = proc_dev;
-}
-EXPORT_SYMBOL(mca_set_adapter_procfn);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 17e2b472e16d..15dbe03117e4 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -45,7 +45,7 @@ static inline char *bmname(struct bitmap *bitmap)
* if we find our page, we increment the page's refcount so that it stays
* allocated while we're using it
*/
-static int bitmap_checkpage(struct bitmap *bitmap,
+static int bitmap_checkpage(struct bitmap_counts *bitmap,
unsigned long page, int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
@@ -76,8 +76,7 @@ __acquires(bitmap->lock)
spin_lock_irq(&bitmap->lock);
if (mappage == NULL) {
- pr_debug("%s: bitmap map page allocation failed, hijacking\n",
- bmname(bitmap));
+ pr_debug("md/bitmap: map page allocation failed, hijacking\n");
/* failed - set the hijacked flag so that we can use the
* pointer as a counter */
if (!bitmap->bp[page].map)
@@ -100,7 +99,7 @@ __acquires(bitmap->lock)
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
-static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
+static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
char *ptr;
@@ -130,22 +129,14 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
*/
/* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(struct mddev *mddev, loff_t offset,
- struct page *page,
- unsigned long index, int size)
+static int read_sb_page(struct mddev *mddev, loff_t offset,
+ struct page *page,
+ unsigned long index, int size)
{
/* choose a good rdev and read the page from there */
struct md_rdev *rdev;
sector_t target;
- int did_alloc = 0;
-
- if (!page) {
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return ERR_PTR(-ENOMEM);
- did_alloc = 1;
- }
rdev_for_each(rdev, mddev) {
if (! test_bit(In_sync, &rdev->flags)
@@ -158,15 +149,10 @@ static struct page *read_sb_page(struct mddev *mddev, loff_t offset,
roundup(size, bdev_logical_block_size(rdev->bdev)),
page, READ, true)) {
page->index = index;
- attach_page_buffers(page, NULL); /* so that free_buffer will
- * quietly no-op */
- return page;
+ return 0;
}
}
- if (did_alloc)
- put_page(page);
- return ERR_PTR(-EIO);
-
+ return -EIO;
}
static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
@@ -208,6 +194,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
struct md_rdev *rdev = NULL;
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
+ struct bitmap_storage *store = &bitmap->storage;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
@@ -215,9 +202,13 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
- if (page->index == bitmap->file_pages-1)
- size = roundup(bitmap->last_page_size,
+ if (page->index == store->file_pages-1) {
+ int last_page_size = store->bytes & (PAGE_SIZE-1);
+ if (last_page_size == 0)
+ last_page_size = PAGE_SIZE;
+ size = roundup(last_page_size,
bdev_logical_block_size(bdev));
+ }
/* Just make sure we aren't corrupting data or
* metadata
*/
@@ -276,10 +267,10 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
struct buffer_head *bh;
- if (bitmap->file == NULL) {
+ if (bitmap->storage.file == NULL) {
switch (write_sb_page(bitmap, page, wait)) {
case -EINVAL:
- bitmap->flags |= BITMAP_WRITE_ERROR;
+ set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
}
} else {
@@ -297,20 +288,16 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes)==0);
}
- if (bitmap->flags & BITMAP_WRITE_ERROR)
+ if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
}
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
struct bitmap *bitmap = bh->b_private;
- unsigned long flags;
- if (!uptodate) {
- spin_lock_irqsave(&bitmap->lock, flags);
- bitmap->flags |= BITMAP_WRITE_ERROR;
- spin_unlock_irqrestore(&bitmap->lock, flags);
- }
+ if (!uptodate)
+ set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
if (atomic_dec_and_test(&bitmap->pending_writes))
wake_up(&bitmap->write_wait);
}
@@ -325,8 +312,12 @@ __clear_page_buffers(struct page *page)
}
static void free_buffers(struct page *page)
{
- struct buffer_head *bh = page_buffers(page);
+ struct buffer_head *bh;
+ if (!PagePrivate(page))
+ return;
+
+ bh = page_buffers(page);
while (bh) {
struct buffer_head *next = bh->b_this_page;
free_buffer_head(bh);
@@ -343,11 +334,12 @@ static void free_buffers(struct page *page)
* This usage is similar to how swap files are handled, and allows us
* to write to a file with no concerns of memory allocation failing.
*/
-static struct page *read_page(struct file *file, unsigned long index,
- struct bitmap *bitmap,
- unsigned long count)
+static int read_page(struct file *file, unsigned long index,
+ struct bitmap *bitmap,
+ unsigned long count,
+ struct page *page)
{
- struct page *page = NULL;
+ int ret = 0;
struct inode *inode = file->f_path.dentry->d_inode;
struct buffer_head *bh;
sector_t block;
@@ -355,16 +347,9 @@ static struct page *read_page(struct file *file, unsigned long index,
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
- page = alloc_page(GFP_KERNEL);
- if (!page)
- page = ERR_PTR(-ENOMEM);
- if (IS_ERR(page))
- goto out;
-
bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
if (!bh) {
- put_page(page);
- page = ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
goto out;
}
attach_page_buffers(page, bh);
@@ -376,8 +361,7 @@ static struct page *read_page(struct file *file, unsigned long index,
bh->b_blocknr = bmap(inode, block);
if (bh->b_blocknr == 0) {
/* Cannot use this file! */
- free_buffers(page);
- page = ERR_PTR(-EINVAL);
+ ret = -EINVAL;
goto out;
}
bh->b_bdev = inode->i_sb->s_bdev;
@@ -400,17 +384,15 @@ static struct page *read_page(struct file *file, unsigned long index,
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes)==0);
- if (bitmap->flags & BITMAP_WRITE_ERROR) {
- free_buffers(page);
- page = ERR_PTR(-EIO);
- }
+ if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
+ ret = -EIO;
out:
- if (IS_ERR(page))
- printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
+ if (ret)
+ printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
(int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT,
- PTR_ERR(page));
- return page;
+ ret);
+ return ret;
}
/*
@@ -426,9 +408,9 @@ void bitmap_update_sb(struct bitmap *bitmap)
return;
if (bitmap->mddev->bitmap_info.external)
return;
- if (!bitmap->sb_page) /* no superblock */
+ if (!bitmap->storage.sb_page) /* no superblock */
return;
- sb = kmap_atomic(bitmap->sb_page);
+ sb = kmap_atomic(bitmap->storage.sb_page);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (bitmap->mddev->events < bitmap->events_cleared)
/* rocking back to read-only */
@@ -438,8 +420,13 @@ void bitmap_update_sb(struct bitmap *bitmap)
/* Just in case these have been changed via sysfs: */
sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
+ /* This might have been changed by a reshape */
+ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
+ sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
+ sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
+ bitmap_info.space);
kunmap_atomic(sb);
- write_page(bitmap, bitmap->sb_page, 1);
+ write_page(bitmap, bitmap->storage.sb_page, 1);
}
/* print out the bitmap file superblock */
@@ -447,9 +434,9 @@ void bitmap_print_sb(struct bitmap *bitmap)
{
bitmap_super_t *sb;
- if (!bitmap || !bitmap->sb_page)
+ if (!bitmap || !bitmap->storage.sb_page)
return;
- sb = kmap_atomic(bitmap->sb_page);
+ sb = kmap_atomic(bitmap->storage.sb_page);
printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@@ -488,15 +475,15 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
unsigned long chunksize, daemon_sleep, write_behind;
int err = -EINVAL;
- bitmap->sb_page = alloc_page(GFP_KERNEL);
- if (IS_ERR(bitmap->sb_page)) {
- err = PTR_ERR(bitmap->sb_page);
- bitmap->sb_page = NULL;
+ bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
+ if (IS_ERR(bitmap->storage.sb_page)) {
+ err = PTR_ERR(bitmap->storage.sb_page);
+ bitmap->storage.sb_page = NULL;
return err;
}
- bitmap->sb_page->index = 0;
+ bitmap->storage.sb_page->index = 0;
- sb = kmap_atomic(bitmap->sb_page);
+ sb = kmap_atomic(bitmap->storage.sb_page);
sb->magic = cpu_to_le32(BITMAP_MAGIC);
sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@@ -534,8 +521,8 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
memcpy(sb->uuid, bitmap->mddev->uuid, 16);
- bitmap->flags |= BITMAP_STALE;
- sb->state |= cpu_to_le32(BITMAP_STALE);
+ set_bit(BITMAP_STALE, &bitmap->flags);
+ sb->state = cpu_to_le32(bitmap->flags);
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
@@ -551,31 +538,45 @@ static int bitmap_read_sb(struct bitmap *bitmap)
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
unsigned long long events;
+ unsigned long sectors_reserved = 0;
int err = -EINVAL;
+ struct page *sb_page;
+ if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
+ chunksize = 128 * 1024 * 1024;
+ daemon_sleep = 5 * HZ;
+ write_behind = 0;
+ set_bit(BITMAP_STALE, &bitmap->flags);
+ err = 0;
+ goto out_no_sb;
+ }
/* page 0 is the superblock, read it... */
- if (bitmap->file) {
- loff_t isize = i_size_read(bitmap->file->f_mapping->host);
+ sb_page = alloc_page(GFP_KERNEL);
+ if (!sb_page)
+ return -ENOMEM;
+ bitmap->storage.sb_page = sb_page;
+
+ if (bitmap->storage.file) {
+ loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
- bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
+ err = read_page(bitmap->storage.file, 0,
+ bitmap, bytes, sb_page);
} else {
- bitmap->sb_page = read_sb_page(bitmap->mddev,
- bitmap->mddev->bitmap_info.offset,
- NULL,
- 0, sizeof(bitmap_super_t));
+ err = read_sb_page(bitmap->mddev,
+ bitmap->mddev->bitmap_info.offset,
+ sb_page,
+ 0, sizeof(bitmap_super_t));
}
- if (IS_ERR(bitmap->sb_page)) {
- err = PTR_ERR(bitmap->sb_page);
- bitmap->sb_page = NULL;
+ if (err)
return err;
- }
- sb = kmap_atomic(bitmap->sb_page);
+ sb = kmap_atomic(sb_page);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
+ sectors_reserved = le32_to_cpu(sb->sectors_reserved);
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -618,60 +619,32 @@ static int bitmap_read_sb(struct bitmap *bitmap)
"-- forcing full recovery\n",
bmname(bitmap), events,
(unsigned long long) bitmap->mddev->events);
- sb->state |= cpu_to_le32(BITMAP_STALE);
+ set_bit(BITMAP_STALE, &bitmap->flags);
}
}
/* assign fields using values from superblock */
- bitmap->mddev->bitmap_info.chunksize = chunksize;
- bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
- bitmap->mddev->bitmap_info.max_write_behind = write_behind;
bitmap->flags |= le32_to_cpu(sb->state);
if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
- bitmap->flags |= BITMAP_HOSTENDIAN;
+ set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
- if (bitmap->flags & BITMAP_STALE)
- bitmap->events_cleared = bitmap->mddev->events;
err = 0;
out:
kunmap_atomic(sb);
+out_no_sb:
+ if (test_bit(BITMAP_STALE, &bitmap->flags))
+ bitmap->events_cleared = bitmap->mddev->events;
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+ if (bitmap->mddev->bitmap_info.space == 0 ||
+ bitmap->mddev->bitmap_info.space > sectors_reserved)
+ bitmap->mddev->bitmap_info.space = sectors_reserved;
if (err)
bitmap_print_sb(bitmap);
return err;
}
-enum bitmap_mask_op {
- MASK_SET,
- MASK_UNSET
-};
-
-/* record the state of the bitmap in the superblock. Return the old value */
-static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
- enum bitmap_mask_op op)
-{
- bitmap_super_t *sb;
- int old;
-
- if (!bitmap->sb_page) /* can't set the state */
- return 0;
- sb = kmap_atomic(bitmap->sb_page);
- old = le32_to_cpu(sb->state) & bits;
- switch (op) {
- case MASK_SET:
- sb->state |= cpu_to_le32(bits);
- bitmap->flags |= bits;
- break;
- case MASK_UNSET:
- sb->state &= cpu_to_le32(~bits);
- bitmap->flags &= ~bits;
- break;
- default:
- BUG();
- }
- kunmap_atomic(sb);
- return old;
-}
-
/*
* general bitmap file operations
*/
@@ -683,17 +656,19 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
* file a page at a time. There's a superblock at the start of the file.
*/
/* calculate the index of the page that contains this bit */
-static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk)
+static inline unsigned long file_page_index(struct bitmap_storage *store,
+ unsigned long chunk)
{
- if (!bitmap->mddev->bitmap_info.external)
+ if (store->sb_page)
chunk += sizeof(bitmap_super_t) << 3;
return chunk >> PAGE_BIT_SHIFT;
}
/* calculate the (bit) offset of this bit within a page */
-static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk)
+static inline unsigned long file_page_offset(struct bitmap_storage *store,
+ unsigned long chunk)
{
- if (!bitmap->mddev->bitmap_info.external)
+ if (store->sb_page)
chunk += sizeof(bitmap_super_t) << 3;
return chunk & (PAGE_BITS - 1);
}
@@ -705,57 +680,86 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon
* 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
* 0 or page 1
*/
-static inline struct page *filemap_get_page(struct bitmap *bitmap,
+static inline struct page *filemap_get_page(struct bitmap_storage *store,
unsigned long chunk)
{
- if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
+ if (file_page_index(store, chunk) >= store->file_pages)
return NULL;
- return bitmap->filemap[file_page_index(bitmap, chunk)
- - file_page_index(bitmap, 0)];
+ return store->filemap[file_page_index(store, chunk)
+ - file_page_index(store, 0)];
}
-static void bitmap_file_unmap(struct bitmap *bitmap)
+static int bitmap_storage_alloc(struct bitmap_storage *store,
+ unsigned long chunks, int with_super)
+{
+ int pnum;
+ unsigned long num_pages;
+ unsigned long bytes;
+
+ bytes = DIV_ROUND_UP(chunks, 8);
+ if (with_super)
+ bytes += sizeof(bitmap_super_t);
+
+ num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
+
+ store->filemap = kmalloc(sizeof(struct page *)
+ * num_pages, GFP_KERNEL);
+ if (!store->filemap)
+ return -ENOMEM;
+
+ if (with_super && !store->sb_page) {
+ store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (store->sb_page == NULL)
+ return -ENOMEM;
+ store->sb_page->index = 0;
+ }
+ pnum = 0;
+ if (store->sb_page) {
+ store->filemap[0] = store->sb_page;
+ pnum = 1;
+ }
+ for ( ; pnum < num_pages; pnum++) {
+ store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!store->filemap[pnum]) {
+ store->file_pages = pnum;
+ return -ENOMEM;
+ }
+ store->filemap[pnum]->index = pnum;
+ }
+ store->file_pages = pnum;
+
+ /* We need 4 bits per page, rounded up to a multiple
+ * of sizeof(unsigned long) */
+ store->filemap_attr = kzalloc(
+ roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
+ GFP_KERNEL);
+ if (!store->filemap_attr)
+ return -ENOMEM;
+
+ store->bytes = bytes;
+
+ return 0;
+}
+
+static void bitmap_file_unmap(struct bitmap_storage *store)
{
struct page **map, *sb_page;
- unsigned long *attr;
int pages;
- unsigned long flags;
+ struct file *file;
- spin_lock_irqsave(&bitmap->lock, flags);
- map = bitmap->filemap;
- bitmap->filemap = NULL;
- attr = bitmap->filemap_attr;
- bitmap->filemap_attr = NULL;
- pages = bitmap->file_pages;
- bitmap->file_pages = 0;
- sb_page = bitmap->sb_page;
- bitmap->sb_page = NULL;
- spin_unlock_irqrestore(&bitmap->lock, flags);
+ file = store->file;
+ map = store->filemap;
+ pages = store->file_pages;
+ sb_page = store->sb_page;
while (pages--)
if (map[pages] != sb_page) /* 0 is sb_page, release it below */
free_buffers(map[pages]);
kfree(map);
- kfree(attr);
+ kfree(store->filemap_attr);
if (sb_page)
free_buffers(sb_page);
-}
-
-static void bitmap_file_put(struct bitmap *bitmap)
-{
- struct file *file;
- unsigned long flags;
-
- spin_lock_irqsave(&bitmap->lock, flags);
- file = bitmap->file;
- bitmap->file = NULL;
- spin_unlock_irqrestore(&bitmap->lock, flags);
-
- if (file)
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
- bitmap_file_unmap(bitmap);
if (file) {
struct inode *inode = file->f_path.dentry->d_inode;
@@ -773,14 +777,14 @@ static void bitmap_file_kick(struct bitmap *bitmap)
{
char *path, *ptr = NULL;
- if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
+ if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
bitmap_update_sb(bitmap);
- if (bitmap->file) {
+ if (bitmap->storage.file) {
path = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (path)
- ptr = d_path(&bitmap->file->f_path, path,
- PAGE_SIZE);
+ ptr = d_path(&bitmap->storage.file->f_path,
+ path, PAGE_SIZE);
printk(KERN_ALERT
"%s: kicking failed bitmap file %s from array!\n",
@@ -792,10 +796,6 @@ static void bitmap_file_kick(struct bitmap *bitmap)
"%s: disabling internal bitmap due to errors\n",
bmname(bitmap));
}
-
- bitmap_file_put(bitmap);
-
- return;
}
enum bitmap_page_attr {
@@ -805,24 +805,30 @@ enum bitmap_page_attr {
BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};
-static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
- enum bitmap_page_attr attr)
+static inline void set_page_attr(struct bitmap *bitmap, int pnum,
+ enum bitmap_page_attr attr)
{
- __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}
-static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
- enum bitmap_page_attr attr)
+static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
+ enum bitmap_page_attr attr)
{
- __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}
-static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
- enum bitmap_page_attr attr)
+static inline int test_page_attr(struct bitmap *bitmap, int pnum,
+ enum bitmap_page_attr attr)
{
- return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
+ return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}
+static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
+ enum bitmap_page_attr attr)
+{
+ return test_and_clear_bit((pnum<<2) + attr,
+ bitmap->storage.filemap_attr);
+}
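
These helpers all address the same packing: each filemap page owns a 4-bit slot in filemap_attr, and the attribute enum picks a bit inside that slot. A small sketch of how a (page number, attribute) pair maps to a bit position, with a hypothetical page number and local copies of the attribute values:

	#include <stdio.h>

	enum { PAGE_DIRTY = 0, PAGE_PENDING = 1, PAGE_NEEDWRITE = 2 };

	int main(void)
	{
		unsigned long pnum = 3;   /* hypothetical filemap page number */
		unsigned long bit = (pnum << 2) + PAGE_NEEDWRITE;
		unsigned long word = bit / (8 * sizeof(unsigned long));
		unsigned long shift = bit % (8 * sizeof(unsigned long));

		printf("page %lu, NEEDWRITE -> bit %lu (word %lu, shift %lu)\n",
		       pnum, bit, word, shift);
		return 0;
	}
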
/*
* bitmap_file_set_bit -- called before performing a write to the md device
* to set (and eventually sync) a particular bit in the bitmap file
@@ -835,26 +841,46 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
unsigned long bit;
struct page *page;
void *kaddr;
- unsigned long chunk = block >> bitmap->chunkshift;
+ unsigned long chunk = block >> bitmap->counts.chunkshift;
- if (!bitmap->filemap)
- return;
-
- page = filemap_get_page(bitmap, chunk);
+ page = filemap_get_page(&bitmap->storage, chunk);
if (!page)
return;
- bit = file_page_offset(bitmap, chunk);
+ bit = file_page_offset(&bitmap->storage, chunk);
/* set the bit */
kaddr = kmap_atomic(page);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
+ if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
set_bit(bit, kaddr);
else
- __set_bit_le(bit, kaddr);
+ test_and_set_bit_le(bit, kaddr);
kunmap_atomic(kaddr);
pr_debug("set file bit %lu page %lu\n", bit, page->index);
/* record page number so it gets flushed to disk when unplug occurs */
- set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
+ set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY);
+}
+
+static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
+{
+ unsigned long bit;
+ struct page *page;
+ void *paddr;
+ unsigned long chunk = block >> bitmap->counts.chunkshift;
+
+ page = filemap_get_page(&bitmap->storage, chunk);
+ if (!page)
+ return;
+ bit = file_page_offset(&bitmap->storage, chunk);
+ paddr = kmap_atomic(page);
+ if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
+ clear_bit(bit, paddr);
+ else
+ test_and_clear_bit_le(bit, paddr);
+ kunmap_atomic(paddr);
+ if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) {
+ set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING);
+ bitmap->allclean = 0;
+ }
}
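
One way to picture the HOSTENDIAN split used by both set_bit and clear_bit paths: the *_le bitops give a fixed little-endian file layout (bit N always lands in byte N/8, bit N%8, portable across hosts), while the host-endian variant operates on native unsigned longs and is only meaningful between machines of the same endianness. A trivial sketch of the little-endian mapping, with a made-up bit number:

	#include <stdio.h>

	int main(void)
	{
		unsigned long chunk_bit = 37;   /* hypothetical bit within the file */
		printf("LE layout: byte %lu, bit %lu\n",
		       chunk_bit / 8, chunk_bit % 8);
		return 0;
	}
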
/* this gets called when the md device is ready to unplug its underlying
@@ -862,42 +888,37 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
* sync the dirty pages of the bitmap file to disk */
void bitmap_unplug(struct bitmap *bitmap)
{
- unsigned long i, flags;
+ unsigned long i;
int dirty, need_write;
- struct page *page;
int wait = 0;
- if (!bitmap)
+ if (!bitmap || !bitmap->storage.filemap ||
+ test_bit(BITMAP_STALE, &bitmap->flags))
return;
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
- for (i = 0; i < bitmap->file_pages; i++) {
- spin_lock_irqsave(&bitmap->lock, flags);
- if (!bitmap->filemap) {
- spin_unlock_irqrestore(&bitmap->lock, flags);
+ for (i = 0; i < bitmap->storage.file_pages; i++) {
+ if (!bitmap->storage.filemap)
return;
+ dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
+ need_write = test_and_clear_page_attr(bitmap, i,
+ BITMAP_PAGE_NEEDWRITE);
+ if (dirty || need_write) {
+ clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
+ write_page(bitmap, bitmap->storage.filemap[i], 0);
}
- page = bitmap->filemap[i];
- dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
- need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
- clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
- clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
if (dirty)
wait = 1;
- spin_unlock_irqrestore(&bitmap->lock, flags);
-
- if (dirty || need_write)
- write_page(bitmap, page, 0);
}
if (wait) { /* if any writes were performed, we need to wait on them */
- if (bitmap->file)
+ if (bitmap->storage.file)
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes)==0);
else
md_super_wait(bitmap->mddev);
}
- if (bitmap->flags & BITMAP_WRITE_ERROR)
+ if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(bitmap_unplug);
@@ -917,98 +938,77 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
unsigned long i, chunks, index, oldindex, bit;
- struct page *page = NULL, *oldpage = NULL;
- unsigned long num_pages, bit_cnt = 0;
+ struct page *page = NULL;
+ unsigned long bit_cnt = 0;
struct file *file;
- unsigned long bytes, offset;
+ unsigned long offset;
int outofdate;
int ret = -ENOSPC;
void *paddr;
+ struct bitmap_storage *store = &bitmap->storage;
- chunks = bitmap->chunks;
- file = bitmap->file;
+ chunks = bitmap->counts.chunks;
+ file = store->file;
- BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);
+ if (!file && !bitmap->mddev->bitmap_info.offset) {
+ /* No permanent bitmap - fill with '1s'. */
+ store->filemap = NULL;
+ store->file_pages = 0;
+ for (i = 0; i < chunks ; i++) {
+ /* if the disk bit is set, set the memory bit */
+ int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
+ >= start);
+ bitmap_set_memory_bits(bitmap,
+ (sector_t)i << bitmap->counts.chunkshift,
+ needed);
+ }
+ return 0;
+ }
- outofdate = bitmap->flags & BITMAP_STALE;
+ outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
if (outofdate)
printk(KERN_INFO "%s: bitmap file is out of date, doing full "
"recovery\n", bmname(bitmap));
- bytes = DIV_ROUND_UP(bitmap->chunks, 8);
- if (!bitmap->mddev->bitmap_info.external)
- bytes += sizeof(bitmap_super_t);
-
- num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
-
- if (file && i_size_read(file->f_mapping->host) < bytes) {
+ if (file && i_size_read(file->f_mapping->host) < store->bytes) {
printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
- bmname(bitmap),
- (unsigned long) i_size_read(file->f_mapping->host),
- bytes);
+ bmname(bitmap),
+ (unsigned long) i_size_read(file->f_mapping->host),
+ store->bytes);
goto err;
}
- ret = -ENOMEM;
-
- bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
- if (!bitmap->filemap)
- goto err;
-
- /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
- bitmap->filemap_attr = kzalloc(
- roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
- GFP_KERNEL);
- if (!bitmap->filemap_attr)
- goto err;
-
oldindex = ~0L;
+ offset = 0;
+ if (!bitmap->mddev->bitmap_info.external)
+ offset = sizeof(bitmap_super_t);
for (i = 0; i < chunks; i++) {
int b;
- index = file_page_index(bitmap, i);
- bit = file_page_offset(bitmap, i);
+ index = file_page_index(&bitmap->storage, i);
+ bit = file_page_offset(&bitmap->storage, i);
if (index != oldindex) { /* this is a new page, read it in */
int count;
/* unmap the old page, we're done with it */
- if (index == num_pages-1)
- count = bytes - index * PAGE_SIZE;
+ if (index == store->file_pages-1)
+ count = store->bytes - index * PAGE_SIZE;
else
count = PAGE_SIZE;
- if (index == 0 && bitmap->sb_page) {
- /*
- * if we're here then the superblock page
- * contains some bits (PAGE_SIZE != sizeof sb)
- * we've already read it in, so just use it
- */
- page = bitmap->sb_page;
- offset = sizeof(bitmap_super_t);
- if (!file)
- page = read_sb_page(
- bitmap->mddev,
- bitmap->mddev->bitmap_info.offset,
- page,
- index, count);
- } else if (file) {
- page = read_page(file, index, bitmap, count);
- offset = 0;
- } else {
- page = read_sb_page(bitmap->mddev,
- bitmap->mddev->bitmap_info.offset,
- NULL,
- index, count);
- offset = 0;
- }
- if (IS_ERR(page)) { /* read error */
- ret = PTR_ERR(page);
+ page = store->filemap[index];
+ if (file)
+ ret = read_page(file, index, bitmap,
+ count, page);
+ else
+ ret = read_sb_page(
+ bitmap->mddev,
+ bitmap->mddev->bitmap_info.offset,
+ page,
+ index, count);
+
+ if (ret)
goto err;
- }
oldindex = index;
- oldpage = page;
-
- bitmap->filemap[bitmap->file_pages++] = page;
- bitmap->last_page_size = count;
if (outofdate) {
/*
@@ -1022,39 +1022,33 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
write_page(bitmap, page, 1);
ret = -EIO;
- if (bitmap->flags & BITMAP_WRITE_ERROR)
+ if (test_bit(BITMAP_WRITE_ERROR,
+ &bitmap->flags))
goto err;
}
}
paddr = kmap_atomic(page);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
+ if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
b = test_bit(bit, paddr);
else
b = test_bit_le(bit, paddr);
kunmap_atomic(paddr);
if (b) {
/* if the disk bit is set, set the memory bit */
- int needed = ((sector_t)(i+1) << bitmap->chunkshift
+ int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
>= start);
bitmap_set_memory_bits(bitmap,
- (sector_t)i << bitmap->chunkshift,
+ (sector_t)i << bitmap->counts.chunkshift,
needed);
bit_cnt++;
}
- }
-
- /* everything went OK */
- ret = 0;
- bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
-
- if (bit_cnt) { /* Kick recovery if any bits were set */
- set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
- md_wakeup_thread(bitmap->mddev->thread);
+ offset = 0;
}
printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu/%lu pages, set %lu of %lu bits\n",
- bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
+ "read %lu pages, set %lu of %lu bits\n",
+ bmname(bitmap), store->file_pages,
+ bit_cnt, chunks);
return 0;
@@ -1071,22 +1065,38 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
- spin_lock_irq(&bitmap->lock);
- for (i = 0; i < bitmap->file_pages; i++)
- set_page_attr(bitmap, bitmap->filemap[i],
+ if (!bitmap || !bitmap->storage.filemap)
+ return;
+ if (bitmap->storage.file)
+ /* Only one copy, so nothing needed */
+ return;
+
+ for (i = 0; i < bitmap->storage.file_pages; i++)
+ set_page_attr(bitmap, i,
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
- spin_unlock_irq(&bitmap->lock);
}
-static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
+static void bitmap_count_page(struct bitmap_counts *bitmap,
+ sector_t offset, int inc)
{
sector_t chunk = offset >> bitmap->chunkshift;
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
bitmap->bp[page].count += inc;
bitmap_checkfree(bitmap, page);
}
-static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
+
+static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
+{
+ sector_t chunk = offset >> bitmap->chunkshift;
+ unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
+ struct bitmap_page *bp = &bitmap->bp[page];
+
+ if (!bp->pending)
+ bp->pending = 1;
+}
+
+static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
sector_t offset, sector_t *blocks,
int create);
@@ -1099,10 +1109,9 @@ void bitmap_daemon_work(struct mddev *mddev)
{
struct bitmap *bitmap;
unsigned long j;
- unsigned long flags;
- struct page *page = NULL, *lastpage = NULL;
+ unsigned long nextpage;
sector_t blocks;
- void *paddr;
+ struct bitmap_counts *counts;
/* Use a mutex to guard daemon_work against
* bitmap_destroy.
@@ -1124,112 +1133,90 @@ void bitmap_daemon_work(struct mddev *mddev)
}
bitmap->allclean = 1;
- spin_lock_irqsave(&bitmap->lock, flags);
- for (j = 0; j < bitmap->chunks; j++) {
+ /* Any file-page which is PENDING now needs to be written.
+ * So set NEEDWRITE now, then after we make any last-minute changes
+ * we will write it.
+ */
+ for (j = 0; j < bitmap->storage.file_pages; j++)
+ if (test_and_clear_page_attr(bitmap, j,
+ BITMAP_PAGE_PENDING))
+ set_page_attr(bitmap, j,
+ BITMAP_PAGE_NEEDWRITE);
+
+ if (bitmap->need_sync &&
+ mddev->bitmap_info.external == 0) {
+ /* Arrange for superblock update as well as
+ * other changes */
+ bitmap_super_t *sb;
+ bitmap->need_sync = 0;
+ if (bitmap->storage.filemap) {
+ sb = kmap_atomic(bitmap->storage.sb_page);
+ sb->events_cleared =
+ cpu_to_le64(bitmap->events_cleared);
+ kunmap_atomic(sb);
+ set_page_attr(bitmap, 0,
+ BITMAP_PAGE_NEEDWRITE);
+ }
+ }
+ /* Now look at the bitmap counters and if any are '2' or '1',
+ * decrement and handle accordingly.
+ */
+ counts = &bitmap->counts;
+ spin_lock_irq(&counts->lock);
+ nextpage = 0;
+ for (j = 0; j < counts->chunks; j++) {
bitmap_counter_t *bmc;
- if (!bitmap->filemap)
- /* error or shutdown */
- break;
+ sector_t block = (sector_t)j << counts->chunkshift;
- page = filemap_get_page(bitmap, j);
-
- if (page != lastpage) {
- /* skip this page unless it's marked as needing cleaning */
- if (!test_page_attr(bitmap, page, BITMAP_PAGE_PENDING)) {
- int need_write = test_page_attr(bitmap, page,
- BITMAP_PAGE_NEEDWRITE);
- if (need_write)
- clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
-
- spin_unlock_irqrestore(&bitmap->lock, flags);
- if (need_write)
- write_page(bitmap, page, 0);
- spin_lock_irqsave(&bitmap->lock, flags);
- j |= (PAGE_BITS - 1);
+ if (j == nextpage) {
+ nextpage += PAGE_COUNTER_RATIO;
+ if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
+ j |= PAGE_COUNTER_MASK;
continue;
}
-
- /* grab the new page, sync and release the old */
- if (lastpage != NULL) {
- if (test_page_attr(bitmap, lastpage,
- BITMAP_PAGE_NEEDWRITE)) {
- clear_page_attr(bitmap, lastpage,
- BITMAP_PAGE_NEEDWRITE);
- spin_unlock_irqrestore(&bitmap->lock, flags);
- write_page(bitmap, lastpage, 0);
- } else {
- set_page_attr(bitmap, lastpage,
- BITMAP_PAGE_NEEDWRITE);
- bitmap->allclean = 0;
- spin_unlock_irqrestore(&bitmap->lock, flags);
- }
- } else
- spin_unlock_irqrestore(&bitmap->lock, flags);
- lastpage = page;
-
- /* We are possibly going to clear some bits, so make
- * sure that events_cleared is up-to-date.
- */
- if (bitmap->need_sync &&
- mddev->bitmap_info.external == 0) {
- bitmap_super_t *sb;
- bitmap->need_sync = 0;
- sb = kmap_atomic(bitmap->sb_page);
- sb->events_cleared =
- cpu_to_le64(bitmap->events_cleared);
- kunmap_atomic(sb);
- write_page(bitmap, bitmap->sb_page, 1);
- }
- spin_lock_irqsave(&bitmap->lock, flags);
- if (!bitmap->need_sync)
- clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
- else
- bitmap->allclean = 0;
+ counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
}
- bmc = bitmap_get_counter(bitmap,
- (sector_t)j << bitmap->chunkshift,
+ bmc = bitmap_get_counter(counts,
+ block,
&blocks, 0);
- if (!bmc)
+
+ if (!bmc) {
j |= PAGE_COUNTER_MASK;
- else if (*bmc) {
- if (*bmc == 1 && !bitmap->need_sync) {
- /* we can clear the bit */
- *bmc = 0;
- bitmap_count_page(bitmap,
- (sector_t)j << bitmap->chunkshift,
- -1);
-
- /* clear the bit */
- paddr = kmap_atomic(page);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- clear_bit(file_page_offset(bitmap, j),
- paddr);
- else
- __clear_bit_le(
- file_page_offset(bitmap,
- j),
- paddr);
- kunmap_atomic(paddr);
- } else if (*bmc <= 2) {
- *bmc = 1; /* maybe clear the bit next time */
- set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
- bitmap->allclean = 0;
- }
+ continue;
}
- }
- spin_unlock_irqrestore(&bitmap->lock, flags);
-
- /* now sync the final page */
- if (lastpage != NULL) {
- spin_lock_irqsave(&bitmap->lock, flags);
- if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
- clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
- spin_unlock_irqrestore(&bitmap->lock, flags);
- write_page(bitmap, lastpage, 0);
- } else {
- set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
+ if (*bmc == 1 && !bitmap->need_sync) {
+ /* We can clear the bit */
+ *bmc = 0;
+ bitmap_count_page(counts, block, -1);
+ bitmap_file_clear_bit(bitmap, block);
+ } else if (*bmc && *bmc <= 2) {
+ *bmc = 1;
+ bitmap_set_pending(counts, block);
bitmap->allclean = 0;
- spin_unlock_irqrestore(&bitmap->lock, flags);
+ }
+ }
+ spin_unlock_irq(&counts->lock);
+
+ /* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
+ * DIRTY pages need to be written by bitmap_unplug so it can wait
+ * for them.
+ * If we find any DIRTY page we stop there and let bitmap_unplug
+ * handle all the rest. This is important in the case where
+	 * the first block holds the superblock and it has been updated.
+ * We mustn't write any other blocks before the superblock.
+ */
+ for (j = 0;
+ j < bitmap->storage.file_pages
+ && !test_bit(BITMAP_STALE, &bitmap->flags);
+ j++) {
+
+ if (test_page_attr(bitmap, j,
+ BITMAP_PAGE_DIRTY))
+ /* bitmap_unplug will handle the rest */
+ break;
+ if (test_and_clear_page_attr(bitmap, j,
+ BITMAP_PAGE_NEEDWRITE)) {
+ write_page(bitmap, bitmap->storage.filemap[j], 0);
}
}
@@ -1240,7 +1227,7 @@ void bitmap_daemon_work(struct mddev *mddev)
mutex_unlock(&mddev->bitmap_info.mutex);
}
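
A minimal userspace model of the counter decay the reworked daemon performs on each pass: a counter of 1 (no writes in flight, already aged once) lets the on-disk bit be cleared, a counter of 2 is aged down to 1 and its page marked pending for the next pass. This deliberately ignores need_sync and the NEEDED/RESYNC high bits, so it is an illustration only:

	#include <stdio.h>

	int main(void)
	{
		unsigned counters[] = { 0, 1, 2, 5 };   /* hypothetical chunk counters */
		for (unsigned i = 0; i < 4; i++) {
			unsigned c = counters[i];
			if (c == 1)
				printf("chunk %u: clear on-disk bit, counter -> 0\n", i);
			else if (c == 2)
				printf("chunk %u: age counter -> 1, mark page pending\n", i);
			else
				printf("chunk %u: counter %u, leave alone\n", i, c);
		}
		return 0;
	}
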
-static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
+static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
sector_t offset, sector_t *blocks,
int create)
__releases(bitmap->lock)
@@ -1302,10 +1289,10 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
sector_t blocks;
bitmap_counter_t *bmc;
- spin_lock_irq(&bitmap->lock);
- bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
+ spin_lock_irq(&bitmap->counts.lock);
+ bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
if (!bmc) {
- spin_unlock_irq(&bitmap->lock);
+ spin_unlock_irq(&bitmap->counts.lock);
return 0;
}
@@ -1317,7 +1304,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
*/
prepare_to_wait(&bitmap->overflow_wait, &__wait,
TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&bitmap->lock);
+ spin_unlock_irq(&bitmap->counts.lock);
io_schedule();
finish_wait(&bitmap->overflow_wait, &__wait);
continue;
@@ -1326,7 +1313,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
switch (*bmc) {
case 0:
bitmap_file_set_bit(bitmap, offset);
- bitmap_count_page(bitmap, offset, 1);
+ bitmap_count_page(&bitmap->counts, offset, 1);
/* fall through */
case 1:
*bmc = 2;
@@ -1334,7 +1321,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
(*bmc)++;
- spin_unlock_irq(&bitmap->lock);
+ spin_unlock_irq(&bitmap->counts.lock);
offset += blocks;
if (sectors > blocks)
@@ -1364,10 +1351,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
unsigned long flags;
bitmap_counter_t *bmc;
- spin_lock_irqsave(&bitmap->lock, flags);
- bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
+ spin_lock_irqsave(&bitmap->counts.lock, flags);
+ bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
if (!bmc) {
- spin_unlock_irqrestore(&bitmap->lock, flags);
+ spin_unlock_irqrestore(&bitmap->counts.lock, flags);
return;
}
@@ -1386,14 +1373,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
(*bmc)--;
if (*bmc <= 2) {
- set_page_attr(bitmap,
- filemap_get_page(
- bitmap,
- offset >> bitmap->chunkshift),
- BITMAP_PAGE_PENDING);
+ bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
- spin_unlock_irqrestore(&bitmap->lock, flags);
+ spin_unlock_irqrestore(&bitmap->counts.lock, flags);
offset += blocks;
if (sectors > blocks)
sectors -= blocks;
@@ -1412,8 +1395,8 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
*blocks = 1024;
return 1; /* always resync if no bitmap */
}
- spin_lock_irq(&bitmap->lock);
- bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
+ spin_lock_irq(&bitmap->counts.lock);
+ bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
rv = 0;
if (bmc) {
/* locked */
@@ -1427,7 +1410,7 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
}
}
}
- spin_unlock_irq(&bitmap->lock);
+ spin_unlock_irq(&bitmap->counts.lock);
return rv;
}
@@ -1464,8 +1447,8 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
*blocks = 1024;
return;
}
- spin_lock_irqsave(&bitmap->lock, flags);
- bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
+ spin_lock_irqsave(&bitmap->counts.lock, flags);
+ bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
if (bmc == NULL)
goto unlock;
/* locked */
@@ -1476,15 +1459,13 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
*bmc |= NEEDED_MASK;
else {
if (*bmc <= 2) {
- set_page_attr(bitmap,
- filemap_get_page(bitmap, offset >> bitmap->chunkshift),
- BITMAP_PAGE_PENDING);
+ bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
}
}
unlock:
- spin_unlock_irqrestore(&bitmap->lock, flags);
+ spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
EXPORT_SYMBOL(bitmap_end_sync);
@@ -1524,7 +1505,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
bitmap->mddev->curr_resync_completed = sector;
set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
- sector &= ~((1ULL << bitmap->chunkshift) - 1);
+ sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
s = 0;
while (s < sector && s < bitmap->mddev->resync_max_sectors) {
bitmap_end_sync(bitmap, s, &blocks, 0);
@@ -1538,27 +1519,25 @@ EXPORT_SYMBOL(bitmap_cond_end_sync);
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
/* For each chunk covered by any of these sectors, set the
- * counter to 1 and set resync_needed. They should all
+ * counter to 2 and possibly set resync_needed. They should all
* be 0 at this point
*/
sector_t secs;
bitmap_counter_t *bmc;
- spin_lock_irq(&bitmap->lock);
- bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
+ spin_lock_irq(&bitmap->counts.lock);
+ bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
if (!bmc) {
- spin_unlock_irq(&bitmap->lock);
+ spin_unlock_irq(&bitmap->counts.lock);
return;
}
if (!*bmc) {
- struct page *page;
*bmc = 2 | (needed ? NEEDED_MASK : 0);
- bitmap_count_page(bitmap, offset, 1);
- page = filemap_get_page(bitmap, offset >> bitmap->chunkshift);
- set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
+ bitmap_count_page(&bitmap->counts, offset, 1);
+ bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
- spin_unlock_irq(&bitmap->lock);
+ spin_unlock_irq(&bitmap->counts.lock);
}
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
@@ -1567,11 +1546,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
unsigned long chunk;
for (chunk = s; chunk <= e; chunk++) {
- sector_t sec = (sector_t)chunk << bitmap->chunkshift;
+ sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
bitmap_set_memory_bits(bitmap, sec, 1);
- spin_lock_irq(&bitmap->lock);
bitmap_file_set_bit(bitmap, sec);
- spin_unlock_irq(&bitmap->lock);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
@@ -1616,11 +1593,15 @@ static void bitmap_free(struct bitmap *bitmap)
if (!bitmap) /* there was no bitmap */
return;
- /* release the bitmap file and kill the daemon */
- bitmap_file_put(bitmap);
+ /* Shouldn't be needed - but just in case.... */
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes) == 0);
+
+ /* release the bitmap file */
+ bitmap_file_unmap(&bitmap->storage);
- bp = bitmap->bp;
- pages = bitmap->pages;
+ bp = bitmap->counts.bp;
+ pages = bitmap->counts.pages;
/* free all allocated memory */
@@ -1659,25 +1640,19 @@ int bitmap_create(struct mddev *mddev)
{
struct bitmap *bitmap;
sector_t blocks = mddev->resync_max_sectors;
- unsigned long chunks;
- unsigned long pages;
struct file *file = mddev->bitmap_info.file;
int err;
struct sysfs_dirent *bm = NULL;
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
- if (!file
- && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
- return 0;
-
BUG_ON(file && mddev->bitmap_info.offset);
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
return -ENOMEM;
- spin_lock_init(&bitmap->lock);
+ spin_lock_init(&bitmap->counts.lock);
atomic_set(&bitmap->pending_writes, 0);
init_waitqueue_head(&bitmap->write_wait);
init_waitqueue_head(&bitmap->overflow_wait);
@@ -1693,7 +1668,7 @@ int bitmap_create(struct mddev *mddev)
} else
bitmap->sysfs_can_clear = NULL;
- bitmap->file = file;
+ bitmap->storage.file = file;
if (file) {
get_file(file);
/* As future accesses to this file will use bmap,
@@ -1724,32 +1699,15 @@ int bitmap_create(struct mddev *mddev)
goto error;
bitmap->daemon_lastrun = jiffies;
- bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
- - BITMAP_BLOCK_SHIFT);
-
- chunks = (blocks + (1 << bitmap->chunkshift) - 1) >>
- bitmap->chunkshift;
- pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
-
- BUG_ON(!pages);
-
- bitmap->chunks = chunks;
- bitmap->pages = pages;
- bitmap->missing_pages = pages;
-
- bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
-
- err = -ENOMEM;
- if (!bitmap->bp)
+ err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
+ if (err)
goto error;
printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
- pages, bmname(bitmap));
+ bitmap->counts.pages, bmname(bitmap));
mddev->bitmap = bitmap;
-
-
- return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
+ return test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
error:
bitmap_free(bitmap);
@@ -1790,13 +1748,17 @@ int bitmap_load(struct mddev *mddev)
if (err)
goto out;
+ clear_bit(BITMAP_STALE, &bitmap->flags);
+
+ /* Kick recovery in case any bits were set */
+ set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
md_wakeup_thread(mddev->thread);
bitmap_update_sb(bitmap);
- if (bitmap->flags & BITMAP_WRITE_ERROR)
+ if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
err = -EIO;
out:
return err;
@@ -1806,30 +1768,194 @@ EXPORT_SYMBOL_GPL(bitmap_load);
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
unsigned long chunk_kb;
- unsigned long flags;
+ struct bitmap_counts *counts;
if (!bitmap)
return;
- spin_lock_irqsave(&bitmap->lock, flags);
+ counts = &bitmap->counts;
+
chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
"%lu%s chunk",
- bitmap->pages - bitmap->missing_pages,
- bitmap->pages,
- (bitmap->pages - bitmap->missing_pages)
+ counts->pages - counts->missing_pages,
+ counts->pages,
+ (counts->pages - counts->missing_pages)
<< (PAGE_SHIFT - 10),
chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
chunk_kb ? "KB" : "B");
- if (bitmap->file) {
+ if (bitmap->storage.file) {
seq_printf(seq, ", file: ");
- seq_path(seq, &bitmap->file->f_path, " \t\n");
+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
}
seq_printf(seq, "\n");
- spin_unlock_irqrestore(&bitmap->lock, flags);
}
+int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ int chunksize, int init)
+{
+ /* If chunk_size is 0, choose an appropriate chunk size.
+ * Then possibly allocate new storage space.
+ * Then quiesce, copy bits, replace bitmap, and re-start
+ *
+ * This function is called both to set up the initial bitmap
+ * and to resize the bitmap while the array is active.
+ * If this happens as a result of the array being resized,
+ * chunksize will be zero, and we need to choose a suitable
+ * chunksize, otherwise we use what we are given.
+ */
+ struct bitmap_storage store;
+ struct bitmap_counts old_counts;
+ unsigned long chunks;
+ sector_t block;
+ sector_t old_blocks, new_blocks;
+ int chunkshift;
+ int ret = 0;
+ long pages;
+ struct bitmap_page *new_bp;
+
+ if (chunksize == 0) {
+ /* If there is enough space, leave the chunk size unchanged,
+ * else increase by factor of two until there is enough space.
+ */
+ long bytes;
+ long space = bitmap->mddev->bitmap_info.space;
+
+ if (space == 0) {
+ /* We don't know how much space there is, so limit
+ * to current size - in sectors.
+ */
+ bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
+ if (!bitmap->mddev->bitmap_info.external)
+ bytes += sizeof(bitmap_super_t);
+ space = DIV_ROUND_UP(bytes, 512);
+ bitmap->mddev->bitmap_info.space = space;
+ }
+ chunkshift = bitmap->counts.chunkshift;
+ chunkshift--;
+ do {
+ /* 'chunkshift' is shift from block size to chunk size */
+ chunkshift++;
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
+ bytes = DIV_ROUND_UP(chunks, 8);
+ if (!bitmap->mddev->bitmap_info.external)
+ bytes += sizeof(bitmap_super_t);
+ } while (bytes > (space << 9));
+ } else
+ chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
+
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
+ memset(&store, 0, sizeof(store));
+ if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
+ ret = bitmap_storage_alloc(&store, chunks,
+ !bitmap->mddev->bitmap_info.external);
+ if (ret)
+ goto err;
+
+ pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
+
+ new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!new_bp) {
+ bitmap_file_unmap(&store);
+ goto err;
+ }
+
+ if (!init)
+ bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
+
+ store.file = bitmap->storage.file;
+ bitmap->storage.file = NULL;
+
+ if (store.sb_page && bitmap->storage.sb_page)
+ memcpy(page_address(store.sb_page),
+ page_address(bitmap->storage.sb_page),
+ sizeof(bitmap_super_t));
+ bitmap_file_unmap(&bitmap->storage);
+ bitmap->storage = store;
+
+ old_counts = bitmap->counts;
+ bitmap->counts.bp = new_bp;
+ bitmap->counts.pages = pages;
+ bitmap->counts.missing_pages = pages;
+ bitmap->counts.chunkshift = chunkshift;
+ bitmap->counts.chunks = chunks;
+ bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
+ BITMAP_BLOCK_SHIFT);
+
+ blocks = min(old_counts.chunks << old_counts.chunkshift,
+ chunks << chunkshift);
+
+ spin_lock_irq(&bitmap->counts.lock);
+ for (block = 0; block < blocks; ) {
+ bitmap_counter_t *bmc_old, *bmc_new;
+ int set;
+
+ bmc_old = bitmap_get_counter(&old_counts, block,
+ &old_blocks, 0);
+ set = bmc_old && NEEDED(*bmc_old);
+
+ if (set) {
+ bmc_new = bitmap_get_counter(&bitmap->counts, block,
+ &new_blocks, 1);
+ if (*bmc_new == 0) {
+ /* need to set on-disk bits too. */
+ sector_t end = block + new_blocks;
+ sector_t start = block >> chunkshift;
+ start <<= chunkshift;
+ while (start < end) {
+ bitmap_file_set_bit(bitmap, block);
+ start += 1 << chunkshift;
+ }
+ *bmc_new = 2;
+ bitmap_count_page(&bitmap->counts,
+ block, 1);
+ bitmap_set_pending(&bitmap->counts,
+ block);
+ }
+ *bmc_new |= NEEDED_MASK;
+ if (new_blocks < old_blocks)
+ old_blocks = new_blocks;
+ }
+ block += old_blocks;
+ }
+
+ if (!init) {
+ int i;
+ while (block < (chunks << chunkshift)) {
+ bitmap_counter_t *bmc;
+ bmc = bitmap_get_counter(&bitmap->counts, block,
+ &new_blocks, 1);
+ if (bmc) {
+ /* new space. It needs to be resynced, so
+ * we set NEEDED_MASK.
+ */
+ if (*bmc == 0) {
+ *bmc = NEEDED_MASK | 2;
+ bitmap_count_page(&bitmap->counts,
+ block, 1);
+ bitmap_set_pending(&bitmap->counts,
+ block);
+ }
+ }
+ block += new_blocks;
+ }
+ for (i = 0; i < bitmap->storage.file_pages; i++)
+ set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
+ }
+ spin_unlock_irq(&bitmap->counts.lock);
+
+ if (!init) {
+ bitmap_unplug(bitmap);
+ bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
+ }
+ ret = 0;
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bitmap_resize);
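
The chunk-size search at the top of bitmap_resize() keeps doubling the chunk size (incrementing chunkshift) until the bitmap plus superblock fits in the reserved space. A self-contained model of that loop is below; the array size, reserved space and starting shift are made-up numbers, and the BITMAP_BLOCK_SHIFT bookkeeping is omitted:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long long blocks = 1ULL << 31; /* hypothetical array size, 512B sectors */
		long space = 64;                        /* hypothetical reserved space, sectors */
		long sb_bytes = 256;                    /* sizeof(bitmap_super_t) */
		int chunkshift = 5;                     /* hypothetical current shift */
		unsigned long long chunks, bytes;

		chunkshift--;
		do {
			chunkshift++;
			chunks = DIV_ROUND_UP(blocks, 1ULL << chunkshift);
			bytes = DIV_ROUND_UP(chunks, 8) + sb_bytes;
		} while (bytes > (unsigned long long)(space << 9));

		printf("chose chunkshift %d: %llu chunks, %llu bytes\n",
		       chunkshift, chunks, bytes);
		return 0;
	}
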
+
static ssize_t
location_show(struct mddev *mddev, char *page)
{
@@ -1923,6 +2049,43 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
+/* 'bitmap/space' is the space available at 'location' for the
+ * bitmap. This allows the kernel to know when it is safe to
+ * resize the bitmap to match a resized array.
+ */
+static ssize_t
+space_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%lu\n", mddev->bitmap_info.space);
+}
+
+static ssize_t
+space_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned long sectors;
+ int rv;
+
+ rv = kstrtoul(buf, 10, &sectors);
+ if (rv)
+ return rv;
+
+ if (sectors == 0)
+ return -EINVAL;
+
+ if (mddev->bitmap &&
+ sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
+ return -EFBIG; /* Bitmap is too big for this small space */
+
+ /* could make sure it isn't too big, but that isn't really
+ * needed - user-space should be careful.
+ */
+ mddev->bitmap_info.space = sectors;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_space =
+__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
+
static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
@@ -2098,6 +2261,7 @@ __ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
static struct attribute *md_bitmap_attrs[] = {
&bitmap_location.attr,
+ &bitmap_space.attr,
&bitmap_timeout.attr,
&bitmap_backlog.attr,
&bitmap_chunksize.attr,
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index b44b0aba2d47..df4aeb6ac6f0 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -111,9 +111,9 @@ typedef __u16 bitmap_counter_t;
/* use these for bitmap->flags and bitmap->sb->state bit-fields */
enum bitmap_state {
- BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */
- BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */
- BITMAP_HOSTENDIAN = 0x8000,
+ BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
+ BITMAP_WRITE_ERROR = 2, /* A write error has occurred */
+	BITMAP_HOSTENDIAN  = 15,
};
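
The enum now holds bit numbers rather than masks because the flags word is manipulated with the atomic bitops (set_bit/test_bit/test_and_set_bit), which take a bit index. A plain-C sketch of the equivalent operations, using local copies of the values purely for illustration:

	#include <stdio.h>

	enum { STALE = 1, WRITE_ERROR = 2, HOSTENDIAN = 15 };

	int main(void)
	{
		unsigned long flags = 0;

		flags |= 1UL << STALE;                     /* set_bit(STALE, &flags)  */
		int stale = !!(flags & (1UL << STALE));    /* test_bit(STALE, &flags) */

		printf("stale=%d write_error=%d\n",
		       stale, !!(flags & (1UL << WRITE_ERROR)));
		return 0;
	}
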
/* the superblock at the front of the bitmap file -- little endian */
@@ -128,8 +128,10 @@ typedef struct bitmap_super_s {
__le32 chunksize; /* 52 the bitmap chunk size in bytes */
__le32 daemon_sleep; /* 56 seconds between disk flushes */
__le32 write_behind; /* 60 number of outstanding write-behind writes */
+ __le32 sectors_reserved; /* 64 number of 512-byte sectors that are
+ * reserved for the bitmap. */
- __u8 pad[256 - 64]; /* set to zero */
+ __u8 pad[256 - 68]; /* set to zero */
} bitmap_super_t;
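
The pad change keeps the on-disk superblock at exactly 256 bytes, which bitmap_create() asserts with BUILD_BUG_ON: the new 4-byte sectors_reserved field at offset 64 pushes the fixed part to 68 bytes, so the zero pad shrinks to 256 - 68. A trivial arithmetic check, using the offsets from the comments above rather than the real struct definition:

	#include <stdio.h>

	int main(void)
	{
		unsigned fixed_end = 64 + 4;     /* sectors_reserved ends at byte 68 */
		unsigned pad = 256 - fixed_end;  /* 188 bytes of zero padding */

		printf("fixed fields: %u bytes, pad: %u bytes, total: %u\n",
		       fixed_end, pad, fixed_end + pad);
		return 0;
	}
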
/* notes:
@@ -160,35 +162,48 @@ struct bitmap_page {
*/
unsigned int hijacked:1;
/*
+ * If any counter in this page is '1' or '2' - and so could be
+ * cleared then that page is marked as 'pending'
+ */
+ unsigned int pending:1;
+ /*
* count of dirty bits on the page
*/
- unsigned int count:31;
+ unsigned int count:30;
};
/* the main bitmap structure - one per mddev */
struct bitmap {
- struct bitmap_page *bp;
- unsigned long pages; /* total number of pages in the bitmap */
- unsigned long missing_pages; /* number of pages not yet allocated */
- struct mddev *mddev; /* the md device that the bitmap is for */
+ struct bitmap_counts {
+ spinlock_t lock;
+ struct bitmap_page *bp;
+ unsigned long pages; /* total number of pages
+ * in the bitmap */
+ unsigned long missing_pages; /* number of pages
+ * not yet allocated */
+ unsigned long chunkshift; /* chunksize = 2^chunkshift
+ * (for bitops) */
+ unsigned long chunks; /* Total number of data
+ * chunks for the array */
+ } counts;
- /* bitmap chunksize -- how much data does each bit represent? */
- unsigned long chunkshift; /* chunksize = 2^(chunkshift+9) (for bitops) */
- unsigned long chunks; /* total number of data chunks for the array */
+ struct mddev *mddev; /* the md device that the bitmap is for */
__u64 events_cleared;
int need_sync;
- /* bitmap spinlock */
- spinlock_t lock;
-
- struct file *file; /* backing disk file */
- struct page *sb_page; /* cached copy of the bitmap file superblock */
- struct page **filemap; /* list of cache pages for the file */
- unsigned long *filemap_attr; /* attributes associated w/ filemap pages */
- unsigned long file_pages; /* number of pages in the file */
- int last_page_size; /* bytes in the last page */
+ struct bitmap_storage {
+ struct file *file; /* backing disk file */
+ struct page *sb_page; /* cached copy of the bitmap
+ * file superblock */
+ struct page **filemap; /* list of cache pages for
+ * the file */
+ unsigned long *filemap_attr; /* attributes associated
+ * w/ filemap pages */
+ unsigned long file_pages; /* number of pages in the file*/
+ unsigned long bytes; /* total bytes in the bitmap */
+ } storage;
unsigned long flags;
@@ -242,6 +257,9 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
void bitmap_unplug(struct bitmap *bitmap);
void bitmap_daemon_work(struct mddev *mddev);
+
+int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ int chunksize, int init);
#endif
#endif
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 754f38f8a692..638dae048b4f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
+#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
@@ -61,11 +62,11 @@ struct multipath {
struct list_head list;
struct dm_target *ti;
- spinlock_t lock;
-
const char *hw_handler_name;
char *hw_handler_params;
+ spinlock_t lock;
+
unsigned nr_priority_groups;
struct list_head priority_groups;
@@ -81,16 +82,17 @@ struct multipath {
struct priority_group *next_pg; /* Switch to this PG if set */
unsigned repeat_count; /* I/Os left before calling PS again */
- unsigned queue_io; /* Must we queue all I/O? */
- unsigned queue_if_no_path; /* Queue I/O if last path fails? */
- unsigned saved_queue_if_no_path;/* Saved state during suspension */
+ unsigned queue_io:1; /* Must we queue all I/O? */
+ unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
+ unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
+ unsigned queue_size;
struct work_struct process_queued_ios;
struct list_head queued_ios;
- unsigned queue_size;
struct work_struct trigger_event;
@@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
/*
* Loop through priority groups until we find a valid path.
* First time we skip PGs marked 'bypassed'.
- * Second time we only try the ones we skipped.
+ * Second time we only try the ones we skipped, but set
+ * pg_init_delay_retry so we do not hammer controllers.
*/
do {
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == bypassed)
continue;
- if (!__choose_path_in_pg(m, pg, nr_bytes))
+ if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+ if (!bypassed)
+ m->pg_init_delay_retry = 1;
return;
+ }
}
} while (bypassed--);
@@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work)
spin_lock_irqsave(&m->lock, flags);
- if (!m->queue_size)
- goto out;
-
if (!m->current_pgpath)
__choose_pgpath(m, 0);
@@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work)
if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
__pg_init_all_paths(m);
-out:
spin_unlock_irqrestore(&m->lock, flags);
if (!must_queue)
dispatch_queued_ios(m);
@@ -1517,11 +1519,16 @@ out:
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
- struct multipath *m = (struct multipath *) ti->private;
- struct block_device *bdev = NULL;
- fmode_t mode = 0;
+ struct multipath *m = ti->private;
+ struct block_device *bdev;
+ fmode_t mode;
unsigned long flags;
- int r = 0;
+ int r;
+
+again:
+ bdev = NULL;
+ mode = 0;
+ r = 0;
spin_lock_irqsave(&m->lock, flags);
@@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
r = scsi_verify_blk_ioctl(NULL, cmd);
+ if (r == -EAGAIN && !fatal_signal_pending(current)) {
+ queue_work(kmultipathd, &m->process_queued_ios);
+ msleep(10);
+ goto again;
+ }
+
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
@@ -1643,7 +1656,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 68965e663248..017c34d78d61 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -155,10 +155,7 @@ static void context_free(struct raid_set *rs)
for (i = 0; i < rs->md.raid_disks; i++) {
if (rs->dev[i].meta_dev)
dm_put_device(rs->ti, rs->dev[i].meta_dev);
- if (rs->dev[i].rdev.sb_page)
- put_page(rs->dev[i].rdev.sb_page);
- rs->dev[i].rdev.sb_page = NULL;
- rs->dev[i].rdev.sb_loaded = 0;
+ md_rdev_clear(&rs->dev[i].rdev);
if (rs->dev[i].data_dev)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
@@ -606,7 +603,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
DMERR("Failed to read superblock of device at position %d",
rdev->raid_disk);
- set_bit(Faulty, &rdev->flags);
+ md_error(rdev->mddev, rdev);
return -EINVAL;
}
@@ -617,16 +614,18 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
- struct md_rdev *r;
+ int i;
uint64_t failed_devices;
struct dm_raid_superblock *sb;
+ struct raid_set *rs = container_of(mddev, struct raid_set, md);
sb = page_address(rdev->sb_page);
failed_devices = le64_to_cpu(sb->failed_devices);
- rdev_for_each(r, mddev)
- if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
- failed_devices |= (1ULL << r->raid_disk);
+ for (i = 0; i < mddev->raid_disks; i++)
+ if (!rs->dev[i].data_dev ||
+ test_bit(Faulty, &(rs->dev[i].rdev.flags)))
+ failed_devices |= (1ULL << i);
memset(sb, 0, sizeof(*sb));
@@ -1252,12 +1251,13 @@ static void raid_resume(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
+ set_bit(MD_CHANGE_DEVS, &rs->md.flags);
if (!rs->bitmap_loaded) {
bitmap_load(&rs->md);
rs->bitmap_loaded = 1;
- } else
- md_wakeup_thread(rs->md.thread);
+ }
+ clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
mddev_resume(&rs->md);
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 737d38865b69..3e2907f0bc46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1082,12 +1082,89 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
return 0;
}
-static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
- dm_block_t *result)
+static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+ int r, inc;
+ struct thin_disk_superblock *disk_super;
+ struct dm_block *copy, *sblock;
+ dm_block_t held_root;
+
+ /*
+ * Copy the superblock.
+ */
+ dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
+ r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &copy, &inc);
+ if (r)
+ return r;
+
+ BUG_ON(!inc);
+
+ held_root = dm_block_location(copy);
+ disk_super = dm_block_data(copy);
+
+ if (le64_to_cpu(disk_super->held_root)) {
+ DMWARN("Pool metadata snapshot already exists: release this before taking another.");
+
+ dm_tm_dec(pmd->tm, held_root);
+ dm_tm_unlock(pmd->tm, copy);
+ pmd->need_commit = 1;
+
+ return -EBUSY;
+ }
+
+ /*
+ * Wipe the spacemap since we're not publishing this.
+ */
+ memset(&disk_super->data_space_map_root, 0,
+ sizeof(disk_super->data_space_map_root));
+ memset(&disk_super->metadata_space_map_root, 0,
+ sizeof(disk_super->metadata_space_map_root));
+
+ /*
+ * Increment the data structures that need to be preserved.
+ */
+ dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
+ dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
+ dm_tm_unlock(pmd->tm, copy);
+
+ /*
+ * Write the held root into the superblock.
+ */
+ r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r) {
+ dm_tm_dec(pmd->tm, held_root);
+ pmd->need_commit = 1;
+ return r;
+ }
+
+ disk_super = dm_block_data(sblock);
+ disk_super->held_root = cpu_to_le64(held_root);
+ dm_bm_unlock(sblock);
+
+ pmd->need_commit = 1;
+
+ return 0;
+}
+
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __reserve_metadata_snap(pmd);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
int r;
struct thin_disk_superblock *disk_super;
- struct dm_block *sblock;
+ struct dm_block *sblock, *copy;
+ dm_block_t held_root;
r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
&sb_validator, &sblock);
@@ -1095,18 +1172,65 @@ static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
return r;
disk_super = dm_block_data(sblock);
+ held_root = le64_to_cpu(disk_super->held_root);
+ disk_super->held_root = cpu_to_le64(0);
+ pmd->need_commit = 1;
+
+ dm_bm_unlock(sblock);
+
+ if (!held_root) {
+ DMWARN("No pool metadata snapshot found: nothing to release.");
+ return -EINVAL;
+ }
+
+ r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(copy);
+ dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+ dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+ dm_sm_dec_block(pmd->metadata_sm, held_root);
+
+ return dm_tm_unlock(pmd->tm, copy);
+}
+
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __release_metadata_snap(pmd);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+static int __get_metadata_snap(struct dm_pool_metadata *pmd,
+ dm_block_t *result)
+{
+ int r;
+ struct thin_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
*result = le64_to_cpu(disk_super->held_root);
return dm_bm_unlock(sblock);
}
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
- dm_block_t *result)
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+ dm_block_t *result)
{
int r;
down_read(&pmd->root_lock);
- r = __get_held_metadata_root(pmd, result);
+ r = __get_metadata_snap(pmd, result);
up_read(&pmd->root_lock);
return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index ed4725e67c96..b88918ccdaf6 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
/*
* Hold/get root for userspace transaction.
+ *
+ * The metadata snapshot is a copy of the current superblock (minus the
+ * space maps). Userland can access the data structures for READ
+ * operations only. A small performance hit is incurred by providing this
+ * copy of the metadata to userland due to extra copy-on-write operations
+ * on the metadata nodes. Release this as soon as you finish with it.
*/
-int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd);
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
- dm_block_t *result);
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+ dm_block_t *result);
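
A hedged sketch of the intended call sequence for the renamed interface: reserve a snapshot, hand its root block to userspace for read-only inspection, then release it promptly. Error handling is trimmed, the surrounding includes (dm-thin-metadata.h, printk) are assumed, and 'pmd' is assumed to be a valid pool metadata handle from the caller:

	static int example_snapshot_cycle(struct dm_pool_metadata *pmd)
	{
		dm_block_t held_root;
		int r;

		r = dm_pool_reserve_metadata_snap(pmd);
		if (r)
			return r;

		r = dm_pool_get_metadata_snap(pmd, &held_root);
		if (!r)
			pr_info("metadata snapshot held at block %llu\n",
				(unsigned long long)held_root);

		/* ... userspace reads the snapshot via the thin tools ... */

		return dm_pool_release_metadata_snap(pmd);
	}
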
/*
* Actions on a single virtual device.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index eb3d138ff55a..ce59824fb414 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -111,7 +111,7 @@ struct cell_key {
dm_block_t block;
};
-struct cell {
+struct dm_bio_prison_cell {
struct hlist_node list;
struct bio_prison *prison;
struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
return n;
}
+static struct kmem_cache *_cell_cache;
+
/*
* @nr_cells should be the number of cells you want in use _concurrently_.
* Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
return NULL;
spin_lock_init(&prison->lock);
- prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
- sizeof(struct cell));
+ prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
if (!prison->cell_pool) {
kfree(prison);
return NULL;
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
(lhs->block == rhs->block);
}
-static struct cell *__search_bucket(struct hlist_head *bucket,
- struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+ struct cell_key *key)
{
- struct cell *cell;
+ struct dm_bio_prison_cell *cell;
struct hlist_node *tmp;
hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
* Returns 1 if the cell was already held, 0 if @inmate is the new holder.
*/
static int bio_detain(struct bio_prison *prison, struct cell_key *key,
- struct bio *inmate, struct cell **ref)
+ struct bio *inmate, struct dm_bio_prison_cell **ref)
{
int r = 1;
unsigned long flags;
uint32_t hash = hash_key(prison, key);
- struct cell *cell, *cell2;
+ struct dm_bio_prison_cell *cell, *cell2;
BUG_ON(hash > prison->nr_buckets);
@@ -273,7 +274,7 @@ out:
/*
* @inmates must have been initialised prior to this call
*/
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
struct bio_prison *prison = cell->prison;
@@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
mempool_free(cell, prison->cell_pool);
}
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
{
unsigned long flags;
struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
* bio may be in the cell. This function releases the cell, and also does
* a sanity check.
*/
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
{
BUG_ON(cell->holder != bio);
BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
__cell_release(cell, NULL);
}
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
{
unsigned long flags;
struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
/*
* Sometimes we don't want the holder, just the additional bios.
*/
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
struct bio_prison *prison = cell->prison;
@@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
mempool_free(cell, prison->cell_pool);
}
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
unsigned long flags;
struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
spin_unlock_irqrestore(&prison->lock, flags);
}
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
{
struct bio_prison *prison = cell->prison;
struct bio_list bios;
@@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
* also provides the interface for creating and destroying internal
* devices.
*/
-struct new_mapping;
+struct dm_thin_new_mapping;
struct pool_features {
unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@ struct pool {
struct deferred_set shared_read_ds;
struct deferred_set all_io_ds;
- struct new_mapping *next_mapping;
+ struct dm_thin_new_mapping *next_mapping;
mempool_t *mapping_pool;
mempool_t *endio_hook_pool;
};
@@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
/*----------------------------------------------------------------*/
-struct endio_hook {
+struct dm_thin_endio_hook {
struct thin_c *tc;
struct deferred_entry *shared_read_entry;
struct deferred_entry *all_io_entry;
- struct new_mapping *overwrite_mapping;
+ struct dm_thin_new_mapping *overwrite_mapping;
};
static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
bio_list_init(master);
while ((bio = bio_list_pop(&bios))) {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
if (h->tc == tc)
bio_endio(bio, DM_ENDIO_REQUEUE);
else
@@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool)
/*
* Bio endio functions.
*/
-struct new_mapping {
+struct dm_thin_new_mapping {
struct list_head list;
unsigned quiesced:1;
@@ -746,7 +750,7 @@ struct new_mapping {
struct thin_c *tc;
dm_block_t virt_block;
dm_block_t data_block;
- struct cell *cell, *cell2;
+ struct dm_bio_prison_cell *cell, *cell2;
int err;
/*
@@ -759,7 +763,7 @@ struct new_mapping {
bio_end_io_t *saved_bi_end_io;
};
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
struct pool *pool = m->tc->pool;
@@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
unsigned long flags;
- struct new_mapping *m = context;
+ struct dm_thin_new_mapping *m = context;
struct pool *pool = m->tc->pool;
m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
static void overwrite_endio(struct bio *bio, int err)
{
unsigned long flags;
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
- struct new_mapping *m = h->overwrite_mapping;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_new_mapping *m = h->overwrite_mapping;
struct pool *pool = m->tc->pool;
m->err = err;
@@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
/*
* This sends the bios in the cell back to the deferred_bios list.
*/
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
dm_block_t data_block)
{
struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
* Same as cell_defer above, except it omits one particular detainee,
* a write bio that covers the block and has already been processed.
*/
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
struct bio_list bios;
struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
wake_worker(pool);
}
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
struct bio *bio;
@@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
mempool_free(m, tc->pool->mapping_pool);
}
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
int r;
struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
}
static void process_prepared(struct pool *pool, struct list_head *head,
- void (*fn)(struct new_mapping *))
+ void (*fn)(struct dm_thin_new_mapping *))
{
unsigned long flags;
struct list_head maps;
- struct new_mapping *m, *tmp;
+ struct dm_thin_new_mapping *m, *tmp;
INIT_LIST_HEAD(&maps);
spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
return pool->next_mapping ? 0 : -ENOMEM;
}
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
- struct new_mapping *r = pool->next_mapping;
+ struct dm_thin_new_mapping *r = pool->next_mapping;
BUG_ON(!pool->next_mapping);
@@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct dm_dev *origin, dm_block_t data_origin,
dm_block_t data_dest,
- struct cell *cell, struct bio *bio)
+ struct dm_bio_prison_cell *cell, struct bio *bio)
{
int r;
struct pool *pool = tc->pool;
- struct new_mapping *m = get_next_mapping(pool);
+ struct dm_thin_new_mapping *m = get_next_mapping(pool);
INIT_LIST_HEAD(&m->list);
m->quiesced = 0;
@@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
* bio immediately. Otherwise we use kcopyd to clone the data first.
*/
if (io_overwrites_block(pool, bio)) {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
dm_block_t data_origin, dm_block_t data_dest,
- struct cell *cell, struct bio *bio)
+ struct dm_bio_prison_cell *cell, struct bio *bio)
{
schedule_copy(tc, virt_block, tc->pool_dev,
data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
dm_block_t data_dest,
- struct cell *cell, struct bio *bio)
+ struct dm_bio_prison_cell *cell, struct bio *bio)
{
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio);
}
static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
- dm_block_t data_block, struct cell *cell,
+ dm_block_t data_block, struct dm_bio_prison_cell *cell,
struct bio *bio)
{
struct pool *pool = tc->pool;
- struct new_mapping *m = get_next_mapping(pool);
+ struct dm_thin_new_mapping *m = get_next_mapping(pool);
INIT_LIST_HEAD(&m->list);
m->quiesced = 1;
@@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
process_prepared_mapping(m);
else if (io_overwrites_block(pool, bio)) {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
remap_and_issue(tc, bio, data_block);
-
} else {
int r;
struct dm_io_region to;
@@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
*/
static void retry_on_resume(struct bio *bio)
{
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
struct thin_c *tc = h->tc;
struct pool *pool = tc->pool;
unsigned long flags;
@@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
{
struct bio *bio;
struct bio_list bios;
@@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
int r;
unsigned long flags;
struct pool *pool = tc->pool;
- struct cell *cell, *cell2;
+ struct dm_bio_prison_cell *cell, *cell2;
struct cell_key key, key2;
dm_block_t block = get_bio_block(tc, bio);
struct dm_thin_lookup_result lookup_result;
- struct new_mapping *m;
+ struct dm_thin_new_mapping *m;
build_virtual_key(tc->td, block, &key);
if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1263,7 +1268,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
struct cell_key *key,
struct dm_thin_lookup_result *lookup_result,
- struct cell *cell)
+ struct dm_bio_prison_cell *cell)
{
int r;
dm_block_t data_block;
@@ -1290,7 +1295,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
dm_block_t block,
struct dm_thin_lookup_result *lookup_result)
{
- struct cell *cell;
+ struct dm_bio_prison_cell *cell;
struct pool *pool = tc->pool;
struct cell_key key;
@@ -1305,7 +1310,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_data_dir(bio) == WRITE)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
h->shared_read_entry = ds_inc(&pool->shared_read_ds);
@@ -1315,7 +1320,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
}
static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
- struct cell *cell)
+ struct dm_bio_prison_cell *cell)
{
int r;
dm_block_t data_block;
@@ -1363,7 +1368,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
{
int r;
dm_block_t block = get_bio_block(tc, bio);
- struct cell *cell;
+ struct dm_bio_prison_cell *cell;
struct cell_key key;
struct dm_thin_lookup_result lookup_result;
@@ -1432,7 +1437,7 @@ static void process_deferred_bios(struct pool *pool)
spin_unlock_irqrestore(&pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
struct thin_c *tc = h->tc;
/*
@@ -1522,10 +1527,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+ struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
h->tc = tc;
h->shared_read_entry = NULL;
@@ -1687,6 +1692,9 @@ static void __pool_destroy(struct pool *pool)
kfree(pool);
}
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
unsigned long block_size, char **error)
@@ -1755,16 +1763,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
ds_init(&pool->all_io_ds);
pool->next_mapping = NULL;
- pool->mapping_pool =
- mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+ pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+ _new_mapping_cache);
if (!pool->mapping_pool) {
*error = "Error creating pool's mapping mempool";
err_p = ERR_PTR(-ENOMEM);
goto bad_mapping_pool;
}
- pool->endio_hook_pool =
- mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+ pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+ _endio_hook_cache);
if (!pool->endio_hook_pool) {
*error = "Error creating pool's endio_hook mempool";
err_p = ERR_PTR(-ENOMEM);
@@ -2276,6 +2284,43 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
return 0;
}
+static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+ int r;
+
+ r = check_arg_count(argc, 1);
+ if (r)
+ return r;
+
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ return r;
+ }
+
+ r = dm_pool_reserve_metadata_snap(pool->pmd);
+ if (r)
+ DMWARN("reserve_metadata_snap message failed.");
+
+ return r;
+}
+
+static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+ int r;
+
+ r = check_arg_count(argc, 1);
+ if (r)
+ return r;
+
+ r = dm_pool_release_metadata_snap(pool->pmd);
+ if (r)
+ DMWARN("release_metadata_snap message failed.");
+
+ return r;
+}
+
/*
* Messages supported:
* create_thin <dev_id>
@@ -2283,6 +2328,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
* delete <dev_id>
* trim <dev_id> <new_size_in_sectors>
* set_transaction_id <current_trans_id> <new_trans_id>
+ * reserve_metadata_snap
+ * release_metadata_snap
*/
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
@@ -2302,6 +2349,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
else if (!strcasecmp(argv[0], "set_transaction_id"))
r = process_set_transaction_id_mesg(argc, argv, pool);
+ else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
+ r = process_reserve_metadata_snap_mesg(argc, argv, pool);
+
+ else if (!strcasecmp(argv[0], "release_metadata_snap"))
+ r = process_release_metadata_snap_mesg(argc, argv, pool);
+
else
DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
@@ -2361,7 +2414,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
if (r)
return r;
- r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+ r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
if (r)
return r;
@@ -2457,7 +2510,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2613,9 +2666,9 @@ static int thin_endio(struct dm_target *ti,
union map_info *map_context)
{
unsigned long flags;
- struct endio_hook *h = map_context->ptr;
+ struct dm_thin_endio_hook *h = map_context->ptr;
struct list_head work;
- struct new_mapping *m, *tmp;
+ struct dm_thin_new_mapping *m, *tmp;
struct pool *pool = h->tc->pool;
if (h->shared_read_entry) {
@@ -2755,7 +2808,32 @@ static int __init dm_thin_init(void)
r = dm_register_target(&pool_target);
if (r)
- dm_unregister_target(&thin_target);
+ goto bad_pool_target;
+
+ r = -ENOMEM;
+
+ _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+ if (!_cell_cache)
+ goto bad_cell_cache;
+
+ _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+ if (!_new_mapping_cache)
+ goto bad_new_mapping_cache;
+
+ _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+ if (!_endio_hook_cache)
+ goto bad_endio_hook_cache;
+
+ return 0;
+
+bad_endio_hook_cache:
+ kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+ kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+ dm_unregister_target(&pool_target);
+bad_pool_target:
+ dm_unregister_target(&thin_target);
return r;
}
@@ -2764,6 +2842,10 @@ static void dm_thin_exit(void)
{
dm_unregister_target(&thin_target);
dm_unregister_target(&pool_target);
+
+ kmem_cache_destroy(_cell_cache);
+ kmem_cache_destroy(_new_mapping_cache);
+ kmem_cache_destroy(_endio_hook_cache);
}
module_init(dm_thin_init);
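
The allocator change in pool_create() above replaces the kmalloc-backed mempools with mempools carved out of the dedicated slab caches created in dm_thin_init(). A minimal out-of-tree sketch of that pattern, with illustrative names that are not part of the patch:

#include <linux/mempool.h>
#include <linux/slab.h>

struct example_obj {
	struct list_head list;
	int err;
};

static struct kmem_cache *example_cache;

static int example_init(void)
{
	/* KMEM_CACHE() derives the cache name and object size from the type. */
	example_cache = KMEM_CACHE(example_obj, 0);
	return example_cache ? 0 : -ENOMEM;
}

static mempool_t *example_pool_create(void)
{
	/* Keeps at least 16 objects preallocated, all from example_cache,
	 * so allocation can make forward progress under memory pressure. */
	return mempool_create_slab_pool(16, example_cache);
}

static void example_exit(void)
{
	kmem_cache_destroy(example_cache);
}

As in dm_thin_init() above, the caches must be torn down in reverse order of creation when a later setup step fails.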
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 01233d855eb2..a4c219e3c859 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -402,6 +402,7 @@ void mddev_resume(struct mddev *mddev)
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
@@ -452,7 +453,7 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
+ bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
@@ -607,6 +608,7 @@ void mddev_init(struct mddev *mddev)
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
+ mddev->reshape_backwards = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
@@ -802,7 +804,7 @@ static int alloc_disk_sb(struct md_rdev * rdev)
return 0;
}
-static void free_disk_sb(struct md_rdev * rdev)
+void md_rdev_clear(struct md_rdev *rdev)
{
if (rdev->sb_page) {
put_page(rdev->sb_page);
@@ -815,8 +817,10 @@ static void free_disk_sb(struct md_rdev * rdev)
put_page(rdev->bb_page);
rdev->bb_page = NULL;
}
+ kfree(rdev->badblocks.page);
+ rdev->badblocks.page = NULL;
}
-
+EXPORT_SYMBOL_GPL(md_rdev_clear);
static void super_written(struct bio *bio, int error)
{
@@ -887,6 +891,10 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
rdev->meta_bdev : rdev->bdev;
if (metadata_op)
bio->bi_sector = sector + rdev->sb_start;
+ else if (rdev->mddev->reshape_position != MaxSector &&
+ (rdev->mddev->reshape_backwards ==
+ (sector >= rdev->mddev->reshape_position)))
+ bio->bi_sector = sector + rdev->new_data_offset;
else
bio->bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
@@ -1034,12 +1042,17 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
struct super_type {
char *name;
struct module *owner;
- int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev,
+ int (*load_super)(struct md_rdev *rdev,
+ struct md_rdev *refdev,
int minor_version);
- int (*validate_super)(struct mddev *mddev, struct md_rdev *rdev);
- void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
+ int (*validate_super)(struct mddev *mddev,
+ struct md_rdev *rdev);
+ void (*sync_super)(struct mddev *mddev,
+ struct md_rdev *rdev);
unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
sector_t num_sectors);
+ int (*allow_new_offset)(struct md_rdev *rdev,
+ unsigned long long new_offset);
};
/*
@@ -1111,6 +1124,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
rdev->preferred_minor = sb->md_minor;
rdev->data_offset = 0;
+ rdev->new_data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
rdev->badblocks.shift = -1;
@@ -1184,7 +1198,11 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->dev_sectors = ((sector_t)sb->size) * 2;
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.space = 0;
+ /* bitmap can use 60 K after the 4K superblocks */
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
+ mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
+ mddev->reshape_backwards = 0;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
@@ -1192,6 +1210,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->new_level = sb->new_level;
mddev->new_layout = sb->new_layout;
mddev->new_chunk_sectors = sb->new_chunk >> 9;
+ if (mddev->delta_disks < 0)
+ mddev->reshape_backwards = 1;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
@@ -1218,9 +1238,12 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
- mddev->bitmap_info.file == NULL)
+ mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
+ mddev->bitmap_info.space =
+ mddev->bitmap_info.default_space;
+ }
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling, except
@@ -1434,6 +1457,12 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
return num_sectors;
}
+static int
+super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
+{
+ /* non-zero offset changes not possible with v0.90 */
+ return new_offset == 0;
+}
/*
* version 1 superblock
@@ -1469,6 +1498,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
struct mdp_superblock_1 *sb;
int ret;
sector_t sb_start;
+ sector_t sectors;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
int bmask;
@@ -1523,9 +1553,18 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
bdevname(rdev->bdev,b));
return -EINVAL;
}
+ if (sb->pad0 ||
+ sb->pad3[0] ||
+ memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
+ /* Some padding is non-zero, might be a new feature */
+ return -EINVAL;
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
+ rdev->new_data_offset = rdev->data_offset;
+ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
+ (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
+ rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
@@ -1536,6 +1575,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
if (minor_version
&& rdev->data_offset < sb_start + (rdev->sb_size/512))
return -EINVAL;
+ if (minor_version
+ && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
+ return -EINVAL;
if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
rdev->desc_nr = -1;
@@ -1607,16 +1649,14 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
else
ret = 0;
}
- if (minor_version)
- rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
- le64_to_cpu(sb->data_offset);
- else
- rdev->sectors = rdev->sb_start;
- if (rdev->sectors < le64_to_cpu(sb->data_size))
+ if (minor_version) {
+ sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
+ sectors -= rdev->data_offset;
+ } else
+ sectors = rdev->sb_start;
+ if (sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
rdev->sectors = le64_to_cpu(sb->data_size);
- if (le64_to_cpu(sb->size) > rdev->sectors)
- return -EINVAL;
return ret;
}
@@ -1644,17 +1684,37 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->dev_sectors = le64_to_cpu(sb->size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.space = 0;
+ /* Default location for bitmap is 1K after superblock
+ * using 3K - total of 4K
+ */
mddev->bitmap_info.default_offset = 1024 >> 9;
-
+ mddev->bitmap_info.default_space = (4096-1024) >> 9;
+ mddev->reshape_backwards = 0;
+
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
- mddev->bitmap_info.file == NULL )
+ mddev->bitmap_info.file == NULL) {
mddev->bitmap_info.offset =
(__s32)le32_to_cpu(sb->bitmap_offset);
+ /* Metadata doesn't record how much space is available.
+ * For 1.0, we assume we can use up to the superblock
+ * if before, else to 4K beyond superblock.
+ * For others, assume no change is possible.
+ */
+ if (mddev->minor_version > 0)
+ mddev->bitmap_info.space = 0;
+ else if (mddev->bitmap_info.offset > 0)
+ mddev->bitmap_info.space =
+ 8 - mddev->bitmap_info.offset;
+ else
+ mddev->bitmap_info.space =
+ -mddev->bitmap_info.offset;
+ }
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
@@ -1662,6 +1722,11 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
+ if (mddev->delta_disks < 0 ||
+ (mddev->delta_disks == 0 &&
+ (le32_to_cpu(sb->feature_map)
+ & MD_FEATURE_RESHAPE_BACKWARDS)))
+ mddev->reshape_backwards = 1;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
@@ -1735,7 +1800,6 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
- memset(sb->pad1, 0, sizeof(sb->pad1));
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
@@ -1757,6 +1821,8 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->devflags |= WriteMostly1;
else
sb->devflags &= ~WriteMostly1;
+ sb->data_offset = cpu_to_le64(rdev->data_offset);
+ sb->data_size = cpu_to_le64(rdev->sectors);
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
@@ -1781,6 +1847,16 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
+ if (mddev->delta_disks == 0 &&
+ mddev->reshape_backwards)
+ sb->feature_map
+ |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
+ if (rdev->new_data_offset != rdev->data_offset) {
+ sb->feature_map
+ |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
+ sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
+ - rdev->data_offset));
+ }
}
if (rdev->badblocks.count == 0)
@@ -1857,6 +1933,8 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sector_t max_sectors;
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
+ if (rdev->data_offset != rdev->new_data_offset)
+ return 0; /* too confusing */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
@@ -1884,6 +1962,40 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
rdev->sb_page);
md_super_wait(rdev->mddev);
return num_sectors;
+
+}
+
+static int
+super_1_allow_new_offset(struct md_rdev *rdev,
+ unsigned long long new_offset)
+{
+ /* All necessary checks on new >= old have been done */
+ struct bitmap *bitmap;
+ if (new_offset >= rdev->data_offset)
+ return 1;
+
+ /* with 1.0 metadata, there is no metadata to tread on
+ * so we can always move back */
+ if (rdev->mddev->minor_version == 0)
+ return 1;
+
+ /* otherwise we must be sure not to step on
+ * any metadata, so stay:
+ * 36K beyond start of superblock
+ * beyond end of badblocks
+ * beyond write-intent bitmap
+ */
+ if (rdev->sb_start + (32+4)*2 > new_offset)
+ return 0;
+ bitmap = rdev->mddev->bitmap;
+ if (bitmap && !rdev->mddev->bitmap_info.file &&
+ rdev->sb_start + rdev->mddev->bitmap_info.offset +
+ bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
+ return 0;
+ if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
+ return 0;
+
+ return 1;
}
static struct super_type super_types[] = {
@@ -1894,6 +2006,7 @@ static struct super_type super_types[] = {
.validate_super = super_90_validate,
.sync_super = super_90_sync,
.rdev_size_change = super_90_rdev_size_change,
+ .allow_new_offset = super_90_allow_new_offset,
},
[1] = {
.name = "md-1",
@@ -1902,6 +2015,7 @@ static struct super_type super_types[] = {
.validate_super = super_1_validate,
.sync_super = super_1_sync,
.rdev_size_change = super_1_rdev_size_change,
+ .allow_new_offset = super_1_allow_new_offset,
},
};
@@ -2105,9 +2219,7 @@ static void unbind_rdev_from_array(struct md_rdev * rdev)
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
rdev->sysfs_state = NULL;
- kfree(rdev->badblocks.page);
rdev->badblocks.count = 0;
- rdev->badblocks.page = NULL;
/* We need to delay this, otherwise we can deadlock when
* writing to 'remove' to "dev/state". We also need
* to delay it due to rcu usage.
@@ -2158,7 +2270,7 @@ static void export_rdev(struct md_rdev * rdev)
bdevname(rdev->bdev,b));
if (rdev->mddev)
MD_BUG();
- free_disk_sb(rdev);
+ md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
@@ -2809,9 +2921,8 @@ offset_show(struct md_rdev *rdev, char *page)
static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
- char *e;
- unsigned long long offset = simple_strtoull(buf, &e, 10);
- if (e==buf || (*e && *e != '\n'))
+ unsigned long long offset;
+ if (strict_strtoull(buf, 10, &offset) < 0)
return -EINVAL;
if (rdev->mddev->pers && rdev->raid_disk >= 0)
return -EBUSY;
@@ -2826,6 +2937,63 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
+static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
+{
+ return sprintf(page, "%llu\n",
+ (unsigned long long)rdev->new_data_offset);
+}
+
+static ssize_t new_offset_store(struct md_rdev *rdev,
+ const char *buf, size_t len)
+{
+ unsigned long long new_offset;
+ struct mddev *mddev = rdev->mddev;
+
+ if (strict_strtoull(buf, 10, &new_offset) < 0)
+ return -EINVAL;
+
+ if (mddev->sync_thread)
+ return -EBUSY;
+ if (new_offset == rdev->data_offset)
+ /* reset is always permitted */
+ ;
+ else if (new_offset > rdev->data_offset) {
+ /* must not push array size beyond rdev_sectors */
+ if (new_offset - rdev->data_offset
+ + mddev->dev_sectors > rdev->sectors)
+ return -E2BIG;
+ }
+ /* Metadata worries about other space details. */
+
+ /* decreasing the offset is inconsistent with a backwards
+ * reshape.
+ */
+ if (new_offset < rdev->data_offset &&
+ mddev->reshape_backwards)
+ return -EINVAL;
+ /* Increasing offset is inconsistent with forwards
+ * reshape. reshape_direction should be set to
+ * 'backwards' first.
+ */
+ if (new_offset > rdev->data_offset &&
+ !mddev->reshape_backwards)
+ return -EINVAL;
+
+ if (mddev->pers && mddev->persistent &&
+ !super_types[mddev->major_version]
+ .allow_new_offset(rdev, new_offset))
+ return -E2BIG;
+ rdev->new_data_offset = new_offset;
+ if (new_offset > rdev->data_offset)
+ mddev->reshape_backwards = 1;
+ else if (new_offset < rdev->data_offset)
+ mddev->reshape_backwards = 0;
+
+ return len;
+}
+static struct rdev_sysfs_entry rdev_new_offset =
+__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
+
static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
@@ -2870,6 +3038,8 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
if (strict_blocks_to_sectors(buf, &sectors) < 0)
return -EINVAL;
+ if (rdev->data_offset != rdev->new_data_offset)
+ return -EINVAL; /* too confusing */
if (my_mddev->pers && rdev->raid_disk >= 0) {
if (my_mddev->persistent) {
sectors = super_types[my_mddev->major_version].
@@ -3006,6 +3176,7 @@ static struct attribute *rdev_default_attrs[] = {
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
+ &rdev_new_offset.attr,
&rdev_size.attr,
&rdev_recovery_start.attr,
&rdev_bad_blocks.attr,
@@ -3080,6 +3251,7 @@ int md_rdev_init(struct md_rdev *rdev)
rdev->raid_disk = -1;
rdev->flags = 0;
rdev->data_offset = 0;
+ rdev->new_data_offset = 0;
rdev->sb_events = 0;
rdev->last_read_error.tv_sec = 0;
rdev->last_read_error.tv_nsec = 0;
@@ -3178,8 +3350,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
abort_free:
if (rdev->bdev)
unlock_rdev(rdev);
- free_disk_sb(rdev);
- kfree(rdev->badblocks.page);
+ md_rdev_clear(rdev);
kfree(rdev);
return ERR_PTR(err);
}
@@ -3419,6 +3590,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
module_put(pers->owner);
printk(KERN_WARNING "md: %s: %s would not accept array\n",
mdname(mddev), clevel);
@@ -3492,6 +3664,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
mddev->degraded = 0;
if (mddev->pers->sync_request == NULL) {
/* this is now an array without redundancy, so
@@ -3501,10 +3674,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
del_timer_sync(&mddev->safemode_timer);
}
pers->run(mddev);
- mddev_resume(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
+ mddev_resume(mddev);
sysfs_notify(&mddev->kobj, NULL, "level");
md_new_event(mddev);
return rv;
@@ -3582,9 +3753,20 @@ raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
if (mddev->pers)
rv = update_raid_disks(mddev, n);
else if (mddev->reshape_position != MaxSector) {
+ struct md_rdev *rdev;
int olddisks = mddev->raid_disks - mddev->delta_disks;
+
+ rdev_for_each(rdev, mddev) {
+ if (olddisks < n &&
+ rdev->data_offset < rdev->new_data_offset)
+ return -EINVAL;
+ if (olddisks > n &&
+ rdev->data_offset > rdev->new_data_offset)
+ return -EINVAL;
+ }
mddev->delta_disks = n - olddisks;
mddev->raid_disks = n;
+ mddev->reshape_backwards = (mddev->delta_disks < 0);
} else
mddev->raid_disks = n;
return rv ? rv : len;
@@ -4266,7 +4448,8 @@ sync_completed_show(struct mddev *mddev, char *page)
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
@@ -4428,6 +4611,7 @@ reshape_position_show(struct mddev *mddev, char *page)
static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
+ struct md_rdev *rdev;
char *e;
unsigned long long new = simple_strtoull(buf, &e, 10);
if (mddev->pers)
@@ -4436,9 +4620,12 @@ reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
return -EINVAL;
mddev->reshape_position = new;
mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
mddev->new_chunk_sectors = mddev->chunk_sectors;
+ rdev_for_each(rdev, mddev)
+ rdev->new_data_offset = rdev->data_offset;
return len;
}
@@ -4447,6 +4634,42 @@ __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
reshape_position_store);
static ssize_t
+reshape_direction_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%s\n",
+ mddev->reshape_backwards ? "backwards" : "forwards");
+}
+
+static ssize_t
+reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ int backwards = 0;
+ if (cmd_match(buf, "forwards"))
+ backwards = 0;
+ else if (cmd_match(buf, "backwards"))
+ backwards = 1;
+ else
+ return -EINVAL;
+ if (mddev->reshape_backwards == backwards)
+ return len;
+
+ /* check if we are allowed to change */
+ if (mddev->delta_disks)
+ return -EBUSY;
+
+ if (mddev->persistent &&
+ mddev->major_version == 0)
+ return -EINVAL;
+
+ mddev->reshape_backwards = backwards;
+ return len;
+}
+
+static struct md_sysfs_entry md_reshape_direction =
+__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
+ reshape_direction_store);
+
+static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
if (mddev->external_size)
@@ -4501,6 +4724,7 @@ static struct attribute *md_default_attrs[] = {
&md_safe_delay.attr,
&md_array_state.attr,
&md_reshape_position.attr,
+ &md_reshape_direction.attr,
&md_array_size.attr,
&max_corr_read_errors.attr,
NULL,
@@ -4914,7 +5138,8 @@ int md_run(struct mddev *mddev)
err = -EINVAL;
mddev->pers->stop(mddev);
}
- if (err == 0 && mddev->pers->sync_request) {
+ if (err == 0 && mddev->pers->sync_request &&
+ (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
err = bitmap_create(mddev);
if (err) {
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
@@ -5064,6 +5289,7 @@ static void md_clean(struct mddev *mddev)
mddev->events = 0;
mddev->can_decrease_events = 0;
mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
@@ -5079,6 +5305,7 @@ static void md_clean(struct mddev *mddev)
mddev->merge_check_needed = 0;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = 0;
+ mddev->bitmap_info.default_space = 0;
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
@@ -5421,7 +5648,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
goto out;
/* bitmap disabled, zero the first byte and copy out */
- if (!mddev->bitmap || !mddev->bitmap->file) {
+ if (!mddev->bitmap || !mddev->bitmap->storage.file) {
file->pathname[0] = '\0';
goto copy_out;
}
@@ -5430,7 +5657,8 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
if (!buf)
goto out;
- ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
+ ptr = d_path(&mddev->bitmap->storage.file->f_path,
+ buf, sizeof(file->pathname));
if (IS_ERR(ptr))
goto out;
@@ -5556,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
- (!test_bit(In_sync, &rdev->flags) ||
- rdev->raid_disk != info->raid_disk)) {
+ rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but events doesn't
* match, so reject it.
*/
@@ -5875,6 +6102,7 @@ static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
+ mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev->bitmap_info.offset = 0;
mddev->reshape_position = MaxSector;
@@ -5888,6 +6116,7 @@ static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout;
mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
return 0;
}
@@ -5922,11 +6151,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
*/
if (mddev->sync_thread)
return -EBUSY;
- if (mddev->bitmap)
- /* Sorry, cannot grow a bitmap yet, just remove it,
- * grow, and re-add.
- */
- return -EBUSY;
+
rdev_for_each(rdev, mddev) {
sector_t avail = rdev->sectors;
@@ -5944,6 +6169,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
int rv;
+ struct md_rdev *rdev;
/* change the number of raid disks */
if (mddev->pers->check_reshape == NULL)
return -EINVAL;
@@ -5952,11 +6178,27 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
return -EINVAL;
if (mddev->sync_thread || mddev->reshape_position != MaxSector)
return -EBUSY;
+
+ rdev_for_each(rdev, mddev) {
+ if (mddev->raid_disks < raid_disks &&
+ rdev->data_offset < rdev->new_data_offset)
+ return -EINVAL;
+ if (mddev->raid_disks > raid_disks &&
+ rdev->data_offset > rdev->new_data_offset)
+ return -EINVAL;
+ }
+
mddev->delta_disks = raid_disks - mddev->raid_disks;
+ if (mddev->delta_disks < 0)
+ mddev->reshape_backwards = 1;
+ else if (mddev->delta_disks > 0)
+ mddev->reshape_backwards = 0;
rv = mddev->pers->check_reshape(mddev);
- if (rv < 0)
+ if (rv < 0) {
mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
+ }
return rv;
}
@@ -6039,6 +6281,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
return -EINVAL;
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
+ mddev->bitmap_info.space =
+ mddev->bitmap_info.default_space;
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
if (!rv)
@@ -6050,7 +6294,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
/* remove the bitmap */
if (!mddev->bitmap)
return -ENOENT;
- if (mddev->bitmap->file)
+ if (mddev->bitmap->storage.file)
return -EINVAL;
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
@@ -6373,6 +6617,9 @@ static int md_open(struct block_device *bdev, fmode_t mode)
struct mddev *mddev = mddev_find(bdev->bd_dev);
int err;
+ if (!mddev)
+ return -ENODEV;
+
if (mddev->gendisk != bdev->bd_disk) {
/* we are racing with mddev_put which is discarding this
* bd_disk.
@@ -6503,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
- name ?: mddev->pers->name);
+ name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
@@ -6584,7 +6831,8 @@ static void status_resync(struct seq_file *seq, struct mddev * mddev)
resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
@@ -7049,6 +7297,7 @@ void md_do_sync(struct mddev *mddev)
int skipped = 0;
struct md_rdev *rdev;
char *desc;
+ struct blk_plug plug;
/* just incase thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7147,7 +7396,7 @@ void md_do_sync(struct mddev *mddev)
j = mddev->recovery_cp;
} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
- max_sectors = mddev->dev_sectors;
+ max_sectors = mddev->resync_max_sectors;
else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
@@ -7198,6 +7447,7 @@ void md_do_sync(struct mddev *mddev)
}
mddev->curr_resync_completed = j;
+ blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
@@ -7303,6 +7553,7 @@ void md_do_sync(struct mddev *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
+ blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
@@ -7598,7 +7849,7 @@ void md_check_recovery(struct mddev *mddev)
goto unlock;
if (mddev->pers->sync_request) {
- if (spares && mddev->bitmap && ! mddev->bitmap->file) {
+ if (spares) {
/* We are adding a device or devices to an array
* which has the bitmap stored on all devices.
* So make sure all bitmap pages get written
@@ -7646,6 +7897,20 @@ void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
+void md_finish_reshape(struct mddev *mddev)
+{
+ /* called by personality module when reshape completes. */
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev) {
+ if (rdev->data_offset > rdev->new_data_offset)
+ rdev->sectors += rdev->data_offset - rdev->new_data_offset;
+ else
+ rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
+ rdev->data_offset = rdev->new_data_offset;
+ }
+}
+EXPORT_SYMBOL(md_finish_reshape);
/* Bad block management.
* We can record which blocks on each device are 'bad' and so just
@@ -7894,10 +8159,15 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
}
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int acknowledged)
+ int is_new)
{
- int rv = md_set_badblocks(&rdev->badblocks,
- s + rdev->data_offset, sectors, acknowledged);
+ int rv;
+ if (is_new)
+ s += rdev->new_data_offset;
+ else
+ s += rdev->data_offset;
+ rv = md_set_badblocks(&rdev->badblocks,
+ s, sectors, 0);
if (rv) {
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -8003,11 +8273,15 @@ out:
return rv;
}
-int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors)
+int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new)
{
+ if (is_new)
+ s += rdev->new_data_offset;
+ else
+ s += rdev->data_offset;
return md_clear_badblocks(&rdev->badblocks,
- s + rdev->data_offset,
- sectors);
+ s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
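
A condensed restatement of the offset selection that the sync_page_io() and rdev_{set,clear}_badblocks() hunks above all implement: while a reshape is in flight, sectors on the side of reshape_position that has already been converted to the new layout are addressed through new_data_offset, everything else through data_offset. The helper below is hypothetical (not added by this patch) and only spells the rule out:

/* Hypothetical restatement, assuming the md.h definitions shown below. */
static sector_t rdev_pick_offset(struct md_rdev *rdev, sector_t sector)
{
	struct mddev *mddev = rdev->mddev;

	if (mddev->reshape_position != MaxSector &&
	    (mddev->reshape_backwards == (sector >= mddev->reshape_position)))
		return rdev->new_data_offset;	/* already-reshaped region */

	return rdev->data_offset;		/* not yet reshaped */
}

For a forwards reshape (reshape_backwards == 0) the already-reshaped region is everything below reshape_position; for a backwards reshape it is everything at or above it.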
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1c2063ccf48e..7b4a3c318cae 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -55,6 +55,7 @@ struct md_rdev {
int sb_loaded;
__u64 sb_events;
sector_t data_offset; /* start of data in array */
+ sector_t new_data_offset;/* only relevant while reshaping */
sector_t sb_start; /* offset of the super block (in 512byte sectors) */
int sb_size; /* bytes in the superblock */
int preferred_minor; /* autorun support */
@@ -193,8 +194,9 @@ static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
return 0;
}
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int acknowledged);
-extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors);
+ int is_new);
+extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
struct mddev {
@@ -262,6 +264,7 @@ struct mddev {
sector_t reshape_position;
int delta_disks, new_level, new_layout;
int new_chunk_sectors;
+ int reshape_backwards;
atomic_t plug_cnt; /* If device is expecting
* more bios soon.
@@ -390,10 +393,13 @@ struct mddev {
* For external metadata, offset
* from start of device.
*/
+ unsigned long space; /* space available at this offset */
loff_t default_offset; /* this is the offset to use when
* hot-adding a bitmap. It should
* eventually be settable by sysfs.
*/
+ unsigned long default_space; /* space available at
+ * default offset */
struct mutex mutex;
unsigned long chunksize;
unsigned long daemon_sleep; /* how many jiffies between updates? */
@@ -591,6 +597,7 @@ extern void md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
+extern void md_finish_reshape(struct mddev *mddev);
extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
@@ -615,6 +622,7 @@ extern int md_run(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
+extern void md_rdev_clear(struct md_rdev *rdev);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
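
The new reshape_backwards field is exposed through the array-level reshape_direction sysfs attribute, and new_data_offset through the per-device new_offset attribute. A hedged userspace sketch of driving them; the md0 and dev-sda paths and the value 3072 are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* new_offset_store() refuses to grow the offset unless the array is
	 * already marked for a backwards reshape, so set the direction first. */
	if (write_attr("/sys/block/md0/md/reshape_direction", "backwards") ||
	    write_attr("/sys/block/md0/md/dev-sda/new_offset", "3072")) {
		perror("md reshape sysfs");
		return 1;
	}
	return 0;
}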
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 9339e67fcc79..61a1833ebaf3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
}
{
- mddev->thread = md_register_thread(multipathd, mddev, NULL);
+ mddev->thread = md_register_thread(multipathd, mddev,
+ "multipath");
if (!mddev->thread) {
printk(KERN_ERR "multipath: couldn't allocate thread"
" for %s\n", mdname(mddev));
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
index 50ed53bf4aa2..fc90c11620ad 100644
--- a/drivers/md/persistent-data/dm-space-map-checker.c
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
@@ -8,6 +8,7 @@
#include <linux/device-mapper.h>
#include <linux/export.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_DM_DEBUG_SPACE_MAPS
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
ca->nr = nr_blocks;
ca->nr_free = nr_blocks;
- ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
- if (!ca->counts)
- return -ENOMEM;
+
+ if (!nr_blocks)
+ ca->counts = NULL;
+ else {
+ ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+ if (!ca->counts)
+ return -ENOMEM;
+ }
return 0;
}
+static void ca_destroy(struct count_array *ca)
+{
+ vfree(ca->counts);
+}
+
static int ca_load(struct count_array *ca, struct dm_space_map *sm)
{
int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
{
dm_block_t nr_blocks = ca->nr + extra_blocks;
- uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+ uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
if (!counts)
return -ENOMEM;
- memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
- kfree(ca->counts);
+ if (ca->counts) {
+ memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+ ca_destroy(ca);
+ }
ca->nr = nr_blocks;
ca->nr_free += extra_blocks;
ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
return 0;
}
-static void ca_destroy(struct count_array *ca)
-{
- kfree(ca->counts);
-}
-
/*----------------------------------------------------------------*/
struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
int r;
struct sm_checker *smc;
- if (!sm)
- return NULL;
+ if (IS_ERR_OR_NULL(sm))
+ return ERR_PTR(-EINVAL);
smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
int r;
struct sm_checker *smc;
- if (!sm)
- return NULL;
+ if (IS_ERR_OR_NULL(sm))
+ return ERR_PTR(-EINVAL);
smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
smc->real_sm = sm;
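
The checker's count array holds one counter per data block, so it can run to many megabytes on a large pool; the hunks above therefore move it from kzalloc() to vzalloc(), which only needs virtually contiguous pages. A minimal sketch of the same allocate/grow/free pattern, with illustrative names:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

struct counts {
	u32 *v;
	size_t nr;
};

static int counts_grow(struct counts *c, size_t extra)
{
	size_t nr = c->nr + extra;
	u32 *v = vzalloc(nr * sizeof(*v));	/* zeroed, virtually contiguous */

	if (!v)
		return -ENOMEM;
	if (c->v) {
		memcpy(v, c->v, c->nr * sizeof(*v));
		vfree(c->v);			/* vmalloc'd memory needs vfree() */
	}
	c->v = v;
	c->nr = nr;
	return 0;
}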
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index fc469ba9f627..3d0ed5332883 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
dm_block_t nr_blocks)
{
struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
- return dm_sm_checker_create_fresh(sm);
+ struct dm_space_map *smc;
+
+ if (IS_ERR_OR_NULL(sm))
+ return sm;
+
+ smc = dm_sm_checker_create_fresh(sm);
+ if (IS_ERR(smc))
+ dm_sm_destroy(sm);
+
+ return smc;
}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);
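
Both space-map files above switch the checker constructors from returning NULL on failure to returning ERR_PTR() codes, so callers can distinguish -EINVAL from -ENOMEM and unwind accordingly. A minimal sketch of that convention, with illustrative names:

#include <linux/err.h>
#include <linux/slab.h>

struct widget {
	int id;
};

static struct widget *widget_create(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return w;
}

static int widget_use(void)
{
	struct widget *w = widget_create();

	if (IS_ERR(w))
		return PTR_ERR(w);		/* recover the original errno */
	kfree(w);
	return 0;
}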
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 6f8d38747d7f..e5604b32d91f 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
void dm_tm_destroy(struct dm_transaction_manager *tm)
{
+ if (!tm->is_clone)
+ wipe_shadow_table(tm);
+
kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -249,6 +252,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
return r;
}
+EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
struct dm_block_validator *v,
@@ -259,6 +263,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
return dm_bm_read_lock(tm->bm, b, v, blk);
}
+EXPORT_SYMBOL_GPL(dm_tm_read_lock);
int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
@@ -342,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
}
*sm = dm_sm_checker_create(inner);
- if (!*sm)
+ if (IS_ERR(*sm)) {
+ r = PTR_ERR(*sm);
goto bad2;
+ }
} else {
r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -362,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
}
*sm = dm_sm_checker_create(inner);
- if (!*sm)
+ if (IS_ERR(*sm)) {
+ r = PTR_ERR(*sm);
goto bad2;
+ }
}
return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 15dd59b84e94..240ff3125040 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
int bad_sectors;
int disk = start_disk + i;
- if (disk >= conf->raid_disks)
- disk -= conf->raid_disks;
+ if (disk >= conf->raid_disks * 2)
+ disk -= conf->raid_disks * 2;
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
- int plugged;
int first_clone;
int sectors_handled;
int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
* the bad blocks. Each set of writes gets it's own r1bio
* with a set of bios attached.
*/
- plugged = mddev_check_plugged(mddev);
disks = conf->raid_disks * 2;
retry_write:
@@ -1191,6 +1189,8 @@ read_again:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
/* Mustn't call r1_bio_write_done before this next test,
* as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
-
- if (do_sync || !bitmap || !plugged)
- md_wakeup_thread(mddev->thread);
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1859,7 +1856,9 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
- test_bit(In_sync, &rdev->flags) &&
+ (test_bit(In_sync, &rdev->flags) ||
+ (!test_bit(Faulty, &rdev->flags) &&
+ rdev->recovery_offset >= sect + s)) &&
is_badblock(rdev, sect, s,
&first_bad, &bad_sectors) == 0 &&
sync_page_io(rdev, sect, s<<9,
@@ -2024,7 +2023,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
continue;
if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
test_bit(R1BIO_MadeGood, &r1_bio->state)) {
- rdev_clear_badblocks(rdev, r1_bio->sector, s);
+ rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
}
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
test_bit(R1BIO_WriteError, &r1_bio->state)) {
@@ -2044,7 +2043,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
struct md_rdev *rdev = conf->mirrors[m].rdev;
rdev_clear_badblocks(rdev,
r1_bio->sector,
- r1_bio->sectors);
+ r1_bio->sectors, 0);
rdev_dec_pending(rdev, conf->mddev);
} else if (r1_bio->bios[m] != NULL) {
/* This drive got a write error. We need to
@@ -2486,9 +2485,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
atomic_set(&r1_bio->remaining, read_targets);
- for (i = 0; i < conf->raid_disks * 2; i++) {
+ for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
+ read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
@@ -2548,6 +2548,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -EINVAL;
spin_lock_init(&conf->device_lock);
rdev_for_each(rdev, mddev) {
+ struct request_queue *q;
int disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
@@ -2560,6 +2561,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (disk->rdev)
goto abort;
disk->rdev = rdev;
+ q = bdev_get_queue(rdev->bdev);
+ if (q->merge_bvec_fn)
+ mddev->merge_check_needed = 1;
disk->head_position = 0;
}
@@ -2598,7 +2602,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
- if (disk->rdev)
+ if (disk->rdev &&
+ (disk->rdev->saved_raid_disk < 0))
conf->fullsync = 1;
} else if (conf->last_used < 0)
/*
@@ -2614,7 +2619,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
goto abort;
}
err = -ENOMEM;
- conf->thread = md_register_thread(raid1d, mddev, NULL);
+ conf->thread = md_register_thread(raid1d, mddev, "raid1");
if (!conf->thread) {
printk(KERN_ERR
"md/raid1:%s: couldn't allocate thread\n",
@@ -2750,9 +2755,16 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
* any io in the removed space completes, but it hardly seems
* worth it.
*/
- md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0));
- if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
+ sector_t newsize = raid1_size(mddev, sectors, 0);
+ if (mddev->external_size &&
+ mddev->array_sectors > newsize)
return -EINVAL;
+ if (mddev->bitmap) {
+ int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
+ if (ret)
+ return ret;
+ }
+ md_set_array_sectors(mddev, newsize);
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3f91c2e1dfe7..8da6282254c3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -68,6 +69,11 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int enough(struct r10conf *conf, int ignore);
+static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
+ int *skipped);
+static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
+static void end_reshape_write(struct bio *bio, int error);
+static void end_reshape(struct r10conf *conf);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
@@ -112,7 +118,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
if (!r10_bio)
return NULL;
- if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
nalloc = conf->copies; /* resync */
else
nalloc = 2; /* recovery */
@@ -140,9 +147,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
struct bio *rbio = r10_bio->devs[j].repl_bio;
bio = r10_bio->devs[j].bio;
for (i = 0; i < RESYNC_PAGES; i++) {
- if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
- &conf->mddev->recovery)) {
- /* we can share bv_page's during recovery */
+ if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
+ &conf->mddev->recovery)) {
+ /* we can share bv_page's during recovery
+ * and reshape */
struct bio *rbio = r10_bio->devs[0].bio;
page = rbio->bi_io_vec[i].bv_page;
get_page(page);
@@ -165,10 +173,11 @@ out_free_pages:
while (j--)
for (i = 0; i < RESYNC_PAGES ; i++)
safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
- j = -1;
+ j = 0;
out_free_bio:
- while (++j < nalloc) {
- bio_put(r10_bio->devs[j].bio);
+ for ( ; j < nalloc; j++) {
+ if (r10_bio->devs[j].bio)
+ bio_put(r10_bio->devs[j].bio);
if (r10_bio->devs[j].repl_bio)
bio_put(r10_bio->devs[j].repl_bio);
}
@@ -504,79 +513,96 @@ static void raid10_end_write_request(struct bio *bio, int error)
* sector offset to a virtual address
*/
-static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
+static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
int n,f;
sector_t sector;
sector_t chunk;
sector_t stripe;
int dev;
-
int slot = 0;
/* now calculate first sector/dev */
- chunk = r10bio->sector >> conf->chunk_shift;
- sector = r10bio->sector & conf->chunk_mask;
+ chunk = r10bio->sector >> geo->chunk_shift;
+ sector = r10bio->sector & geo->chunk_mask;
- chunk *= conf->near_copies;
+ chunk *= geo->near_copies;
stripe = chunk;
- dev = sector_div(stripe, conf->raid_disks);
- if (conf->far_offset)
- stripe *= conf->far_copies;
+ dev = sector_div(stripe, geo->raid_disks);
+ if (geo->far_offset)
+ stripe *= geo->far_copies;
- sector += stripe << conf->chunk_shift;
+ sector += stripe << geo->chunk_shift;
/* and calculate all the others */
- for (n=0; n < conf->near_copies; n++) {
+ for (n = 0; n < geo->near_copies; n++) {
int d = dev;
sector_t s = sector;
r10bio->devs[slot].addr = sector;
r10bio->devs[slot].devnum = d;
slot++;
- for (f = 1; f < conf->far_copies; f++) {
- d += conf->near_copies;
- if (d >= conf->raid_disks)
- d -= conf->raid_disks;
- s += conf->stride;
+ for (f = 1; f < geo->far_copies; f++) {
+ d += geo->near_copies;
+ if (d >= geo->raid_disks)
+ d -= geo->raid_disks;
+ s += geo->stride;
r10bio->devs[slot].devnum = d;
r10bio->devs[slot].addr = s;
slot++;
}
dev++;
- if (dev >= conf->raid_disks) {
+ if (dev >= geo->raid_disks) {
dev = 0;
- sector += (conf->chunk_mask + 1);
+ sector += (geo->chunk_mask + 1);
}
}
- BUG_ON(slot != conf->copies);
+}
+
+static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
+{
+ struct geom *geo = &conf->geo;
+
+ if (conf->reshape_progress != MaxSector &&
+ ((r10bio->sector >= conf->reshape_progress) !=
+ conf->mddev->reshape_backwards)) {
+ set_bit(R10BIO_Previous, &r10bio->state);
+ geo = &conf->prev;
+ } else
+ clear_bit(R10BIO_Previous, &r10bio->state);
+
+ __raid10_find_phys(geo, r10bio);
}
static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
sector_t offset, chunk, vchunk;
+ /* Never use conf->prev as this is only called during resync
+ * or recovery, so reshape isn't happening
+ */
+ struct geom *geo = &conf->geo;
- offset = sector & conf->chunk_mask;
- if (conf->far_offset) {
+ offset = sector & geo->chunk_mask;
+ if (geo->far_offset) {
int fc;
- chunk = sector >> conf->chunk_shift;
- fc = sector_div(chunk, conf->far_copies);
- dev -= fc * conf->near_copies;
+ chunk = sector >> geo->chunk_shift;
+ fc = sector_div(chunk, geo->far_copies);
+ dev -= fc * geo->near_copies;
if (dev < 0)
- dev += conf->raid_disks;
+ dev += geo->raid_disks;
} else {
- while (sector >= conf->stride) {
- sector -= conf->stride;
- if (dev < conf->near_copies)
- dev += conf->raid_disks - conf->near_copies;
+ while (sector >= geo->stride) {
+ sector -= geo->stride;
+ if (dev < geo->near_copies)
+ dev += geo->raid_disks - geo->near_copies;
else
- dev -= conf->near_copies;
+ dev -= geo->near_copies;
}
- chunk = sector >> conf->chunk_shift;
+ chunk = sector >> geo->chunk_shift;
}
- vchunk = chunk * conf->raid_disks + dev;
- sector_div(vchunk, conf->near_copies);
- return (vchunk << conf->chunk_shift) + offset;
+ vchunk = chunk * geo->raid_disks + dev;
+ sector_div(vchunk, geo->near_copies);
+ return (vchunk << geo->chunk_shift) + offset;
}
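The geometry split above keeps the address arithmetic unchanged and only parameterises it by a struct geom; raid10_find_phys() then just picks conf->prev or conf->geo. A minimal userspace sketch of the forward mapping __raid10_find_phys() performs, assuming a toy 4-disk near=2 layout; the struct, helper names and sample geometry are invented for illustration and are not kernel API.

#include <stdio.h>

struct toy_geo {
	int raid_disks, near_copies, far_copies, far_offset;
	int chunk_shift;
	unsigned long long chunk_mask, stride;
};

static void toy_find_phys(const struct toy_geo *g, unsigned long long lsect)
{
	unsigned long long chunk = lsect >> g->chunk_shift;
	unsigned long long sector = lsect & g->chunk_mask;
	unsigned long long stripe;
	int dev, n, f;

	chunk *= g->near_copies;
	stripe = chunk;
	dev = stripe % g->raid_disks;	/* sector_div() in the kernel */
	stripe /= g->raid_disks;
	if (g->far_offset)
		stripe *= g->far_copies;
	sector += stripe << g->chunk_shift;

	for (n = 0; n < g->near_copies; n++) {
		int d = dev;
		unsigned long long s = sector;

		printf("copy on dev %d at %llu\n", d, s);
		for (f = 1; f < g->far_copies; f++) {
			d = (d + g->near_copies) % g->raid_disks;
			s += g->stride;
			printf("copy on dev %d at %llu\n", d, s);
		}
		dev++;
		if (dev >= g->raid_disks) {
			dev = 0;
			sector += g->chunk_mask + 1;
		}
	}
}

int main(void)
{
	/* 4 disks, near=2, far=1, 64-sector (32KiB) chunks */
	struct toy_geo g = { 4, 2, 1, 0, 6, 63, 0 };

	toy_find_phys(&g, 200);
	return 0;
}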
/**
@@ -597,10 +623,17 @@ static int raid10_mergeable_bvec(struct request_queue *q,
struct r10conf *conf = mddev->private;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
- unsigned int chunk_sectors = mddev->chunk_sectors;
+ unsigned int chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9;
+ struct geom *geo = &conf->geo;
+
+ chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
+ if (conf->reshape_progress != MaxSector &&
+ ((sector >= conf->reshape_progress) !=
+ conf->mddev->reshape_backwards))
+ geo = &conf->prev;
- if (conf->near_copies < conf->raid_disks) {
+ if (geo->near_copies < geo->raid_disks) {
max = (chunk_sectors - ((sector & (chunk_sectors - 1))
+ bio_sectors)) << 9;
if (max < 0)
@@ -614,6 +647,12 @@ static int raid10_mergeable_bvec(struct request_queue *q,
if (mddev->merge_check_needed) {
struct r10bio r10_bio;
int s;
+ if (conf->reshape_progress != MaxSector) {
+ /* Cannot give any guidance during reshape */
+ if (max <= biovec->bv_len && bio_sectors == 0)
+ return biovec->bv_len;
+ return 0;
+ }
r10_bio.sector = sector;
raid10_find_phys(conf, &r10_bio);
rcu_read_lock();
@@ -681,6 +720,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
struct md_rdev *rdev, *best_rdev;
int do_balance;
int best_slot;
+ struct geom *geo = &conf->geo;
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
@@ -761,11 +801,11 @@ retry:
* sequential read speed for 'far copies' arrays. So only
* keep it for 'near' arrays, and review those later.
*/
- if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
+ if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
break;
/* for far > 1 always use the lowest address */
- if (conf->far_copies > 1)
+ if (geo->far_copies > 1)
new_distance = r10_bio->devs[slot].addr;
else
new_distance = abs(r10_bio->devs[slot].addr -
@@ -812,7 +852,10 @@ static int raid10_congested(void *data, int bits)
if (mddev_congested(mddev, bits))
return 1;
rcu_read_lock();
- for (i = 0; i < conf->raid_disks && ret == 0; i++) {
+ for (i = 0;
+ (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
+ && ret == 0;
+ i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -973,21 +1016,32 @@ static void unfreeze_array(struct r10conf *conf)
spin_unlock_irq(&conf->resync_lock);
}
+static sector_t choose_data_offset(struct r10bio *r10_bio,
+ struct md_rdev *rdev)
+{
+ if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
+ test_bit(R10BIO_Previous, &r10_bio->state))
+ return rdev->data_offset;
+ else
+ return rdev->new_data_offset;
+}
+
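A hedged sketch of the decision choose_data_offset() encodes: while a reshape is running, I/O routed through the previous geometry still lives at the old data_offset, everything else at new_data_offset. The names below are illustrative only, not the kernel helper.

unsigned long long pick_data_offset(int reshape_running, int uses_prev_geo,
				    unsigned long long data_offset,
				    unsigned long long new_data_offset)
{
	if (!reshape_running || uses_prev_geo)
		return data_offset;	/* data has not been moved yet */
	return new_data_offset;		/* data already copied to its new home */
}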
static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
- int chunk_sects = conf->chunk_mask + 1;
+ sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
+ int chunk_sects = chunk_mask + 1;
const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
- int plugged;
int sectors_handled;
int max_sectors;
+ int sectors;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
@@ -997,9 +1051,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
/* If this request crosses a chunk boundary, we need to
* split it. This will only happen for 1 PAGE (or less) requests.
*/
- if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
- > chunk_sects &&
- conf->near_copies < conf->raid_disks)) {
+ if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+ > chunk_sects
+ && (conf->geo.near_copies < conf->geo.raid_disks
+ || conf->prev.near_copies < conf->prev.raid_disks))) {
struct bio_pair *bp;
/* Sanity check -- queue functions should prevent this happening */
if (bio->bi_vcnt != 1 ||
@@ -1051,10 +1106,41 @@ static void make_request(struct mddev *mddev, struct bio * bio)
*/
wait_barrier(conf);
+ sectors = bio->bi_size >> 9;
+ while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio->bi_sector < conf->reshape_progress &&
+ bio->bi_sector + sectors > conf->reshape_progress) {
+ /* IO spans the reshape position. Need to wait for
+ * reshape to pass
+ */
+ allow_barrier(conf);
+ wait_event(conf->wait_barrier,
+ conf->reshape_progress <= bio->bi_sector ||
+ conf->reshape_progress >= bio->bi_sector + sectors);
+ wait_barrier(conf);
+ }
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio_data_dir(bio) == WRITE &&
+ (mddev->reshape_backwards
+ ? (bio->bi_sector < conf->reshape_safe &&
+ bio->bi_sector + sectors > conf->reshape_progress)
+ : (bio->bi_sector + sectors > conf->reshape_safe &&
+ bio->bi_sector < conf->reshape_progress))) {
+ /* Need to update reshape_position in metadata */
+ mddev->reshape_position = conf->reshape_progress;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+
+ conf->reshape_safe = mddev->reshape_position;
+ }
+
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
- r10_bio->sectors = bio->bi_size >> 9;
+ r10_bio->sectors = sectors;
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_sector;
@@ -1093,7 +1179,7 @@ read_again:
r10_bio->devs[slot].rdev = rdev;
read_bio->bi_sector = r10_bio->devs[slot].addr +
- rdev->data_offset;
+ choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
read_bio->bi_rw = READ | do_sync;
@@ -1152,7 +1238,6 @@ read_again:
* of r10_bios is recorded in bio->bi_phys_segments just as with
* the read case.
*/
- plugged = mddev_check_plugged(mddev);
r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);
@@ -1297,7 +1382,8 @@ retry_write:
r10_bio->devs[i].bio = mbio;
mbio->bi_sector = (r10_bio->devs[i].addr+
- conf->mirrors[d].rdev->data_offset);
+ choose_data_offset(r10_bio,
+ conf->mirrors[d].rdev));
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
mbio->bi_rw = WRITE | do_sync | do_fua;
@@ -1308,6 +1394,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
if (!r10_bio->devs[i].repl_bio)
continue;
@@ -1321,8 +1409,10 @@ retry_write:
* so it cannot disappear, so the replacement cannot
* become NULL here
*/
- mbio->bi_sector = (r10_bio->devs[i].addr+
- conf->mirrors[d].replacement->data_offset);
+ mbio->bi_sector = (r10_bio->devs[i].addr +
+ choose_data_offset(
+ r10_bio,
+ conf->mirrors[d].replacement));
mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
mbio->bi_end_io = raid10_end_write_request;
mbio->bi_rw = WRITE | do_sync | do_fua;
@@ -1333,6 +1423,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1358,9 +1450,6 @@ retry_write:
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
-
- if (do_sync || !mddev->bitmap || !plugged)
- md_wakeup_thread(mddev->thread);
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1368,19 +1457,19 @@ static void status(struct seq_file *seq, struct mddev *mddev)
struct r10conf *conf = mddev->private;
int i;
- if (conf->near_copies < conf->raid_disks)
+ if (conf->geo.near_copies < conf->geo.raid_disks)
seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
- if (conf->near_copies > 1)
- seq_printf(seq, " %d near-copies", conf->near_copies);
- if (conf->far_copies > 1) {
- if (conf->far_offset)
- seq_printf(seq, " %d offset-copies", conf->far_copies);
+ if (conf->geo.near_copies > 1)
+ seq_printf(seq, " %d near-copies", conf->geo.near_copies);
+ if (conf->geo.far_copies > 1) {
+ if (conf->geo.far_offset)
+ seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
else
- seq_printf(seq, " %d far-copies", conf->far_copies);
+ seq_printf(seq, " %d far-copies", conf->geo.far_copies);
}
- seq_printf(seq, " [%d/%d] [", conf->raid_disks,
- conf->raid_disks - mddev->degraded);
- for (i = 0; i < conf->raid_disks; i++)
+ seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
+ conf->geo.raid_disks - mddev->degraded);
+ for (i = 0; i < conf->geo.raid_disks; i++)
seq_printf(seq, "%s",
conf->mirrors[i].rdev &&
test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
@@ -1392,7 +1481,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
* Don't consider the device numbered 'ignore'
* as we might be about to remove it.
*/
-static int enough(struct r10conf *conf, int ignore)
+static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
{
int first = 0;
@@ -1403,7 +1492,7 @@ static int enough(struct r10conf *conf, int ignore)
if (conf->mirrors[first].rdev &&
first != ignore)
cnt++;
- first = (first+1) % conf->raid_disks;
+ first = (first+1) % geo->raid_disks;
}
if (cnt == 0)
return 0;
@@ -1411,6 +1500,12 @@ static int enough(struct r10conf *conf, int ignore)
return 1;
}
+static int enough(struct r10conf *conf, int ignore)
+{
+ return _enough(conf, &conf->geo, ignore) &&
+ _enough(conf, &conf->prev, ignore);
+}
+
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
@@ -1445,7 +1540,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
"md/raid10:%s: Disk failure on %s, disabling device.\n"
"md/raid10:%s: Operation continuing on %d devices.\n",
mdname(mddev), bdevname(rdev->bdev, b),
- mdname(mddev), conf->raid_disks - mddev->degraded);
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}
static void print_conf(struct r10conf *conf)
@@ -1458,10 +1553,10 @@ static void print_conf(struct r10conf *conf)
printk(KERN_DEBUG "(!conf)\n");
return;
}
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
- conf->raid_disks);
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+ conf->geo.raid_disks);
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->geo.raid_disks; i++) {
char b[BDEVNAME_SIZE];
tmp = conf->mirrors + i;
if (tmp->rdev)
@@ -1493,7 +1588,7 @@ static int raid10_spare_active(struct mddev *mddev)
* Find all non-in_sync disks within the RAID10 configuration
* and mark them in_sync
*/
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->geo.raid_disks; i++) {
tmp = conf->mirrors + i;
if (tmp->replacement
&& tmp->replacement->recovery_offset == MaxSector
@@ -1535,7 +1630,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int err = -EEXIST;
int mirror;
int first = 0;
- int last = conf->raid_disks - 1;
+ int last = conf->geo.raid_disks - 1;
struct request_queue *q = bdev_get_queue(rdev->bdev);
if (mddev->recovery_cp < MaxSector)
@@ -1543,7 +1638,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* very different from resync
*/
return -EBUSY;
- if (rdev->saved_raid_disk < 0 && !enough(conf, -1))
+ if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1))
return -EINVAL;
if (rdev->raid_disk >= 0)
@@ -1635,6 +1730,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (!test_bit(Faulty, &rdev->flags) &&
mddev->recovery_disabled != p->recovery_disabled &&
(!p->replacement || p->replacement == rdev) &&
+ number < conf->geo.raid_disks &&
enough(conf, -1)) {
err = -EBUSY;
goto abort;
@@ -1676,7 +1772,11 @@ static void end_sync_read(struct bio *bio, int error)
struct r10conf *conf = r10_bio->mddev->private;
int d;
- d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
+ if (bio == r10_bio->master_bio) {
+ /* this is a reshape read */
+ d = r10_bio->read_slot; /* really the read dev */
+ } else
+ d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
@@ -2209,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s<<9, conf->tmppage, WRITE)
+ s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
@@ -2218,7 +2318,9 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
- sect + rdev->data_offset),
+ sect +
+ choose_data_offset(r10_bio,
+ rdev)),
bdevname(rdev->bdev, b));
printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
@@ -2246,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s<<9, conf->tmppage,
+ s, conf->tmppage,
READ)) {
case 0:
/* Well, this device is dead */
@@ -2256,7 +2358,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
- sect + rdev->data_offset),
+ sect +
+ choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
printk(KERN_NOTICE "md/raid10:%s: %s: failing "
"drive\n",
@@ -2269,7 +2372,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
" (%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(
- sect + rdev->data_offset),
+ sect +
+ choose_data_offset(r10_bio, rdev)),
bdevname(rdev->bdev, b));
atomic_add(s, &rdev->corrected_errors);
}
@@ -2343,7 +2447,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
md_trim_bio(wbio, sector - bio->bi_sector, sectors);
wbio->bi_sector = (r10_bio->devs[i].addr+
- rdev->data_offset+
+ choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
if (submit_bio_wait(WRITE, wbio) == 0)
@@ -2407,7 +2511,7 @@ read_more:
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
- "md/raid10:%s: %s: redirecting"
+ "md/raid10:%s: %s: redirecting "
"sector %llu to another mirror\n",
mdname(mddev),
bdevname(rdev->bdev, b),
@@ -2420,7 +2524,7 @@ read_more:
r10_bio->devs[slot].bio = bio;
r10_bio->devs[slot].rdev = rdev;
bio->bi_sector = r10_bio->devs[slot].addr
- + rdev->data_offset;
+ + choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio;
@@ -2480,7 +2584,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
- r10_bio->sectors);
+ r10_bio->sectors, 0);
} else {
if (!rdev_set_badblocks(
rdev,
@@ -2496,7 +2600,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
- r10_bio->sectors);
+ r10_bio->sectors, 0);
} else {
if (!rdev_set_badblocks(
rdev,
@@ -2515,7 +2619,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
- r10_bio->sectors);
+ r10_bio->sectors, 0);
rdev_dec_pending(rdev, conf->mddev);
} else if (bio != NULL &&
!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
@@ -2532,7 +2636,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
rdev_clear_badblocks(
rdev,
r10_bio->devs[m].addr,
- r10_bio->sectors);
+ r10_bio->sectors, 0);
rdev_dec_pending(rdev, conf->mddev);
}
}
@@ -2556,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
blk_start_plug(&plug);
for (;;) {
- flush_pending_writes(conf);
+ if (atomic_read(&mddev->plug_cnt) == 0)
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -2573,6 +2678,8 @@ static void raid10d(struct mddev *mddev)
if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
test_bit(R10BIO_WriteError, &r10_bio->state))
handle_write_completed(conf, r10_bio);
+ else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
+ reshape_request_write(mddev, r10_bio);
else if (test_bit(R10BIO_IsSync, &r10_bio->state))
sync_request_write(mddev, r10_bio);
else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
@@ -2603,7 +2710,7 @@ static int init_resync(struct r10conf *conf)
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
BUG_ON(conf->r10buf_pool);
conf->have_replacement = 0;
- for (i = 0; i < conf->raid_disks; i++)
+ for (i = 0; i < conf->geo.raid_disks; i++)
if (conf->mirrors[i].replacement)
conf->have_replacement = 1;
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
@@ -2657,6 +2764,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sync_blocks;
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
+ sector_t chunk_mask = conf->geo.chunk_mask;
if (!conf->r10buf_pool)
if (init_resync(conf))
@@ -2664,7 +2772,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
skipped:
max_sector = mddev->dev_sectors;
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sector = mddev->resync_max_sectors;
if (sector_nr >= max_sector) {
/* If we aborted, we need to abort the
@@ -2676,11 +2785,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
* we need to convert that to several
* virtual addresses.
*/
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
+ end_reshape(conf);
+ return 0;
+ }
+
if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
&sync_blocks, 1);
- else for (i=0; i<conf->raid_disks; i++) {
+ else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i);
bitmap_end_sync(mddev->bitmap, sect,
@@ -2694,7 +2808,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* Completed a full sync so the replacements
* are now fully recovered.
*/
- for (i = 0; i < conf->raid_disks; i++)
+ for (i = 0; i < conf->geo.raid_disks; i++)
if (conf->mirrors[i].replacement)
conf->mirrors[i].replacement
->recovery_offset
@@ -2707,7 +2821,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
*skipped = 1;
return sectors_skipped;
}
- if (chunks_skipped >= conf->raid_disks) {
+
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ return reshape_request(mddev, sector_nr, skipped);
+
+ if (chunks_skipped >= conf->geo.raid_disks) {
/* if there has been nothing to do on any drive,
* then there is nothing to do at all..
*/
@@ -2721,9 +2839,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* make sure whole request will fit in a chunk - if chunks
* are meaningful
*/
- if (conf->near_copies < conf->raid_disks &&
- max_sector > (sector_nr | conf->chunk_mask))
- max_sector = (sector_nr | conf->chunk_mask) + 1;
+ if (conf->geo.near_copies < conf->geo.raid_disks &&
+ max_sector > (sector_nr | chunk_mask))
+ max_sector = (sector_nr | chunk_mask) + 1;
/*
* If there is non-resync activity waiting for us then
* put in a delay to throttle resync.
@@ -2752,7 +2870,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
int j;
r10_bio = NULL;
- for (i=0 ; i<conf->raid_disks; i++) {
+ for (i = 0 ; i < conf->geo.raid_disks; i++) {
int still_degraded;
struct r10bio *rb2;
sector_t sect;
@@ -2772,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
+ if (sect >= mddev->resync_max_sectors) {
+ /* last stripe is not complete - don't
+ * try to recover this sector.
+ */
+ continue;
+ }
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap
@@ -2806,7 +2930,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to check if the array will still be
* degraded
*/
- for (j=0; j<conf->raid_disks; j++)
+ for (j = 0; j < conf->geo.raid_disks; j++)
if (conf->mirrors[j].rdev == NULL ||
test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
still_degraded = 1;
@@ -2984,9 +3108,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sector = sector_nr;
set_bit(R10BIO_IsSync, &r10_bio->state);
raid10_find_phys(conf, r10_bio);
- r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
+ r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
- for (i=0; i<conf->copies; i++) {
+ for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
sector_t first_bad, sector;
int bad_sectors;
@@ -3152,16 +3276,17 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
struct r10conf *conf = mddev->private;
if (!raid_disks)
- raid_disks = conf->raid_disks;
+ raid_disks = min(conf->geo.raid_disks,
+ conf->prev.raid_disks);
if (!sectors)
sectors = conf->dev_sectors;
- size = sectors >> conf->chunk_shift;
- sector_div(size, conf->far_copies);
+ size = sectors >> conf->geo.chunk_shift;
+ sector_div(size, conf->geo.far_copies);
size = size * raid_disks;
- sector_div(size, conf->near_copies);
+ sector_div(size, conf->geo.near_copies);
- return size << conf->chunk_shift;
+ return size << conf->geo.chunk_shift;
}
static void calc_sectors(struct r10conf *conf, sector_t size)
@@ -3171,10 +3296,10 @@ static void calc_sectors(struct r10conf *conf, sector_t size)
* conf->stride
*/
- size = size >> conf->chunk_shift;
- sector_div(size, conf->far_copies);
- size = size * conf->raid_disks;
- sector_div(size, conf->near_copies);
+ size = size >> conf->geo.chunk_shift;
+ sector_div(size, conf->geo.far_copies);
+ size = size * conf->geo.raid_disks;
+ sector_div(size, conf->geo.near_copies);
/* 'size' is now the number of chunks in the array */
/* calculate "used chunks per device" */
size = size * conf->copies;
@@ -3182,38 +3307,76 @@ static void calc_sectors(struct r10conf *conf, sector_t size)
/* We need to round up when dividing by raid_disks to
* get the stride size.
*/
- size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+ size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
- conf->dev_sectors = size << conf->chunk_shift;
+ conf->dev_sectors = size << conf->geo.chunk_shift;
- if (conf->far_offset)
- conf->stride = 1 << conf->chunk_shift;
+ if (conf->geo.far_offset)
+ conf->geo.stride = 1 << conf->geo.chunk_shift;
else {
- sector_div(size, conf->far_copies);
- conf->stride = size << conf->chunk_shift;
+ sector_div(size, conf->geo.far_copies);
+ conf->geo.stride = size << conf->geo.chunk_shift;
}
}
+enum geo_type {geo_new, geo_old, geo_start};
+static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
+{
+ int nc, fc, fo;
+ int layout, chunk, disks;
+ switch (new) {
+ case geo_old:
+ layout = mddev->layout;
+ chunk = mddev->chunk_sectors;
+ disks = mddev->raid_disks - mddev->delta_disks;
+ break;
+ case geo_new:
+ layout = mddev->new_layout;
+ chunk = mddev->new_chunk_sectors;
+ disks = mddev->raid_disks;
+ break;
+ default: /* avoid 'may be unused' warnings */
+ case geo_start: /* new when starting reshape - raid_disks not
+ * updated yet. */
+ layout = mddev->new_layout;
+ chunk = mddev->new_chunk_sectors;
+ disks = mddev->raid_disks + mddev->delta_disks;
+ break;
+ }
+ if (layout >> 17)
+ return -1;
+ if (chunk < (PAGE_SIZE >> 9) ||
+ !is_power_of_2(chunk))
+ return -2;
+ nc = layout & 255;
+ fc = (layout >> 8) & 255;
+ fo = layout & (1<<16);
+ geo->raid_disks = disks;
+ geo->near_copies = nc;
+ geo->far_copies = fc;
+ geo->far_offset = fo;
+ geo->chunk_mask = chunk - 1;
+ geo->chunk_shift = ffz(~chunk);
+ return nc*fc;
+}
+
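The layout word decoded by setup_geo() keeps the near-copy count in bits 0-7, the far-copy count in bits 8-15 and the far-offset flag in bit 16; the returned value is the total number of copies. A standalone sketch of that decode, using an example layout word chosen for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int layout = 0x102;		/* near=2, far=1, no offset */
	int near_copies = layout & 255;		/* bits 0-7  */
	int far_copies = (layout >> 8) & 255;	/* bits 8-15 */
	int far_offset = !!(layout & (1 << 16));	/* bit 16 */

	printf("near=%d far=%d offset=%d copies=%d\n",
	       near_copies, far_copies, far_offset,
	       near_copies * far_copies);
	return 0;
}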
static struct r10conf *setup_conf(struct mddev *mddev)
{
struct r10conf *conf = NULL;
- int nc, fc, fo;
int err = -EINVAL;
+ struct geom geo;
+ int copies;
+
+ copies = setup_geo(&geo, mddev, geo_new);
- if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
- !is_power_of_2(mddev->new_chunk_sectors)) {
+ if (copies == -2) {
printk(KERN_ERR "md/raid10:%s: chunk size must be "
"at least PAGE_SIZE(%ld) and be a power of 2.\n",
mdname(mddev), PAGE_SIZE);
goto out;
}
- nc = mddev->new_layout & 255;
- fc = (mddev->new_layout >> 8) & 255;
- fo = mddev->new_layout & (1<<16);
-
- if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
- (mddev->new_layout >> 17)) {
+ if (copies < 2 || copies > mddev->raid_disks) {
printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
mdname(mddev), mddev->new_layout);
goto out;
@@ -3224,7 +3387,9 @@ static struct r10conf *setup_conf(struct mddev *mddev)
if (!conf)
goto out;
- conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
+ /* FIXME calc properly */
+ conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks +
+ max(0,mddev->delta_disks)),
GFP_KERNEL);
if (!conf->mirrors)
goto out;
@@ -3233,29 +3398,36 @@ static struct r10conf *setup_conf(struct mddev *mddev)
if (!conf->tmppage)
goto out;
-
- conf->raid_disks = mddev->raid_disks;
- conf->near_copies = nc;
- conf->far_copies = fc;
- conf->copies = nc*fc;
- conf->far_offset = fo;
- conf->chunk_mask = mddev->new_chunk_sectors - 1;
- conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
-
+ conf->geo = geo;
+ conf->copies = copies;
conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
r10bio_pool_free, conf);
if (!conf->r10bio_pool)
goto out;
calc_sectors(conf, mddev->dev_sectors);
-
+ if (mddev->reshape_position == MaxSector) {
+ conf->prev = conf->geo;
+ conf->reshape_progress = MaxSector;
+ } else {
+ if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
+ err = -EINVAL;
+ goto out;
+ }
+ conf->reshape_progress = mddev->reshape_position;
+ if (conf->prev.far_offset)
+ conf->prev.stride = 1 << conf->prev.chunk_shift;
+ else
+ /* far_copies must be 1 */
+ conf->prev.stride = conf->dev_sectors;
+ }
spin_lock_init(&conf->device_lock);
INIT_LIST_HEAD(&conf->retry_list);
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
- conf->thread = md_register_thread(raid10d, mddev, NULL);
+ conf->thread = md_register_thread(raid10d, mddev, "raid10");
if (!conf->thread)
goto out;
@@ -3263,8 +3435,9 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return conf;
out:
- printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
- mdname(mddev));
+ if (err == -ENOMEM)
+ printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
+ mdname(mddev));
if (conf) {
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
@@ -3282,12 +3455,8 @@ static int run(struct mddev *mddev)
struct mirror_info *disk;
struct md_rdev *rdev;
sector_t size;
-
- /*
- * copy the already verified devices into our private RAID10
- * bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
- */
+ sector_t min_offset_diff = 0;
+ int first = 1;
if (mddev->private == NULL) {
conf = setup_conf(mddev);
@@ -3304,17 +3473,21 @@ static int run(struct mddev *mddev)
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
- if (conf->raid_disks % conf->near_copies)
- blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
+ if (conf->geo.raid_disks % conf->geo.near_copies)
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
else
blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->raid_disks / conf->near_copies));
+ (conf->geo.raid_disks / conf->geo.near_copies));
rdev_for_each(rdev, mddev) {
+ long long diff;
+ struct request_queue *q;
disk_idx = rdev->raid_disk;
- if (disk_idx >= conf->raid_disks
- || disk_idx < 0)
+ if (disk_idx < 0)
+ continue;
+ if (disk_idx >= conf->geo.raid_disks &&
+ disk_idx >= conf->prev.raid_disks)
continue;
disk = conf->mirrors + disk_idx;
@@ -3327,12 +3500,23 @@ static int run(struct mddev *mddev)
goto out_free_conf;
disk->rdev = rdev;
}
+ q = bdev_get_queue(rdev->bdev);
+ if (q->merge_bvec_fn)
+ mddev->merge_check_needed = 1;
+ diff = (rdev->new_data_offset - rdev->data_offset);
+ if (!mddev->reshape_backwards)
+ diff = -diff;
+ if (diff < 0)
+ diff = 0;
+ if (first || diff < min_offset_diff)
+ min_offset_diff = diff;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
disk->head_position = 0;
}
+
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
@@ -3340,8 +3524,21 @@ static int run(struct mddev *mddev)
goto out_free_conf;
}
+ if (conf->reshape_progress != MaxSector) {
+ /* must ensure that shape change is supported */
+ if (conf->geo.far_copies != 1 &&
+ conf->geo.far_offset == 0)
+ goto out_free_conf;
+ if (conf->prev.far_copies != 1 &&
+ conf->geo.far_offset == 0)
+ goto out_free_conf;
+ }
+
mddev->degraded = 0;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0;
+ i < conf->geo.raid_disks
+ || i < conf->prev.raid_disks;
+ i++) {
disk = conf->mirrors + i;
@@ -3368,8 +3565,8 @@ static int run(struct mddev *mddev)
mdname(mddev));
printk(KERN_INFO
"md/raid10:%s: active with %d out of %d devices\n",
- mdname(mddev), conf->raid_disks - mddev->degraded,
- conf->raid_disks);
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded,
+ conf->geo.raid_disks);
/*
* Ok, everything is just fine now
*/
@@ -3386,11 +3583,11 @@ static int run(struct mddev *mddev)
* maybe...
*/
{
- int stripe = conf->raid_disks *
+ int stripe = conf->geo.raid_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- stripe /= conf->near_copies;
- if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
- mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+ stripe /= conf->geo.near_copies;
+ if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
}
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
@@ -3398,6 +3595,30 @@ static int run(struct mddev *mddev)
if (md_integrity_register(mddev))
goto out_free_conf;
+ if (conf->reshape_progress != MaxSector) {
+ unsigned long before_length, after_length;
+
+ before_length = ((1 << conf->prev.chunk_shift) *
+ conf->prev.far_copies);
+ after_length = ((1 << conf->geo.chunk_shift) *
+ conf->geo.far_copies);
+
+ if (max(before_length, after_length) > min_offset_diff) {
+ /* This cannot work */
+ printk("md/raid10: offset difference not enough to continue reshape\n");
+ goto out_free_conf;
+ }
+ conf->offset_diff = min_offset_diff;
+
+ conf->reshape_safe = conf->reshape_progress;
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
+ "reshape");
+ }
+
return 0;
out_free_conf:
@@ -3460,14 +3681,23 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
struct r10conf *conf = mddev->private;
sector_t oldsize, size;
- if (conf->far_copies > 1 && !conf->far_offset)
+ if (mddev->reshape_position != MaxSector)
+ return -EBUSY;
+
+ if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
return -EINVAL;
oldsize = raid10_size(mddev, 0, 0);
size = raid10_size(mddev, sectors, 0);
- md_set_array_sectors(mddev, size);
- if (mddev->array_sectors > size)
+ if (mddev->external_size &&
+ mddev->array_sectors > size)
return -EINVAL;
+ if (mddev->bitmap) {
+ int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
+ if (ret)
+ return ret;
+ }
+ md_set_array_sectors(mddev, size);
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
@@ -3534,6 +3764,758 @@ static void *raid10_takeover(struct mddev *mddev)
return ERR_PTR(-EINVAL);
}
+static int raid10_check_reshape(struct mddev *mddev)
+{
+ /* Called when there is a request to change
+ * - layout (to ->new_layout)
+ * - chunk size (to ->new_chunk_sectors)
+ * - raid_disks (by delta_disks)
+ * or when trying to restart a reshape that was ongoing.
+ *
+ * We need to validate the request and possibly allocate
+ * space if that might be an issue later.
+ *
+ * Currently we reject any reshape of a 'far' mode array,
+ * allow chunk size to change if new is generally acceptable,
+ * allow raid_disks to increase, and allow
+ * a switch between 'near' mode and 'offset' mode.
+ */
+ struct r10conf *conf = mddev->private;
+ struct geom geo;
+
+ if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
+ return -EINVAL;
+
+ if (setup_geo(&geo, mddev, geo_start) != conf->copies)
+ /* mustn't change number of copies */
+ return -EINVAL;
+ if (geo.far_copies > 1 && !geo.far_offset)
+ /* Cannot switch to 'far' mode */
+ return -EINVAL;
+
+ if (mddev->array_sectors & geo.chunk_mask)
+ /* not factor of array size */
+ return -EINVAL;
+
+ if (!enough(conf, -1))
+ return -EINVAL;
+
+ kfree(conf->mirrors_new);
+ conf->mirrors_new = NULL;
+ if (mddev->delta_disks > 0) {
+ /* allocate new 'mirrors' list */
+ conf->mirrors_new = kzalloc(
+ sizeof(struct mirror_info)
+ *(mddev->raid_disks +
+ mddev->delta_disks),
+ GFP_KERNEL);
+ if (!conf->mirrors_new)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
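A few of the rules raid10_check_reshape() enforces, restated as a standalone predicate. This is only a sketch of the checks described in the comment above; the parameter names are invented for the example.

int toy_reshape_allowed(int old_copies, int new_copies,
			int new_far_copies, int new_far_offset,
			unsigned long long array_sectors,
			unsigned long long new_chunk_mask)
{
	if (new_copies != old_copies)
		return 0;	/* must not change the number of copies */
	if (new_far_copies > 1 && !new_far_offset)
		return 0;	/* cannot switch to plain 'far' mode */
	if (array_sectors & new_chunk_mask)
		return 0;	/* new chunk size must divide the array size */
	return 1;
}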
+/*
+ * Need to check if array has failed when deciding whether to:
+ * - start an array
+ * - remove non-faulty devices
+ * - add a spare
+ * - allow a reshape
+ * This determination is simple when no reshape is happening.
+ * However if there is a reshape, we need to carefully check
+ * both the before and after sections.
+ * This is because some failed devices may only affect one
+ * of the two sections, and some non-in_sync devices may
+ * be insync in the section most affected by failed devices.
+ */
+static int calc_degraded(struct r10conf *conf)
+{
+ int degraded, degraded2;
+ int i;
+
+ rcu_read_lock();
+ degraded = 0;
+ /* 'prev' section first */
+ for (i = 0; i < conf->prev.raid_disks; i++) {
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ degraded++;
+ else if (!test_bit(In_sync, &rdev->flags))
+ /* When we can reduce the number of devices in
+ * an array, this might not contribute to
+ * 'degraded'. It does now.
+ */
+ degraded++;
+ }
+ rcu_read_unlock();
+ if (conf->geo.raid_disks == conf->prev.raid_disks)
+ return degraded;
+ rcu_read_lock();
+ degraded2 = 0;
+ for (i = 0; i < conf->geo.raid_disks; i++) {
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ degraded2++;
+ else if (!test_bit(In_sync, &rdev->flags)) {
+ /* If reshape is increasing the number of devices,
+ * this section has already been recovered, so
+ * it doesn't contribute to degraded.
+ * else it does.
+ */
+ if (conf->geo.raid_disks <= conf->prev.raid_disks)
+ degraded2++;
+ }
+ }
+ rcu_read_unlock();
+ if (degraded2 > degraded)
+ return degraded2;
+ return degraded;
+}
+
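The idea in calc_degraded() reduced to a sketch: count missing members against both the old and the new disk counts and report the worse of the two. Illustrative names only, not the kernel helper.

int toy_calc_degraded(const int *member_ok, int prev_disks, int new_disks)
{
	int i, d_prev = 0, d_new = 0;

	for (i = 0; i < prev_disks; i++)
		if (!member_ok[i])
			d_prev++;
	for (i = 0; i < new_disks; i++)
		if (!member_ok[i])
			d_new++;
	return d_prev > d_new ? d_prev : d_new;
}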
+static int raid10_start_reshape(struct mddev *mddev)
+{
+ /* A 'reshape' has been requested. This commits
+ * the various 'new' fields and sets MD_RECOVERY_RESHAPE
+ * This also checks if there are enough spares and adds them
+ * to the array.
+ * We currently require enough spares to make the final
+ * array non-degraded. We also require that the difference
+ * between old and new data_offset - on each device - is
+ * enough that we never risk over-writing.
+ */
+
+ unsigned long before_length, after_length;
+ sector_t min_offset_diff = 0;
+ int first = 1;
+ struct geom new;
+ struct r10conf *conf = mddev->private;
+ struct md_rdev *rdev;
+ int spares = 0;
+ int ret;
+
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ return -EBUSY;
+
+ if (setup_geo(&new, mddev, geo_start) != conf->copies)
+ return -EINVAL;
+
+ before_length = ((1 << conf->prev.chunk_shift) *
+ conf->prev.far_copies);
+ after_length = ((1 << conf->geo.chunk_shift) *
+ conf->geo.far_copies);
+
+ rdev_for_each(rdev, mddev) {
+ if (!test_bit(In_sync, &rdev->flags)
+ && !test_bit(Faulty, &rdev->flags))
+ spares++;
+ if (rdev->raid_disk >= 0) {
+ long long diff = (rdev->new_data_offset
+ - rdev->data_offset);
+ if (!mddev->reshape_backwards)
+ diff = -diff;
+ if (diff < 0)
+ diff = 0;
+ if (first || diff < min_offset_diff)
+ min_offset_diff = diff;
+ }
+ }
+
+ if (max(before_length, after_length) > min_offset_diff)
+ return -EINVAL;
+
+ if (spares < mddev->delta_disks)
+ return -EINVAL;
+
+ conf->offset_diff = min_offset_diff;
+ spin_lock_irq(&conf->device_lock);
+ if (conf->mirrors_new) {
+ memcpy(conf->mirrors_new, conf->mirrors,
+ sizeof(struct mirror_info)*conf->prev.raid_disks);
+ smp_mb();
+ kfree(conf->mirrors_old); /* FIXME and elsewhere */
+ conf->mirrors_old = conf->mirrors;
+ conf->mirrors = conf->mirrors_new;
+ conf->mirrors_new = NULL;
+ }
+ setup_geo(&conf->geo, mddev, geo_start);
+ smp_mb();
+ if (mddev->reshape_backwards) {
+ sector_t size = raid10_size(mddev, 0, 0);
+ if (size < mddev->array_sectors) {
+ spin_unlock_irq(&conf->device_lock);
+ printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+ mddev->resync_max_sectors = size;
+ conf->reshape_progress = size;
+ } else
+ conf->reshape_progress = 0;
+ spin_unlock_irq(&conf->device_lock);
+
+ if (mddev->delta_disks && mddev->bitmap) {
+ ret = bitmap_resize(mddev->bitmap,
+ raid10_size(mddev, 0,
+ conf->geo.raid_disks),
+ 0, 0);
+ if (ret)
+ goto abort;
+ }
+ if (mddev->delta_disks > 0) {
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk < 0 &&
+ !test_bit(Faulty, &rdev->flags)) {
+ if (raid10_add_disk(mddev, rdev) == 0) {
+ if (rdev->raid_disk >=
+ conf->prev.raid_disks)
+ set_bit(In_sync, &rdev->flags);
+ else
+ rdev->recovery_offset = 0;
+
+ if (sysfs_link_rdev(mddev, rdev))
+ /* Failure here is OK */;
+ }
+ } else if (rdev->raid_disk >= conf->prev.raid_disks
+ && !test_bit(Faulty, &rdev->flags)) {
+ /* This is a spare that was manually added */
+ set_bit(In_sync, &rdev->flags);
+ }
+ }
+ /* When a reshape changes the number of devices,
+ * ->degraded is measured against the larger of the
+ * pre and post numbers.
+ */
+ spin_lock_irq(&conf->device_lock);
+ mddev->degraded = calc_degraded(conf);
+ spin_unlock_irq(&conf->device_lock);
+ mddev->raid_disks = conf->geo.raid_disks;
+ mddev->reshape_position = conf->reshape_progress;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+
+ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
+ "reshape");
+ if (!mddev->sync_thread) {
+ ret = -EAGAIN;
+ goto abort;
+ }
+ conf->reshape_checkpoint = jiffies;
+ md_wakeup_thread(mddev->sync_thread);
+ md_new_event(mddev);
+ return 0;
+
+abort:
+ mddev->recovery = 0;
+ spin_lock_irq(&conf->device_lock);
+ conf->geo = conf->prev;
+ mddev->raid_disks = conf->geo.raid_disks;
+ rdev_for_each(rdev, mddev)
+ rdev->new_data_offset = rdev->data_offset;
+ smp_wmb();
+ conf->reshape_progress = MaxSector;
+ mddev->reshape_position = MaxSector;
+ spin_unlock_irq(&conf->device_lock);
+ return ret;
+}
+
+/* Calculate the last device-address that could contain
+ * any block from the chunk that includes the array-address 's'
+ * and report the next address.
+ * i.e. the address returned will be chunk-aligned and after
+ * any data that is in the chunk containing 's'.
+ */
+static sector_t last_dev_address(sector_t s, struct geom *geo)
+{
+ s = (s | geo->chunk_mask) + 1;
+ s >>= geo->chunk_shift;
+ s *= geo->near_copies;
+ s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
+ s *= geo->far_copies;
+ s <<= geo->chunk_shift;
+ return s;
+}
+
+/* Calculate the first device-address that could contain
+ * any block from the chunk that includes the array-address 's'.
+ * This too will be the start of a chunk
+ */
+static sector_t first_dev_address(sector_t s, struct geom *geo)
+{
+ s >>= geo->chunk_shift;
+ s *= geo->near_copies;
+ sector_div(s, geo->raid_disks);
+ s *= geo->far_copies;
+ s <<= geo->chunk_shift;
+ return s;
+}
+
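A worked userspace example of the two rounding helpers above: for the chunk containing array address 's', last_dev_address() rounds the per-device window up and first_dev_address() rounds it down. The geometry and names below are invented for the example.

#include <stdio.h>

struct toy_geo {
	int raid_disks, near_copies, far_copies, chunk_shift;
	unsigned long long chunk_mask;
};

static unsigned long long toy_last_dev_address(unsigned long long s,
					       const struct toy_geo *g)
{
	s = (s | g->chunk_mask) + 1;			/* next chunk boundary */
	s >>= g->chunk_shift;
	s *= g->near_copies;
	s = (s + g->raid_disks - 1) / g->raid_disks;	/* round up */
	s *= g->far_copies;
	return s << g->chunk_shift;
}

static unsigned long long toy_first_dev_address(unsigned long long s,
						const struct toy_geo *g)
{
	s >>= g->chunk_shift;				/* chunk boundary */
	s *= g->near_copies;
	s /= g->raid_disks;				/* round down */
	s *= g->far_copies;
	return s << g->chunk_shift;
}

int main(void)
{
	/* 4 disks, near=2, far=1, 64-sector chunks */
	struct toy_geo g = { 4, 2, 1, 6, 63 };
	unsigned long long s = 1000;

	printf("first=%llu last=%llu\n",
	       toy_first_dev_address(s, &g), toy_last_dev_address(s, &g));
	return 0;
}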
+static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
+ int *skipped)
+{
+ /* We simply copy at most one chunk (smallest of old and new)
+ * at a time, possibly less if that exceeds RESYNC_PAGES,
+ * or we hit a bad block or something.
+ * This might mean we pause for normal IO in the middle of
+ * a chunk, but that is not a problem as mddev->reshape_position
+ * can record any location.
+ *
+ * If we will want to write to a location that isn't
+ * yet recorded as 'safe' (i.e. in metadata on disk) then
+ * we need to flush all reshape requests and update the metadata.
+ *
+ * When reshaping forwards (e.g. to more devices), we interpret
+ * 'safe' as the earliest block which might not have been copied
+ * down yet. We divide this by previous stripe size and multiply
+ * by previous stripe length to get lowest device offset that we
+ * cannot write to yet.
+ * We interpret 'sector_nr' as an address that we want to write to.
+ * From this we use last_dev_address() to find where we might
+ * write to, and first_dev_address() on the 'safe' position.
+ * If this 'next' write position is after the 'safe' position,
+ * we must update the metadata to increase the 'safe' position.
+ *
+ * When reshaping backwards, we round in the opposite direction
+ * and perform the reverse test: next write position must not be
+ * less than current safe position.
+ *
+ * In all this the minimum difference in data offsets
+ * (conf->offset_diff - always positive) allows a bit of slack,
+ * so next can be after 'safe', but not by more than offset_diff
+ *
+ * We need to prepare all the bios here before we start any IO
+ * to ensure the size we choose is acceptable to all devices.
+ * That means one for each copy for write-out and an extra one for
+ * read-in.
+ * We store the read-in bio in ->master_bio and the others in
+ * ->devs[x].bio and ->devs[x].repl_bio.
+ */
+ struct r10conf *conf = mddev->private;
+ struct r10bio *r10_bio;
+ sector_t next, safe, last;
+ int max_sectors;
+ int nr_sectors;
+ int s;
+ struct md_rdev *rdev;
+ int need_flush = 0;
+ struct bio *blist;
+ struct bio *bio, *read_bio;
+ int sectors_done = 0;
+
+ if (sector_nr == 0) {
+ /* If restarting in the middle, skip the initial sectors */
+ if (mddev->reshape_backwards &&
+ conf->reshape_progress < raid10_size(mddev, 0, 0)) {
+ sector_nr = (raid10_size(mddev, 0, 0)
+ - conf->reshape_progress);
+ } else if (!mddev->reshape_backwards &&
+ conf->reshape_progress > 0)
+ sector_nr = conf->reshape_progress;
+ if (sector_nr) {
+ mddev->curr_resync_completed = sector_nr;
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ *skipped = 1;
+ return sector_nr;
+ }
+ }
+
+ /* We don't use sector_nr to track where we are up to
+ * as that doesn't work well for ->reshape_backwards.
+ * So just use ->reshape_progress.
+ */
+ if (mddev->reshape_backwards) {
+ /* 'next' is the earliest device address that we might
+ * write to for this chunk in the new layout
+ */
+ next = first_dev_address(conf->reshape_progress - 1,
+ &conf->geo);
+
+ /* 'safe' is the last device address that we might read from
+ * in the old layout after a restart
+ */
+ safe = last_dev_address(conf->reshape_safe - 1,
+ &conf->prev);
+
+ if (next + conf->offset_diff < safe)
+ need_flush = 1;
+
+ last = conf->reshape_progress - 1;
+ sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
+ & conf->prev.chunk_mask);
+ if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
+ sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
+ } else {
+ /* 'next' is after the last device address that we
+ * might write to for this chunk in the new layout
+ */
+ next = last_dev_address(conf->reshape_progress, &conf->geo);
+
+ /* 'safe' is the earliest device address that we might
+ * read from in the old layout after a restart
+ */
+ safe = first_dev_address(conf->reshape_safe, &conf->prev);
+
+ /* Need to update metadata if 'next' might be beyond 'safe'
+ * as that would possibly corrupt data
+ */
+ if (next > safe + conf->offset_diff)
+ need_flush = 1;
+
+ sector_nr = conf->reshape_progress;
+ last = sector_nr | (conf->geo.chunk_mask
+ & conf->prev.chunk_mask);
+
+ if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
+ last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
+ }
+
+ if (need_flush ||
+ time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
+ /* Need to update reshape_position in metadata */
+ wait_barrier(conf);
+ mddev->reshape_position = conf->reshape_progress;
+ if (mddev->reshape_backwards)
+ mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
+ - conf->reshape_progress;
+ else
+ mddev->curr_resync_completed = conf->reshape_progress;
+ conf->reshape_checkpoint = jiffies;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait, mddev->flags == 0 ||
+ kthread_should_stop());
+ conf->reshape_safe = mddev->reshape_position;
+ allow_barrier(conf);
+ }
+
+read_more:
+ /* Now schedule reads for blocks from sector_nr to last */
+ r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ raise_barrier(conf, sectors_done != 0);
+ atomic_set(&r10_bio->remaining, 0);
+ r10_bio->mddev = mddev;
+ r10_bio->sector = sector_nr;
+ set_bit(R10BIO_IsReshape, &r10_bio->state);
+ r10_bio->sectors = last - sector_nr + 1;
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
+
+ if (!rdev) {
+ /* Cannot read from here, so need to record bad blocks
+ * on all the target devices.
+ */
+ // FIXME
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ return sectors_done;
+ }
+
+ read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
+
+ read_bio->bi_bdev = rdev->bdev;
+ read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ + rdev->data_offset);
+ read_bio->bi_private = r10_bio;
+ read_bio->bi_end_io = end_sync_read;
+ read_bio->bi_rw = READ;
+ read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+ read_bio->bi_flags |= 1 << BIO_UPTODATE;
+ read_bio->bi_vcnt = 0;
+ read_bio->bi_idx = 0;
+ read_bio->bi_size = 0;
+ r10_bio->master_bio = read_bio;
+ r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
+
+ /* Now find the locations in the new layout */
+ __raid10_find_phys(&conf->geo, r10_bio);
+
+ blist = read_bio;
+ read_bio->bi_next = NULL;
+
+ for (s = 0; s < conf->copies*2; s++) {
+ struct bio *b;
+ int d = r10_bio->devs[s/2].devnum;
+ struct md_rdev *rdev2;
+ if (s&1) {
+ rdev2 = conf->mirrors[d].replacement;
+ b = r10_bio->devs[s/2].repl_bio;
+ } else {
+ rdev2 = conf->mirrors[d].rdev;
+ b = r10_bio->devs[s/2].bio;
+ }
+ if (!rdev2 || test_bit(Faulty, &rdev2->flags))
+ continue;
+ b->bi_bdev = rdev2->bdev;
+ b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+ b->bi_private = r10_bio;
+ b->bi_end_io = end_reshape_write;
+ b->bi_rw = WRITE;
+ b->bi_flags &= ~(BIO_POOL_MASK - 1);
+ b->bi_flags |= 1 << BIO_UPTODATE;
+ b->bi_next = blist;
+ b->bi_vcnt = 0;
+ b->bi_idx = 0;
+ b->bi_size = 0;
+ blist = b;
+ }
+
+ /* Now add as many pages as possible to all of these bios. */
+
+ nr_sectors = 0;
+ for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
+ struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
+ int len = (max_sectors - s) << 9;
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ for (bio = blist; bio ; bio = bio->bi_next) {
+ struct bio *bio2;
+ if (bio_add_page(bio, page, len, 0))
+ continue;
+
+ /* Didn't fit, must stop */
+ for (bio2 = blist;
+ bio2 && bio2 != bio;
+ bio2 = bio2->bi_next) {
+ /* Remove last page from this bio */
+ bio2->bi_vcnt--;
+ bio2->bi_size -= len;
+ bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
+ }
+ goto bio_full;
+ }
+ sector_nr += len >> 9;
+ nr_sectors += len >> 9;
+ }
+bio_full:
+ r10_bio->sectors = nr_sectors;
+
+ /* Now submit the read */
+ md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
+ atomic_inc(&r10_bio->remaining);
+ read_bio->bi_next = NULL;
+ generic_make_request(read_bio);
+ sector_nr += nr_sectors;
+ sectors_done += nr_sectors;
+ if (sector_nr <= last)
+ goto read_more;
+
+ /* Now that we have done the whole section we can
+ * update reshape_progress
+ */
+ if (mddev->reshape_backwards)
+ conf->reshape_progress -= sectors_done;
+ else
+ conf->reshape_progress += sectors_done;
+
+ return sectors_done;
+}
+
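The safety test described in the comment at the top of reshape_request(), condensed into a sketch: a metadata update is forced whenever the next per-device write window would run past the last checkpointed 'safe' position by more than the offset slack. Invented names, same logic as the need_flush tests above.

int toy_need_metadata_flush(unsigned long long next, unsigned long long safe,
			    unsigned long long offset_diff, int backwards)
{
	if (backwards)
		return next + offset_diff < safe;
	return next > safe + offset_diff;
}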
+static void end_reshape_request(struct r10bio *r10_bio);
+static int handle_reshape_read_error(struct mddev *mddev,
+ struct r10bio *r10_bio);
+static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+{
+ /* Reshape read completed. Hopefully we have a block
+ * to write out.
+ * If we got a read error then we do sync 1-page reads from
+ * elsewhere until we find the data - or give up.
+ */
+ struct r10conf *conf = mddev->private;
+ int s;
+
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ if (handle_reshape_read_error(mddev, r10_bio) < 0) {
+ /* Reshape has been aborted */
+ md_done_sync(mddev, r10_bio->sectors, 0);
+ return;
+ }
+
+ /* We definitely have the data in the pages, schedule the
+ * writes.
+ */
+ atomic_set(&r10_bio->remaining, 1);
+ for (s = 0; s < conf->copies*2; s++) {
+ struct bio *b;
+ int d = r10_bio->devs[s/2].devnum;
+ struct md_rdev *rdev;
+ if (s&1) {
+ rdev = conf->mirrors[d].replacement;
+ b = r10_bio->devs[s/2].repl_bio;
+ } else {
+ rdev = conf->mirrors[d].rdev;
+ b = r10_bio->devs[s/2].bio;
+ }
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+ atomic_inc(&rdev->nr_pending);
+ md_sync_acct(b->bi_bdev, r10_bio->sectors);
+ atomic_inc(&r10_bio->remaining);
+ b->bi_next = NULL;
+ generic_make_request(b);
+ }
+ end_reshape_request(r10_bio);
+}
+
+static void end_reshape(struct r10conf *conf)
+{
+ if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
+ return;
+
+ spin_lock_irq(&conf->device_lock);
+ conf->prev = conf->geo;
+ md_finish_reshape(conf->mddev);
+ smp_wmb();
+ conf->reshape_progress = MaxSector;
+ spin_unlock_irq(&conf->device_lock);
+
+ /* read-ahead size must cover two whole stripes, which is
+ * 2 * (datadisks) * chunksize, where 'datadisks' is raid_disks / near_copies
+ */
+ if (conf->mddev->queue) {
+ int stripe = conf->geo.raid_disks *
+ ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
+ stripe /= conf->geo.near_copies;
+ if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ }
+ conf->fullsync = 0;
+}
+
+
+static int handle_reshape_read_error(struct mddev *mddev,
+ struct r10bio *r10_bio)
+{
+ /* Use sync reads to get the blocks from somewhere else */
+ int sectors = r10_bio->sectors;
+ struct r10bio r10b;
+ struct r10conf *conf = mddev->private;
+ int slot = 0;
+ int idx = 0;
+ struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
+
+ r10b.sector = r10_bio->sector;
+ __raid10_find_phys(&conf->prev, &r10b);
+
+ while (sectors) {
+ int s = sectors;
+ int success = 0;
+ int first_slot = slot;
+
+ if (s > (PAGE_SIZE >> 9))
+ s = PAGE_SIZE >> 9;
+
+ while (!success) {
+ int d = r10b.devs[slot].devnum;
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ sector_t addr;
+ if (rdev == NULL ||
+ test_bit(Faulty, &rdev->flags) ||
+ !test_bit(In_sync, &rdev->flags))
+ goto failed;
+
+ addr = r10b.devs[slot].addr + idx * PAGE_SIZE;
+ success = sync_page_io(rdev,
+ addr,
+ s << 9,
+ bvec[idx].bv_page,
+ READ, false);
+ if (success)
+ break;
+ failed:
+ slot++;
+ if (slot >= conf->copies)
+ slot = 0;
+ if (slot == first_slot)
+ break;
+ }
+ if (!success) {
+ /* couldn't read this block, must give up */
+ set_bit(MD_RECOVERY_INTR,
+ &mddev->recovery);
+ return -EIO;
+ }
+ sectors -= s;
+ idx++;
+ }
+ return 0;
+}
+
+static void end_reshape_write(struct bio *bio, int error)
+{
+ int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct r10bio *r10_bio = bio->bi_private;
+ struct mddev *mddev = r10_bio->mddev;
+ struct r10conf *conf = mddev->private;
+ int d;
+ int slot;
+ int repl;
+ struct md_rdev *rdev = NULL;
+
+ d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ if (repl)
+ rdev = conf->mirrors[d].replacement;
+ if (!rdev) {
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
+
+ if (!uptodate) {
+ /* FIXME should record badblock */
+ md_error(mddev, rdev);
+ }
+
+ rdev_dec_pending(rdev, mddev);
+ end_reshape_request(r10_bio);
+}
+
+static void end_reshape_request(struct r10bio *r10_bio)
+{
+ if (!atomic_dec_and_test(&r10_bio->remaining))
+ return;
+ md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
+ bio_put(r10_bio->master_bio);
+ put_buf(r10_bio);
+}
+
+static void raid10_finish_reshape(struct mddev *mddev)
+{
+ struct r10conf *conf = mddev->private;
+
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ return;
+
+ if (mddev->delta_disks > 0) {
+ sector_t size = raid10_size(mddev, 0, 0);
+ md_set_array_sectors(mddev, size);
+ if (mddev->recovery_cp > mddev->resync_max_sectors) {
+ mddev->recovery_cp = mddev->resync_max_sectors;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ }
+ mddev->resync_max_sectors = size;
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ } else {
+ int d;
+ for (d = conf->geo.raid_disks ;
+ d < conf->geo.raid_disks - mddev->delta_disks;
+ d++) {
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ if (rdev)
+ clear_bit(In_sync, &rdev->flags);
+ rdev = conf->mirrors[d].replacement;
+ if (rdev)
+ clear_bit(In_sync, &rdev->flags);
+ }
+ }
+ mddev->layout = mddev->new_layout;
+ mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
+ mddev->reshape_position = MaxSector;
+ mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
+}
+
static struct md_personality raid10_personality =
{
.name = "raid10",
@@ -3552,6 +4534,9 @@ static struct md_personality raid10_personality =
.size = raid10_size,
.resize = raid10_resize,
.takeover = raid10_takeover,
+ .check_reshape = raid10_check_reshape,
+ .start_reshape = raid10_start_reshape,
+ .finish_reshape = raid10_finish_reshape,
};
static int __init raid_init(void)
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 7c615613c381..135b1b0a1554 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -14,32 +14,38 @@ struct mirror_info {
struct r10conf {
struct mddev *mddev;
struct mirror_info *mirrors;
- int raid_disks;
+ struct mirror_info *mirrors_new, *mirrors_old;
spinlock_t device_lock;
/* geometry */
- int near_copies; /* number of copies laid out
+ struct geom {
+ int raid_disks;
+ int near_copies; /* number of copies laid out
* raid0 style */
- int far_copies; /* number of copies laid out
+ int far_copies; /* number of copies laid out
* at large strides across drives
*/
- int far_offset; /* far_copies are offset by 1
+ int far_offset; /* far_copies are offset by 1
* stripe instead of many
*/
- int copies; /* near_copies * far_copies.
- * must be <= raid_disks
- */
- sector_t stride; /* distance between far copies.
+ sector_t stride; /* distance between far copies.
* This is size / far_copies unless
* far_offset, in which case it is
* 1 stripe.
*/
+ int chunk_shift; /* shift from chunks to sectors */
+ sector_t chunk_mask;
+ } prev, geo;
+ int copies; /* near_copies * far_copies.
+ * must be <= raid_disks
+ */
sector_t dev_sectors; /* temp copy of
* mddev->dev_sectors */
-
- int chunk_shift; /* shift from chunks to sectors */
- sector_t chunk_mask;
+ sector_t reshape_progress;
+ sector_t reshape_safe;
+ unsigned long reshape_checkpoint;
+ sector_t offset_diff;
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
@@ -136,6 +142,7 @@ enum r10bio_state {
R10BIO_Uptodate,
R10BIO_IsSync,
R10BIO_IsRecover,
+ R10BIO_IsReshape,
R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
* so that raid10d knows what to do with them.
@@ -146,5 +153,10 @@ enum r10bio_state {
*/
R10BIO_MadeGood,
R10BIO_WriteError,
+/* During a reshape we might be performing IO on the
+ * 'previous' part of the array, in which case this
+ * flag is set
+ */
+ R10BIO_Previous,
};
#endif
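
The new struct geom groups the layout parameters so that, during a reshape, raid10 can address data through either the previous ('prev') or the new ('geo') geometry. A minimal standalone sketch of how the chunk_shift/chunk_mask pair in that struct splits a sector, assuming the usual convention that chunk_mask == (1 << chunk_shift) - 1; the geom_sketch name and sample values are illustrative only, not part of the patch:

#include <stdint.h>
#include <stdio.h>

struct geom_sketch {
	int chunk_shift;      /* log2 of the chunk size in sectors */
	uint64_t chunk_mask;  /* (1 << chunk_shift) - 1 */
};

int main(void)
{
	/* 64 KiB chunks of 512-byte sectors => 128 sectors => shift of 7 */
	struct geom_sketch g = { .chunk_shift = 7, .chunk_mask = (1ULL << 7) - 1 };
	uint64_t sector = 1000;

	printf("chunk %llu, offset %llu\n",
	       (unsigned long long)(sector >> g.chunk_shift),
	       (unsigned long long)(sector & g.chunk_mask));
	return 0;
}
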
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f351422938e0..04348d76bb30 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
- if (test_bit(STRIPE_DELAYED, &sh->state))
+ if (test_bit(STRIPE_DELAYED, &sh->state) &&
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
sh->bm_seq - conf->seq_write > 0)
list_add_tail(&sh->lru, &conf->bitmap_list);
else {
+ clear_bit(STRIPE_DELAYED, &sh->state);
clear_bit(STRIPE_BIT_DELAY, &sh->state);
list_add_tail(&sh->lru, &conf->handle_list);
}
@@ -488,6 +490,27 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
return sh;
}
+/* Determine if 'data_offset' or 'new_data_offset' should be used
+ * in this stripe_head.
+ */
+static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
+{
+ sector_t progress = conf->reshape_progress;
+ /* Need a memory barrier to make sure we see the value
+ * of conf->generation, or ->data_offset that was set before
+ * reshape_progress was updated.
+ */
+ smp_rmb();
+ if (progress == MaxSector)
+ return 0;
+ if (sh->generation == conf->generation - 1)
+ return 0;
+ /* We are in a reshape, and this is a new-generation stripe,
+ * so use new_data_offset.
+ */
+ return 1;
+}
+
static void
raid5_end_read_request(struct bio *bi, int error);
static void
@@ -518,6 +541,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
replace_only = 1;
} else
continue;
+ if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
+ rw |= REQ_SYNC;
bi = &sh->dev[i].req;
rbi = &sh->dev[i].rreq; /* For writing to replacement */
@@ -583,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
* a chance*/
md_check_recovery(conf->mddev);
}
+ /*
+ * Because md_wait_for_blocked_rdev
+ * will dec nr_pending, we must
+ * increment it first.
+ */
+ atomic_inc(&rdev->nr_pending);
md_wait_for_blocked_rdev(rdev, conf->mddev);
} else {
/* Acknowledged bad block - skip the write */
@@ -603,7 +634,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
__func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
atomic_inc(&sh->count);
- bi->bi_sector = sh->sector + rdev->data_offset;
+ if (use_new_offset(conf, sh))
+ bi->bi_sector = (sh->sector
+ + rdev->new_data_offset);
+ else
+ bi->bi_sector = (sh->sector
+ + rdev->data_offset);
bi->bi_flags = 1 << BIO_UPTODATE;
bi->bi_idx = 0;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@ -627,7 +663,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
__func__, (unsigned long long)sh->sector,
rbi->bi_rw, i);
atomic_inc(&sh->count);
- rbi->bi_sector = sh->sector + rrdev->data_offset;
+ if (use_new_offset(conf, sh))
+ rbi->bi_sector = (sh->sector
+ + rrdev->new_data_offset);
+ else
+ rbi->bi_sector = (sh->sector
+ + rrdev->data_offset);
rbi->bi_flags = 1 << BIO_UPTODATE;
rbi->bi_idx = 0;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@ -1114,6 +1155,8 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
dev->sector + STRIPE_SECTORS) {
if (wbi->bi_rw & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
+ if (wbi->bi_rw & REQ_SYNC)
+ set_bit(R5_SyncIO, &dev->flags);
tx = async_copy_data(1, wbi, dev->page,
dev->sector, tx);
wbi = r5_next_bio(wbi, dev->sector);
@@ -1131,13 +1174,15 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
int i;
- bool fua = false;
+ bool fua = false, sync = false;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
- for (i = disks; i--; )
+ for (i = disks; i--; ) {
fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
+ sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
+ }
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -1146,6 +1191,8 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
set_bit(R5_UPTODATE, &dev->flags);
if (fua)
set_bit(R5_WantFUA, &dev->flags);
+ if (sync)
+ set_bit(R5_SyncIO, &dev->flags);
}
}
@@ -1648,7 +1695,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
char b[BDEVNAME_SIZE];
struct md_rdev *rdev = NULL;
-
+ sector_t s;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
@@ -1671,6 +1718,10 @@ static void raid5_end_read_request(struct bio * bi, int error)
if (!rdev)
rdev = conf->disks[i].rdev;
+ if (use_new_offset(conf, sh))
+ s = sh->sector + rdev->new_data_offset;
+ else
+ s = sh->sector + rdev->data_offset;
if (uptodate) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
@@ -1683,8 +1734,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
"md/raid:%s: read error corrected"
" (%lu sectors at %llu on %s)\n",
mdname(conf->mddev), STRIPE_SECTORS,
- (unsigned long long)(sh->sector
- + rdev->data_offset),
+ (unsigned long long)s,
bdevname(rdev->bdev, b));
atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
clear_bit(R5_ReadError, &sh->dev[i].flags);
@@ -1695,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
} else {
const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
+ int set_bad = 0;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
@@ -1704,29 +1755,28 @@ static void raid5_end_read_request(struct bio * bi, int error)
"md/raid:%s: read error on replacement device "
"(sector %llu on %s).\n",
mdname(conf->mddev),
- (unsigned long long)(sh->sector
- + rdev->data_offset),
+ (unsigned long long)s,
bdn);
- else if (conf->mddev->degraded >= conf->max_degraded)
+ else if (conf->mddev->degraded >= conf->max_degraded) {
+ set_bad = 1;
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error not correctable "
"(sector %llu on %s).\n",
mdname(conf->mddev),
- (unsigned long long)(sh->sector
- + rdev->data_offset),
+ (unsigned long long)s,
bdn);
- else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+ } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
+ set_bad = 1;
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error NOT corrected!! "
"(sector %llu on %s).\n",
mdname(conf->mddev),
- (unsigned long long)(sh->sector
- + rdev->data_offset),
+ (unsigned long long)s,
bdn);
- else if (atomic_read(&rdev->read_errors)
+ } else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
"md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1738,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
else {
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
- md_error(conf->mddev, rdev);
+ if (!(set_bad
+ && test_bit(In_sync, &rdev->flags)
+ && rdev_set_badblocks(
+ rdev, sh->sector, STRIPE_SECTORS, 0)))
+ md_error(conf->mddev, rdev);
}
}
rdev_dec_pending(rdev, conf->mddev);
@@ -3543,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
finish:
/* wait for this device to become unblocked */
- if (conf->mddev->external && unlikely(s.blocked_rdev))
- md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+ if (unlikely(s.blocked_rdev)) {
+ if (conf->mddev->external)
+ md_wait_for_blocked_rdev(s.blocked_rdev,
+ conf->mddev);
+ else
+ /* Internal metadata will immediately
+ * be written by raid5d, so we don't
+ * need to wait here.
+ */
+ rdev_dec_pending(s.blocked_rdev,
+ conf->mddev);
+ }
if (s.handle_bad_blocks)
for (i = disks; i--; ) {
@@ -3561,7 +3625,7 @@ finish:
if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
rdev = conf->disks[i].rdev;
rdev_clear_badblocks(rdev, sh->sector,
- STRIPE_SECTORS);
+ STRIPE_SECTORS, 0);
rdev_dec_pending(rdev, conf->mddev);
}
if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
@@ -3570,7 +3634,7 @@ finish:
/* rdev have been moved down */
rdev = conf->disks[i].rdev;
rdev_clear_badblocks(rdev, sh->sector,
- STRIPE_SECTORS);
+ STRIPE_SECTORS, 0);
rdev_dec_pending(rdev, conf->mddev);
}
}
@@ -3842,7 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev;
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
- align_bi->bi_sector += rdev->data_offset;
if (!bio_fits_rdev(align_bi) ||
is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3853,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
return 0;
}
+ /* No reshape active, so we can trust rdev->data_offset */
+ align_bi->bi_sector += rdev->data_offset;
+
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
@@ -3931,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
int remaining;
- int plugged;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
@@ -3950,15 +4015,12 @@ static void make_request(struct mddev *mddev, struct bio * bi)
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
- plugged = mddev_check_plugged(mddev);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
- int disks, data_disks;
int previous;
retry:
previous = 0;
- disks = conf->raid_disks;
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
if (unlikely(conf->reshape_progress != MaxSector)) {
/* spinlock is needed as reshape_progress may be
@@ -3970,13 +4032,12 @@ static void make_request(struct mddev *mddev, struct bio * bi)
* to check again.
*/
spin_lock_irq(&conf->device_lock);
- if (mddev->delta_disks < 0
+ if (mddev->reshape_backwards
? logical_sector < conf->reshape_progress
: logical_sector >= conf->reshape_progress) {
- disks = conf->previous_raid_disks;
previous = 1;
} else {
- if (mddev->delta_disks < 0
+ if (mddev->reshape_backwards
? logical_sector < conf->reshape_safe
: logical_sector >= conf->reshape_safe) {
spin_unlock_irq(&conf->device_lock);
@@ -3986,7 +4047,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
spin_unlock_irq(&conf->device_lock);
}
- data_disks = disks - conf->max_degraded;
new_sector = raid5_compute_sector(conf, logical_sector,
previous,
@@ -4009,7 +4069,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
*/
int must_retry = 0;
spin_lock_irq(&conf->device_lock);
- if (mddev->delta_disks < 0
+ if (mddev->reshape_backwards
? logical_sector >= conf->reshape_progress
: logical_sector < conf->reshape_progress)
/* mismatch, need to try again */
@@ -4056,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ((bi->bi_rw & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
+ mddev_check_plugged(mddev);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -4063,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
finish_wait(&conf->wait_for_overlap, &w);
break;
}
-
}
- if (!plugged)
- md_wakeup_thread(mddev->thread);
spin_lock_irq(&conf->device_lock);
remaining = raid5_dec_bi_phys_segments(bi);
@@ -4108,11 +4166,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
if (sector_nr == 0) {
/* If restarting in the middle, skip the initial sectors */
- if (mddev->delta_disks < 0 &&
+ if (mddev->reshape_backwards &&
conf->reshape_progress < raid5_size(mddev, 0, 0)) {
sector_nr = raid5_size(mddev, 0, 0)
- conf->reshape_progress;
- } else if (mddev->delta_disks >= 0 &&
+ } else if (!mddev->reshape_backwards &&
conf->reshape_progress > 0)
sector_nr = conf->reshape_progress;
sector_div(sector_nr, new_data_disks);
@@ -4133,13 +4191,11 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
else
reshape_sectors = mddev->chunk_sectors;
- /* we update the metadata when there is more than 3Meg
- * in the block range (that is rather arbitrary, should
- * probably be time based) or when the data about to be
- * copied would over-write the source of the data at
- * the front of the range.
- * i.e. one new_stripe along from reshape_progress new_maps
- * to after where reshape_safe old_maps to
+ /* We update the metadata at least every 10 seconds, or when
+ * the data about to be copied would over-write the source of
+ * the data at the front of the range. i.e. one new_stripe
+ * along from reshape_progress new_maps to after where
+ * reshape_safe old_maps to
*/
writepos = conf->reshape_progress;
sector_div(writepos, new_data_disks);
@@ -4147,7 +4203,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
sector_div(readpos, data_disks);
safepos = conf->reshape_safe;
sector_div(safepos, data_disks);
- if (mddev->delta_disks < 0) {
+ if (mddev->reshape_backwards) {
writepos -= min_t(sector_t, reshape_sectors, writepos);
readpos += reshape_sectors;
safepos += reshape_sectors;
@@ -4157,11 +4213,29 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
safepos -= min_t(sector_t, reshape_sectors, safepos);
}
+ /* Having calculated the 'writepos' possibly use it
+ * to set 'stripe_addr' which is where we will write to.
+ */
+ if (mddev->reshape_backwards) {
+ BUG_ON(conf->reshape_progress == 0);
+ stripe_addr = writepos;
+ BUG_ON((mddev->dev_sectors &
+ ~((sector_t)reshape_sectors - 1))
+ - reshape_sectors - stripe_addr
+ != sector_nr);
+ } else {
+ BUG_ON(writepos != sector_nr + reshape_sectors);
+ stripe_addr = sector_nr;
+ }
+
/* 'writepos' is the most advanced device address we might write.
* 'readpos' is the least advanced device address we might read.
* 'safepos' is the least address recorded in the metadata as having
* been reshaped.
- * If 'readpos' is behind 'writepos', then there is no way that we can
+ * If there is a min_offset_diff, these are adjusted either by
+ * increasing the safepos/readpos if diff is negative, or
+ * increasing writepos if diff is positive.
+ * If 'readpos' is then behind 'writepos', there is no way that we can
* ensure safety in the face of a crash - that must be done by userspace
* making a backup of the data. So in that case there is no particular
* rush to update metadata.
@@ -4174,7 +4248,13 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
* Maybe that number should be configurable, but I'm not sure it is
* worth it.... maybe it could be a multiple of safemode_delay???
*/
- if ((mddev->delta_disks < 0
+ if (conf->min_offset_diff < 0) {
+ safepos += -conf->min_offset_diff;
+ readpos += -conf->min_offset_diff;
+ } else
+ writepos += conf->min_offset_diff;
+
+ if ((mddev->reshape_backwards
? (safepos > writepos && readpos < writepos)
: (safepos < writepos && readpos > writepos)) ||
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
@@ -4195,17 +4275,6 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- if (mddev->delta_disks < 0) {
- BUG_ON(conf->reshape_progress == 0);
- stripe_addr = writepos;
- BUG_ON((mddev->dev_sectors &
- ~((sector_t)reshape_sectors - 1))
- - reshape_sectors - stripe_addr
- != sector_nr);
- } else {
- BUG_ON(writepos != sector_nr + reshape_sectors);
- stripe_addr = sector_nr;
- }
INIT_LIST_HEAD(&stripes);
for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
int j;
@@ -4239,7 +4308,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
list_add(&sh->lru, &stripes);
}
spin_lock_irq(&conf->device_lock);
- if (mddev->delta_disks < 0)
+ if (mddev->reshape_backwards)
conf->reshape_progress -= reshape_sectors * new_data_disks;
else
conf->reshape_progress += reshape_sectors * new_data_disks;
@@ -4776,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
int raid_disk, memory, max_disks;
struct md_rdev *rdev;
struct disk_info *disk;
+ char pers_name[6];
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -4899,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
mdname(mddev), memory);
- conf->thread = md_register_thread(raid5d, mddev, NULL);
+ sprintf(pers_name, "raid%d", mddev->new_level);
+ conf->thread = md_register_thread(raid5d, mddev, pers_name);
if (!conf->thread) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate thread.\n",
@@ -4952,16 +5023,42 @@ static int run(struct mddev *mddev)
struct md_rdev *rdev;
sector_t reshape_offset = 0;
int i;
+ long long min_offset_diff = 0;
+ int first = 1;
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "md/raid:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
+
+ rdev_for_each(rdev, mddev) {
+ long long diff;
+ if (rdev->raid_disk < 0)
+ continue;
+ diff = (rdev->new_data_offset - rdev->data_offset);
+ if (first) {
+ min_offset_diff = diff;
+ first = 0;
+ } else if (mddev->reshape_backwards &&
+ diff < min_offset_diff)
+ min_offset_diff = diff;
+ else if (!mddev->reshape_backwards &&
+ diff > min_offset_diff)
+ min_offset_diff = diff;
+ }
+
if (mddev->reshape_position != MaxSector) {
/* Check that we can continue the reshape.
- * Currently only disks can change, it must
- * increase, and we must be past the point where
- * a stripe over-writes itself
+ * Difficulties arise if the stripe we would write to
+ * next is at or after the stripe we would read from next.
+ * For a reshape that changes the number of devices, this
+ * is only possible for a very short time, and mdadm makes
+ * sure that time appears to have passed before assembling
+ * the array. So we fail if that time hasn't passed.
+ * For a reshape that keeps the number of devices the same
+ * mdadm must be monitoring the reshape and keeping the
+ * critical areas read-only and backed up. It will start
+ * the array in read-only mode, so we check for that.
*/
sector_t here_new, here_old;
int old_disks;
@@ -4993,26 +5090,34 @@ static int run(struct mddev *mddev)
/* here_old is the first stripe that we might need to read
* from */
if (mddev->delta_disks == 0) {
+ if ((here_new * mddev->new_chunk_sectors !=
+ here_old * mddev->chunk_sectors)) {
+ printk(KERN_ERR "md/raid:%s: reshape position is"
+ " confused - aborting\n", mdname(mddev));
+ return -EINVAL;
+ }
/* We cannot be sure it is safe to start an in-place
- * reshape. It is only safe if user-space if monitoring
+ * reshape. It is only safe if user-space is monitoring
* and taking constant backups.
* mdadm always starts a situation like this in
* readonly mode so it can take control before
* allowing any writes. So just check for that.
*/
- if ((here_new * mddev->new_chunk_sectors !=
- here_old * mddev->chunk_sectors) ||
- mddev->ro == 0) {
- printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
- " in read-only mode - aborting\n",
+ if (abs(min_offset_diff) >= mddev->chunk_sectors &&
+ abs(min_offset_diff) >= mddev->new_chunk_sectors)
+ /* not really in-place - so OK */;
+ else if (mddev->ro == 0) {
+ printk(KERN_ERR "md/raid:%s: in-place reshape "
+ "must be started in read-only mode "
+ "- aborting\n",
mdname(mddev));
return -EINVAL;
}
- } else if (mddev->delta_disks < 0
- ? (here_new * mddev->new_chunk_sectors <=
+ } else if (mddev->reshape_backwards
+ ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
here_old * mddev->chunk_sectors)
: (here_new * mddev->new_chunk_sectors >=
- here_old * mddev->chunk_sectors)) {
+ here_old * mddev->chunk_sectors + (-min_offset_diff))) {
/* Reading from the same stripe as writing to - bad */
printk(KERN_ERR "md/raid:%s: reshape_position too early for "
"auto-recovery - aborting.\n",
@@ -5037,6 +5142,7 @@ static int run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
+ conf->min_offset_diff = min_offset_diff;
mddev->thread = conf->thread;
conf->thread = NULL;
mddev->private = conf;
@@ -5182,9 +5288,12 @@ static int run(struct mddev *mddev)
blk_queue_io_opt(mddev->queue, chunk_size *
(conf->raid_disks - conf->max_degraded));
- rdev_for_each(rdev, mddev)
+ rdev_for_each(rdev, mddev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->new_data_offset << 9);
+ }
}
return 0;
@@ -5380,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
- disk = rdev->saved_raid_disk;
- else
- disk = first;
- for ( ; disk <= last ; disk++) {
+ first = rdev->saved_raid_disk;
+
+ for (disk = first; disk <= last; disk++) {
p = conf->disks + disk;
if (p->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
@@ -5392,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
rcu_assign_pointer(p->rdev, rdev);
- break;
+ goto out;
}
+ }
+ for (disk = first; disk <= last; disk++) {
+ p = conf->disks + disk;
if (test_bit(WantReplacement, &p->rdev->flags) &&
p->replacement == NULL) {
clear_bit(In_sync, &rdev->flags);
@@ -5405,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
+out:
print_raid5_conf(conf);
return err;
}
@@ -5418,12 +5530,18 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
* any io in the removed space completes, but it hardly seems
* worth it.
*/
+ sector_t newsize;
sectors &= ~((sector_t)mddev->chunk_sectors - 1);
- md_set_array_sectors(mddev, raid5_size(mddev, sectors,
- mddev->raid_disks));
- if (mddev->array_sectors >
- raid5_size(mddev, sectors, mddev->raid_disks))
+ newsize = raid5_size(mddev, sectors, mddev->raid_disks);
+ if (mddev->external_size &&
+ mddev->array_sectors > newsize)
return -EINVAL;
+ if (mddev->bitmap) {
+ int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
+ if (ret)
+ return ret;
+ }
+ md_set_array_sectors(mddev, newsize);
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
@@ -5468,9 +5586,6 @@ static int check_reshape(struct mddev *mddev)
mddev->new_layout == mddev->layout &&
mddev->new_chunk_sectors == mddev->chunk_sectors)
return 0; /* nothing to do */
- if (mddev->bitmap)
- /* Cannot grow a bitmap yet */
- return -EBUSY;
if (has_failed(conf))
return -EINVAL;
if (mddev->delta_disks < 0) {
@@ -5505,10 +5620,14 @@ static int raid5_start_reshape(struct mddev *mddev)
if (!check_stripe_cache(mddev))
return -ENOSPC;
- rdev_for_each(rdev, mddev)
+ if (has_failed(conf))
+ return -EINVAL;
+
+ rdev_for_each(rdev, mddev) {
if (!test_bit(In_sync, &rdev->flags)
&& !test_bit(Faulty, &rdev->flags))
spares++;
+ }
if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
/* Not enough devices even to make a degraded array
@@ -5535,12 +5654,16 @@ static int raid5_start_reshape(struct mddev *mddev)
conf->chunk_sectors = mddev->new_chunk_sectors;
conf->prev_algo = conf->algorithm;
conf->algorithm = mddev->new_layout;
- if (mddev->delta_disks < 0)
+ conf->generation++;
+ /* Code that selects data_offset needs to see the generation update
+ * if reshape_progress has been set - so a memory barrier is needed.
+ */
+ smp_mb();
+ if (mddev->reshape_backwards)
conf->reshape_progress = raid5_size(mddev, 0, 0);
else
conf->reshape_progress = 0;
conf->reshape_safe = conf->reshape_progress;
- conf->generation++;
spin_unlock_irq(&conf->device_lock);
/* Add some new drives, as many as will fit.
@@ -5592,6 +5715,9 @@ static int raid5_start_reshape(struct mddev *mddev)
mddev->recovery = 0;
spin_lock_irq(&conf->device_lock);
mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
+ rdev_for_each(rdev, mddev)
+ rdev->new_data_offset = rdev->data_offset;
+ smp_wmb();
conf->reshape_progress = MaxSector;
mddev->reshape_position = MaxSector;
spin_unlock_irq(&conf->device_lock);
@@ -5610,9 +5736,13 @@ static void end_reshape(struct r5conf *conf)
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+ struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
+ rdev_for_each(rdev, conf->mddev)
+ rdev->data_offset = rdev->new_data_offset;
+ smp_wmb();
conf->reshape_progress = MaxSector;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
@@ -5652,17 +5782,18 @@ static void raid5_finish_reshape(struct mddev *mddev)
d < conf->raid_disks - mddev->delta_disks;
d++) {
struct md_rdev *rdev = conf->disks[d].rdev;
- if (rdev &&
- raid5_remove_disk(mddev, rdev) == 0) {
- sysfs_unlink_rdev(mddev, rdev);
- rdev->raid_disk = -1;
- }
+ if (rdev)
+ clear_bit(In_sync, &rdev->flags);
+ rdev = conf->disks[d].replacement;
+ if (rdev)
+ clear_bit(In_sync, &rdev->flags);
}
}
mddev->layout = conf->algorithm;
mddev->chunk_sectors = conf->chunk_sectors;
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
+ mddev->reshape_backwards = 0;
}
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 8d8e13934a48..2164021f3b5f 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -285,6 +285,7 @@ enum r5dev_flags {
*/
R5_Wantdrain, /* dev->towrite needs to be drained */
R5_WantFUA, /* Write should be FUA */
+ R5_SyncIO, /* The IO is sync */
R5_WriteError, /* got a write error - need to record it */
R5_MadeGood, /* A bad block has been fixed by writing to it */
R5_ReadRepl, /* Will/did read from replacement rather than orig */
@@ -385,6 +386,12 @@ struct r5conf {
short generation; /* increments with every reshape */
unsigned long reshape_checkpoint; /* Time we last updated
* metadata */
+ long long min_offset_diff; /* minimum difference between
+ * data_offset and
+ * new_data_offset across all
+ * devices. May be negative,
+ * but is closest to zero.
+ */
struct list_head handle_list; /* stripes needing handling */
struct list_head hold_list; /* preread ready stripes */
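
min_offset_diff records how far data_offset and new_data_offset diverge across the member devices; reshape_request() (see the raid5.c hunk above) uses it to widen the safety margin before comparing writepos, readpos and safepos. A standalone sketch of that adjustment, with simplified types and made-up sample positions:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Mirror of the adjustment added to reshape_request(): a negative diff
 * increases safepos/readpos, a positive diff increases writepos. */
static void adjust_for_offset_diff(long long min_offset_diff,
				   sector_t *writepos, sector_t *readpos,
				   sector_t *safepos)
{
	if (min_offset_diff < 0) {
		*safepos += -min_offset_diff;
		*readpos += -min_offset_diff;
	} else {
		*writepos += min_offset_diff;
	}
}

int main(void)
{
	sector_t writepos = 4096, readpos = 2048, safepos = 1024;

	adjust_for_offset_diff(-128, &writepos, &readpos, &safepos);
	printf("writepos=%llu readpos=%llu safepos=%llu\n",
	       writepos, readpos, safepos);
	return 0;
}
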
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 71f8e018e564..0cdbd742974a 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -199,8 +199,6 @@ static int fops_open(struct file *file)
struct saa7146_fh *fh = NULL;
int result = 0;
- enum v4l2_buf_type type;
-
DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
if (mutex_lock_interruptible(&saa7146_devices_lock))
@@ -208,10 +206,6 @@ static int fops_open(struct file *file)
DEB_D("using: %p\n", dev);
- type = vdev->vfl_type == VFL_TYPE_GRABBER
- ? V4L2_BUF_TYPE_VIDEO_CAPTURE
- : V4L2_BUF_TYPE_VBI_CAPTURE;
-
/* check if an extension is registered */
if( NULL == dev->ext ) {
DEB_S("no extension registered for this device\n");
@@ -227,11 +221,12 @@ static int fops_open(struct file *file)
goto out;
}
- file->private_data = fh;
+ v4l2_fh_init(&fh->fh, vdev);
+
+ file->private_data = &fh->fh;
fh->dev = dev;
- fh->type = type;
- if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
DEB_S("initializing vbi...\n");
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
result = saa7146_vbi_uops.open(dev,file);
@@ -252,6 +247,7 @@ static int fops_open(struct file *file)
}
result = 0;
+ v4l2_fh_add(&fh->fh);
out:
if (fh && result != 0) {
kfree(fh);
@@ -263,6 +259,7 @@ out:
static int fops_release(struct file *file)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
@@ -271,7 +268,7 @@ static int fops_release(struct file *file)
if (mutex_lock_interruptible(&saa7146_devices_lock))
return -ERESTARTSYS;
- if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
saa7146_vbi_uops.release(dev,file);
if (dev->ext_vv_data->vbi_fops.release)
@@ -280,6 +277,8 @@ static int fops_release(struct file *file)
saa7146_video_uops.release(dev,file);
}
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
module_put(dev->ext->module);
file->private_data = NULL;
kfree(fh);
@@ -291,19 +290,22 @@ static int fops_release(struct file *file)
static int fops_mmap(struct file *file, struct vm_area_struct * vma)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct videobuf_queue *q;
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER: {
DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, vma:%p\n",
file, vma);
q = &fh->video_q;
break;
}
- case V4L2_BUF_TYPE_VBI_CAPTURE: {
+ case VFL_TYPE_VBI: {
DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, vma:%p\n",
file, vma);
+ if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT)
+ return -ENODEV;
q = &fh->vbi_q;
break;
}
@@ -317,15 +319,19 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
struct videobuf_queue *q;
+ unsigned int res = v4l2_ctrl_poll(file, wait);
DEB_EE("file:%p, poll:%p\n", file, wait);
- if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
+ if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT)
+ return res | POLLOUT | POLLWRNORM;
if( 0 == fh->vbi_q.streaming )
- return videobuf_poll_stream(file, &fh->vbi_q, wait);
+ return res | videobuf_poll_stream(file, &fh->vbi_q, wait);
q = &fh->vbi_q;
} else {
DEB_D("using video queue\n");
@@ -337,31 +343,32 @@ static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
if (!buf) {
DEB_D("buf == NULL!\n");
- return POLLERR;
+ return res | POLLERR;
}
poll_wait(file, &buf->done, wait);
if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) {
DEB_D("poll succeeded!\n");
- return POLLIN|POLLRDNORM;
+ return res | POLLIN | POLLRDNORM;
}
DEB_D("nothing to poll for, buf->state:%d\n", buf->state);
- return 0;
+ return res;
}
static ssize_t fops_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
/*
DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: file:%p, data:%p, count:%lun",
file, data, (unsigned long)count);
*/
return saa7146_video_uops.read(file,data,count,ppos);
- case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case VFL_TYPE_VBI:
/*
DEB_EE("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, data:%p, count:%lu\n",
file, data, (unsigned long)count);
@@ -377,12 +384,13 @@ static ssize_t fops_read(struct file *file, char __user *data, size_t count, lof
static ssize_t fops_write(struct file *file, const char __user *data, size_t count, loff_t *ppos)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
return -EINVAL;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case VFL_TYPE_VBI:
if (fh->dev->ext_vv_data->vbi_fops.write)
return fh->dev->ext_vv_data->vbi_fops.write(file, data, count, ppos);
else
@@ -429,8 +437,15 @@ static void vv_callback(struct saa7146_dev *dev, unsigned long status)
}
}
+static const struct v4l2_ctrl_ops saa7146_ctrl_ops = {
+ .s_ctrl = saa7146_s_ctrl,
+};
+
int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
{
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
+ struct v4l2_pix_format *fmt;
+ struct v4l2_vbi_format *vbi;
struct saa7146_vv *vv;
int err;
@@ -438,12 +453,32 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
if (err)
return err;
+ v4l2_ctrl_handler_init(hdl, 6);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 127, 1, 64);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (hdl->error) {
+ err = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return err;
+ }
+ dev->v4l2_dev.ctrl_handler = hdl;
+
vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL);
if (vv == NULL) {
ERR("out of memory. aborting.\n");
+ v4l2_ctrl_handler_free(hdl);
return -ENOMEM;
}
- ext_vv->ops = saa7146_video_ioctl_ops;
+ ext_vv->vid_ops = saa7146_video_ioctl_ops;
+ ext_vv->vbi_ops = saa7146_vbi_ioctl_ops;
ext_vv->core_ops = &saa7146_video_ioctl_ops;
DEB_EE("dev:%p\n", dev);
@@ -463,6 +498,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
if( NULL == vv->d_clipping.cpu_addr ) {
ERR("out of memory. aborting.\n");
kfree(vv);
+ v4l2_ctrl_handler_free(hdl);
return -1;
}
memset(vv->d_clipping.cpu_addr, 0x0, SAA7146_CLIPPING_MEM);
@@ -471,6 +507,39 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
saa7146_vbi_uops.init(dev,vv);
+ fmt = &vv->ov_fb.fmt;
+ fmt->width = vv->standard->h_max_out;
+ fmt->height = vv->standard->v_max_out;
+ fmt->pixelformat = V4L2_PIX_FMT_RGB565;
+ fmt->bytesperline = 2 * fmt->width;
+ fmt->sizeimage = fmt->bytesperline * fmt->height;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt = &vv->video_fmt;
+ fmt->width = 384;
+ fmt->height = 288;
+ fmt->pixelformat = V4L2_PIX_FMT_BGR24;
+ fmt->field = V4L2_FIELD_ANY;
+ fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ fmt->bytesperline = 3 * fmt->width;
+ fmt->sizeimage = fmt->bytesperline * fmt->height;
+
+ vbi = &vv->vbi_fmt;
+ vbi->sampling_rate = 27000000;
+ vbi->offset = 248; /* todo */
+ vbi->samples_per_line = 720 * 2;
+ vbi->sample_format = V4L2_PIX_FMT_GREY;
+
+ /* fixme: this only works for PAL */
+ vbi->start[0] = 5;
+ vbi->count[0] = 16;
+ vbi->start[1] = 312;
+ vbi->count[1] = 16;
+
+ init_timer(&vv->vbi_read_timeout);
+
+ vv->ov_fb.capability = V4L2_FBUF_CAP_LIST_CLIPPING;
+ vv->ov_fb.flags = V4L2_FBUF_FLAG_PRIMARY;
dev->vv_data = vv;
dev->vv_callback = &vv_callback;
@@ -486,6 +555,7 @@ int saa7146_vv_release(struct saa7146_dev* dev)
v4l2_device_unregister(&dev->v4l2_dev);
pci_free_consistent(dev->pci, SAA7146_CLIPPING_MEM, vv->d_clipping.cpu_addr, vv->d_clipping.dma_handle);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(vv);
dev->vv_data = NULL;
dev->vv_callback = NULL;
@@ -509,10 +579,19 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
return -ENOMEM;
vfd->fops = &video_fops;
- vfd->ioctl_ops = &dev->ext_vv_data->ops;
+ if (type == VFL_TYPE_GRABBER)
+ vfd->ioctl_ops = &dev->ext_vv_data->vid_ops;
+ else
+ vfd->ioctl_ops = &dev->ext_vv_data->vbi_ops;
vfd->release = video_device_release;
+ /* Locking in file operations other than ioctl should be done by
+ the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->lock = &dev->v4l2_lock;
+ vfd->v4l2_dev = &dev->v4l2_dev;
vfd->tvnorms = 0;
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
for (i = 0; i < dev->ext_vv_data->num_stds; i++)
vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
strlcpy(vfd->name, name, sizeof(vfd->name));
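
The saa7146_fops.c changes above convert the driver from hand-rolled VIDIOC_QUERYCTRL/G_CTRL/S_CTRL handlers to the v4l2_ctrl_handler framework. A reduced sketch of that registration pattern, using the same helpers the patch calls; my_ctrl_ops, my_s_ctrl and my_init_controls are placeholder names, not the driver's real symbols:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>

static int my_s_ctrl(struct v4l2_ctrl *ctrl)
{
	/* apply ctrl->val to the hardware, keyed on ctrl->id */
	return 0;
}

static const struct v4l2_ctrl_ops my_ctrl_ops = {
	.s_ctrl = my_s_ctrl,
};

static int my_init_controls(struct v4l2_device *v4l2_dev,
			    struct v4l2_ctrl_handler *hdl)
{
	v4l2_ctrl_handler_init(hdl, 2);
	v4l2_ctrl_new_std(hdl, &my_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(hdl, &my_ctrl_ops, V4L2_CID_CONTRAST, 0, 127, 1, 64);
	if (hdl->error) {
		int err = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return err;
	}
	v4l2_dev->ctrl_handler = hdl;
	return 0;
}
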
diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
index bc1f545c95cb..be746d1aee9a 100644
--- a/drivers/media/common/saa7146_hlp.c
+++ b/drivers/media/common/saa7146_hlp.c
@@ -343,9 +343,9 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
struct saa7146_vv *vv = dev->vv_data;
__le32 *clipping = vv->d_clipping.cpu_addr;
- int width = fh->ov.win.w.width;
- int height = fh->ov.win.w.height;
- int clipcount = fh->ov.nclips;
+ int width = vv->ov.win.w.width;
+ int height = vv->ov.win.w.height;
+ int clipcount = vv->ov.nclips;
u32 line_list[32];
u32 pixel_list[32];
@@ -365,10 +365,10 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
for(i = 0; i < clipcount; i++) {
int l = 0, r = 0, t = 0, b = 0;
- x[i] = fh->ov.clips[i].c.left;
- y[i] = fh->ov.clips[i].c.top;
- w[i] = fh->ov.clips[i].c.width;
- h[i] = fh->ov.clips[i].c.height;
+ x[i] = vv->ov.clips[i].c.left;
+ y[i] = vv->ov.clips[i].c.top;
+ w[i] = vv->ov.clips[i].c.width;
+ h[i] = vv->ov.clips[i].c.height;
if( w[i] < 0) {
x[i] += w[i]; w[i] = -w[i];
@@ -485,13 +485,14 @@ static void saa7146_disable_clipping(struct saa7146_dev *dev)
static void saa7146_set_clipping_rect(struct saa7146_fh *fh)
{
struct saa7146_dev *dev = fh->dev;
- enum v4l2_field field = fh->ov.win.field;
+ struct saa7146_vv *vv = dev->vv_data;
+ enum v4l2_field field = vv->ov.win.field;
struct saa7146_video_dma vdma2;
u32 clip_format;
u32 arbtr_ctrl;
/* check clipcount, disable clipping if clipcount == 0*/
- if( fh->ov.nclips == 0 ) {
+ if (vv->ov.nclips == 0) {
saa7146_disable_clipping(dev);
return;
}
@@ -651,8 +652,8 @@ int saa7146_enable_overlay(struct saa7146_fh *fh)
struct saa7146_dev *dev = fh->dev;
struct saa7146_vv *vv = dev->vv_data;
- saa7146_set_window(dev, fh->ov.win.w.width, fh->ov.win.w.height, fh->ov.win.field);
- saa7146_set_position(dev, fh->ov.win.w.left, fh->ov.win.w.top, fh->ov.win.w.height, fh->ov.win.field, vv->ov_fmt->pixelformat);
+ saa7146_set_window(dev, vv->ov.win.w.width, vv->ov.win.w.height, vv->ov.win.field);
+ saa7146_set_position(dev, vv->ov.win.w.left, vv->ov.win.w.top, vv->ov.win.w.height, vv->ov.win.field, vv->ov_fmt->pixelformat);
saa7146_set_output_format(dev, vv->ov_fmt->trans);
saa7146_set_clipping_rect(fh);
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index b2e718343739..1e71e374bbfe 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -211,7 +211,7 @@ static int buffer_activate(struct saa7146_dev *dev,
DEB_VBI("dev:%p, buf:%p, next:%p\n", dev, buf, next);
saa7146_set_vbi_capture(dev,buf,next);
- mod_timer(&vv->vbi_q.timeout, jiffies+BUFFER_TIMEOUT);
+ mod_timer(&vv->vbi_dmaq.timeout, jiffies+BUFFER_TIMEOUT);
return 0;
}
@@ -294,7 +294,7 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
DEB_VBI("vb:%p\n", vb);
- saa7146_buffer_queue(dev,&vv->vbi_q,buf);
+ saa7146_buffer_queue(dev, &vv->vbi_dmaq, buf);
}
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
@@ -335,16 +335,15 @@ static void vbi_stop(struct saa7146_fh *fh, struct file *file)
/* shut down dma 3 transfers */
saa7146_write(dev, MC1, MASK_20);
- if (vv->vbi_q.curr) {
- saa7146_buffer_finish(dev,&vv->vbi_q,VIDEOBUF_DONE);
- }
+ if (vv->vbi_dmaq.curr)
+ saa7146_buffer_finish(dev, &vv->vbi_dmaq, VIDEOBUF_DONE);
videobuf_queue_cancel(&fh->vbi_q);
vv->vbi_streaming = NULL;
- del_timer(&vv->vbi_q.timeout);
- del_timer(&fh->vbi_read_timeout);
+ del_timer(&vv->vbi_dmaq.timeout);
+ del_timer(&vv->vbi_read_timeout);
spin_unlock_irqrestore(&dev->slock, flags);
}
@@ -364,12 +363,12 @@ static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
DEB_VBI("dev:%p\n", dev);
- INIT_LIST_HEAD(&vv->vbi_q.queue);
+ INIT_LIST_HEAD(&vv->vbi_dmaq.queue);
- init_timer(&vv->vbi_q.timeout);
- vv->vbi_q.timeout.function = saa7146_buffer_timeout;
- vv->vbi_q.timeout.data = (unsigned long)(&vv->vbi_q);
- vv->vbi_q.dev = dev;
+ init_timer(&vv->vbi_dmaq.timeout);
+ vv->vbi_dmaq.timeout.function = saa7146_buffer_timeout;
+ vv->vbi_dmaq.timeout.data = (unsigned long)(&vv->vbi_dmaq);
+ vv->vbi_dmaq.dev = dev;
init_waitqueue_head(&vv->vbi_wq);
}
@@ -377,6 +376,7 @@ static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
static int vbi_open(struct saa7146_dev *dev, struct file *file)
{
struct saa7146_fh *fh = file->private_data;
+ struct saa7146_vv *vv = fh->dev->vv_data;
u32 arbtr_ctrl = saa7146_read(dev, PCI_BT_V1);
int ret = 0;
@@ -395,19 +395,6 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
saa7146_write(dev, PCI_BT_V1, arbtr_ctrl);
saa7146_write(dev, MC2, (MASK_04|MASK_20));
- memset(&fh->vbi_fmt,0,sizeof(fh->vbi_fmt));
-
- fh->vbi_fmt.sampling_rate = 27000000;
- fh->vbi_fmt.offset = 248; /* todo */
- fh->vbi_fmt.samples_per_line = vbi_pixel_to_capture;
- fh->vbi_fmt.sample_format = V4L2_PIX_FMT_GREY;
-
- /* fixme: this only works for PAL */
- fh->vbi_fmt.start[0] = 5;
- fh->vbi_fmt.count[0] = 16;
- fh->vbi_fmt.start[1] = 312;
- fh->vbi_fmt.count[1] = 16;
-
videobuf_queue_sg_init(&fh->vbi_q, &vbi_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
@@ -415,9 +402,8 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
sizeof(struct saa7146_buf),
file, &dev->v4l2_lock);
- init_timer(&fh->vbi_read_timeout);
- fh->vbi_read_timeout.function = vbi_read_timeout;
- fh->vbi_read_timeout.data = (unsigned long)file;
+ vv->vbi_read_timeout.function = vbi_read_timeout;
+ vv->vbi_read_timeout.data = (unsigned long)file;
/* initialize the brs */
if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) {
@@ -453,16 +439,16 @@ static void vbi_irq_done(struct saa7146_dev *dev, unsigned long status)
struct saa7146_vv *vv = dev->vv_data;
spin_lock(&dev->slock);
- if (vv->vbi_q.curr) {
- DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_q.curr);
+ if (vv->vbi_dmaq.curr) {
+ DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_dmaq.curr);
/* this must be += 2, one count for each field */
vv->vbi_fieldcount+=2;
- vv->vbi_q.curr->vb.field_count = vv->vbi_fieldcount;
- saa7146_buffer_finish(dev,&vv->vbi_q,VIDEOBUF_DONE);
+ vv->vbi_dmaq.curr->vb.field_count = vv->vbi_fieldcount;
+ saa7146_buffer_finish(dev, &vv->vbi_dmaq, VIDEOBUF_DONE);
} else {
DEB_VBI("dev:%p\n", dev);
}
- saa7146_buffer_next(dev,&vv->vbi_q,1);
+ saa7146_buffer_next(dev, &vv->vbi_dmaq, 1);
spin_unlock(&dev->slock);
}
@@ -488,7 +474,7 @@ static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff
return -EBUSY;
}
- mod_timer(&fh->vbi_read_timeout, jiffies+BUFFER_TIMEOUT);
+ mod_timer(&vv->vbi_read_timeout, jiffies+BUFFER_TIMEOUT);
ret = videobuf_read_stream(&fh->vbi_q, data, count, ppos, 1,
file->f_flags & O_NONBLOCK);
/*
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index ce30533fd972..6d14785d4747 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -2,6 +2,8 @@
#include <media/saa7146_vv.h>
#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
#include <linux/module.h>
static int max_memory = 32;
@@ -112,8 +114,8 @@ int saa7146_start_preview(struct saa7146_fh *fh)
DEB_EE("dev:%p, fh:%p\n", dev, fh);
- /* check if we have overlay informations */
- if( NULL == fh->ov.fh ) {
+ /* check if we have overlay information */
+ if (vv->ov.fh == NULL) {
DEB_D("no overlay data available. try S_FMT first.\n");
return -EAGAIN;
}
@@ -139,19 +141,18 @@ int saa7146_start_preview(struct saa7146_fh *fh)
return -EBUSY;
}
- fmt.fmt.win = fh->ov.win;
+ fmt.fmt.win = vv->ov.win;
err = vidioc_try_fmt_vid_overlay(NULL, fh, &fmt);
if (0 != err) {
saa7146_res_free(vv->video_fh, RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP);
return -EBUSY;
}
- fh->ov.win = fmt.fmt.win;
- vv->ov_data = &fh->ov;
+ vv->ov.win = fmt.fmt.win;
DEB_D("%dx%d+%d+%d %s field=%s\n",
- fh->ov.win.w.width, fh->ov.win.w.height,
- fh->ov.win.w.left, fh->ov.win.w.top,
- vv->ov_fmt->name, v4l2_field_names[fh->ov.win.field]);
+ vv->ov.win.w.width, vv->ov.win.w.height,
+ vv->ov.win.w.left, vv->ov.win.w.top,
+ vv->ov_fmt->name, v4l2_field_names[vv->ov.win.field]);
if (0 != (ret = saa7146_enable_overlay(fh))) {
DEB_D("enabling overlay failed: %d\n", ret);
@@ -202,65 +203,6 @@ int saa7146_stop_preview(struct saa7146_fh *fh)
EXPORT_SYMBOL_GPL(saa7146_stop_preview);
/********************************************************************************/
-/* device controls */
-
-static struct v4l2_queryctrl controls[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- },{
- .id = V4L2_CID_CONTRAST,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- },{
- .id = V4L2_CID_SATURATION,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- },{
- .id = V4L2_CID_VFLIP,
- .name = "Vertical Flip",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },{
- .id = V4L2_CID_HFLIP,
- .name = "Horizontal Flip",
- .minimum = 0,
- .maximum = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },
-};
-static int NUM_CONTROLS = sizeof(controls)/sizeof(struct v4l2_queryctrl);
-
-#define V4L2_CID_PRIVATE_LASTP1 (V4L2_CID_PRIVATE_BASE + 0)
-
-static struct v4l2_queryctrl* ctrl_by_id(int id)
-{
- int i;
-
- for (i = 0; i < NUM_CONTROLS; i++)
- if (controls[i].id == id)
- return controls+i;
- return NULL;
-}
-
-/********************************************************************************/
/* common pagetable functions */
static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *buf)
@@ -413,7 +355,7 @@ static int video_begin(struct saa7146_fh *fh)
}
}
- fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
+ fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat);
/* we need to have a valid format set here */
BUG_ON(NULL == fmt);
@@ -465,7 +407,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
return -EBUSY;
}
- fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
+ fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat);
/* we need to have a valid format set here */
BUG_ON(NULL == fmt);
@@ -504,18 +446,25 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
static int vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
+ struct video_device *vdev = video_devdata(file);
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
strcpy((char *)cap->driver, "saa7146 v4l2");
strlcpy((char *)cap->card, dev->ext->name, sizeof(cap->card));
sprintf((char *)cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = SAA7146_VERSION_CODE;
- cap->capabilities =
+ cap->device_caps =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_OVERLAY |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
- cap->capabilities |= dev->ext_vv_data->capabilities;
+ cap->device_caps |= dev->ext_vv_data->capabilities;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ if (vdev->vfl_type == VFL_TYPE_GRABBER)
+ cap->device_caps &=
+ ~(V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT);
+ else
+ cap->device_caps &=
+ ~(V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_AUDIO);
return 0;
}
@@ -526,6 +475,7 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f
*fb = vv->ov_fb;
fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
+ fb->flags = V4L2_FBUF_FLAG_PRIMARY;
return 0;
}
@@ -579,135 +529,58 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtd
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c)
+int saa7146_s_ctrl(struct v4l2_ctrl *ctrl)
{
- const struct v4l2_queryctrl *ctrl;
-
- if ((c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1) &&
- (c->id < V4L2_CID_PRIVATE_BASE ||
- c->id >= V4L2_CID_PRIVATE_LASTP1))
- return -EINVAL;
-
- ctrl = ctrl_by_id(c->id);
- if (ctrl == NULL)
- return -EINVAL;
-
- DEB_EE("VIDIOC_QUERYCTRL: id:%d\n", c->id);
- *c = *ctrl;
- return 0;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct saa7146_dev *dev = container_of(ctrl->handler,
+ struct saa7146_dev, ctrl_handler);
struct saa7146_vv *vv = dev->vv_data;
- const struct v4l2_queryctrl *ctrl;
- u32 value = 0;
+ u32 val;
- ctrl = ctrl_by_id(c->id);
- if (NULL == ctrl)
- return -EINVAL;
- switch (c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- value = saa7146_read(dev, BCS_CTRL);
- c->value = 0xff & (value >> 24);
- DEB_D("V4L2_CID_BRIGHTNESS: %d\n", c->value);
- break;
- case V4L2_CID_CONTRAST:
- value = saa7146_read(dev, BCS_CTRL);
- c->value = 0x7f & (value >> 16);
- DEB_D("V4L2_CID_CONTRAST: %d\n", c->value);
- break;
- case V4L2_CID_SATURATION:
- value = saa7146_read(dev, BCS_CTRL);
- c->value = 0x7f & (value >> 0);
- DEB_D("V4L2_CID_SATURATION: %d\n", c->value);
- break;
- case V4L2_CID_VFLIP:
- c->value = vv->vflip;
- DEB_D("V4L2_CID_VFLIP: %d\n", c->value);
- break;
- case V4L2_CID_HFLIP:
- c->value = vv->hflip;
- DEB_D("V4L2_CID_HFLIP: %d\n", c->value);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct saa7146_vv *vv = dev->vv_data;
- const struct v4l2_queryctrl *ctrl;
-
- ctrl = ctrl_by_id(c->id);
- if (NULL == ctrl) {
- DEB_D("unknown control %d\n", c->id);
- return -EINVAL;
- }
-
- switch (ctrl->type) {
- case V4L2_CTRL_TYPE_BOOLEAN:
- case V4L2_CTRL_TYPE_MENU:
- case V4L2_CTRL_TYPE_INTEGER:
- if (c->value < ctrl->minimum)
- c->value = ctrl->minimum;
- if (c->value > ctrl->maximum)
- c->value = ctrl->maximum;
- break;
- default:
- /* nothing */;
- }
-
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS: {
- u32 value = saa7146_read(dev, BCS_CTRL);
- value &= 0x00ffffff;
- value |= (c->value << 24);
- saa7146_write(dev, BCS_CTRL, value);
+ val = saa7146_read(dev, BCS_CTRL);
+ val &= 0x00ffffff;
+ val |= (ctrl->val << 24);
+ saa7146_write(dev, BCS_CTRL, val);
saa7146_write(dev, MC2, MASK_22 | MASK_06);
break;
- }
- case V4L2_CID_CONTRAST: {
- u32 value = saa7146_read(dev, BCS_CTRL);
- value &= 0xff00ffff;
- value |= (c->value << 16);
- saa7146_write(dev, BCS_CTRL, value);
+
+ case V4L2_CID_CONTRAST:
+ val = saa7146_read(dev, BCS_CTRL);
+ val &= 0xff00ffff;
+ val |= (ctrl->val << 16);
+ saa7146_write(dev, BCS_CTRL, val);
saa7146_write(dev, MC2, MASK_22 | MASK_06);
break;
- }
- case V4L2_CID_SATURATION: {
- u32 value = saa7146_read(dev, BCS_CTRL);
- value &= 0xffffff00;
- value |= (c->value << 0);
- saa7146_write(dev, BCS_CTRL, value);
+
+ case V4L2_CID_SATURATION:
+ val = saa7146_read(dev, BCS_CTRL);
+ val &= 0xffffff00;
+ val |= (ctrl->val << 0);
+ saa7146_write(dev, BCS_CTRL, val);
saa7146_write(dev, MC2, MASK_22 | MASK_06);
break;
- }
+
case V4L2_CID_HFLIP:
/* fixme: we can support changing VFLIP and HFLIP here... */
- if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_D("V4L2_CID_HFLIP while active capture\n");
+ if ((vv->video_status & STATUS_CAPTURE))
return -EBUSY;
- }
- vv->hflip = c->value;
+ vv->hflip = ctrl->val;
break;
+
case V4L2_CID_VFLIP:
- if (IS_CAPTURE_ACTIVE(fh) != 0) {
- DEB_D("V4L2_CID_VFLIP while active capture\n");
+ if ((vv->video_status & STATUS_CAPTURE))
return -EBUSY;
- }
- vv->vflip = c->value;
+ vv->vflip = ctrl->val;
break;
+
default:
return -EINVAL;
}
- if (IS_OVERLAY_ACTIVE(fh) != 0) {
+ if ((vv->video_status & STATUS_OVERLAY) != 0) { /* CHECK: && (vv->video_fh == fh)) */
+ struct saa7146_fh *fh = vv->video_fh;
+
saa7146_stop_preview(fh);
saa7146_start_preview(fh);
}
@@ -720,6 +593,8 @@ static int vidioc_g_parm(struct file *file, void *fh,
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct saa7146_vv *vv = dev->vv_data;
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
parm->parm.capture.readbuffers = 1;
v4l2_video_std_frame_period(vv->standard->id,
&parm->parm.capture.timeperframe);
@@ -728,19 +603,28 @@ static int vidioc_g_parm(struct file *file, void *fh,
static int vidioc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- f->fmt.pix = ((struct saa7146_fh *)fh)->video_fmt;
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct saa7146_vv *vv = dev->vv_data;
+
+ f->fmt.pix = vv->video_fmt;
return 0;
}
static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh, struct v4l2_format *f)
{
- f->fmt.win = ((struct saa7146_fh *)fh)->ov.win;
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct saa7146_vv *vv = dev->vv_data;
+
+ f->fmt.win = vv->ov.win;
return 0;
}
static int vidioc_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- f->fmt.vbi = ((struct saa7146_fh *)fh)->vbi_fmt;
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct saa7146_vv *vv = dev->vv_data;
+
+ f->fmt.vbi = vv->vbi_fmt;
return 0;
}
@@ -787,6 +671,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_forma
}
f->fmt.pix.field = field;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
if (f->fmt.pix.width > maxw)
f->fmt.pix.width = maxw;
if (f->fmt.pix.height > maxh)
@@ -883,9 +768,9 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_forma
err = vidioc_try_fmt_vid_cap(file, fh, f);
if (0 != err)
return err;
- fh->video_fmt = f->fmt.pix;
+ vv->video_fmt = f->fmt.pix;
DEB_EE("set to pixelformat '%4.4s'\n",
- (char *)&fh->video_fmt.pixelformat);
+ (char *)&vv->video_fmt.pixelformat);
return 0;
}
@@ -900,17 +785,17 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_f
err = vidioc_try_fmt_vid_overlay(file, fh, f);
if (0 != err)
return err;
- fh->ov.win = f->fmt.win;
- fh->ov.nclips = f->fmt.win.clipcount;
- if (fh->ov.nclips > 16)
- fh->ov.nclips = 16;
- if (copy_from_user(fh->ov.clips, f->fmt.win.clips,
- sizeof(struct v4l2_clip) * fh->ov.nclips)) {
+ vv->ov.win = f->fmt.win;
+ vv->ov.nclips = f->fmt.win.clipcount;
+ if (vv->ov.nclips > 16)
+ vv->ov.nclips = 16;
+ if (copy_from_user(vv->ov.clips, f->fmt.win.clips,
+ sizeof(struct v4l2_clip) * vv->ov.nclips)) {
return -EFAULT;
}
- /* fh->ov.fh is used to indicate that we have valid overlay informations, too */
- fh->ov.fh = fh;
+ /* vv->ov.fh is used to indicate that we have valid overlay information, too */
+ vv->ov.fh = fh;
/* check if our current overlay is active */
if (IS_OVERLAY_ACTIVE(fh) != 0) {
@@ -1111,10 +996,14 @@ static int vidioc_g_chip_ident(struct file *file, void *__fh,
chip->ident = V4L2_IDENT_NONE;
chip->revision = 0;
- if (chip->match.type == V4L2_CHIP_MATCH_HOST && !chip->match.addr) {
- chip->ident = V4L2_IDENT_SAA7146;
+ if (chip->match.type == V4L2_CHIP_MATCH_HOST) {
+ if (v4l2_chip_match_host(&chip->match))
+ chip->ident = V4L2_IDENT_SAA7146;
return 0;
}
+ if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+ chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
return v4l2_device_call_until_err(&dev->v4l2_dev, 0,
core, g_chip_ident, chip);
}
@@ -1129,7 +1018,6 @@ const struct v4l2_ioctl_ops saa7146_video_ioctl_ops = {
.vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
.vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
.vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
- .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
.vidioc_g_chip_ident = vidioc_g_chip_ident,
.vidioc_overlay = vidioc_overlay,
@@ -1141,12 +1029,29 @@ const struct v4l2_ioctl_ops saa7146_video_ioctl_ops = {
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_g_std = vidioc_g_std,
.vidioc_s_std = vidioc_s_std,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_parm = vidioc_g_parm,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
+ .vidioc_g_chip_ident = vidioc_g_chip_ident,
+
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*********************************************************************************/
@@ -1161,7 +1066,7 @@ static int buffer_activate (struct saa7146_dev *dev,
buf->vb.state = VIDEOBUF_ACTIVE;
saa7146_set_capture(dev,buf,next);
- mod_timer(&vv->video_q.timeout, jiffies+BUFFER_TIMEOUT);
+ mod_timer(&vv->video_dmaq.timeout, jiffies+BUFFER_TIMEOUT);
return 0;
}
@@ -1185,44 +1090,44 @@ static int buffer_prepare(struct videobuf_queue *q,
DEB_CAP("vbuf:%p\n", vb);
/* sanity checks */
- if (fh->video_fmt.width < 48 ||
- fh->video_fmt.height < 32 ||
- fh->video_fmt.width > vv->standard->h_max_out ||
- fh->video_fmt.height > vv->standard->v_max_out) {
+ if (vv->video_fmt.width < 48 ||
+ vv->video_fmt.height < 32 ||
+ vv->video_fmt.width > vv->standard->h_max_out ||
+ vv->video_fmt.height > vv->standard->v_max_out) {
DEB_D("w (%d) / h (%d) out of bounds\n",
- fh->video_fmt.width, fh->video_fmt.height);
+ vv->video_fmt.width, vv->video_fmt.height);
return -EINVAL;
}
- size = fh->video_fmt.sizeimage;
+ size = vv->video_fmt.sizeimage;
if (0 != buf->vb.baddr && buf->vb.bsize < size) {
DEB_D("size mismatch\n");
return -EINVAL;
}
DEB_CAP("buffer_prepare [size=%dx%d,bytes=%d,fields=%s]\n",
- fh->video_fmt.width, fh->video_fmt.height,
- size, v4l2_field_names[fh->video_fmt.field]);
- if (buf->vb.width != fh->video_fmt.width ||
- buf->vb.bytesperline != fh->video_fmt.bytesperline ||
- buf->vb.height != fh->video_fmt.height ||
+ vv->video_fmt.width, vv->video_fmt.height,
+ size, v4l2_field_names[vv->video_fmt.field]);
+ if (buf->vb.width != vv->video_fmt.width ||
+ buf->vb.bytesperline != vv->video_fmt.bytesperline ||
+ buf->vb.height != vv->video_fmt.height ||
buf->vb.size != size ||
buf->vb.field != field ||
- buf->vb.field != fh->video_fmt.field ||
- buf->fmt != &fh->video_fmt) {
+ buf->vb.field != vv->video_fmt.field ||
+ buf->fmt != &vv->video_fmt) {
saa7146_dma_free(dev,q,buf);
}
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
struct saa7146_format *sfmt;
- buf->vb.bytesperline = fh->video_fmt.bytesperline;
- buf->vb.width = fh->video_fmt.width;
- buf->vb.height = fh->video_fmt.height;
+ buf->vb.bytesperline = vv->video_fmt.bytesperline;
+ buf->vb.width = vv->video_fmt.width;
+ buf->vb.height = vv->video_fmt.height;
buf->vb.size = size;
buf->vb.field = field;
- buf->fmt = &fh->video_fmt;
- buf->vb.field = fh->video_fmt.field;
+ buf->fmt = &vv->video_fmt;
+ buf->vb.field = vv->video_fmt.field;
sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat);
@@ -1258,11 +1163,12 @@ static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned
{
struct file *file = q->priv_data;
struct saa7146_fh *fh = file->private_data;
+ struct saa7146_vv *vv = fh->dev->vv_data;
if (0 == *count || *count > MAX_SAA7146_CAPTURE_BUFFERS)
*count = MAX_SAA7146_CAPTURE_BUFFERS;
- *size = fh->video_fmt.sizeimage;
+ *size = vv->video_fmt.sizeimage;
/* check if we exceed the "max_memory" parameter */
if( (*count * *size) > (max_memory*1048576) ) {
@@ -1283,7 +1189,7 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
struct saa7146_buf *buf = (struct saa7146_buf *)vb;
DEB_CAP("vbuf:%p\n", vb);
- saa7146_buffer_queue(fh->dev,&vv->video_q,buf);
+ saa7146_buffer_queue(fh->dev, &vv->video_dmaq, buf);
}
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
@@ -1312,12 +1218,12 @@ static struct videobuf_queue_ops video_qops = {
static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
- INIT_LIST_HEAD(&vv->video_q.queue);
+ INIT_LIST_HEAD(&vv->video_dmaq.queue);
- init_timer(&vv->video_q.timeout);
- vv->video_q.timeout.function = saa7146_buffer_timeout;
- vv->video_q.timeout.data = (unsigned long)(&vv->video_q);
- vv->video_q.dev = dev;
+ init_timer(&vv->video_dmaq.timeout);
+ vv->video_dmaq.timeout.function = saa7146_buffer_timeout;
+ vv->video_dmaq.timeout.data = (unsigned long)(&vv->video_dmaq);
+ vv->video_dmaq.dev = dev;
/* set some default values */
vv->standard = &dev->ext_vv_data->stds[0];
@@ -1331,15 +1237,6 @@ static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
static int video_open(struct saa7146_dev *dev, struct file *file)
{
struct saa7146_fh *fh = file->private_data;
- struct saa7146_format *sfmt;
-
- fh->video_fmt.width = 384;
- fh->video_fmt.height = 288;
- fh->video_fmt.pixelformat = V4L2_PIX_FMT_BGR24;
- fh->video_fmt.bytesperline = 0;
- fh->video_fmt.field = V4L2_FIELD_ANY;
- sfmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat);
- fh->video_fmt.sizeimage = (fh->video_fmt.width * fh->video_fmt.height * sfmt->depth)/8;
videobuf_queue_sg_init(&fh->video_q, &video_qops,
&dev->pci->dev, &dev->slock,
@@ -1371,7 +1268,7 @@ static void video_close(struct saa7146_dev *dev, struct file *file)
static void video_irq_done(struct saa7146_dev *dev, unsigned long st)
{
struct saa7146_vv *vv = dev->vv_data;
- struct saa7146_dmaqueue *q = &vv->video_q;
+ struct saa7146_dmaqueue *q = &vv->video_dmaq;
spin_lock(&dev->slock);
DEB_CAP("called\n");
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 4a6d5cef3964..bbf4945149a9 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -204,6 +204,27 @@ config MEDIA_TUNER_TDA18218
help
NXP TDA18218 silicon tuner driver.
+config MEDIA_TUNER_FC0011
+ tristate "Fitipower FC0011 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ Fitipower FC0011 silicon tuner driver.
+
+config MEDIA_TUNER_FC0012
+ tristate "Fitipower FC0012 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ Fitipower FC0012 silicon tuner driver.
+
+config MEDIA_TUNER_FC0013
+ tristate "Fitipower FC0013 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ Fitipower FC0013 silicon tuner driver.
+
config MEDIA_TUNER_TDA18212
tristate "NXP TDA18212 silicon tuner"
depends on VIDEO_MEDIA && I2C
@@ -211,4 +232,10 @@ config MEDIA_TUNER_TDA18212
help
NXP TDA18212 silicon tuner driver.
+config MEDIA_TUNER_TUA9001
+ tristate "Infineon TUA 9001 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ Infineon TUA 9001 silicon tuner driver.
endmenu
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index f80407eb8998..891b80e60808 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -28,6 +28,10 @@ obj-$(CONFIG_MEDIA_TUNER_MC44S803) += mc44s803.o
obj-$(CONFIG_MEDIA_TUNER_MAX2165) += max2165.o
obj-$(CONFIG_MEDIA_TUNER_TDA18218) += tda18218.o
obj-$(CONFIG_MEDIA_TUNER_TDA18212) += tda18212.o
+obj-$(CONFIG_MEDIA_TUNER_TUA9001) += tua9001.o
+obj-$(CONFIG_MEDIA_TUNER_FC0011) += fc0011.o
+obj-$(CONFIG_MEDIA_TUNER_FC0012) += fc0012.o
+obj-$(CONFIG_MEDIA_TUNER_FC0013) += fc0013.o
ccflags-y += -I$(srctree)/drivers/media/dvb/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb/frontends
diff --git a/drivers/media/common/tuners/fc0011.c b/drivers/media/common/tuners/fc0011.c
new file mode 100644
index 000000000000..e4882546c283
--- /dev/null
+++ b/drivers/media/common/tuners/fc0011.c
@@ -0,0 +1,524 @@
+/*
+ * Fitipower FC0011 tuner driver
+ *
+ * Copyright (C) 2012 Michael Buesch <m@bues.ch>
+ *
+ * Derived from FC0012 tuner driver:
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "fc0011.h"
+
+
+/* Tuner registers */
+enum {
+ FC11_REG_0,
+ FC11_REG_FA, /* FA */
+ FC11_REG_FP, /* FP */
+ FC11_REG_XINHI, /* XIN high 8 bit */
+ FC11_REG_XINLO, /* XIN low 8 bit */
+ FC11_REG_VCO, /* VCO */
+ FC11_REG_VCOSEL, /* VCO select */
+ FC11_REG_7, /* Unknown tuner reg 7 */
+ FC11_REG_8, /* Unknown tuner reg 8 */
+ FC11_REG_9,
+ FC11_REG_10, /* Unknown tuner reg 10 */
+ FC11_REG_11, /* Unknown tuner reg 11 */
+ FC11_REG_12,
+ FC11_REG_RCCAL, /* RC calibrate */
+ FC11_REG_VCOCAL, /* VCO calibrate */
+ FC11_REG_15,
+ FC11_REG_16, /* Unknown tuner reg 16 */
+ FC11_REG_17,
+
+ FC11_NR_REGS, /* Number of registers */
+};
+
+enum FC11_REG_VCOSEL_bits {
+ FC11_VCOSEL_2 = 0x08, /* VCO select 2 */
+ FC11_VCOSEL_1 = 0x10, /* VCO select 1 */
+ FC11_VCOSEL_CLKOUT = 0x20, /* Fix clock out */
+ FC11_VCOSEL_BW7M = 0x40, /* 7MHz bw */
+ FC11_VCOSEL_BW6M = 0x80, /* 6MHz bw */
+};
+
+enum FC11_REG_RCCAL_bits {
+ FC11_RCCAL_FORCE = 0x10, /* force */
+};
+
+enum FC11_REG_VCOCAL_bits {
+ FC11_VCOCAL_RUN = 0, /* VCO calibration run */
+ FC11_VCOCAL_VALUEMASK = 0x3F, /* VCO calibration value mask */
+ FC11_VCOCAL_OK = 0x40, /* VCO calibration Ok */
+ FC11_VCOCAL_RESET = 0x80, /* VCO calibration reset */
+};
+
+
+struct fc0011_priv {
+ struct i2c_adapter *i2c;
+ u8 addr;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+
+static int fc0011_writereg(struct fc0011_priv *priv, u8 reg, u8 val)
+{
+ u8 buf[2] = { reg, val };
+ struct i2c_msg msg = { .addr = priv->addr,
+ .flags = 0, .buf = buf, .len = 2 };
+
+ if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ dev_err(&priv->i2c->dev,
+ "I2C write reg failed, reg: %02x, val: %02x\n",
+ reg, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int fc0011_readreg(struct fc0011_priv *priv, u8 reg, u8 *val)
+{
+ u8 dummy;
+ struct i2c_msg msg[2] = {
+ { .addr = priv->addr,
+ .flags = 0, .buf = &reg, .len = 1 },
+ { .addr = priv->addr,
+ .flags = I2C_M_RD, .buf = val ? : &dummy, .len = 1 },
+ };
+
+ if (i2c_transfer(priv->i2c, msg, 2) != 2) {
+ dev_err(&priv->i2c->dev,
+ "I2C read failed, reg: %02x\n", reg);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int fc0011_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+
+ return 0;
+}
+
+static int fc0011_init(struct dvb_frontend *fe)
+{
+ struct fc0011_priv *priv = fe->tuner_priv;
+ int err;
+
+ if (WARN_ON(!fe->callback))
+ return -EINVAL;
+
+ err = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC0011_FE_CALLBACK_POWER, priv->addr);
+ if (err) {
+ dev_err(&priv->i2c->dev, "Power-on callback failed\n");
+ return err;
+ }
+ err = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC0011_FE_CALLBACK_RESET, priv->addr);
+ if (err) {
+ dev_err(&priv->i2c->dev, "Reset callback failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Initiate VCO calibration */
+static int fc0011_vcocal_trigger(struct fc0011_priv *priv)
+{
+ int err;
+
+ err = fc0011_writereg(priv, FC11_REG_VCOCAL, FC11_VCOCAL_RESET);
+ if (err)
+ return err;
+ err = fc0011_writereg(priv, FC11_REG_VCOCAL, FC11_VCOCAL_RUN);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Read VCO calibration value */
+static int fc0011_vcocal_read(struct fc0011_priv *priv, u8 *value)
+{
+ int err;
+
+ err = fc0011_writereg(priv, FC11_REG_VCOCAL, FC11_VCOCAL_RUN);
+ if (err)
+ return err;
+ usleep_range(10000, 20000);
+ err = fc0011_readreg(priv, FC11_REG_VCOCAL, value);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int fc0011_set_params(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct fc0011_priv *priv = fe->tuner_priv;
+ int err;
+ unsigned int i, vco_retries;
+ u32 freq = p->frequency / 1000;
+ u32 bandwidth = p->bandwidth_hz / 1000;
+ u32 fvco, xin, xdiv, xdivr;
+ u16 frac;
+ u8 fa, fp, vco_sel, vco_cal;
+ u8 regs[FC11_NR_REGS] = { };
+
+ regs[FC11_REG_7] = 0x0F;
+ regs[FC11_REG_8] = 0x3E;
+ regs[FC11_REG_10] = 0xB8;
+ regs[FC11_REG_11] = 0x80;
+ regs[FC11_REG_RCCAL] = 0x04;
+ err = fc0011_writereg(priv, FC11_REG_7, regs[FC11_REG_7]);
+ err |= fc0011_writereg(priv, FC11_REG_8, regs[FC11_REG_8]);
+ err |= fc0011_writereg(priv, FC11_REG_10, regs[FC11_REG_10]);
+ err |= fc0011_writereg(priv, FC11_REG_11, regs[FC11_REG_11]);
+ err |= fc0011_writereg(priv, FC11_REG_RCCAL, regs[FC11_REG_RCCAL]);
+ if (err)
+ return -EIO;
+
+ /* Set VCO freq and VCO div */
+ if (freq < 54000) {
+ fvco = freq * 64;
+ regs[FC11_REG_VCO] = 0x82;
+ } else if (freq < 108000) {
+ fvco = freq * 32;
+ regs[FC11_REG_VCO] = 0x42;
+ } else if (freq < 216000) {
+ fvco = freq * 16;
+ regs[FC11_REG_VCO] = 0x22;
+ } else if (freq < 432000) {
+ fvco = freq * 8;
+ regs[FC11_REG_VCO] = 0x12;
+ } else {
+ fvco = freq * 4;
+ regs[FC11_REG_VCO] = 0x0A;
+ }
+
+ /* Calc XIN. The PLL reference frequency is 18 MHz. */
+ xdiv = fvco / 18000;
+ frac = fvco - xdiv * 18000;
+ frac = (frac << 15) / 18000;
+ if (frac >= 16384)
+ frac += 32786;
+ if (!frac)
+ xin = 0;
+ else if (frac < 511)
+ xin = 512;
+ else if (frac < 65026)
+ xin = frac;
+ else
+ xin = 65024;
+ regs[FC11_REG_XINHI] = xin >> 8;
+ regs[FC11_REG_XINLO] = xin;
+
+ /* Calc FP and FA */
+ xdivr = xdiv;
+ if (fvco - xdiv * 18000 >= 9000)
+ xdivr += 1; /* round */
+ fp = xdivr / 8;
+ fa = xdivr - fp * 8;
+ if (fa < 2) {
+ fp -= 1;
+ fa += 8;
+ }
+ if (fp > 0x1F) {
+ fp &= 0x1F;
+ fa &= 0xF;
+ }
+ if (fa >= fp) {
+ dev_warn(&priv->i2c->dev,
+ "fa %02X >= fp %02X, but trying to continue\n",
+ (unsigned int)(u8)fa, (unsigned int)(u8)fp);
+ }
+ regs[FC11_REG_FA] = fa;
+ regs[FC11_REG_FP] = fp;
+
+ /* Select bandwidth */
+ switch (bandwidth) {
+ case 8000:
+ break;
+ case 7000:
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_BW7M;
+ break;
+ default:
+ dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. "
+ "Using 6000 kHz.\n",
+ bandwidth);
+ bandwidth = 6000;
+ /* fallthrough */
+ case 6000:
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_BW6M;
+ break;
+ }
+
+ /* Pre VCO select */
+ if (fvco < 2320000) {
+ vco_sel = 0;
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ } else if (fvco < 3080000) {
+ vco_sel = 1;
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_1;
+ } else {
+ vco_sel = 2;
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_2;
+ }
+
+ /* Fix for low freqs */
+ if (freq < 45000) {
+ regs[FC11_REG_FA] = 0x6;
+ regs[FC11_REG_FP] = 0x11;
+ }
+
+ /* Clock out fix */
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_CLKOUT;
+
+ /* Write the cached registers */
+ for (i = FC11_REG_FA; i <= FC11_REG_VCOSEL; i++) {
+ err = fc0011_writereg(priv, i, regs[i]);
+ if (err)
+ return err;
+ }
+
+ /* VCO calibration */
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ err = fc0011_vcocal_read(priv, &vco_cal);
+ if (err)
+ return err;
+ vco_retries = 0;
+ while (!(vco_cal & FC11_VCOCAL_OK) && vco_retries < 3) {
+ /* Reset the tuner and try again */
+ err = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC0011_FE_CALLBACK_RESET, priv->addr);
+ if (err) {
+ dev_err(&priv->i2c->dev, "Failed to reset tuner\n");
+ return err;
+ }
+ /* Reinit tuner config */
+ err = 0;
+ for (i = FC11_REG_FA; i <= FC11_REG_VCOSEL; i++)
+ err |= fc0011_writereg(priv, i, regs[i]);
+ err |= fc0011_writereg(priv, FC11_REG_7, regs[FC11_REG_7]);
+ err |= fc0011_writereg(priv, FC11_REG_8, regs[FC11_REG_8]);
+ err |= fc0011_writereg(priv, FC11_REG_10, regs[FC11_REG_10]);
+ err |= fc0011_writereg(priv, FC11_REG_11, regs[FC11_REG_11]);
+ err |= fc0011_writereg(priv, FC11_REG_RCCAL, regs[FC11_REG_RCCAL]);
+ if (err)
+ return -EIO;
+ /* VCO calibration */
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ err = fc0011_vcocal_read(priv, &vco_cal);
+ if (err)
+ return err;
+ vco_retries++;
+ }
+ if (!(vco_cal & FC11_VCOCAL_OK)) {
+ dev_err(&priv->i2c->dev,
+ "Failed to read VCO calibration value (got %02X)\n",
+ (unsigned int)vco_cal);
+ return -EIO;
+ }
+ vco_cal &= FC11_VCOCAL_VALUEMASK;
+
+ switch (vco_sel) {
+ case 0:
+ if (vco_cal < 8) {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_1;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ } else {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ }
+ break;
+ case 1:
+ if (vco_cal < 5) {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_2;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ } else if (vco_cal <= 48) {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_1;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ } else {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ }
+ break;
+ case 2:
+ if (vco_cal > 53) {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_1;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ err = fc0011_vcocal_trigger(priv);
+ if (err)
+ return err;
+ } else {
+ regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
+ regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_2;
+ err = fc0011_writereg(priv, FC11_REG_VCOSEL,
+ regs[FC11_REG_VCOSEL]);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ err = fc0011_vcocal_read(priv, NULL);
+ if (err)
+ return err;
+ usleep_range(10000, 50000);
+
+ err = fc0011_readreg(priv, FC11_REG_RCCAL, &regs[FC11_REG_RCCAL]);
+ if (err)
+ return err;
+ regs[FC11_REG_RCCAL] |= FC11_RCCAL_FORCE;
+ err = fc0011_writereg(priv, FC11_REG_RCCAL, regs[FC11_REG_RCCAL]);
+ if (err)
+ return err;
+ err = fc0011_writereg(priv, FC11_REG_16, 0xB);
+ if (err)
+ return err;
+
+ dev_dbg(&priv->i2c->dev, "Tuned to "
+ "fa=%02X fp=%02X xin=%02X%02X vco=%02X vcosel=%02X "
+ "vcocal=%02X(%u) bw=%u\n",
+ (unsigned int)regs[FC11_REG_FA],
+ (unsigned int)regs[FC11_REG_FP],
+ (unsigned int)regs[FC11_REG_XINHI],
+ (unsigned int)regs[FC11_REG_XINLO],
+ (unsigned int)regs[FC11_REG_VCO],
+ (unsigned int)regs[FC11_REG_VCOSEL],
+ (unsigned int)vco_cal, vco_retries,
+ (unsigned int)bandwidth);
+
+ priv->frequency = p->frequency;
+ priv->bandwidth = p->bandwidth_hz;
+
+ return 0;
+}
+
+static int fc0011_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct fc0011_priv *priv = fe->tuner_priv;
+
+ *frequency = priv->frequency;
+
+ return 0;
+}
+
+static int fc0011_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ *frequency = 0;
+
+ return 0;
+}
+
+static int fc0011_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct fc0011_priv *priv = fe->tuner_priv;
+
+ *bandwidth = priv->bandwidth;
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops fc0011_tuner_ops = {
+ .info = {
+ .name = "Fitipower FC0011",
+
+ .frequency_min = 45000000,
+ .frequency_max = 1000000000,
+ },
+
+ .release = fc0011_release,
+ .init = fc0011_init,
+
+ .set_params = fc0011_set_params,
+
+ .get_frequency = fc0011_get_frequency,
+ .get_if_frequency = fc0011_get_if_frequency,
+ .get_bandwidth = fc0011_get_bandwidth,
+};
+
+struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct fc0011_config *config)
+{
+ struct fc0011_priv *priv;
+
+ priv = kzalloc(sizeof(struct fc0011_priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ priv->i2c = i2c;
+ priv->addr = config->i2c_address;
+
+ fe->tuner_priv = priv;
+ fe->ops.tuner_ops = fc0011_tuner_ops;
+
+ dev_info(&priv->i2c->dev, "Fitipower FC0011 tuner attached\n");
+
+ return fe;
+}
+EXPORT_SYMBOL(fc0011_attach);
+
+MODULE_DESCRIPTION("Fitipower FC0011 silicon tuner driver");
+MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/fc0011.h b/drivers/media/common/tuners/fc0011.h
new file mode 100644
index 000000000000..0ee581f122d2
--- /dev/null
+++ b/drivers/media/common/tuners/fc0011.h
@@ -0,0 +1,41 @@
+#ifndef LINUX_FC0011_H_
+#define LINUX_FC0011_H_
+
+#include "dvb_frontend.h"
+
+
+/** struct fc0011_config - fc0011 hardware config
+ *
+ * @i2c_address: I2C bus address.
+ */
+struct fc0011_config {
+ u8 i2c_address;
+};
+
+/** enum fc0011_fe_callback_commands - Frontend callbacks
+ *
+ * @FC0011_FE_CALLBACK_POWER: Power on tuner hardware.
+ * @FC0011_FE_CALLBACK_RESET: Request a tuner reset.
+ */
+enum fc0011_fe_callback_commands {
+ FC0011_FE_CALLBACK_POWER,
+ FC0011_FE_CALLBACK_RESET,
+};
+
+#if defined(CONFIG_MEDIA_TUNER_FC0011) ||\
+ defined(CONFIG_MEDIA_TUNER_FC0011_MODULE)
+struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct fc0011_config *config);
+#else
+static inline
+struct dvb_frontend *fc0011_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ const struct fc0011_config *config)
+{
+ dev_err(&i2c->dev, "fc0011 driver disabled in Kconfig\n");
+ return NULL;
+}
+#endif
+
+#endif /* LINUX_FC0011_H_ */
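
A minimal attach sketch for the interface above (editorial illustration, not part of this patch; the bridge driver, "demo_i2c_adapter" and the 0x60 address are hypothetical, board-dependent assumptions):

static int demo_fc0011_callback(void *adapter_priv, int component, int cmd, int arg)
{
	/* board-specific power and reset wiring would live here */
	if (component != DVB_FRONTEND_COMPONENT_TUNER)
		return -EINVAL;
	switch (cmd) {
	case FC0011_FE_CALLBACK_POWER:	/* power the tuner up */
	case FC0011_FE_CALLBACK_RESET:	/* pulse the reset line */
		return 0;
	}
	return -EINVAL;
}

static const struct fc0011_config demo_fc0011_config = {
	.i2c_address	= 0x60,		/* assumed address */
};

/* in the bridge driver's frontend-attach path: */
	fe->callback = demo_fc0011_callback;
	if (!fc0011_attach(fe, demo_i2c_adapter, &demo_fc0011_config))
		return -ENODEV;
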
diff --git a/drivers/media/common/tuners/fc0012-priv.h b/drivers/media/common/tuners/fc0012-priv.h
new file mode 100644
index 000000000000..4577c917e616
--- /dev/null
+++ b/drivers/media/common/tuners/fc0012-priv.h
@@ -0,0 +1,43 @@
+/*
+ * Fitipower FC0012 tuner driver - private includes
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _FC0012_PRIV_H_
+#define _FC0012_PRIV_H_
+
+#define LOG_PREFIX "fc0012"
+
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+struct fc0012_priv {
+ struct i2c_adapter *i2c;
+ u8 addr;
+ u8 dual_master;
+ u8 xtal_freq;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+#endif
diff --git a/drivers/media/common/tuners/fc0012.c b/drivers/media/common/tuners/fc0012.c
new file mode 100644
index 000000000000..308135abd54c
--- /dev/null
+++ b/drivers/media/common/tuners/fc0012.c
@@ -0,0 +1,467 @@
+/*
+ * Fitipower FC0012 tuner driver
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "fc0012.h"
+#include "fc0012-priv.h"
+
+static int fc0012_writereg(struct fc0012_priv *priv, u8 reg, u8 val)
+{
+ u8 buf[2] = {reg, val};
+ struct i2c_msg msg = {
+ .addr = priv->addr, .flags = 0, .buf = buf, .len = 2
+ };
+
+ if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ err("I2C write reg failed, reg: %02x, val: %02x", reg, val);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int fc0012_readreg(struct fc0012_priv *priv, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2] = {
+ { .addr = priv->addr, .flags = 0, .buf = &reg, .len = 1 },
+ { .addr = priv->addr, .flags = I2C_M_RD, .buf = val, .len = 1 },
+ };
+
+ if (i2c_transfer(priv->i2c, msg, 2) != 2) {
+ err("I2C read reg failed, reg: %02x", reg);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int fc0012_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int fc0012_init(struct dvb_frontend *fe)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ int i, ret = 0;
+ unsigned char reg[] = {
+ 0x00, /* dummy reg. 0 */
+ 0x05, /* reg. 0x01 */
+ 0x10, /* reg. 0x02 */
+ 0x00, /* reg. 0x03 */
+ 0x00, /* reg. 0x04 */
+ 0x0f, /* reg. 0x05: may also be 0x0a */
+ 0x00, /* reg. 0x06: divider 2, VCO slow */
+ 0x00, /* reg. 0x07: may also be 0x0f */
+ 0xff, /* reg. 0x08: AGC Clock divide by 256, AGC gain 1/256,
+ Loop Bw 1/8 */
+ 0x6e, /* reg. 0x09: Disable LoopThrough, Enable LoopThrough: 0x6f */
+ 0xb8, /* reg. 0x0a: Disable LO Test Buffer */
+ 0x82, /* reg. 0x0b: Output Clock is same as clock frequency,
+ may also be 0x83 */
+ 0xfc, /* reg. 0x0c: depending on AGC Up-Down mode, may need 0xf8 */
+ 0x02, /* reg. 0x0d: AGC Not Forcing & LNA Forcing, 0x02 for DVB-T */
+ 0x00, /* reg. 0x0e */
+ 0x00, /* reg. 0x0f */
+ 0x00, /* reg. 0x10: may also be 0x0d */
+ 0x00, /* reg. 0x11 */
+ 0x1f, /* reg. 0x12: Set to maximum gain */
+ 0x08, /* reg. 0x13: Set to Middle Gain: 0x08,
+ Low Gain: 0x00, High Gain: 0x10, enable IX2: 0x80 */
+ 0x00, /* reg. 0x14 */
+ 0x04, /* reg. 0x15: Enable LNA COMPS */
+ };
+
+ switch (priv->xtal_freq) {
+ case FC_XTAL_27_MHZ:
+ case FC_XTAL_28_8_MHZ:
+ reg[0x07] |= 0x20;
+ break;
+ case FC_XTAL_36_MHZ:
+ default:
+ break;
+ }
+
+ if (priv->dual_master)
+ reg[0x0c] |= 0x02;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ for (i = 1; i < sizeof(reg); i++) {
+ ret = fc0012_writereg(priv, i, reg[i]);
+ if (ret)
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ err("fc0012_writereg failed: %d", ret);
+
+ return ret;
+}
+
+static int fc0012_sleep(struct dvb_frontend *fe)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+static int fc0012_set_params(struct dvb_frontend *fe)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ int i, ret = 0;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 freq = p->frequency / 1000;
+ u32 delsys = p->delivery_system;
+ unsigned char reg[7], am, pm, multi, tmp;
+ unsigned long f_vco;
+ unsigned short xtal_freq_khz_2, xin, xdiv;
+ int vco_select = false;
+
+ if (fe->callback) {
+ ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC_FE_CALLBACK_VHF_ENABLE, (freq > 300000 ? 0 : 1));
+ if (ret)
+ goto exit;
+ }
+
+ switch (priv->xtal_freq) {
+ case FC_XTAL_27_MHZ:
+ xtal_freq_khz_2 = 27000 / 2;
+ break;
+ case FC_XTAL_36_MHZ:
+ xtal_freq_khz_2 = 36000 / 2;
+ break;
+ case FC_XTAL_28_8_MHZ:
+ default:
+ xtal_freq_khz_2 = 28800 / 2;
+ break;
+ }
+
+ /* select frequency divider and the frequency of VCO */
+ if (freq < 37084) { /* freq * 96 < 3560000 */
+ multi = 96;
+ reg[5] = 0x82;
+ reg[6] = 0x00;
+ } else if (freq < 55625) { /* freq * 64 < 3560000 */
+ multi = 64;
+ reg[5] = 0x82;
+ reg[6] = 0x02;
+ } else if (freq < 74167) { /* freq * 48 < 3560000 */
+ multi = 48;
+ reg[5] = 0x42;
+ reg[6] = 0x00;
+ } else if (freq < 111250) { /* freq * 32 < 3560000 */
+ multi = 32;
+ reg[5] = 0x42;
+ reg[6] = 0x02;
+ } else if (freq < 148334) { /* freq * 24 < 3560000 */
+ multi = 24;
+ reg[5] = 0x22;
+ reg[6] = 0x00;
+ } else if (freq < 222500) { /* freq * 16 < 3560000 */
+ multi = 16;
+ reg[5] = 0x22;
+ reg[6] = 0x02;
+ } else if (freq < 296667) { /* freq * 12 < 3560000 */
+ multi = 12;
+ reg[5] = 0x12;
+ reg[6] = 0x00;
+ } else if (freq < 445000) { /* freq * 8 < 3560000 */
+ multi = 8;
+ reg[5] = 0x12;
+ reg[6] = 0x02;
+ } else if (freq < 593334) { /* freq * 6 < 3560000 */
+ multi = 6;
+ reg[5] = 0x0a;
+ reg[6] = 0x00;
+ } else {
+ multi = 4;
+ reg[5] = 0x0a;
+ reg[6] = 0x02;
+ }
+
+ f_vco = freq * multi;
+
+ if (f_vco >= 3060000) {
+ reg[6] |= 0x08;
+ vco_select = true;
+ }
+
+ if (freq >= 45000) {
+ /* From the divided value (XDIV), determine the FA and FP values */
+ xdiv = (unsigned short)(f_vco / xtal_freq_khz_2);
+ if ((f_vco - xdiv * xtal_freq_khz_2) >= (xtal_freq_khz_2 / 2))
+ xdiv++;
+
+ pm = (unsigned char)(xdiv / 8);
+ am = (unsigned char)(xdiv - (8 * pm));
+
+ if (am < 2) {
+ reg[1] = am + 8;
+ reg[2] = pm - 1;
+ } else {
+ reg[1] = am;
+ reg[2] = pm;
+ }
+ } else {
+ /* fix for frequency less than 45 MHz */
+ reg[1] = 0x06;
+ reg[2] = 0x11;
+ }
+
+ /* fix clock out */
+ reg[6] |= 0x20;
+
+ /* From the VCO frequency, determine the XIN (fractional part of the Delta
+ Sigma PLL) and the divided value (XDIV) */
+ xin = (unsigned short)(f_vco - (f_vco / xtal_freq_khz_2) * xtal_freq_khz_2);
+ xin = (xin << 15) / xtal_freq_khz_2;
+ if (xin >= 16384)
+ xin += 32768;
+
+ reg[3] = xin >> 8; /* xin with 9 bit resolution */
+ reg[4] = xin & 0xff;
+
+ if (delsys == SYS_DVBT) {
+ reg[6] &= 0x3f; /* bits 6 and 7 describe the bandwidth */
+ switch (p->bandwidth_hz) {
+ case 6000000:
+ reg[6] |= 0x80;
+ break;
+ case 7000000:
+ reg[6] |= 0x40;
+ break;
+ case 8000000:
+ default:
+ break;
+ }
+ } else {
+ err("%s: modulation type not supported!", __func__);
+ return -EINVAL;
+ }
+
+ /* modified for Realtek demod */
+ reg[5] |= 0x07;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ for (i = 1; i <= 6; i++) {
+ ret = fc0012_writereg(priv, i, reg[i]);
+ if (ret)
+ goto exit;
+ }
+
+ /* VCO Calibration */
+ ret = fc0012_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x00);
+
+ /* VCO Re-Calibration if needed */
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x00);
+
+ if (!ret) {
+ msleep(10);
+ ret = fc0012_readreg(priv, 0x0e, &tmp);
+ }
+ if (ret)
+ goto exit;
+
+ /* vco selection */
+ tmp &= 0x3f;
+
+ if (vco_select) {
+ if (tmp > 0x3c) {
+ reg[6] &= ~0x08;
+ ret = fc0012_writereg(priv, 0x06, reg[6]);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x00);
+ }
+ } else {
+ if (tmp < 0x02) {
+ reg[6] |= 0x08;
+ ret = fc0012_writereg(priv, 0x06, reg[6]);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0012_writereg(priv, 0x0e, 0x00);
+ }
+ }
+
+ priv->frequency = p->frequency;
+ priv->bandwidth = p->bandwidth_hz;
+
+exit:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+ if (ret)
+ warn("%s: failed: %d", __func__, ret);
+ return ret;
+}
+
+static int fc0012_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ *frequency = priv->frequency;
+ return 0;
+}
+
+static int fc0012_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ /* CHECK: always ? */
+ *frequency = 0;
+ return 0;
+}
+
+static int fc0012_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ *bandwidth = priv->bandwidth;
+ return 0;
+}
+
+#define INPUT_ADC_LEVEL -8
+
+static int fc0012_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct fc0012_priv *priv = fe->tuner_priv;
+ int ret;
+ unsigned char tmp;
+ int int_temp, lna_gain, int_lna, tot_agc_gain, power;
+ const int fc0012_lna_gain_table[] = {
+ /* low gain */
+ -63, -58, -99, -73,
+ -63, -65, -54, -60,
+ /* middle gain */
+ 71, 70, 68, 67,
+ 65, 63, 61, 58,
+ /* high gain */
+ 197, 191, 188, 186,
+ 184, 182, 181, 179,
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ ret = fc0012_writereg(priv, 0x12, 0x00);
+ if (ret)
+ goto err;
+
+ ret = fc0012_readreg(priv, 0x12, &tmp);
+ if (ret)
+ goto err;
+ int_temp = tmp;
+
+ ret = fc0012_readreg(priv, 0x13, &tmp);
+ if (ret)
+ goto err;
+ lna_gain = tmp & 0x1f;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (lna_gain < ARRAY_SIZE(fc0012_lna_gain_table)) {
+ int_lna = fc0012_lna_gain_table[lna_gain];
+ tot_agc_gain = (abs((int_temp >> 5) - 7) - 2 +
+ (int_temp & 0x1f)) * 2;
+ power = INPUT_ADC_LEVEL - tot_agc_gain - int_lna / 10;
+
+ if (power >= 45)
+ *strength = 255; /* 100% */
+ else if (power < -95)
+ *strength = 0;
+ else
+ *strength = (power + 95) * 255 / 140;
+
+ *strength |= *strength << 8;
+ } else {
+ ret = -1;
+ }
+
+ goto exit;
+
+err:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+exit:
+ if (ret)
+ warn("%s: failed: %d", __func__, ret);
+ return ret;
+}
+
+static const struct dvb_tuner_ops fc0012_tuner_ops = {
+ .info = {
+ .name = "Fitipower FC0012",
+
+ .frequency_min = 37000000, /* estimate */
+ .frequency_max = 862000000, /* estimate */
+ .frequency_step = 0,
+ },
+
+ .release = fc0012_release,
+
+ .init = fc0012_init,
+ .sleep = fc0012_sleep,
+
+ .set_params = fc0012_set_params,
+
+ .get_frequency = fc0012_get_frequency,
+ .get_if_frequency = fc0012_get_if_frequency,
+ .get_bandwidth = fc0012_get_bandwidth,
+
+ .get_rf_strength = fc0012_get_rf_strength,
+};
+
+struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq)
+{
+ struct fc0012_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct fc0012_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->i2c = i2c;
+ priv->dual_master = dual_master;
+ priv->addr = i2c_address;
+ priv->xtal_freq = xtal_freq;
+
+ info("Fitipower FC0012 successfully attached.");
+
+ fe->tuner_priv = priv;
+
+ memcpy(&fe->ops.tuner_ops, &fc0012_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ return fe;
+}
+EXPORT_SYMBOL(fc0012_attach);
+
+MODULE_DESCRIPTION("Fitipower FC0012 silicon tuner driver");
+MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.6");
diff --git a/drivers/media/common/tuners/fc0012.h b/drivers/media/common/tuners/fc0012.h
new file mode 100644
index 000000000000..4dbd5efe8845
--- /dev/null
+++ b/drivers/media/common/tuners/fc0012.h
@@ -0,0 +1,44 @@
+/*
+ * Fitipower FC0012 tuner driver - include
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _FC0012_H_
+#define _FC0012_H_
+
+#include "dvb_frontend.h"
+#include "fc001x-common.h"
+
+#if defined(CONFIG_MEDIA_TUNER_FC0012) || \
+ (defined(CONFIG_MEDIA_TUNER_FC0012_MODULE) && defined(MODULE))
+extern struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq);
+#else
+static inline struct dvb_frontend *fc0012_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
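
For reference, a worked pass through the divider math in fc0012_set_params() above (editorial illustration only), assuming a 28.8 MHz crystal and a 474 MHz carrier:

/*
 * freq = 474000 kHz, 445000 <= freq < 593334  ->  multi = 6, f_vco = 2844000 kHz
 * xtal_freq_khz_2 = 28800 / 2 = 14400
 * xdiv = 2844000 / 14400 = 197, remainder 7200 >= 14400 / 2  ->  xdiv = 198
 * pm = 198 / 8 = 24, am = 198 - 8 * 24 = 6; am >= 2  ->  reg[1] = 0x06, reg[2] = 0x18
 * xin = 2844000 - 197 * 14400 = 7200; (7200 << 15) / 14400 = 16384
 *       16384 >= 16384  ->  xin += 32768 = 0xc000  ->  reg[3] = 0xc0, reg[4] = 0x00
 * f_vco < 3060000, so the high-VCO select bit (0x08) in reg[6] stays clear.
 */
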
diff --git a/drivers/media/common/tuners/fc0013-priv.h b/drivers/media/common/tuners/fc0013-priv.h
new file mode 100644
index 000000000000..bfd49dedea22
--- /dev/null
+++ b/drivers/media/common/tuners/fc0013-priv.h
@@ -0,0 +1,44 @@
+/*
+ * Fitipower FC0013 tuner driver
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _FC0013_PRIV_H_
+#define _FC0013_PRIV_H_
+
+#define LOG_PREFIX "fc0013"
+
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+struct fc0013_priv {
+ struct i2c_adapter *i2c;
+ u8 addr;
+ u8 dual_master;
+ u8 xtal_freq;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+#endif
diff --git a/drivers/media/common/tuners/fc0013.c b/drivers/media/common/tuners/fc0013.c
new file mode 100644
index 000000000000..bd8f0f1e8f3b
--- /dev/null
+++ b/drivers/media/common/tuners/fc0013.c
@@ -0,0 +1,634 @@
+/*
+ * Fitipower FC0013 tuner driver
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ * partially based on driver code from Fitipower
+ * Copyright (C) 2010 Fitipower Integrated Technology Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include "fc0013.h"
+#include "fc0013-priv.h"
+
+static int fc0013_writereg(struct fc0013_priv *priv, u8 reg, u8 val)
+{
+ u8 buf[2] = {reg, val};
+ struct i2c_msg msg = {
+ .addr = priv->addr, .flags = 0, .buf = buf, .len = 2
+ };
+
+ if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
+ err("I2C write reg failed, reg: %02x, val: %02x", reg, val);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int fc0013_readreg(struct fc0013_priv *priv, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2] = {
+ { .addr = priv->addr, .flags = 0, .buf = &reg, .len = 1 },
+ { .addr = priv->addr, .flags = I2C_M_RD, .buf = val, .len = 1 },
+ };
+
+ if (i2c_transfer(priv->i2c, msg, 2) != 2) {
+ err("I2C read reg failed, reg: %02x", reg);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int fc0013_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int fc0013_init(struct dvb_frontend *fe)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int i, ret = 0;
+ unsigned char reg[] = {
+ 0x00, /* reg. 0x00: dummy */
+ 0x09, /* reg. 0x01 */
+ 0x16, /* reg. 0x02 */
+ 0x00, /* reg. 0x03 */
+ 0x00, /* reg. 0x04 */
+ 0x17, /* reg. 0x05 */
+ 0x02, /* reg. 0x06 */
+ 0x0a, /* reg. 0x07: CHECK */
+ 0xff, /* reg. 0x08: AGC Clock divide by 256, AGC gain 1/256,
+ Loop Bw 1/8 */
+ 0x6f, /* reg. 0x09: enable LoopThrough */
+ 0xb8, /* reg. 0x0a: Disable LO Test Buffer */
+ 0x82, /* reg. 0x0b: CHECK */
+ 0xfc, /* reg. 0x0c: depending on AGC Up-Down mode, may need 0xf8 */
+ 0x01, /* reg. 0x0d: AGC Not Forcing & LNA Forcing, may need 0x02 */
+ 0x00, /* reg. 0x0e */
+ 0x00, /* reg. 0x0f */
+ 0x00, /* reg. 0x10 */
+ 0x00, /* reg. 0x11 */
+ 0x00, /* reg. 0x12 */
+ 0x00, /* reg. 0x13 */
+ 0x50, /* reg. 0x14: DVB-t High Gain, UHF.
+ Middle Gain: 0x48, Low Gain: 0x40 */
+ 0x01, /* reg. 0x15 */
+ };
+
+ switch (priv->xtal_freq) {
+ case FC_XTAL_27_MHZ:
+ case FC_XTAL_28_8_MHZ:
+ reg[0x07] |= 0x20;
+ break;
+ case FC_XTAL_36_MHZ:
+ default:
+ break;
+ }
+
+ if (priv->dual_master)
+ reg[0x0c] |= 0x02;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ for (i = 1; i < sizeof(reg); i++) {
+ ret = fc0013_writereg(priv, i, reg[i]);
+ if (ret)
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (ret)
+ err("fc0013_writereg failed: %d", ret);
+
+ return ret;
+}
+
+static int fc0013_sleep(struct dvb_frontend *fe)
+{
+ /* nothing to do here */
+ return 0;
+}
+
+int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int ret;
+ u8 rc_cal;
+ int val;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* push rc_cal value, get rc_cal value */
+ ret = fc0013_writereg(priv, 0x10, 0x00);
+ if (ret)
+ goto error_out;
+
+ /* get rc_cal value */
+ ret = fc0013_readreg(priv, 0x10, &rc_cal);
+ if (ret)
+ goto error_out;
+
+ rc_cal &= 0x0f;
+
+ val = (int)rc_cal + rc_val;
+
+ /* forcing rc_cal */
+ ret = fc0013_writereg(priv, 0x0d, 0x11);
+ if (ret)
+ goto error_out;
+
+ /* modify rc_cal value */
+ if (val > 15)
+ ret = fc0013_writereg(priv, 0x10, 0x0f);
+ else if (val < 0)
+ ret = fc0013_writereg(priv, 0x10, 0x00);
+ else
+ ret = fc0013_writereg(priv, 0x10, (u8)val);
+
+error_out:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ return ret;
+}
+EXPORT_SYMBOL(fc0013_rc_cal_add);
+
+int fc0013_rc_cal_reset(struct dvb_frontend *fe)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int ret;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ ret = fc0013_writereg(priv, 0x0d, 0x01);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x10, 0x00);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ return ret;
+}
+EXPORT_SYMBOL(fc0013_rc_cal_reset);
+
+static int fc0013_set_vhf_track(struct fc0013_priv *priv, u32 freq)
+{
+ int ret;
+ u8 tmp;
+
+ ret = fc0013_readreg(priv, 0x1d, &tmp);
+ if (ret)
+ goto error_out;
+ tmp &= 0xe3;
+ if (freq <= 177500) { /* VHF Track: 7 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x1c);
+ } else if (freq <= 184500) { /* VHF Track: 6 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x18);
+ } else if (freq <= 191500) { /* VHF Track: 5 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x14);
+ } else if (freq <= 198500) { /* VHF Track: 4 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x10);
+ } else if (freq <= 205500) { /* VHF Track: 3 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x0c);
+ } else if (freq <= 219500) { /* VHF Track: 2 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x08);
+ } else if (freq < 300000) { /* VHF Track: 1 */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x04);
+ } else { /* UHF and GPS */
+ ret = fc0013_writereg(priv, 0x1d, tmp | 0x1c);
+ }
+ if (ret)
+ goto error_out;
+error_out:
+ return ret;
+}
+
+static int fc0013_set_params(struct dvb_frontend *fe)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int i, ret = 0;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 freq = p->frequency / 1000;
+ u32 delsys = p->delivery_system;
+ unsigned char reg[7], am, pm, multi, tmp;
+ unsigned long f_vco;
+ unsigned short xtal_freq_khz_2, xin, xdiv;
+ int vco_select = false;
+
+ if (fe->callback) {
+ ret = fe->callback(priv->i2c, DVB_FRONTEND_COMPONENT_TUNER,
+ FC_FE_CALLBACK_VHF_ENABLE, (freq > 300000 ? 0 : 1));
+ if (ret)
+ goto exit;
+ }
+
+ switch (priv->xtal_freq) {
+ case FC_XTAL_27_MHZ:
+ xtal_freq_khz_2 = 27000 / 2;
+ break;
+ case FC_XTAL_36_MHZ:
+ xtal_freq_khz_2 = 36000 / 2;
+ break;
+ case FC_XTAL_28_8_MHZ:
+ default:
+ xtal_freq_khz_2 = 28800 / 2;
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ /* set VHF track */
+ ret = fc0013_set_vhf_track(priv, freq);
+ if (ret)
+ goto exit;
+
+ if (freq < 300000) {
+ /* enable VHF filter */
+ ret = fc0013_readreg(priv, 0x07, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x07, tmp | 0x10);
+ if (ret)
+ goto exit;
+
+ /* disable UHF & disable GPS */
+ ret = fc0013_readreg(priv, 0x14, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x14, tmp & 0x1f);
+ if (ret)
+ goto exit;
+ } else if (freq <= 862000) {
+ /* disable VHF filter */
+ ret = fc0013_readreg(priv, 0x07, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x07, tmp & 0xef);
+ if (ret)
+ goto exit;
+
+ /* enable UHF & disable GPS */
+ ret = fc0013_readreg(priv, 0x14, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x14, (tmp & 0x1f) | 0x40);
+ if (ret)
+ goto exit;
+ } else {
+ /* disable VHF filter */
+ ret = fc0013_readreg(priv, 0x07, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x07, tmp & 0xef);
+ if (ret)
+ goto exit;
+
+ /* disable UHF & enable GPS */
+ ret = fc0013_readreg(priv, 0x14, &tmp);
+ if (ret)
+ goto exit;
+ ret = fc0013_writereg(priv, 0x14, (tmp & 0x1f) | 0x20);
+ if (ret)
+ goto exit;
+ }
+
+ /* select frequency divider and the frequency of VCO */
+ if (freq < 37084) { /* freq * 96 < 3560000 */
+ multi = 96;
+ reg[5] = 0x82;
+ reg[6] = 0x00;
+ } else if (freq < 55625) { /* freq * 64 < 3560000 */
+ multi = 64;
+ reg[5] = 0x02;
+ reg[6] = 0x02;
+ } else if (freq < 74167) { /* freq * 48 < 3560000 */
+ multi = 48;
+ reg[5] = 0x42;
+ reg[6] = 0x00;
+ } else if (freq < 111250) { /* freq * 32 < 3560000 */
+ multi = 32;
+ reg[5] = 0x82;
+ reg[6] = 0x02;
+ } else if (freq < 148334) { /* freq * 24 < 3560000 */
+ multi = 24;
+ reg[5] = 0x22;
+ reg[6] = 0x00;
+ } else if (freq < 222500) { /* freq * 16 < 3560000 */
+ multi = 16;
+ reg[5] = 0x42;
+ reg[6] = 0x02;
+ } else if (freq < 296667) { /* freq * 12 < 3560000 */
+ multi = 12;
+ reg[5] = 0x12;
+ reg[6] = 0x00;
+ } else if (freq < 445000) { /* freq * 8 < 3560000 */
+ multi = 8;
+ reg[5] = 0x22;
+ reg[6] = 0x02;
+ } else if (freq < 593334) { /* freq * 6 < 3560000 */
+ multi = 6;
+ reg[5] = 0x0a;
+ reg[6] = 0x00;
+ } else if (freq < 950000) { /* freq * 4 < 3800000 */
+ multi = 4;
+ reg[5] = 0x12;
+ reg[6] = 0x02;
+ } else {
+ multi = 2;
+ reg[5] = 0x0a;
+ reg[6] = 0x02;
+ }
+
+ f_vco = freq * multi;
+
+ if (f_vco >= 3060000) {
+ reg[6] |= 0x08;
+ vco_select = true;
+ }
+
+ if (freq >= 45000) {
+ /* From the divided value (XDIV), determine the FA and FP values */
+ xdiv = (unsigned short)(f_vco / xtal_freq_khz_2);
+ if ((f_vco - xdiv * xtal_freq_khz_2) >= (xtal_freq_khz_2 / 2))
+ xdiv++;
+
+ pm = (unsigned char)(xdiv / 8);
+ am = (unsigned char)(xdiv - (8 * pm));
+
+ if (am < 2) {
+ reg[1] = am + 8;
+ reg[2] = pm - 1;
+ } else {
+ reg[1] = am;
+ reg[2] = pm;
+ }
+ } else {
+ /* fix for frequency less than 45 MHz */
+ reg[1] = 0x06;
+ reg[2] = 0x11;
+ }
+
+ /* fix clock out */
+ reg[6] |= 0x20;
+
+ /* From the VCO frequency, determine the XIN (fractional part of the Delta
+ Sigma PLL) and the divided value (XDIV) */
+ xin = (unsigned short)(f_vco - (f_vco / xtal_freq_khz_2) * xtal_freq_khz_2);
+ xin = (xin << 15) / xtal_freq_khz_2;
+ if (xin >= 16384)
+ xin += 32768;
+
+ reg[3] = xin >> 8;
+ reg[4] = xin & 0xff;
+
+ if (delsys == SYS_DVBT) {
+ reg[6] &= 0x3f; /* bits 6 and 7 describe the bandwidth */
+ switch (p->bandwidth_hz) {
+ case 6000000:
+ reg[6] |= 0x80;
+ break;
+ case 7000000:
+ reg[6] |= 0x40;
+ break;
+ case 8000000:
+ default:
+ break;
+ }
+ } else {
+ err("%s: modulation type not supported!", __func__);
+ return -EINVAL;
+ }
+
+ /* modified for Realtek demod */
+ reg[5] |= 0x07;
+
+ for (i = 1; i <= 6; i++) {
+ ret = fc0013_writereg(priv, i, reg[i]);
+ if (ret)
+ goto exit;
+ }
+
+ ret = fc0013_readreg(priv, 0x11, &tmp);
+ if (ret)
+ goto exit;
+ if (multi == 64)
+ ret = fc0013_writereg(priv, 0x11, tmp | 0x04);
+ else
+ ret = fc0013_writereg(priv, 0x11, tmp & 0xfb);
+ if (ret)
+ goto exit;
+
+ /* VCO Calibration */
+ ret = fc0013_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x00);
+
+ /* VCO Re-Calibration if needed */
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x00);
+
+ if (!ret) {
+ msleep(10);
+ ret = fc0013_readreg(priv, 0x0e, &tmp);
+ }
+ if (ret)
+ goto exit;
+
+ /* vco selection */
+ tmp &= 0x3f;
+
+ if (vco_select) {
+ if (tmp > 0x3c) {
+ reg[6] &= ~0x08;
+ ret = fc0013_writereg(priv, 0x06, reg[6]);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x00);
+ }
+ } else {
+ if (tmp < 0x02) {
+ reg[6] |= 0x08;
+ ret = fc0013_writereg(priv, 0x06, reg[6]);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x80);
+ if (!ret)
+ ret = fc0013_writereg(priv, 0x0e, 0x00);
+ }
+ }
+
+ priv->frequency = p->frequency;
+ priv->bandwidth = p->bandwidth_hz;
+
+exit:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+ if (ret)
+ warn("%s: failed: %d", __func__, ret);
+ return ret;
+}
+
+static int fc0013_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ *frequency = priv->frequency;
+ return 0;
+}
+
+static int fc0013_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ /* always ? */
+ *frequency = 0;
+ return 0;
+}
+
+static int fc0013_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ *bandwidth = priv->bandwidth;
+ return 0;
+}
+
+#define INPUT_ADC_LEVEL -8
+
+static int fc0013_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct fc0013_priv *priv = fe->tuner_priv;
+ int ret;
+ unsigned char tmp;
+ int int_temp, lna_gain, int_lna, tot_agc_gain, power;
+ const int fc0013_lna_gain_table[] = {
+ /* low gain */
+ -63, -58, -99, -73,
+ -63, -65, -54, -60,
+ /* middle gain */
+ 71, 70, 68, 67,
+ 65, 63, 61, 58,
+ /* high gain */
+ 197, 191, 188, 186,
+ 184, 182, 181, 179,
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */
+
+ ret = fc0013_writereg(priv, 0x13, 0x00);
+ if (ret)
+ goto err;
+
+ ret = fc0013_readreg(priv, 0x13, &tmp);
+ if (ret)
+ goto err;
+ int_temp = tmp;
+
+ ret = fc0013_readreg(priv, 0x14, &tmp);
+ if (ret)
+ goto err;
+ lna_gain = tmp & 0x1f;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+
+ if (lna_gain < ARRAY_SIZE(fc0013_lna_gain_table)) {
+ int_lna = fc0013_lna_gain_table[lna_gain];
+ tot_agc_gain = (abs((int_temp >> 5) - 7) - 2 +
+ (int_temp & 0x1f)) * 2;
+ power = INPUT_ADC_LEVEL - tot_agc_gain - int_lna / 10;
+
+ if (power >= 45)
+ *strength = 255; /* 100% */
+ else if (power < -95)
+ *strength = 0;
+ else
+ *strength = (power + 95) * 255 / 140;
+
+ *strength |= *strength << 8;
+ } else {
+ ret = -1;
+ }
+
+ goto exit;
+
+err:
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
+exit:
+ if (ret)
+ warn("%s: failed: %d", __func__, ret);
+ return ret;
+}
+
+static const struct dvb_tuner_ops fc0013_tuner_ops = {
+ .info = {
+ .name = "Fitipower FC0013",
+
+ .frequency_min = 37000000, /* estimate */
+ .frequency_max = 1680000000, /* CHECK */
+ .frequency_step = 0,
+ },
+
+ .release = fc0013_release,
+
+ .init = fc0013_init,
+ .sleep = fc0013_sleep,
+
+ .set_params = fc0013_set_params,
+
+ .get_frequency = fc0013_get_frequency,
+ .get_if_frequency = fc0013_get_if_frequency,
+ .get_bandwidth = fc0013_get_bandwidth,
+
+ .get_rf_strength = fc0013_get_rf_strength,
+};
+
+struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq)
+{
+ struct fc0013_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct fc0013_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->i2c = i2c;
+ priv->dual_master = dual_master;
+ priv->addr = i2c_address;
+ priv->xtal_freq = xtal_freq;
+
+ info("Fitipower FC0013 successfully attached.");
+
+ fe->tuner_priv = priv;
+
+ memcpy(&fe->ops.tuner_ops, &fc0013_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ return fe;
+}
+EXPORT_SYMBOL(fc0013_attach);
+
+MODULE_DESCRIPTION("Fitipower FC0013 silicon tuner driver");
+MODULE_AUTHOR("Hans-Frieder Vogt <hfvogt@gmx.net>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.2");
diff --git a/drivers/media/common/tuners/fc0013.h b/drivers/media/common/tuners/fc0013.h
new file mode 100644
index 000000000000..594efd64aeec
--- /dev/null
+++ b/drivers/media/common/tuners/fc0013.h
@@ -0,0 +1,57 @@
+/*
+ * Fitipower FC0013 tuner driver
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _FC0013_H_
+#define _FC0013_H_
+
+#include "dvb_frontend.h"
+#include "fc001x-common.h"
+
+#if defined(CONFIG_MEDIA_TUNER_FC0013) || \
+ (defined(CONFIG_MEDIA_TUNER_FC0013_MODULE) && defined(MODULE))
+extern struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq);
+extern int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val);
+extern int fc0013_rc_cal_reset(struct dvb_frontend *fe);
+#else
+static inline struct dvb_frontend *fc0013_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 i2c_address, int dual_master,
+ enum fc001x_xtal_freq xtal_freq)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline int fc0013_rc_cal_add(struct dvb_frontend *fe, int rc_val)
+{
+ return 0;
+}
+
+static inline int fc0013_rc_cal_reset(struct dvb_frontend *fe)
+{
+ return 0;
+}
+#endif
+
+#endif
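
A minimal usage sketch for the fc0013 helpers above (editorial illustration, not part of this patch; the adapter pointer, I2C address and trim offset are hypothetical, board-dependent assumptions):

/* in the bridge driver's frontend-attach path: */
	if (!fc0013_attach(fe, demo_i2c_adapter, 0x63 /* assumed addr */,
			   0 /* single master */, FC_XTAL_28_8_MHZ))
		return -ENODEV;

	/* optional RC filter trim, e.g. as requested by the demod driver */
	fc0013_rc_cal_reset(fe);
	fc0013_rc_cal_add(fe, 2);	/* example offset only */
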
diff --git a/drivers/media/common/tuners/fc001x-common.h b/drivers/media/common/tuners/fc001x-common.h
new file mode 100644
index 000000000000..718818156934
--- /dev/null
+++ b/drivers/media/common/tuners/fc001x-common.h
@@ -0,0 +1,39 @@
+/*
+ * Fitipower FC0012 & FC0013 tuner driver - common defines
+ *
+ * Copyright (C) 2012 Hans-Frieder Vogt <hfvogt@gmx.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _FC001X_COMMON_H_
+#define _FC001X_COMMON_H_
+
+enum fc001x_xtal_freq {
+ FC_XTAL_27_MHZ, /* 27000000 */
+ FC_XTAL_28_8_MHZ, /* 28800000 */
+ FC_XTAL_36_MHZ, /* 36000000 */
+};
+
+/*
+ * enum fc001x_fe_callback_commands - Frontend callbacks
+ *
+ * @FC_FE_CALLBACK_VHF_ENABLE: enable VHF or UHF
+ */
+enum fc001x_fe_callback_commands {
+ FC_FE_CALLBACK_VHF_ENABLE,
+};
+
+#endif
diff --git a/drivers/media/common/tuners/tua9001.c b/drivers/media/common/tuners/tua9001.c
new file mode 100644
index 000000000000..de2607084672
--- /dev/null
+++ b/drivers/media/common/tuners/tua9001.c
@@ -0,0 +1,215 @@
+/*
+ * Infineon TUA 9001 silicon tuner driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "tua9001.h"
+#include "tua9001_priv.h"
+
+/* write register */
+static int tua9001_wr_reg(struct tua9001_priv *priv, u8 reg, u16 val)
+{
+ int ret;
+ u8 buf[3] = { reg, (val >> 8) & 0xff, (val >> 0) & 0xff };
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg->i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ printk(KERN_WARNING "%s: I2C wr failed=%d reg=%02x\n",
+ __func__, ret, reg);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+static int tua9001_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+
+ return 0;
+}
+
+static int tua9001_init(struct dvb_frontend *fe)
+{
+ struct tua9001_priv *priv = fe->tuner_priv;
+ int ret = 0;
+ u8 i;
+ struct reg_val data[] = {
+ { 0x1e, 0x6512 },
+ { 0x25, 0xb888 },
+ { 0x39, 0x5460 },
+ { 0x3b, 0x00c0 },
+ { 0x3a, 0xf000 },
+ { 0x08, 0x0000 },
+ { 0x32, 0x0030 },
+ { 0x41, 0x703a },
+ { 0x40, 0x1c78 },
+ { 0x2c, 0x1c00 },
+ { 0x36, 0xc013 },
+ { 0x37, 0x6f18 },
+ { 0x27, 0x0008 },
+ { 0x2a, 0x0001 },
+ { 0x34, 0x0a40 },
+ };
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c-gate */
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ ret = tua9001_wr_reg(priv, data[i].reg, data[i].val);
+ if (ret)
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c-gate */
+
+ if (ret < 0)
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int tua9001_set_params(struct dvb_frontend *fe)
+{
+ struct tua9001_priv *priv = fe->tuner_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i;
+ u16 val;
+ u32 frequency;
+ struct reg_val data[2];
+
+ pr_debug("%s: delivery_system=%d frequency=%d bandwidth_hz=%d\n",
+ __func__, c->delivery_system, c->frequency,
+ c->bandwidth_hz);
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ switch (c->bandwidth_hz) {
+ case 8000000:
+ val = 0x0000;
+ break;
+ case 7000000:
+ val = 0x1000;
+ break;
+ case 6000000:
+ val = 0x2000;
+ break;
+ case 5000000:
+ val = 0x3000;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data[0].reg = 0x04;
+ data[0].val = val;
+
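+ /*
+ * Illustrative note (not from a datasheet): ignoring integer truncation,
+ * the register value computed below is (frequency - 150 MHz) * 48 / 10^6,
+ * i.e. (f_MHz - 150) * 48; e.g. 666 MHz gives 516 * 48 = 24768.
+ */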
+ frequency = (c->frequency - 150000000);
+ frequency /= 100;
+ frequency *= 48;
+ frequency /= 10000;
+
+ data[1].reg = 0x1f;
+ data[1].val = frequency;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c-gate */
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ ret = tua9001_wr_reg(priv, data[i].reg, data[i].val);
+ if (ret < 0)
+ break;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c-gate */
+
+err:
+ if (ret < 0)
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int tua9001_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ *frequency = 0; /* Zero-IF */
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops tua9001_tuner_ops = {
+ .info = {
+ .name = "Infineon TUA 9001",
+
+ .frequency_min = 170000000,
+ .frequency_max = 862000000,
+ .frequency_step = 0,
+ },
+
+ .release = tua9001_release,
+
+ .init = tua9001_init,
+ .set_params = tua9001_set_params,
+
+ .get_if_frequency = tua9001_get_if_frequency,
+};
+
+struct dvb_frontend *tua9001_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tua9001_config *cfg)
+{
+ struct tua9001_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct tua9001_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+
+ priv->cfg = cfg;
+ priv->i2c = i2c;
+
+ printk(KERN_INFO "Infineon TUA 9001 successfully attached.");
+
+ memcpy(&fe->ops.tuner_ops, &tua9001_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ fe->tuner_priv = priv;
+ return fe;
+}
+EXPORT_SYMBOL(tua9001_attach);
+
+MODULE_DESCRIPTION("Infineon TUA 9001 silicon tuner driver");
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/tua9001.h b/drivers/media/common/tuners/tua9001.h
new file mode 100644
index 000000000000..38d6ae76b1d6
--- /dev/null
+++ b/drivers/media/common/tuners/tua9001.h
@@ -0,0 +1,46 @@
+/*
+ * Infineon TUA 9001 silicon tuner driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef TUA9001_H
+#define TUA9001_H
+
+#include "dvb_frontend.h"
+
+struct tua9001_config {
+ /*
+ * I2C address
+ */
+ u8 i2c_addr;
+};
+
+#if defined(CONFIG_MEDIA_TUNER_TUA9001) || \
+ (defined(CONFIG_MEDIA_TUNER_TUA9001_MODULE) && defined(MODULE))
+extern struct dvb_frontend *tua9001_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tua9001_config *cfg);
+#else
+static inline struct dvb_frontend *tua9001_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, struct tua9001_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/common/tuners/tua9001_priv.h b/drivers/media/common/tuners/tua9001_priv.h
new file mode 100644
index 000000000000..73cc1ce0575c
--- /dev/null
+++ b/drivers/media/common/tuners/tua9001_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Infineon TUA 9001 silicon tuner driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef TUA9001_PRIV_H
+#define TUA9001_PRIV_H
+
+struct reg_val {
+ u8 reg;
+ u16 val;
+};
+
+struct tua9001_priv {
+ struct tua9001_config *cfg;
+ struct i2c_adapter *i2c;
+};
+
+#endif
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index eab2ea424200..dcca42ca57be 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -54,7 +54,7 @@ struct xc5000_priv {
struct list_head hybrid_tuner_instance_list;
u32 if_khz;
- u32 xtal_khz;
+ u16 xtal_khz;
u32 freq_hz;
u32 bandwidth;
u8 video_standard;
@@ -631,7 +631,10 @@ static int xc5000_fwupload(struct dvb_frontend *fe)
ret = xc_load_i2c_sequence(fe, fw->data);
if (XC_RESULT_SUCCESS == ret)
ret = xc_set_xtal(fe);
- printk(KERN_INFO "xc5000: firmware upload complete...\n");
+ if (XC_RESULT_SUCCESS == ret)
+ printk(KERN_INFO "xc5000: firmware upload complete...\n");
+ else
+ printk(KERN_ERR "xc5000: firmware upload failed...\n");
}
out:
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index 39a73bf01406..b1a547494625 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -34,7 +34,7 @@ struct xc5000_config {
u8 i2c_address;
u32 if_khz;
u8 radio_input;
- u32 xtal_khz;
+ u16 xtal_khz;
int chip_id;
};
diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c
index 48e48e8af55a..66f52f116b60 100644
--- a/drivers/media/dvb/bt8xx/dst_ca.c
+++ b/drivers/media/dvb/bt8xx/dst_ca.c
@@ -477,7 +477,6 @@ static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message
static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
int i = 0;
- unsigned int ca_message_header_len;
u32 command = 0;
struct ca_msg *hw_buffer;
@@ -496,7 +495,6 @@ static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message,
if (p_ca_message->msg) {
- ca_message_header_len = p_ca_message->length; /* Restore it back when you are done */
/* EN50221 tag */
command = 0;
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
index d88c4aa7d24d..131b938e9e81 100644
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -31,7 +31,6 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/i2c.h>
#include <linux/swab.h>
#include <linux/vmalloc.h>
@@ -1696,7 +1695,7 @@ static struct pci_driver ddb_pci_driver = {
.name = "DDBridge",
.id_table = ddb_id_tbl,
.probe = ddb_probe,
- .remove = ddb_remove,
+ .remove = __devexit_p(ddb_remove),
};
static __init int module_init_ddbridge(void)
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index faa3671b649e..d82469f842e2 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -568,6 +568,16 @@ void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
}
EXPORT_SYMBOL(dvb_dmx_swfilter_204);
+void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
+{
+ spin_lock(&demux->lock);
+
+ demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK);
+
+ spin_unlock(&demux->lock);
+}
+EXPORT_SYMBOL(dvb_dmx_swfilter_raw);
+
static struct dvb_demux_filter *dvb_dmx_filter_alloc(struct dvb_demux *demux)
{
int i;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index a7d876fd02dd..fa7188a253aa 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -145,5 +145,7 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
size_t count);
+void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
#endif /* _DVB_DEMUX_H_ */
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index cb888d835a89..aebcdf221dda 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -182,13 +182,13 @@ static enum dvbv3_emulation_type dvbv3_type(u32 delivery_system)
case SYS_DMBTH:
return DVBV3_OFDM;
case SYS_ATSC:
+ case SYS_ATSCMH:
case SYS_DVBC_ANNEX_B:
return DVBV3_ATSC;
case SYS_UNDEFINED:
case SYS_ISDBC:
case SYS_DVBH:
case SYS_DAB:
- case SYS_ATSCMH:
default:
/*
* Doesn't know how to emulate those types and/or
@@ -1030,6 +1030,25 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_HIERARCHY, 0, 0),
_DTV_CMD(DTV_ENUM_DELSYS, 0, 0),
+
+ _DTV_CMD(DTV_ATSCMH_PARADE_ID, 1, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 1, 0),
+
+ _DTV_CMD(DTV_ATSCMH_FIC_VER, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_PARADE_ID, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_NOG, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_TNOG, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SGN, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_PRC, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_A, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0),
+ _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0),
};
static void dtv_property_dump(struct dtv_property *tvp)
@@ -1121,6 +1140,8 @@ static int dtv_property_cache_sync(struct dvb_frontend *fe,
case DVBV3_ATSC:
dprintk("%s() Preparing ATSC req\n", __func__);
c->modulation = p->u.vsb.modulation;
+ if (c->delivery_system == SYS_ATSCMH)
+ break;
if ((c->modulation == VSB_8) || (c->modulation == VSB_16))
c->delivery_system = SYS_ATSC;
else
@@ -1367,6 +1388,54 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
case DTV_DVBT2_PLP_ID:
tvp->u.data = c->dvbt2_plp_id;
break;
+
+ /* ATSC-MH */
+ case DTV_ATSCMH_FIC_VER:
+ tvp->u.data = fe->dtv_property_cache.atscmh_fic_ver;
+ break;
+ case DTV_ATSCMH_PARADE_ID:
+ tvp->u.data = fe->dtv_property_cache.atscmh_parade_id;
+ break;
+ case DTV_ATSCMH_NOG:
+ tvp->u.data = fe->dtv_property_cache.atscmh_nog;
+ break;
+ case DTV_ATSCMH_TNOG:
+ tvp->u.data = fe->dtv_property_cache.atscmh_tnog;
+ break;
+ case DTV_ATSCMH_SGN:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sgn;
+ break;
+ case DTV_ATSCMH_PRC:
+ tvp->u.data = fe->dtv_property_cache.atscmh_prc;
+ break;
+ case DTV_ATSCMH_RS_FRAME_MODE:
+ tvp->u.data = fe->dtv_property_cache.atscmh_rs_frame_mode;
+ break;
+ case DTV_ATSCMH_RS_FRAME_ENSEMBLE:
+ tvp->u.data = fe->dtv_property_cache.atscmh_rs_frame_ensemble;
+ break;
+ case DTV_ATSCMH_RS_CODE_MODE_PRI:
+ tvp->u.data = fe->dtv_property_cache.atscmh_rs_code_mode_pri;
+ break;
+ case DTV_ATSCMH_RS_CODE_MODE_SEC:
+ tvp->u.data = fe->dtv_property_cache.atscmh_rs_code_mode_sec;
+ break;
+ case DTV_ATSCMH_SCCC_BLOCK_MODE:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_block_mode;
+ break;
+ case DTV_ATSCMH_SCCC_CODE_MODE_A:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_a;
+ break;
+ case DTV_ATSCMH_SCCC_CODE_MODE_B:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_b;
+ break;
+ case DTV_ATSCMH_SCCC_CODE_MODE_C:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_c;
+ break;
+ case DTV_ATSCMH_SCCC_CODE_MODE_D:
+ tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_d;
+ break;
+
default:
return -EINVAL;
}
@@ -1708,6 +1777,15 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
case DTV_DVBT2_PLP_ID:
c->dvbt2_plp_id = tvp->u.data;
break;
+
+ /* ATSC-MH */
+ case DTV_ATSCMH_PARADE_ID:
+ fe->dtv_property_cache.atscmh_parade_id = tvp->u.data;
+ break;
+ case DTV_ATSCMH_RS_FRAME_ENSEMBLE:
+ fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data;
+ break;
+
default:
return -EINVAL;
}
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index d63a8215fe03..e929d5697b87 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -372,6 +372,24 @@ struct dtv_frontend_properties {
/* DVB-T2 specifics */
u32 dvbt2_plp_id;
+
+ /* ATSC-MH specifics */
+ u8 atscmh_fic_ver;
+ u8 atscmh_parade_id;
+ u8 atscmh_nog;
+ u8 atscmh_tnog;
+ u8 atscmh_sgn;
+ u8 atscmh_prc;
+
+ u8 atscmh_rs_frame_mode;
+ u8 atscmh_rs_frame_ensemble;
+ u8 atscmh_rs_code_mode_pri;
+ u8 atscmh_rs_code_mode_sec;
+ u8 atscmh_sccc_block_mode;
+ u8 atscmh_sccc_code_mode_a;
+ u8 atscmh_sccc_code_mode_b;
+ u8 atscmh_sccc_code_mode_c;
+ u8 atscmh_sccc_code_mode_d;
};
struct dvb_frontend {
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 00a67326c193..39eab73b01ae 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if (minor == MAX_DVB_MINORS) {
kfree(dvbdevfops);
kfree(dvbdev);
+ up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
}
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 63bf45679f98..a26949336b3d 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -409,6 +409,7 @@ config DVB_USB_MXL111SF
tristate "MxL111SF DTV USB2.0 support"
depends on DVB_USB
select DVB_LGDT3305 if !DVB_FE_CUSTOMISE
+ select DVB_LG2160 if !DVB_FE_CUSTOMISE
select VIDEO_TVEEPROM
help
Say Y here to support the MxL111SF USB2.0 DTV receiver.
@@ -422,3 +423,15 @@ config DVB_USB_RTL28XXU
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the Realtek RTL28xxU DVB USB receiver.
+
+config DVB_USB_AF9035
+ tristate "Afatech AF9035 DVB-T USB2.0 support"
+ depends on DVB_USB
+ select DVB_AF9033
+ select MEDIA_TUNER_TUA9001 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_FC0011 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_TDA18218 if !MEDIA_TUNER_CUSTOMISE
+ help
+ Say Y here to support the Afatech AF9035 based DVB USB receiver.
+
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index b76acb5387e6..b667ac39a4e3 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -110,6 +110,9 @@ obj-$(CONFIG_DVB_USB_MXL111SF) += mxl111sf-tuner.o
dvb-usb-rtl28xxu-objs = rtl28xxu.o
obj-$(CONFIG_DVB_USB_RTL28XXU) += dvb-usb-rtl28xxu.o
+dvb-usb-af9035-objs = af9035.o
+obj-$(CONFIG_DVB_USB_AF9035) += dvb-usb-af9035.o
+
ccflags-y += -I$(srctree)/drivers/media/dvb/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb/frontends/
# due to tuner-xc3028
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 7e70ea50ef26..677fed79b01e 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -244,8 +244,7 @@ static int af9015_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
u8 uninitialized_var(mbox), addr_len;
struct req_t req;
-/* TODO: implement bus lock
-
+/*
The bus lock is needed because there is two tuners both using same I2C-address.
Due to that the only way to select correct tuner is use demodulator I2C-gate.
@@ -789,7 +788,7 @@ static void af9015_set_remote_config(struct usb_device *udev,
/* try to load remote based USB ID */
if (!props->rc.core.rc_codes)
props->rc.core.rc_codes = af9015_rc_setup_match(
- (vid << 16) + pid, af9015_rc_setup_usbids);
+ (vid << 16) | pid, af9015_rc_setup_usbids);
/* try to load remote based USB iManufacturer string */
if (!props->rc.core.rc_codes && vid == USB_VID_AFATECH) {
@@ -1220,8 +1219,8 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
}
/* attach demodulator */
- adap->fe_adap[0].fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id],
- &adap->dev->i2c_adap);
+ adap->fe_adap[0].fe = dvb_attach(af9013_attach,
+ &af9015_af9013_config[adap->id], &adap->dev->i2c_adap);
/*
* AF9015 firmware does not like if it gets interrupted by I2C adapter
@@ -1324,14 +1323,15 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
switch (af9015_af9013_config[adap->id].tuner) {
case AF9013_TUNER_MT2060:
case AF9013_TUNER_MT2060_2:
- ret = dvb_attach(mt2060_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
- &af9015_mt2060_config,
+ ret = dvb_attach(mt2060_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &af9015_mt2060_config,
af9015_config.mt2060_if1[adap->id])
== NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_QT1010:
case AF9013_TUNER_QT1010A:
- ret = dvb_attach(qt1010_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap,
+ ret = dvb_attach(qt1010_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap,
&af9015_qt1010_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18271:
@@ -1434,69 +1434,85 @@ enum af9015_usb_table_entry {
};
static struct usb_device_id af9015_usb_table[] = {
- [AFATECH_9015] =
- {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9015)},
- [AFATECH_9016] =
- {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9016)},
- [WINFAST_DTV_GOLD] =
- {USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_GOLD)},
- [PINNACLE_PCTV_71E] =
- {USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV71E)},
- [KWORLD_PLUSTV_399U] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U)},
- [TINYTWIN] = {USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_TINYTWIN)},
- [AZUREWAVE_TU700] =
- {USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_AZUREWAVE_AD_TU700)},
- [TERRATEC_AF9015] = {USB_DEVICE(USB_VID_TERRATEC,
+ [AFATECH_9015] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9015)},
+ [AFATECH_9016] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9015_9016)},
+ [WINFAST_DTV_GOLD] = {
+ USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_GOLD)},
+ [PINNACLE_PCTV_71E] = {
+ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV71E)},
+ [KWORLD_PLUSTV_399U] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U)},
+ [TINYTWIN] = {
+ USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_TINYTWIN)},
+ [AZUREWAVE_TU700] = {
+ USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_AZUREWAVE_AD_TU700)},
+ [TERRATEC_AF9015] = {
+ USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2)},
- [KWORLD_PLUSTV_PC160] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_2T)},
- [AVERTV_VOLAR_X] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X)},
- [XTENSIONS_380U] =
- {USB_DEVICE(USB_VID_XTENSIONS, USB_PID_XTENSIONS_XD_380)},
- [MSI_DIGIVOX_DUO] =
- {USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGIVOX_DUO)},
- [AVERTV_VOLAR_X_REV2] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X_2)},
- [TELESTAR_STARSTICK_2] =
- {USB_DEVICE(USB_VID_TELESTAR, USB_PID_TELESTAR_STARSTICK_2)},
- [AVERMEDIA_A309_USB] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A309)},
- [MSI_DIGIVOX_MINI_III] =
- {USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGI_VOX_MINI_III)},
- [KWORLD_E396] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U)},
- [KWORLD_E39B] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)},
- [KWORLD_E395] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)},
- [TREKSTOR_DVBT] = {USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)},
- [AVERTV_A850] = {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
- [AVERTV_A805] = {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A805)},
- [CONCEPTRONIC_CTVDIGRCU] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU)},
- [KWORLD_MC810] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810)},
- [GENIUS_TVGO_DVB_T03] =
- {USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03)},
- [KWORLD_399U_2] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2)},
- [KWORLD_PC160_T] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_T)},
- [SVEON_STV20] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
- [TINYTWIN_2] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
- [WINFAST_DTV2000DS] =
- {USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
- [KWORLD_UB383_T] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
- [KWORLD_E39A] =
- {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
- [AVERMEDIA_A815M] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A815M)},
- [CINERGY_T_STICK_RC] = {USB_DEVICE(USB_VID_TERRATEC,
+ [KWORLD_PLUSTV_PC160] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_2T)},
+ [AVERTV_VOLAR_X] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X)},
+ [XTENSIONS_380U] = {
+ USB_DEVICE(USB_VID_XTENSIONS, USB_PID_XTENSIONS_XD_380)},
+ [MSI_DIGIVOX_DUO] = {
+ USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGIVOX_DUO)},
+ [AVERTV_VOLAR_X_REV2] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_VOLAR_X_2)},
+ [TELESTAR_STARSTICK_2] = {
+ USB_DEVICE(USB_VID_TELESTAR, USB_PID_TELESTAR_STARSTICK_2)},
+ [AVERMEDIA_A309_USB] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A309)},
+ [MSI_DIGIVOX_MINI_III] = {
+ USB_DEVICE(USB_VID_MSI_2, USB_PID_MSI_DIGI_VOX_MINI_III)},
+ [KWORLD_E396] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U)},
+ [KWORLD_E39B] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)},
+ [KWORLD_E395] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)},
+ [TREKSTOR_DVBT] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)},
+ [AVERTV_A850] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
+ [AVERTV_A805] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A805)},
+ [CONCEPTRONIC_CTVDIGRCU] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU)},
+ [KWORLD_MC810] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810)},
+ [GENIUS_TVGO_DVB_T03] = {
+ USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03)},
+ [KWORLD_399U_2] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2)},
+ [KWORLD_PC160_T] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_PC160_T)},
+ [SVEON_STV20] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20)},
+ [TINYTWIN_2] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_TINYTWIN_2)},
+ [WINFAST_DTV2000DS] = {
+ USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV2000DS)},
+ [KWORLD_UB383_T] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB383_T)},
+ [KWORLD_E39A] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_4)},
+ [AVERMEDIA_A815M] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A815M)},
+ [CINERGY_T_STICK_RC] = {
+ USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_CINERGY_T_STICK_RC)},
- [CINERGY_T_DUAL_RC] = {USB_DEVICE(USB_VID_TERRATEC,
+ [CINERGY_T_DUAL_RC] = {
+ USB_DEVICE(USB_VID_TERRATEC,
USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
- [AVERTV_A850T] =
- {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
- [TINYTWIN_3] = {USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
- [SVEON_STV22] = {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
+ [AVERTV_A850T] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
+ [TINYTWIN_3] = {
+ USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
+ [SVEON_STV22] = {
+ USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
{ }
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1516,43 +1532,44 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
- .num_frontends = 1,
- .fe = {{
- .caps = DVB_USB_ADAP_HAS_PID_FILTER |
- DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
-
- .pid_filter_count = 32,
- .pid_filter = af9015_pid_filter,
- .pid_filter_ctrl = af9015_pid_filter_ctrl,
-
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x84,
+ .num_frontends = 1,
+ .fe = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter = af9015_pid_filter,
+ .pid_filter_ctrl = af9015_pid_filter_ctrl,
+
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ },
+ }
},
- }},
},
{
- .num_frontends = 1,
- .fe = {{
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x85,
- .u = {
- .bulk = {
- .buffersize =
- TS_USB20_FRAME_SIZE,
- }
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x85,
+ .u = {
+ .bulk = {
+ .buffersize = TS_USB20_FRAME_SIZE,
+ }
+ }
+ },
}
},
- }},
}
},
@@ -1575,102 +1592,67 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {
&af9015_usb_table[AFATECH_9015],
&af9015_usb_table[AFATECH_9016],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Leadtek WinFast DTV Dongle Gold",
.cold_ids = {
&af9015_usb_table[WINFAST_DTV_GOLD],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Pinnacle PCTV 71e",
.cold_ids = {
&af9015_usb_table[PINNACLE_PCTV_71E],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld PlusTV Dual DVB-T Stick " \
"(DVB-T 399U)",
.cold_ids = {
&af9015_usb_table[KWORLD_PLUSTV_399U],
&af9015_usb_table[KWORLD_399U_2],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "DigitalNow TinyTwin DVB-T Receiver",
.cold_ids = {
&af9015_usb_table[TINYTWIN],
&af9015_usb_table[TINYTWIN_2],
&af9015_usb_table[TINYTWIN_3],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TwinHan AzureWave AD-TU700(704J)",
.cold_ids = {
&af9015_usb_table[AZUREWAVE_TU700],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TerraTec Cinergy T USB XE",
.cold_ids = {
&af9015_usb_table[TERRATEC_AF9015],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld PlusTV Dual DVB-T PCI " \
"(DVB-T PC160-2T)",
.cold_ids = {
&af9015_usb_table[KWORLD_PLUSTV_PC160],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AVerMedia AVerTV DVB-T Volar X",
.cold_ids = {
&af9015_usb_table[AVERTV_VOLAR_X],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TerraTec Cinergy T Stick RC",
.cold_ids = {
&af9015_usb_table[CINERGY_T_STICK_RC],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TerraTec Cinergy T Stick Dual RC",
.cold_ids = {
&af9015_usb_table[CINERGY_T_DUAL_RC],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AverMedia AVerTV Red HD+ (A850T)",
.cold_ids = {
&af9015_usb_table[AVERTV_A850T],
- NULL
},
- .warm_ids = {NULL},
},
}
}, {
@@ -1686,43 +1668,44 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
- .num_frontends = 1,
- .fe = {{
- .caps = DVB_USB_ADAP_HAS_PID_FILTER |
- DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
-
- .pid_filter_count = 32,
- .pid_filter = af9015_pid_filter,
- .pid_filter_ctrl = af9015_pid_filter_ctrl,
-
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x84,
+ .num_frontends = 1,
+ .fe = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter = af9015_pid_filter,
+ .pid_filter_ctrl = af9015_pid_filter_ctrl,
+
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ },
+ }
},
- }},
},
{
- .num_frontends = 1,
- .fe = {{
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x85,
- .u = {
- .bulk = {
- .buffersize =
- TS_USB20_FRAME_SIZE,
- }
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x85,
+ .u = {
+ .bulk = {
+ .buffersize = TS_USB20_FRAME_SIZE,
+ }
+ }
+ },
}
},
- }},
}
},
@@ -1744,51 +1727,33 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.name = "Xtensions XD-380",
.cold_ids = {
&af9015_usb_table[XTENSIONS_380U],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "MSI DIGIVOX Duo",
.cold_ids = {
&af9015_usb_table[MSI_DIGIVOX_DUO],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Fujitsu-Siemens Slim Mobile USB DVB-T",
.cold_ids = {
&af9015_usb_table[AVERTV_VOLAR_X_REV2],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Telestar Starstick 2",
.cold_ids = {
&af9015_usb_table[TELESTAR_STARSTICK_2],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AVerMedia A309",
.cold_ids = {
&af9015_usb_table[AVERMEDIA_A309_USB],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "MSI Digi VOX mini III",
.cold_ids = {
&af9015_usb_table[MSI_DIGIVOX_MINI_III],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld USB DVB-T TV Stick II " \
"(VS-DVB-T 395U)",
.cold_ids = {
@@ -1796,34 +1761,23 @@ static struct dvb_usb_device_properties af9015_properties[] = {
&af9015_usb_table[KWORLD_E39B],
&af9015_usb_table[KWORLD_E395],
&af9015_usb_table[KWORLD_E39A],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "TrekStor DVB-T USB Stick",
.cold_ids = {
&af9015_usb_table[TREKSTOR_DVBT],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AverMedia AVerTV Volar Black HD " \
"(A850)",
.cold_ids = {
&af9015_usb_table[AVERTV_A850],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Sveon STV22 Dual USB DVB-T Tuner HDTV",
.cold_ids = {
&af9015_usb_table[SVEON_STV22],
- NULL
},
- .warm_ids = {NULL},
},
}
}, {
@@ -1839,43 +1793,44 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.num_adapters = 2,
.adapter = {
{
- .num_frontends = 1,
- .fe = {{
- .caps = DVB_USB_ADAP_HAS_PID_FILTER |
- DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
-
- .pid_filter_count = 32,
- .pid_filter = af9015_pid_filter,
- .pid_filter_ctrl = af9015_pid_filter_ctrl,
-
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x84,
+ .num_frontends = 1,
+ .fe = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+
+ .pid_filter_count = 32,
+ .pid_filter = af9015_pid_filter,
+ .pid_filter_ctrl = af9015_pid_filter_ctrl,
+
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ },
+ }
},
- }},
},
{
- .num_frontends = 1,
- .fe = {{
- .frontend_attach =
- af9015_af9013_frontend_attach,
- .tuner_attach = af9015_tuner_attach,
- .stream = {
- .type = USB_BULK,
- .count = 6,
- .endpoint = 0x85,
- .u = {
- .bulk = {
- .buffersize =
- TS_USB20_FRAME_SIZE,
- }
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9015_af9013_frontend_attach,
+ .tuner_attach = af9015_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x85,
+ .u = {
+ .bulk = {
+ .buffersize = TS_USB20_FRAME_SIZE,
+ }
+ }
+ },
}
},
- }},
}
},
@@ -1897,76 +1852,50 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.name = "AverMedia AVerTV Volar GPS 805 (A805)",
.cold_ids = {
&af9015_usb_table[AVERTV_A805],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Conceptronic USB2.0 DVB-T CTVDIGRCU " \
"V3.0",
.cold_ids = {
&af9015_usb_table[CONCEPTRONIC_CTVDIGRCU],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld Digial MC-810",
.cold_ids = {
&af9015_usb_table[KWORLD_MC810],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Genius TVGo DVB-T03",
.cold_ids = {
&af9015_usb_table[GENIUS_TVGO_DVB_T03],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld PlusTV DVB-T PCI Pro Card " \
"(DVB-T PC160-T)",
.cold_ids = {
&af9015_usb_table[KWORLD_PC160_T],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Sveon STV20 Tuner USB DVB-T HDTV",
.cold_ids = {
&af9015_usb_table[SVEON_STV20],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "Leadtek WinFast DTV2000DS",
.cold_ids = {
&af9015_usb_table[WINFAST_DTV2000DS],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "KWorld USB DVB-T Stick Mobile " \
"(UB383-T)",
.cold_ids = {
&af9015_usb_table[KWORLD_UB383_T],
- NULL
},
- .warm_ids = {NULL},
- },
- {
+ }, {
.name = "AverMedia AVerTV Volar M (A815Mac)",
.cold_ids = {
&af9015_usb_table[AVERMEDIA_A815M],
- NULL
},
- .warm_ids = {NULL},
},
}
},
@@ -2019,5 +1948,5 @@ static struct usb_driver af9015_usb_driver = {
module_usb_driver(af9015_usb_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
-MODULE_DESCRIPTION("Driver for Afatech AF9015 DVB-T");
+MODULE_DESCRIPTION("Afatech AF9015 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/af9035.c b/drivers/media/dvb/dvb-usb/af9035.c
new file mode 100644
index 000000000000..e83b39d3993c
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9035.c
@@ -0,0 +1,1242 @@
+/*
+ * Afatech AF9035 DVB USB driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "af9035.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+static DEFINE_MUTEX(af9035_usb_mutex);
+static struct dvb_usb_device_properties af9035_properties[2];
+static int af9035_properties_count = ARRAY_SIZE(af9035_properties);
+
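+/*
+ * Checksum: sum the message as big-endian 16-bit words starting from byte 1
+ * (the length byte at offset 0 is skipped) and return the bitwise complement
+ * of the sum.
+ */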
+static u16 af9035_checksum(const u8 *buf, size_t len)
+{
+ size_t i;
+ u16 checksum = 0;
+
+ for (i = 1; i < len; i++) {
+ if (i % 2)
+ checksum += buf[i] << 8;
+ else
+ checksum += buf[i];
+ }
+ checksum = ~checksum;
+
+ return checksum;
+}
+
+static int af9035_ctrl_msg(struct usb_device *udev, struct usb_req *req)
+{
+#define BUF_LEN 64
+#define REQ_HDR_LEN 4 /* send header size */
+#define ACK_HDR_LEN 3 /* recv header size */
+#define CHECKSUM_LEN 2
+#define USB_TIMEOUT 2000
+
+ int ret, msg_len, act_len;
+ u8 buf[BUF_LEN];
+ static u8 seq; /* packet sequence number */
+ u16 checksum, tmp_checksum;
+
+ /* buffer overflow check */
+ if (req->wlen > (BUF_LEN - REQ_HDR_LEN - CHECKSUM_LEN) ||
+ req->rlen > (BUF_LEN - ACK_HDR_LEN - CHECKSUM_LEN)) {
+ pr_debug("%s: too much data wlen=%d rlen=%d\n", __func__,
+ req->wlen, req->rlen);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&af9035_usb_mutex) < 0)
+ return -EAGAIN;
+
+ buf[0] = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN - 1;
+ buf[1] = req->mbox;
+ buf[2] = req->cmd;
+ buf[3] = seq++;
+ if (req->wlen)
+ memcpy(&buf[4], req->wbuf, req->wlen);
+
+ /* calc and add checksum */
+ checksum = af9035_checksum(buf, buf[0] - 1);
+ buf[buf[0] - 1] = (checksum >> 8);
+ buf[buf[0] - 0] = (checksum & 0xff);
+
+ msg_len = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN;
+
+ /* send req */
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x02), buf, msg_len,
+ &act_len, USB_TIMEOUT);
+ if (ret < 0)
+ err("bulk message failed=%d (%d/%d)", ret, msg_len, act_len);
+ else if (act_len != msg_len)
+ ret = -EIO; /* not all data was sent */
+ if (ret < 0)
+ goto err_mutex_unlock;
+
+ /* no ack for those packets */
+ if (req->cmd == CMD_FW_DL)
+ goto exit_mutex_unlock;
+
+ /* receive ack and data if read req */
+ msg_len = ACK_HDR_LEN + req->rlen + CHECKSUM_LEN;
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x81), buf, msg_len,
+ &act_len, USB_TIMEOUT);
+ if (ret < 0) {
+ err("recv bulk message failed=%d", ret);
+ ret = -EIO;
+ goto err_mutex_unlock;
+ }
+
+ if (act_len != msg_len) {
+ err("recv bulk message truncated (%d != %d)", act_len, msg_len);
+ ret = -EIO;
+ goto err_mutex_unlock;
+ }
+
+ /* verify checksum */
+ checksum = af9035_checksum(buf, act_len - 2);
+ tmp_checksum = (buf[act_len - 2] << 8) | buf[act_len - 1];
+ if (tmp_checksum != checksum) {
+ err("%s: command=%02x checksum mismatch (%04x != %04x)",
+ __func__, req->cmd, tmp_checksum, checksum);
+ ret = -EIO;
+ goto err_mutex_unlock;
+ }
+
+ /* check status */
+ if (buf[2]) {
+ pr_debug("%s: command=%02x failed fw error=%d\n", __func__,
+ req->cmd, buf[2]);
+ ret = -EIO;
+ goto err_mutex_unlock;
+ }
+
+ /* read request, copy returned data to return buf */
+ if (req->rlen)
+ memcpy(req->rbuf, &buf[ACK_HDR_LEN], req->rlen);
+
+err_mutex_unlock:
+exit_mutex_unlock:
+ mutex_unlock(&af9035_usb_mutex);
+
+ return ret;
+}
+
+/* write multiple registers */
+static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
+{
+ u8 wbuf[6 + len];
+ u8 mbox = (reg >> 16) & 0xff;
+ struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL };
+
+ wbuf[0] = len;
+ wbuf[1] = 2;
+ wbuf[2] = 0;
+ wbuf[3] = 0;
+ wbuf[4] = (reg >> 8) & 0xff;
+ wbuf[5] = (reg >> 0) & 0xff;
+ memcpy(&wbuf[6], val, len);
+
+ return af9035_ctrl_msg(d->udev, &req);
+}
+
+/* read multiple registers */
+static int af9035_rd_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
+{
+ u8 wbuf[] = { len, 2, 0, 0, (reg >> 8) & 0xff, reg & 0xff };
+ u8 mbox = (reg >> 16) & 0xff;
+ struct usb_req req = { CMD_MEM_RD, mbox, sizeof(wbuf), wbuf, len, val };
+
+ return af9035_ctrl_msg(d->udev, &req);
+}
+
+/* write single register */
+static int af9035_wr_reg(struct dvb_usb_device *d, u32 reg, u8 val)
+{
+ return af9035_wr_regs(d, reg, &val, 1);
+}
+
+/* read single register */
+static int af9035_rd_reg(struct dvb_usb_device *d, u32 reg, u8 *val)
+{
+ return af9035_rd_regs(d, reg, val, 1);
+}
+
+/* write single register with mask */
+static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val,
+ u8 mask)
+{
+ int ret;
+ u8 tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = af9035_rd_regs(d, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ tmp &= ~mask;
+ val |= tmp;
+ }
+
+ return af9035_wr_regs(d, reg, &val, 1);
+}
+
+static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msg[], int num)
+{
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct state *state = d->priv;
+ int ret;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ /*
+ * The I2C sub header is 5 bytes long. The meaning of those bytes is:
+ * 0: data len
+ * 1: I2C addr << 1
+ * 2: reg addr len
+ * byte 3 and 4 can be used as reg addr
+ * 3: reg addr MSB
+ * used when reg addr len is set to 2
+ * 4: reg addr LSB
+ * used when reg addr len is set to 1 or 2
+ *
+ * For simplicity we do not use the register address at all.
+ * NOTE: As the firmware knows the tuner type, there is a small possibility
+ * that it performs tuner I2C hacks of its own, which may lead to problems
+ * if the firmware expects those register address bytes to be used.
+ */
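+ /*
+ * Illustrative example (not from any spec): with the write path below, a
+ * plain 2-byte write to I2C address 0x60 becomes the sub header
+ * 02 c0 00 00 00 followed by the two data bytes.
+ */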
+ if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ (msg[1].flags & I2C_M_RD)) {
+ if (msg[0].len > 40 || msg[1].len > 40) {
+ /* TODO: correct limits > 40 */
+ ret = -EOPNOTSUPP;
+ } else if (msg[0].addr == state->af9033_config[0].i2c_addr) {
+ /* integrated demod */
+ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+ ret = af9035_rd_regs(d, reg, &msg[1].buf[0],
+ msg[1].len);
+ } else {
+ /* I2C */
+ u8 buf[5 + msg[0].len];
+ struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
+ buf, msg[1].len, msg[1].buf };
+ buf[0] = msg[1].len;
+ buf[1] = msg[0].addr << 1;
+ buf[2] = 0x00; /* reg addr len */
+ buf[3] = 0x00; /* reg addr MSB */
+ buf[4] = 0x00; /* reg addr LSB */
+ memcpy(&buf[5], msg[0].buf, msg[0].len);
+ ret = af9035_ctrl_msg(d->udev, &req);
+ }
+ } else if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ if (msg[0].len > 40) {
+ /* TODO: correct limits > 40 */
+ ret = -EOPNOTSUPP;
+ } else if (msg[0].addr == state->af9033_config[0].i2c_addr) {
+ /* integrated demod */
+ u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
+ msg[0].buf[2];
+ ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
+ msg[0].len - 3);
+ } else {
+ /* I2C */
+ u8 buf[5 + msg[0].len];
+ struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
+ 0, NULL };
+ buf[0] = msg[0].len;
+ buf[1] = msg[0].addr << 1;
+ buf[2] = 0x00; /* reg addr len */
+ buf[3] = 0x00; /* reg addr MSB */
+ buf[4] = 0x00; /* reg addr LSB */
+ memcpy(&buf[5], msg[0].buf, msg[0].len);
+ ret = af9035_ctrl_msg(d->udev, &req);
+ }
+ } else {
+ /*
+ * We support only two kinds of I2C transactions:
+ * 1) 1 x write + 1 x read (typical register read)
+ * 2) 1 x write
+ */
+ ret = -EOPNOTSUPP;
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+
+ if (ret < 0)
+ return ret;
+ else
+ return num;
+}
+
+static u32 af9035_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm af9035_i2c_algo = {
+ .master_xfer = af9035_i2c_master_xfer,
+ .functionality = af9035_i2c_functionality,
+};
+
+#define AF9035_POLL 250
+static int af9035_rc_query(struct dvb_usb_device *d)
+{
+ unsigned int key;
+ unsigned char b[4];
+ int ret;
+ struct usb_req req = { CMD_IR_GET, 0, 0, NULL, 4, b };
+
+ ret = af9035_ctrl_msg(d->udev, &req);
+ if (ret < 0)
+ goto err;
+
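+ /*
+ * b[2]/b[3] complementary means NEC: plain NEC if b[0]/b[1] are also
+ * complementary, extended NEC otherwise; anything else is passed
+ * through as a raw 32-bit code.
+ */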
+ if ((b[2] + b[3]) == 0xff) {
+ if ((b[0] + b[1]) == 0xff) {
+ /* NEC */
+ key = b[0] << 8 | b[2];
+ } else {
+ /* ext. NEC */
+ key = b[0] << 16 | b[1] << 8 | b[2];
+ }
+ } else {
+ key = b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];
+ }
+
+ rc_keydown(d->rc_dev, key, 0);
+
+err:
+ /* ignore errors */
+ return 0;
+}
+
+static int af9035_init(struct dvb_usb_device *d)
+{
+ struct state *state = d->priv;
+ int ret, i;
+ u16 frame_size = 87 * 188 / 4;
+ u8 packet_size = 512 / 4;
+ struct reg_val_mask tab[] = {
+ { 0x80f99d, 0x01, 0x01 },
+ { 0x80f9a4, 0x01, 0x01 },
+ { 0x00dd11, 0x00, 0x20 },
+ { 0x00dd11, 0x00, 0x40 },
+ { 0x00dd13, 0x00, 0x20 },
+ { 0x00dd13, 0x00, 0x40 },
+ { 0x00dd11, 0x20, 0x20 },
+ { 0x00dd88, (frame_size >> 0) & 0xff, 0xff},
+ { 0x00dd89, (frame_size >> 8) & 0xff, 0xff},
+ { 0x00dd0c, packet_size, 0xff},
+ { 0x00dd11, state->dual_mode << 6, 0x40 },
+ { 0x00dd8a, (frame_size >> 0) & 0xff, 0xff},
+ { 0x00dd8b, (frame_size >> 8) & 0xff, 0xff},
+ { 0x00dd0d, packet_size, 0xff },
+ { 0x80f9a3, 0x00, 0x01 },
+ { 0x80f9cd, 0x00, 0x01 },
+ { 0x80f99d, 0x00, 0x01 },
+ { 0x80f9a4, 0x00, 0x01 },
+ };
+
+ pr_debug("%s: USB speed=%d frame_size=%04x packet_size=%02x\n",
+ __func__, d->udev->speed, frame_size, packet_size);
+
+ /* init endpoints */
+ for (i = 0; i < ARRAY_SIZE(tab); i++) {
+ ret = af9035_wr_reg_mask(d, tab[i].reg, tab[i].val,
+ tab[i].mask);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
+{
+ int ret;
+ u8 wbuf[1] = { 1 };
+ u8 rbuf[4];
+ struct usb_req req = { CMD_FW_QUERYINFO, 0, sizeof(wbuf), wbuf,
+ sizeof(rbuf), rbuf };
+
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ pr_debug("%s: reply=%02x %02x %02x %02x\n", __func__,
+ rbuf[0], rbuf[1], rbuf[2], rbuf[3]);
+ if (rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])
+ *cold = 0;
+ else
+ *cold = 1;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_download_firmware(struct usb_device *udev,
+ const struct firmware *fw)
+{
+ int ret, i, j, len;
+ u8 wbuf[1];
+ u8 rbuf[4];
+ struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
+ struct usb_req req_fw_dl = { CMD_FW_DL, 0, 0, wbuf, 0, NULL };
+ struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf };
+ u8 hdr_core;
+ u16 hdr_addr, hdr_data_len, hdr_checksum;
+ #define MAX_DATA 58
+ #define HDR_SIZE 7
+
+ /*
+ * Thanks to Daniel Glöckner <daniel-gl@gmx.net> for that info!
+ *
+ * byte 0: MCS 51 core
+ * There are two inside the AF9035 (1=Link and 2=OFDM) with separate
+ * address spaces
+ * byte 1-2: Big endian destination address
+ * byte 3-4: Big endian number of data bytes following the header
+ * byte 5-6: Big endian header checksum, apparently ignored by the chip
+ * Calculated as ~(h[0]*256+h[1]+h[2]*256+h[3]+h[4]*256)
+ */
+
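+ /*
+ * Hypothetical example: a header of 01 90 00 00 3a aa bb would mean
+ * Link core, destination address 0x9000, 0x3a data bytes following,
+ * header checksum 0xaabb (ignored).
+ */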
+ for (i = fw->size; i > HDR_SIZE;) {
+ hdr_core = fw->data[fw->size - i + 0];
+ hdr_addr = fw->data[fw->size - i + 1] << 8;
+ hdr_addr |= fw->data[fw->size - i + 2] << 0;
+ hdr_data_len = fw->data[fw->size - i + 3] << 8;
+ hdr_data_len |= fw->data[fw->size - i + 4] << 0;
+ hdr_checksum = fw->data[fw->size - i + 5] << 8;
+ hdr_checksum |= fw->data[fw->size - i + 6] << 0;
+
+ pr_debug("%s: core=%d addr=%04x data_len=%d checksum=%04x\n",
+ __func__, hdr_core, hdr_addr, hdr_data_len,
+ hdr_checksum);
+
+ if (((hdr_core != 1) && (hdr_core != 2)) ||
+ (hdr_data_len > i)) {
+ pr_debug("%s: bad firmware\n", __func__);
+ break;
+ }
+
+ /* download begin packet */
+ req.cmd = CMD_FW_DL_BEGIN;
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ /* download firmware packet(s) */
+ for (j = HDR_SIZE + hdr_data_len; j > 0; j -= MAX_DATA) {
+ len = j;
+ if (len > MAX_DATA)
+ len = MAX_DATA;
+ req_fw_dl.wlen = len;
+ req_fw_dl.wbuf = (u8 *) &fw->data[fw->size - i +
+ HDR_SIZE + hdr_data_len - j];
+ ret = af9035_ctrl_msg(udev, &req_fw_dl);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* download end packet */
+ req.cmd = CMD_FW_DL_END;
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ i -= hdr_data_len + HDR_SIZE;
+
+ pr_debug("%s: data uploaded=%zu\n", __func__, fw->size - i);
+ }
+
+ /* firmware loaded, request boot */
+ req.cmd = CMD_FW_BOOT;
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ /* ensure firmware starts */
+ wbuf[0] = 1;
+ ret = af9035_ctrl_msg(udev, &req_fw_ver);
+ if (ret < 0)
+ goto err;
+
+ if (!(rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])) {
+ info("firmware did not run");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ info("firmware version=%d.%d.%d.%d", rbuf[0], rbuf[1], rbuf[2],
+ rbuf[3]);
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_download_firmware_it9135(struct usb_device *udev,
+ const struct firmware *fw)
+{
+ int ret, i, i_prev;
+ u8 wbuf[1];
+ u8 rbuf[4];
+ struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
+ struct usb_req req_fw_dl = { CMD_FW_SCATTER_WR, 0, 0, NULL, 0, NULL };
+ struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf };
+ #define HDR_SIZE 7
+
+ /*
+ * There seems to be the following firmware header. The meaning of bytes
+ * 0-3 is unknown.
+ *
+ * 0: 3
+ * 1: 0, 1
+ * 2: 0
+ * 3: 1, 2, 3
+ * 4: addr MSB
+ * 5: addr LSB
+ * 6: count of data bytes ?
+ */
+
+ for (i = HDR_SIZE, i_prev = 0; i <= fw->size; i++) {
+ if (i == fw->size ||
+ (fw->data[i + 0] == 0x03 &&
+ (fw->data[i + 1] == 0x00 ||
+ fw->data[i + 1] == 0x01) &&
+ fw->data[i + 2] == 0x00)) {
+ req_fw_dl.wlen = i - i_prev;
+ req_fw_dl.wbuf = (u8 *) &fw->data[i_prev];
+ i_prev = i;
+ ret = af9035_ctrl_msg(udev, &req_fw_dl);
+ if (ret < 0)
+ goto err;
+
+ pr_debug("%s: data uploaded=%d\n", __func__, i);
+ }
+ }
+
+ /* firmware loaded, request boot */
+ req.cmd = CMD_FW_BOOT;
+ ret = af9035_ctrl_msg(udev, &req);
+ if (ret < 0)
+ goto err;
+
+ /* ensure firmware starts */
+ wbuf[0] = 1;
+ ret = af9035_ctrl_msg(udev, &req_fw_ver);
+ if (ret < 0)
+ goto err;
+
+ if (!(rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])) {
+ info("firmware did not run");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ info("firmware version=%d.%d.%d.%d", rbuf[0], rbuf[1], rbuf[2],
+ rbuf[3]);
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+/* abuse this callback as there is no better one for reading the eeprom */
+static int af9035_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
+{
+ struct state *state = d->priv;
+ int ret, i, eeprom_shift = 0;
+ u8 tmp;
+ u16 tmp16;
+
+ /* check whether there are dual tuners */
+ ret = af9035_rd_reg(d, EEPROM_DUAL_MODE, &tmp);
+ if (ret < 0)
+ goto err;
+
+ state->dual_mode = tmp;
+ pr_debug("%s: dual mode=%d\n", __func__, state->dual_mode);
+
+ for (i = 0; i < af9035_properties[0].num_adapters; i++) {
+ /* tuner */
+ ret = af9035_rd_reg(d, EEPROM_1_TUNER_ID + eeprom_shift, &tmp);
+ if (ret < 0)
+ goto err;
+
+ state->af9033_config[i].tuner = tmp;
+ pr_debug("%s: [%d]tuner=%02x\n", __func__, i, tmp);
+
+ switch (tmp) {
+ case AF9033_TUNER_TUA9001:
+ case AF9033_TUNER_FC0011:
+ case AF9033_TUNER_MXL5007T:
+ case AF9033_TUNER_TDA18218:
+ state->af9033_config[i].spec_inv = 1;
+ break;
+ default:
+ warn("tuner ID=%02x not supported, please report!",
+ tmp);
+ }
+
+ /* tuner IF frequency */
+ ret = af9035_rd_reg(d, EEPROM_1_IFFREQ_L + eeprom_shift, &tmp);
+ if (ret < 0)
+ goto err;
+
+ tmp16 = tmp;
+
+ ret = af9035_rd_reg(d, EEPROM_1_IFFREQ_H + eeprom_shift, &tmp);
+ if (ret < 0)
+ goto err;
+
+ tmp16 |= tmp << 8;
+
+ pr_debug("%s: [%d]IF=%d\n", __func__, i, tmp16);
+
+ eeprom_shift = 0x10; /* shift for the 2nd tuner params */
+ }
+
+ /* get demod clock */
+ ret = af9035_rd_reg(d, 0x00d800, &tmp);
+ if (ret < 0)
+ goto err;
+
+ tmp = (tmp >> 0) & 0x0f;
+
+ for (i = 0; i < af9035_properties[0].num_adapters; i++)
+ state->af9033_config[i].clock = clock_lut[tmp];
+
+ ret = af9035_rd_reg(d, EEPROM_IR_MODE, &tmp);
+ if (ret < 0)
+ goto err;
+ pr_debug("%s: ir_mode=%02x\n", __func__, tmp);
+
+ /* don't activate rc if in HID mode or if not available */
+ if (tmp == 5) {
+ ret = af9035_rd_reg(d, EEPROM_IR_TYPE, &tmp);
+ if (ret < 0)
+ goto err;
+ pr_debug("%s: ir_type=%02x\n", __func__, tmp);
+
+ switch (tmp) {
+ case 0: /* NEC */
+ default:
+ d->props.rc.core.protocol = RC_TYPE_NEC;
+ d->props.rc.core.allowed_protos = RC_TYPE_NEC;
+ break;
+ case 1: /* RC6 */
+ d->props.rc.core.protocol = RC_TYPE_RC6;
+ d->props.rc.core.allowed_protos = RC_TYPE_RC6;
+ break;
+ }
+ d->props.rc.core.rc_query = af9035_rc_query;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+/* abuse this callback as there is no better one for reading the eeprom */
+static int af9035_read_mac_address_it9135(struct dvb_usb_device *d, u8 mac[6])
+{
+ struct state *state = d->priv;
+ int ret, i;
+ u8 tmp;
+
+ state->dual_mode = false;
+
+ /* get demod clock */
+ ret = af9035_rd_reg(d, 0x00d800, &tmp);
+ if (ret < 0)
+ goto err;
+
+ tmp = (tmp >> 0) & 0x0f;
+
+ for (i = 0; i < af9035_properties[0].num_adapters; i++)
+ state->af9033_config[i].clock = clock_lut_it9135[tmp];
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_fc0011_tuner_callback(struct dvb_usb_device *d,
+ int cmd, int arg)
+{
+ int ret;
+
+ switch (cmd) {
+ case FC0011_FE_CALLBACK_POWER:
+ /* Tuner enable */
+ ret = af9035_wr_reg_mask(d, 0xd8eb, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8ec, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8ed, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ /* LED */
+ ret = af9035_wr_reg_mask(d, 0xd8d0, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(d, 0xd8d1, 1, 1);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(10000, 50000);
+ break;
+ case FC0011_FE_CALLBACK_RESET:
+ ret = af9035_wr_reg(d, 0xd8e9, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0xd8e8, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(d, 0xd8e7, 1);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(10000, 20000);
+
+ ret = af9035_wr_reg(d, 0xd8e7, 0);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(10000, 20000);
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9035_tuner_callback(struct dvb_usb_device *d, int cmd, int arg)
+{
+ struct state *state = d->priv;
+
+ switch (state->af9033_config[0].tuner) {
+ case AF9033_TUNER_FC0011:
+ return af9035_fc0011_tuner_callback(d, cmd, arg);
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
+
+static int af9035_frontend_callback(void *adapter_priv, int component,
+ int cmd, int arg)
+{
+ struct i2c_adapter *adap = adapter_priv;
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+
+ switch (component) {
+ case DVB_FRONTEND_COMPONENT_TUNER:
+ return af9035_tuner_callback(d, cmd, arg);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap->dev->priv;
+ int ret;
+
+ if (!state->af9033_config[adap->id].tuner) {
+ /* unsupported tuner */
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (adap->id == 0) {
+ state->af9033_config[0].ts_mode = AF9033_TS_MODE_USB;
+ state->af9033_config[1].ts_mode = AF9033_TS_MODE_SERIAL;
+
+ ret = af9035_wr_reg(adap->dev, 0x00417f,
+ state->af9033_config[1].i2c_addr);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg(adap->dev, 0x00d81a,
+ state->dual_mode);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* attach demodulator */
+ adap->fe_adap[0].fe = dvb_attach(af9033_attach,
+ &state->af9033_config[adap->id], &adap->dev->i2c_adap);
+ if (adap->fe_adap[0].fe == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* disable I2C-gate */
+ adap->fe_adap[0].fe->ops.i2c_gate_ctrl = NULL;
+ adap->fe_adap[0].fe->callback = af9035_frontend_callback;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct tua9001_config af9035_tua9001_config = {
+ .i2c_addr = 0x60,
+};
+
+static const struct fc0011_config af9035_fc0011_config = {
+ .i2c_address = 0x60,
+};
+
+static struct mxl5007t_config af9035_mxl5007t_config = {
+ .xtal_freq_hz = MxL_XTAL_24_MHZ,
+ .if_freq_hz = MxL_IF_4_57_MHZ,
+ .invert_if = 0,
+ .loop_thru_enable = 0,
+ .clk_out_enable = 0,
+ .clk_out_amp = MxL_CLKOUT_AMP_0_94V,
+};
+
+static struct tda18218_config af9035_tda18218_config = {
+ .i2c_address = 0x60,
+ .i2c_wr_max = 21,
+};
+
+static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct state *state = adap->dev->priv;
+ int ret;
+ struct dvb_frontend *fe;
+
+ switch (state->af9033_config[adap->id].tuner) {
+ case AF9033_TUNER_TUA9001:
+ /* AF9035 gpiot3 = TUA9001 RESETN
+ AF9035 gpiot2 = TUA9001 RXEN */
+
+ /* configure gpiot2 and gpiot3 as output */
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8ec, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8ed, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8e8, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8e9, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* reset tuner */
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8e7, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(2000, 20000);
+
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8e7, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* activate tuner RX */
+ /* TODO: use callback for TUA9001 RXEN */
+ ret = af9035_wr_reg_mask(adap->dev, 0x00d8eb, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+
+ /* attach tuner */
+ fe = dvb_attach(tua9001_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &af9035_tua9001_config);
+ break;
+ case AF9033_TUNER_FC0011:
+ fe = dvb_attach(fc0011_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &af9035_fc0011_config);
+ break;
+ case AF9033_TUNER_MXL5007T:
+ ret = af9035_wr_reg(adap->dev, 0x00d8e0, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8e1, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8df, 0);
+ if (ret < 0)
+ goto err;
+
+ msleep(30);
+
+ ret = af9035_wr_reg(adap->dev, 0x00d8df, 1);
+ if (ret < 0)
+ goto err;
+
+ msleep(300);
+
+ ret = af9035_wr_reg(adap->dev, 0x00d8c0, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8c1, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8bf, 0);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8b4, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8b5, 1);
+ if (ret < 0)
+ goto err;
+ ret = af9035_wr_reg(adap->dev, 0x00d8b3, 1);
+ if (ret < 0)
+ goto err;
+
+ /* attach tuner */
+ fe = dvb_attach(mxl5007t_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, 0x60, &af9035_mxl5007t_config);
+ break;
+ case AF9033_TUNER_TDA18218:
+ /* attach tuner */
+ fe = dvb_attach(tda18218_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, &af9035_tda18218_config);
+ break;
+ default:
+ fe = NULL;
+ }
+
+ if (fe == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+enum af9035_id_entry {
+ AF9035_15A4_9035,
+ AF9035_15A4_1000,
+ AF9035_15A4_1001,
+ AF9035_15A4_1002,
+ AF9035_15A4_1003,
+ AF9035_0CCD_0093,
+ AF9035_07CA_A835,
+ AF9035_07CA_B835,
+ AF9035_07CA_1867,
+ AF9035_07CA_A867,
+ AF9035_07CA_0825,
+};
+
+static struct usb_device_id af9035_id[] = {
+ [AF9035_15A4_9035] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_9035)},
+ [AF9035_15A4_1000] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1000)},
+ [AF9035_15A4_1001] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1001)},
+ [AF9035_15A4_1002] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1002)},
+ [AF9035_15A4_1003] = {
+ USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1003)},
+ [AF9035_0CCD_0093] = {
+ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK)},
+ [AF9035_07CA_A835] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835)},
+ [AF9035_07CA_B835] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_B835)},
+ [AF9035_07CA_1867] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_1867)},
+ [AF9035_07CA_A867] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A867)},
+ [AF9035_07CA_0825] = {
+ USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TWINSTAR)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(usb, af9035_id);
+
+static struct dvb_usb_device_properties af9035_properties[] = {
+ {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = af9035_download_firmware,
+ .firmware = "dvb-usb-af9035-02.fw",
+ .no_reconnect = 1,
+
+ .size_of_priv = sizeof(struct state),
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9035_frontend_attach,
+ .tuner_attach = af9035_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ .u = {
+ .bulk = {
+ .buffersize = (87 * 188),
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+
+ .identify_state = af9035_identify_state,
+ .read_mac_address = af9035_read_mac_address,
+
+ .i2c_algo = &af9035_i2c_algo,
+
+ .rc.core = {
+ .protocol = RC_TYPE_UNKNOWN,
+ .module_name = "af9035",
+ .rc_query = NULL,
+ .rc_interval = AF9035_POLL,
+ .allowed_protos = RC_TYPE_UNKNOWN,
+ .rc_codes = RC_MAP_EMPTY,
+ },
+ .num_device_descs = 5,
+ .devices = {
+ {
+ .name = "Afatech AF9035 reference design",
+ .cold_ids = {
+ &af9035_id[AF9035_15A4_9035],
+ &af9035_id[AF9035_15A4_1000],
+ &af9035_id[AF9035_15A4_1001],
+ &af9035_id[AF9035_15A4_1002],
+ &af9035_id[AF9035_15A4_1003],
+ },
+ }, {
+ .name = "TerraTec Cinergy T Stick",
+ .cold_ids = {
+ &af9035_id[AF9035_0CCD_0093],
+ },
+ }, {
+ .name = "AVerMedia AVerTV Volar HD/PRO (A835)",
+ .cold_ids = {
+ &af9035_id[AF9035_07CA_A835],
+ &af9035_id[AF9035_07CA_B835],
+ },
+ }, {
+ .name = "AVerMedia HD Volar (A867)",
+ .cold_ids = {
+ &af9035_id[AF9035_07CA_1867],
+ &af9035_id[AF9035_07CA_A867],
+ },
+ }, {
+ .name = "AVerMedia Twinstar (A825)",
+ .cold_ids = {
+ &af9035_id[AF9035_07CA_0825],
+ },
+ },
+ }
+ },
+ {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .download_firmware = af9035_download_firmware_it9135,
+ .firmware = "dvb-usb-it9135-01.fw",
+ .no_reconnect = 1,
+
+ .size_of_priv = sizeof(struct state),
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {
+ {
+ .frontend_attach = af9035_frontend_attach,
+ .tuner_attach = af9035_tuner_attach,
+ .stream = {
+ .type = USB_BULK,
+ .count = 6,
+ .endpoint = 0x84,
+ .u = {
+ .bulk = {
+ .buffersize = (87 * 188),
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+
+ .identify_state = af9035_identify_state,
+ .read_mac_address = af9035_read_mac_address_it9135,
+
+ .i2c_algo = &af9035_i2c_algo,
+
+ .num_device_descs = 0, /* disabled, no IT9135 support yet */
+ .devices = {
+ {
+ .name = "ITE Tech. IT9135 reference design",
+ },
+ }
+ },
+};
+
+static int af9035_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret, i;
+ struct dvb_usb_device *d = NULL;
+ struct usb_device *udev;
+ bool found;
+
+ pr_debug("%s: interface=%d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+
+ /* interface 0 is used by the DVB-T receiver and
+ interface 1 is for the remote controller (HID) */
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return 0;
+
+ /* Dynamic USB ID support. Replaces the first device ID with the current one. */
+ udev = interface_to_usbdev(intf);
+
+ for (i = 0, found = false; i < ARRAY_SIZE(af9035_id) - 1; i++) {
+ if (af9035_id[i].idVendor ==
+ le16_to_cpu(udev->descriptor.idVendor) &&
+ af9035_id[i].idProduct ==
+ le16_to_cpu(udev->descriptor.idProduct)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("%s: using dynamic ID %04x:%04x\n", __func__,
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+ af9035_properties[0].devices[0].cold_ids[0]->idVendor =
+ le16_to_cpu(udev->descriptor.idVendor);
+ af9035_properties[0].devices[0].cold_ids[0]->idProduct =
+ le16_to_cpu(udev->descriptor.idProduct);
+ }
+
+ for (i = 0; i < af9035_properties_count; i++) {
+ ret = dvb_usb_device_init(intf, &af9035_properties[i],
+ THIS_MODULE, &d, adapter_nr);
+
+ if (ret == -ENODEV)
+ continue;
+ else
+ break;
+ }
+
+ if (ret < 0)
+ goto err;
+
+ if (d) {
+ ret = af9035_init(d);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver af9035_usb_driver = {
+ .name = "dvb_usb_af9035",
+ .probe = af9035_usb_probe,
+ .disconnect = dvb_usb_device_exit,
+ .id_table = af9035_id,
+};
+
+module_usb_driver(af9035_usb_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Afatech AF9035 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/af9035.h b/drivers/media/dvb/dvb-usb/af9035.h
new file mode 100644
index 000000000000..481a1a43dd2a
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9035.h
@@ -0,0 +1,113 @@
+/*
+ * Afatech AF9035 DVB USB driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AF9035_H
+#define AF9035_H
+
+/* prefix for dvb-usb log writings */
+#define DVB_USB_LOG_PREFIX "af9035"
+
+#include "dvb-usb.h"
+#include "af9033.h"
+#include "tua9001.h"
+#include "fc0011.h"
+#include "mxl5007t.h"
+#include "tda18218.h"
+
+struct reg_val {
+ u32 reg;
+ u8 val;
+};
+
+struct reg_val_mask {
+ u32 reg;
+ u8 val;
+ u8 mask;
+};
+
+struct usb_req {
+ u8 cmd;
+ u8 mbox;
+ u8 wlen;
+ u8 *wbuf;
+ u8 rlen;
+ u8 *rbuf;
+};
+
+struct state {
+ bool dual_mode;
+
+ struct af9033_config af9033_config[2];
+};
+
+static const u32 clock_lut[] = {
+ 20480000, /* FPGA */
+ 16384000, /* 16.38 MHz */
+ 20480000, /* 20.48 MHz */
+ 36000000, /* 36.00 MHz */
+ 30000000, /* 30.00 MHz */
+ 26000000, /* 26.00 MHz */
+ 28000000, /* 28.00 MHz */
+ 32000000, /* 32.00 MHz */
+ 34000000, /* 34.00 MHz */
+ 24000000, /* 24.00 MHz */
+ 22000000, /* 22.00 MHz */
+ 12000000, /* 12.00 MHz */
+};
+
+static const u32 clock_lut_it9135[] = {
+ 12000000, /* 12.00 MHz */
+ 20480000, /* 20.48 MHz */
+ 36000000, /* 36.00 MHz */
+ 30000000, /* 30.00 MHz */
+ 26000000, /* 26.00 MHz */
+ 28000000, /* 28.00 MHz */
+ 32000000, /* 32.00 MHz */
+ 34000000, /* 34.00 MHz */
+ 24000000, /* 24.00 MHz */
+ 22000000, /* 22.00 MHz */
+};
+
+/* EEPROM locations */
+#define EEPROM_IR_MODE 0x430d
+#define EEPROM_DUAL_MODE 0x4326
+#define EEPROM_IR_TYPE 0x4329
+#define EEPROM_1_IFFREQ_L 0x432d
+#define EEPROM_1_IFFREQ_H 0x432e
+#define EEPROM_1_TUNER_ID 0x4331
+#define EEPROM_2_IFFREQ_L 0x433d
+#define EEPROM_2_IFFREQ_H 0x433e
+#define EEPROM_2_TUNER_ID 0x4341
+
+/* USB commands */
+#define CMD_MEM_RD 0x00
+#define CMD_MEM_WR 0x01
+#define CMD_I2C_RD 0x02
+#define CMD_I2C_WR 0x03
+#define CMD_IR_GET 0x18
+#define CMD_FW_DL 0x21
+#define CMD_FW_QUERYINFO 0x22
+#define CMD_FW_BOOT 0x23
+#define CMD_FW_DL_BEGIN 0x24
+#define CMD_FW_DL_END 0x25
+#define CMD_FW_SCATTER_WR 0x29
+
+#endif
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 02290c60f72f..7e9e00fae04e 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -32,7 +32,7 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
@@ -118,7 +118,7 @@ int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_GPIO;
@@ -139,7 +139,7 @@ static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
if (st->fw_version >= 0x10201) {
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_USB_XFER_LEN;
@@ -178,7 +178,7 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* Ensure nobody else hits the i2c bus while we're sending our
sequence of messages, (such as the remote control thread) */
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
+ return -EINTR;
for (i = 0; i < num; i++) {
if (i == 0) {
@@ -228,7 +228,8 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
/* Write request */
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ mutex_unlock(&d->i2c_mutex);
+ return -EINTR;
}
st->buf[0] = REQUEST_NEW_I2C_WRITE;
st->buf[1] = msg[i].addr << 1;
@@ -271,10 +272,11 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
int i,len;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
+ return -EINTR;
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ mutex_unlock(&d->i2c_mutex);
+ return -EINTR;
}
for (i = 0; i < num; i++) {
@@ -369,7 +371,7 @@ static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll,
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_CLOCK;
@@ -401,7 +403,7 @@ int dib0700_set_i2c_speed(struct dvb_usb_device *d, u16 scl_kHz)
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_I2C_PARAM;
@@ -561,7 +563,7 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
if (mutex_lock_interruptible(&adap->dev->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_ENABLE_VIDEO;
@@ -611,7 +613,7 @@ int dib0700_change_protocol(struct rc_dev *rc, u64 rc_type)
if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
err("could not acquire lock");
- return 0;
+ return -EINTR;
}
st->buf[0] = REQUEST_SET_RC;
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index f9e966aa26e7..510001da6e83 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -3569,6 +3569,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7090E) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7790E) },
/* 80 */{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE8096P) },
+ { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT_2) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -3832,7 +3833,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
},
},
- .num_device_descs = 11,
+ .num_device_descs = 12,
.devices = {
{ "DiBcom STK7070P reference design",
{ &dib0700_usb_id_table[15], NULL },
@@ -3878,6 +3879,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ &dib0700_usb_id_table[50], NULL },
{ NULL },
},
+ { "Elgato EyeTV DTT rev. 2",
+ { &dib0700_usb_id_table[81], NULL },
+ { NULL },
+ },
},
.rc.core = {
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 397d8f232731..7a6160bf54ba 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -76,6 +76,11 @@
#define USB_PID_AFATECH_AF9005 0x9020
#define USB_PID_AFATECH_AF9015_9015 0x9015
#define USB_PID_AFATECH_AF9015_9016 0x9016
+#define USB_PID_AFATECH_AF9035_1000 0x1000
+#define USB_PID_AFATECH_AF9035_1001 0x1001
+#define USB_PID_AFATECH_AF9035_1002 0x1002
+#define USB_PID_AFATECH_AF9035_1003 0x1003
+#define USB_PID_AFATECH_AF9035_9035 0x9035
#define USB_PID_TREKSTOR_DVBT 0x901b
#define USB_VID_ALINK_DTU 0xf170
#define USB_PID_ANSONIC_DVBT_USB 0x6000
@@ -152,6 +157,7 @@
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
+#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
#define USB_PID_TWINHAN_VP7041_COLD 0x3201
@@ -221,6 +227,11 @@
#define USB_PID_AVERMEDIA_A850T 0x850b
#define USB_PID_AVERMEDIA_A805 0xa805
#define USB_PID_AVERMEDIA_A815M 0x815a
+#define USB_PID_AVERMEDIA_A835 0xa835
+#define USB_PID_AVERMEDIA_B835 0xb835
+#define USB_PID_AVERMEDIA_1867 0x1867
+#define USB_PID_AVERMEDIA_A867 0xa867
+#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d
#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
@@ -327,6 +338,7 @@
#define USB_PID_MYGICA_D689 0xd811
#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
+#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
#define USB_PID_ELGATO_EYETV_SAT 0x002a
#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index 53a5c30b51b2..5c8f651344fc 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -80,6 +80,14 @@ static void dvb_usb_data_complete_204(struct usb_data_stream *stream, u8 *buffer
dvb_dmx_swfilter_204(&adap->demux, buffer, length);
}
+static void dvb_usb_data_complete_raw(struct usb_data_stream *stream,
+ u8 *buffer, size_t length)
+{
+ struct dvb_usb_adapter *adap = stream->user_priv;
+ if (adap->feedcount > 0 && adap->state & DVB_USB_ADAP_STATE_DVB)
+ dvb_dmx_swfilter_raw(&adap->demux, buffer, length);
+}
+
int dvb_usb_adapter_stream_init(struct dvb_usb_adapter *adap)
{
int i, ret = 0;
@@ -90,6 +98,10 @@ int dvb_usb_adapter_stream_init(struct dvb_usb_adapter *adap)
adap->fe_adap[i].stream.complete =
dvb_usb_data_complete_204;
else
+ if (adap->props.fe[i].caps & DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD)
+ adap->fe_adap[i].stream.complete =
+ dvb_usb_data_complete_raw;
+ else
adap->fe_adap[i].stream.complete = dvb_usb_data_complete;
adap->fe_adap[i].stream.user_priv = adap;
ret = usb_urb_init(&adap->fe_adap[i].stream,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 6d7d13f9ce68..99f94409efa1 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -141,6 +141,7 @@ struct dvb_usb_adapter_fe_properties {
#define DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF 0x02
#define DVB_USB_ADAP_NEED_PID_FILTERING 0x04
#define DVB_USB_ADAP_RECEIVES_204_BYTE_TS 0x08
+#define DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD 0x10
int caps;
int pid_filter_count;
@@ -156,7 +157,7 @@ struct dvb_usb_adapter_fe_properties {
int size_of_priv;
};
-#define MAX_NO_OF_FE_PER_ADAP 2
+#define MAX_NO_OF_FE_PER_ADAP 3
struct dvb_usb_adapter_properties {
int size_of_priv;
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index 451c5a7adfb2..9382895b1b88 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -148,7 +148,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i = 0, ret = 0;
+ int i = 0;
u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0};
u16 value;
@@ -162,7 +162,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
/* read stv0299 register */
value = msg[0].buf[0];/* register */
for (i = 0; i < msg[1].len; i++) {
- ret = dw210x_op_rw(d->udev, 0xb5, value + i, 0,
+ dw210x_op_rw(d->udev, 0xb5, value + i, 0,
buf6, 2, DW210X_READ_MSG);
msg[1].buf[i] = buf6[0];
}
@@ -174,7 +174,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
buf6[0] = 0x2a;
buf6[1] = msg[0].buf[0];
buf6[2] = msg[0].buf[1];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 3, DW210X_WRITE_MSG);
break;
case 0x60:
@@ -187,17 +187,17 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
buf6[4] = msg[0].buf[1];
buf6[5] = msg[0].buf[2];
buf6[6] = msg[0].buf[3];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 7, DW210X_WRITE_MSG);
} else {
/* read from tuner */
- ret = dw210x_op_rw(d->udev, 0xb5, 0, 0,
+ dw210x_op_rw(d->udev, 0xb5, 0, 0,
buf6, 1, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
}
break;
case (DW2102_RC_QUERY):
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
buf6, 2, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
msg[0].buf[1] = buf6[1];
@@ -205,7 +205,7 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
case (DW2102_VOLTAGE_CTRL):
buf6[0] = 0x30;
buf6[1] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 2, DW210X_WRITE_MSG);
break;
}
@@ -221,7 +221,6 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int ret = 0;
u8 buf6[] = {0, 0, 0, 0, 0, 0, 0};
if (!d)
@@ -235,10 +234,10 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
buf6[0] = msg[0].addr << 1;
buf6[1] = msg[0].len;
buf6[2] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
buf6, msg[0].len + 2, DW210X_WRITE_MSG);
/* read si2109 register */
- ret = dw210x_op_rw(d->udev, 0xc3, 0xd0, 0,
+ dw210x_op_rw(d->udev, 0xc3, 0xd0, 0,
buf6, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, buf6 + 2, msg[1].len);
@@ -250,11 +249,11 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
buf6[0] = msg[0].addr << 1;
buf6[1] = msg[0].len;
memcpy(buf6 + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6,
msg[0].len + 2, DW210X_WRITE_MSG);
break;
case(DW2102_RC_QUERY):
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
buf6, 2, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
msg[0].buf[1] = buf6[1];
@@ -262,7 +261,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
case(DW2102_VOLTAGE_CTRL):
buf6[0] = 0x30;
buf6[1] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 2, DW210X_WRITE_MSG);
break;
}
@@ -276,7 +275,6 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int ret = 0;
if (!d)
return -ENODEV;
@@ -291,10 +289,10 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
/* second read registers */
- ret = dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0,
+ dw210x_op_rw(d->udev, 0xc3, 0xd1 , 0,
ibuf, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, ibuf + 2, msg[1].len);
@@ -308,7 +306,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
@@ -318,13 +316,13 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
case(DW2102_RC_QUERY): {
u8 ibuf[2];
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 2, DW210X_READ_MSG);
memcpy(msg[0].buf, ibuf , 2);
break;
@@ -333,7 +331,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
u8 obuf[2];
obuf[0] = 0x30;
obuf[1] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
@@ -349,7 +347,6 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int ret = 0;
int len, i, j;
if (!d)
@@ -361,7 +358,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
switch (msg[j].addr) {
case(DW2102_RC_QUERY): {
u8 ibuf[2];
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 2, DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf , 2);
break;
@@ -370,7 +367,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
u8 obuf[2];
obuf[0] = 0x30;
obuf[1] = msg[j].buf[0];
- ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
+ dw210x_op_rw(d->udev, 0xb2, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
@@ -382,7 +379,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
if (msg[j].flags == I2C_M_RD) {
/* read registers */
u8 ibuf[msg[j].len + 2];
- ret = dw210x_op_rw(d->udev, 0xc3,
+ dw210x_op_rw(d->udev, 0xc3,
(msg[j].addr << 1) + 1, 0,
ibuf, msg[j].len + 2,
DW210X_READ_MSG);
@@ -402,7 +399,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
do {
memcpy(obuf + 3, msg[j].buf + i,
(len > 16 ? 16 : len));
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, (len > 16 ? 16 : len) + 3,
DW210X_WRITE_MSG);
i += 16;
@@ -414,7 +411,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
obuf[0] = msg[j].addr << 1;
obuf[1] = msg[j].len;
memcpy(obuf + 2, msg[j].buf, msg[j].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[j].len + 2,
DW210X_WRITE_MSG);
}
@@ -432,7 +429,7 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int ret = 0, i;
+ int i;
if (!d)
return -ENODEV;
@@ -447,10 +444,10 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
/* second read registers */
- ret = dw210x_op_rw(d->udev, 0xc3, 0x19 , 0,
+ dw210x_op_rw(d->udev, 0xc3, 0x19 , 0,
ibuf, msg[1].len + 2, DW210X_READ_MSG);
memcpy(msg[1].buf, ibuf + 2, msg[1].len);
@@ -465,13 +462,13 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ dw210x_op_rw(d->udev, 0xc2, 0, 0,
obuf, msg[0].len + 2, DW210X_WRITE_MSG);
break;
}
case(DW2102_RC_QUERY): {
u8 ibuf[2];
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 2, DW210X_READ_MSG);
memcpy(msg[0].buf, ibuf , 2);
break;
@@ -496,7 +493,6 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct usb_device *udev;
- int ret = 0;
int len, i, j;
if (!d)
@@ -509,7 +505,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
switch (msg[j].addr) {
case (DW2102_RC_QUERY): {
u8 ibuf[5];
- ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
+ dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 5, DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf + 3, 2);
break;
@@ -519,11 +515,11 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = 1;
obuf[1] = msg[j].buf[1];/* off-on */
- ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
+ dw210x_op_rw(d->udev, 0x8a, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
obuf[0] = 3;
obuf[1] = msg[j].buf[0];/* 13v-18v */
- ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
+ dw210x_op_rw(d->udev, 0x8a, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
@@ -532,7 +528,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = 5;
obuf[1] = msg[j].buf[0];
- ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
+ dw210x_op_rw(d->udev, 0x8a, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
@@ -545,7 +541,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (msg[j].flags == I2C_M_RD) {
/* read registers */
u8 ibuf[msg[j].len];
- ret = dw210x_op_rw(d->udev, 0x91, 0, 0,
+ dw210x_op_rw(d->udev, 0x91, 0, 0,
ibuf, msg[j].len,
DW210X_READ_MSG);
memcpy(msg[j].buf, ibuf, msg[j].len);
@@ -563,7 +559,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
do {
memcpy(obuf + 3, msg[j].buf + i,
(len > 16 ? 16 : len));
- ret = dw210x_op_rw(d->udev, 0x80, 0, 0,
+ dw210x_op_rw(d->udev, 0x80, 0, 0,
obuf, (len > 16 ? 16 : len) + 3,
DW210X_WRITE_MSG);
i += 16;
@@ -575,7 +571,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = msg[j + 1].len;
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
- ret = dw210x_op_rw(d->udev,
+ dw210x_op_rw(d->udev,
udev->descriptor.idProduct ==
0x7500 ? 0x92 : 0x90, 0, 0,
obuf, msg[j].len + 2,
@@ -587,7 +583,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
obuf[0] = msg[j].len + 1;
obuf[1] = (msg[j].addr << 1);
memcpy(obuf + 2, msg[j].buf, msg[j].len);
- ret = dw210x_op_rw(d->udev, 0x80, 0, 0,
+ dw210x_op_rw(d->udev, 0x80, 0, 0,
obuf, msg[j].len + 2,
DW210X_WRITE_MSG);
break;
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index 482d249ca7f3..6244fe9d1a3a 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -81,7 +81,7 @@ static int it913x_bulk_write(struct usb_device *dev,
for (i = 0; i < IT913X_RETRY; i++) {
ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
snd, len , &actual_l, IT913X_SND_TIMEOUT);
- if (ret == 0 || ret != -EBUSY || ret != -ETIMEDOUT)
+ if (ret != -EBUSY && ret != -ETIMEDOUT)
break;
}
@@ -99,7 +99,7 @@ static int it913x_bulk_read(struct usb_device *dev,
for (i = 0; i < IT913X_RETRY; i++) {
ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
rev, len , &actual_l, IT913X_RCV_TIMEOUT);
- if (ret == 0 || ret != -EBUSY || ret != -ETIMEDOUT)
+ if (ret != -EBUSY && ret != -ETIMEDOUT)
break;
}
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 5dde06d066ff..25d1031460f8 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -373,7 +373,7 @@ static int lme2510_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
struct lme2510_state *st = adap->dev->priv;
static u8 clear_pid_reg[] = LME_ALL_PIDS;
static u8 rbuf[1];
- int ret;
+ int ret = 0;
deb_info(1, "PID Clearing Filter");
@@ -1205,14 +1205,13 @@ static int lme2510_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- int ret = 0;
usb_reset_configuration(udev);
usb_set_interface(udev, intf->cur_altsetting->desc.bInterfaceNumber, 1);
if (udev->speed != USB_SPEED_HIGH) {
- ret = usb_reset_device(udev);
+ usb_reset_device(udev);
info("DEV Failed to connect in HIGH SPEED mode");
return -ENODEV;
}
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
index 72db6eef4b9c..74da5bb1ce99 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.c
@@ -284,6 +284,7 @@ static int mxl111sf_tuner_set_params(struct dvb_frontend *fe)
switch (delsys) {
case SYS_ATSC:
+ case SYS_ATSCMH:
bw = 0; /* ATSC */
break;
case SYS_DVBC_ANNEX_B:
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c
index 81305de2fea5..cd842798f5af 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf.c
@@ -21,6 +21,7 @@
#include "mxl111sf-tuner.h"
#include "lgdt3305.h"
+#include "lg2160.h"
int dvb_usb_mxl111sf_debug;
module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
@@ -31,6 +32,10 @@ int dvb_usb_mxl111sf_isoc;
module_param_named(isoc, dvb_usb_mxl111sf_isoc, int, 0644);
MODULE_PARM_DESC(isoc, "enable usb isoc xfer (0=bulk, 1=isoc).");
+int dvb_usb_mxl111sf_spi;
+module_param_named(spi, dvb_usb_mxl111sf_spi, int, 0644);
+MODULE_PARM_DESC(spi, "use spi rather than tp for data xfer (0=tp, 1=spi).");
+
#define ANT_PATH_AUTO 0
#define ANT_PATH_EXTERNAL 1
#define ANT_PATH_INTERNAL 2
@@ -340,7 +345,6 @@ static int mxl111sf_ep6_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
struct mxl111sf_state *state = d->priv;
struct mxl111sf_adap_state *adap_state = adap->fe_adap[adap->active_fe].priv;
int ret = 0;
- u8 tmp;
deb_info("%s(%d)\n", __func__, onoff);
@@ -361,6 +365,33 @@ static int mxl111sf_ep6_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
return ret;
}
+static int mxl111sf_ep5_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int ret = 0;
+
+ deb_info("%s(%d)\n", __func__, onoff);
+
+ if (onoff) {
+ ret = mxl111sf_enable_usb_output(state);
+ mxl_fail(ret);
+
+ ret = mxl111sf_init_i2s_port(state, 200);
+ mxl_fail(ret);
+ ret = mxl111sf_config_i2s(state, 0, 15);
+ mxl_fail(ret);
+ } else {
+ ret = mxl111sf_disable_i2s_port(state);
+ mxl_fail(ret);
+ }
+ if (state->chip_rev > MXL111SF_V6)
+ ret = mxl111sf_config_spi(state, onoff);
+ mxl_fail(ret);
+
+ return ret;
+}
+
static int mxl111sf_ep4_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
struct dvb_usb_device *d = adap->dev;
@@ -453,6 +484,255 @@ fail:
return ret;
}
+static struct lg2160_config hauppauge_lg2160_config = {
+ .lg_chip = LG2160,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+};
+
+static int mxl111sf_lg2160_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe_id].priv;
+ int ret;
+
+ deb_adv("%s()\n", __func__);
+
+ /* save a pointer to the dvb_usb_device in device state */
+ state->d = d;
+ adap_state->alt_mode = (dvb_usb_mxl111sf_isoc) ? 2 : 1;
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ state->gpio_mode = MXL111SF_GPIO_MOD_MH;
+ adap_state->gpio_mode = state->gpio_mode;
+ adap_state->device_mode = MXL_TUNER_MODE;
+ adap_state->ep6_clockphase = 1;
+
+ ret = mxl1x1sf_soft_reset(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_init_tuner_demod(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_enable_usb_output(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_top_master_ctrl(state, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_init_port_expander(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_gpio_mode_switch(state, state->gpio_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = get_chip_info(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ adap->fe_adap[fe_id].fe = dvb_attach(lg2160_attach,
+ &hauppauge_lg2160_config,
+ &adap->dev->i2c_adap);
+ if (adap->fe_adap[fe_id].fe) {
+ adap_state->fe_init = adap->fe_adap[fe_id].fe->ops.init;
+ adap->fe_adap[fe_id].fe->ops.init = mxl111sf_adap_fe_init;
+ adap_state->fe_sleep = adap->fe_adap[fe_id].fe->ops.sleep;
+ adap->fe_adap[fe_id].fe->ops.sleep = mxl111sf_adap_fe_sleep;
+ return 0;
+ }
+ ret = -EIO;
+fail:
+ return ret;
+}
+
+static struct lg2160_config hauppauge_lg2161_1019_config = {
+ .lg_chip = LG2161_1019,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+ .output_if = 2, /* LG2161_OIF_SPI_MAS */
+};
+
+static struct lg2160_config hauppauge_lg2161_1040_config = {
+ .lg_chip = LG2161_1040,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+ .output_if = 4, /* LG2161_OIF_SPI_MAS */
+};
+
+static int mxl111sf_lg2161_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe_id].priv;
+ int ret;
+
+ deb_adv("%s()\n", __func__);
+
+ /* save a pointer to the dvb_usb_device in device state */
+ state->d = d;
+ adap_state->alt_mode = (dvb_usb_mxl111sf_isoc) ? 2 : 1;
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ state->gpio_mode = MXL111SF_GPIO_MOD_MH;
+ adap_state->gpio_mode = state->gpio_mode;
+ adap_state->device_mode = MXL_TUNER_MODE;
+ adap_state->ep6_clockphase = 1;
+
+ ret = mxl1x1sf_soft_reset(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_init_tuner_demod(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_enable_usb_output(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_top_master_ctrl(state, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_init_port_expander(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_gpio_mode_switch(state, state->gpio_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = get_chip_info(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ adap->fe_adap[fe_id].fe = dvb_attach(lg2160_attach,
+ (MXL111SF_V8_200 == state->chip_rev) ?
+ &hauppauge_lg2161_1040_config :
+ &hauppauge_lg2161_1019_config,
+ &adap->dev->i2c_adap);
+ if (adap->fe_adap[fe_id].fe) {
+ adap_state->fe_init = adap->fe_adap[fe_id].fe->ops.init;
+ adap->fe_adap[fe_id].fe->ops.init = mxl111sf_adap_fe_init;
+ adap_state->fe_sleep = adap->fe_adap[fe_id].fe->ops.sleep;
+ adap->fe_adap[fe_id].fe->ops.sleep = mxl111sf_adap_fe_sleep;
+ return 0;
+ }
+ ret = -EIO;
+fail:
+ return ret;
+}
+
+static struct lg2160_config hauppauge_lg2161_1019_ep6_config = {
+ .lg_chip = LG2161_1019,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+ .output_if = 1, /* LG2161_OIF_SERIAL_TS */
+};
+
+static struct lg2160_config hauppauge_lg2161_1040_ep6_config = {
+ .lg_chip = LG2161_1040,
+ .i2c_addr = 0x1c >> 1,
+ .deny_i2c_rptr = 1,
+ .spectral_inversion = 0,
+ .if_khz = 6000,
+ .output_if = 7, /* LG2161_OIF_SERIAL_TS */
+};
+
+static int mxl111sf_lg2161_ep6_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct mxl111sf_state *state = d->priv;
+ int fe_id = adap->num_frontends_initialized;
+ struct mxl111sf_adap_state *adap_state = adap->fe_adap[fe_id].priv;
+ int ret;
+
+ deb_adv("%s()\n", __func__);
+
+ /* save a pointer to the dvb_usb_device in device state */
+ state->d = d;
+ adap_state->alt_mode = (dvb_usb_mxl111sf_isoc) ? 2 : 1;
+ state->alt_mode = adap_state->alt_mode;
+
+ if (usb_set_interface(adap->dev->udev, 0, state->alt_mode) < 0)
+ err("set interface failed");
+
+ state->gpio_mode = MXL111SF_GPIO_MOD_MH;
+ adap_state->gpio_mode = state->gpio_mode;
+ adap_state->device_mode = MXL_TUNER_MODE;
+ adap_state->ep6_clockphase = 0;
+
+ ret = mxl1x1sf_soft_reset(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_init_tuner_demod(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl1x1sf_set_device_mode(state, adap_state->device_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_enable_usb_output(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl1x1sf_top_master_ctrl(state, 1);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = mxl111sf_init_port_expander(state);
+ if (mxl_fail(ret))
+ goto fail;
+ ret = mxl111sf_gpio_mode_switch(state, state->gpio_mode);
+ if (mxl_fail(ret))
+ goto fail;
+
+ ret = get_chip_info(state);
+ if (mxl_fail(ret))
+ goto fail;
+
+ adap->fe_adap[fe_id].fe = dvb_attach(lg2160_attach,
+ (MXL111SF_V8_200 == state->chip_rev) ?
+ &hauppauge_lg2161_1040_ep6_config :
+ &hauppauge_lg2161_1019_ep6_config,
+ &adap->dev->i2c_adap);
+ if (adap->fe_adap[fe_id].fe) {
+ adap_state->fe_init = adap->fe_adap[fe_id].fe->ops.init;
+ adap->fe_adap[fe_id].fe->ops.init = mxl111sf_adap_fe_init;
+ adap_state->fe_sleep = adap->fe_adap[fe_id].fe->ops.sleep;
+ adap->fe_adap[fe_id].fe->ops.sleep = mxl111sf_adap_fe_sleep;
+ return 0;
+ }
+ ret = -EIO;
+fail:
+ return ret;
+}
+
static struct mxl111sf_demod_config mxl_demod_config = {
.read_reg = mxl111sf_read_reg,
.write_reg = mxl111sf_write_reg,
@@ -650,6 +930,18 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties;
static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties;
static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties;
static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_atsc_mh_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_atsc_mh_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mh_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mh_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_spi_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_spi_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_tp_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_tp_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_mh_spi_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_mh_spi_isoc_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_mh_tp_bulk_properties;
+static struct dvb_usb_device_properties mxl111sf_mercury_mh_tp_isoc_properties;
static int mxl111sf_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -664,12 +956,50 @@ static int mxl111sf_probe(struct usb_interface *intf,
THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf,
&mxl111sf_atsc_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_atsc_mh_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mh_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ ((dvb_usb_mxl111sf_spi) &&
+ (0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_spi_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_mh_spi_isoc_properties,
+ THIS_MODULE, &d, adapter_nr))) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_tp_isoc_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_mh_tp_isoc_properties,
THIS_MODULE, &d, adapter_nr))) ||
0 == dvb_usb_device_init(intf,
&mxl111sf_dvbt_bulk_properties,
THIS_MODULE, &d, adapter_nr) ||
0 == dvb_usb_device_init(intf,
&mxl111sf_atsc_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_atsc_mh_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mh_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ ((dvb_usb_mxl111sf_spi) &&
+ (0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_spi_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_mh_spi_bulk_properties,
+ THIS_MODULE, &d, adapter_nr))) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_tp_bulk_properties,
+ THIS_MODULE, &d, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf,
+ &mxl111sf_mercury_mh_tp_bulk_properties,
THIS_MODULE, &d, adapter_nr) || 0) {
struct mxl111sf_state *state = d->priv;
@@ -787,6 +1117,36 @@ MODULE_DEVICE_TABLE(usb, mxl111sf_table);
} \
}
+#define MXL111SF_EP5_BULK_STREAMING_CONFIG \
+ .size_of_priv = sizeof(struct mxl111sf_adap_state), \
+ .streaming_ctrl = mxl111sf_ep5_streaming_ctrl, \
+ .stream = { \
+ .type = USB_BULK, \
+ .count = 5, \
+ .endpoint = 0x05, \
+ .u = { \
+ .bulk = { \
+ .buffersize = 8192, \
+ } \
+ } \
+ }
+
+#define MXL111SF_EP5_ISOC_STREAMING_CONFIG \
+ .size_of_priv = sizeof(struct mxl111sf_adap_state), \
+ .streaming_ctrl = mxl111sf_ep5_streaming_ctrl, \
+ .stream = { \
+ .type = USB_ISOC, \
+ .count = 5, \
+ .endpoint = 0x05, \
+ .u = { \
+ .isoc = { \
+ .framesperurb = 96, \
+ .framesize = 200, \
+ .interval = 1, \
+ } \
+ } \
+ }
+
#define MXL111SF_EP6_BULK_STREAMING_CONFIG \
.size_of_priv = sizeof(struct mxl111sf_adap_state), \
.streaming_ctrl = mxl111sf_ep6_streaming_ctrl, \
@@ -848,7 +1208,7 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties = {
} },
},
},
- .num_device_descs = 4,
+ .num_device_descs = 3,
.devices = {
{ "Hauppauge 126xxx DVBT (bulk)",
{ NULL },
@@ -866,11 +1226,6 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_bulk_properties = {
&mxl111sf_table[24], &mxl111sf_table[26],
NULL },
},
- { "Hauppauge 126xxx (tp-bulk)",
- { NULL },
- { &mxl111sf_table[28], &mxl111sf_table[30],
- NULL },
- },
}
};
@@ -890,7 +1245,7 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties = {
} },
},
},
- .num_device_descs = 4,
+ .num_device_descs = 3,
.devices = {
{ "Hauppauge 126xxx DVBT (isoc)",
{ NULL },
@@ -908,11 +1263,6 @@ static struct dvb_usb_device_properties mxl111sf_dvbt_isoc_properties = {
&mxl111sf_table[24], &mxl111sf_table[26],
NULL },
},
- { "Hauppauge 126xxx (tp-isoc)",
- { NULL },
- { &mxl111sf_table[28], &mxl111sf_table[30],
- NULL },
- },
}
};
@@ -923,33 +1273,159 @@ static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties = {
.adapter = {
{
.fe_ioctl_override = mxl111sf_fe_ioctl_override,
- .num_frontends = 2,
+ .num_frontends = 1,
.fe = {{
.frontend_attach = mxl111sf_lgdt3305_frontend_attach,
.tuner_attach = mxl111sf_attach_tuner,
MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ }},
},
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "Hauppauge 126xxx ATSC (bulk)",
+ { NULL },
+ { &mxl111sf_table[1], &mxl111sf_table[5],
+ NULL },
+ },
+ { "Hauppauge 117xxx ATSC (bulk)",
+ { NULL },
+ { &mxl111sf_table[12],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
{
- .frontend_attach = mxl111sf_attach_demod,
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 1,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
.tuner_attach = mxl111sf_attach_tuner,
- MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
}},
},
},
- .num_device_descs = 6,
+ .num_device_descs = 2,
.devices = {
- { "Hauppauge 126xxx ATSC (bulk)",
+ { "Hauppauge 126xxx ATSC (isoc)",
{ NULL },
{ &mxl111sf_table[1], &mxl111sf_table[5],
NULL },
},
- { "Hauppauge 117xxx ATSC (bulk)",
+ { "Hauppauge 117xxx ATSC (isoc)",
{ NULL },
{ &mxl111sf_table[12],
NULL },
},
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mh_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2160_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "HCW 126xxx (bulk)",
+ { NULL },
+ { &mxl111sf_table[2], &mxl111sf_table[6],
+ NULL },
+ },
+ { "HCW 117xxx (bulk)",
+ { NULL },
+ { &mxl111sf_table[13],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mh_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 1,
+ .fe = {{
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2160_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "HCW 126xxx (isoc)",
+ { NULL },
+ { &mxl111sf_table[2], &mxl111sf_table[6],
+ NULL },
+ },
+ { "HCW 117xxx (isoc)",
+ { NULL },
+ { &mxl111sf_table[13],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_atsc_mh_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2160_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
{ "Hauppauge 126xxx ATSC+ (bulk)",
{ NULL },
{ &mxl111sf_table[0], &mxl111sf_table[3],
@@ -963,13 +1439,96 @@ static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties = {
&mxl111sf_table[32], &mxl111sf_table[33],
NULL },
},
- { "Hauppauge Mercury (tp-bulk)",
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_atsc_mh_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2160_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "Hauppauge 126xxx ATSC+ (isoc)",
+ { NULL },
+ { &mxl111sf_table[0], &mxl111sf_table[3],
+ &mxl111sf_table[7], &mxl111sf_table[9],
+ &mxl111sf_table[10], NULL },
+ },
+ { "Hauppauge 117xxx ATSC+ (isoc)",
+ { NULL },
+ { &mxl111sf_table[11], &mxl111sf_table[14],
+ &mxl111sf_table[16], &mxl111sf_table[17],
+ &mxl111sf_table[32], &mxl111sf_table[33],
+ NULL },
+ },
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mercury_spi_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "Hauppauge Mercury (spi-bulk)",
{ NULL },
{ &mxl111sf_table[19], &mxl111sf_table[21],
&mxl111sf_table[23], &mxl111sf_table[25],
- &mxl111sf_table[27], NULL },
+ NULL },
},
- { "Hauppauge WinTV-Aero-M",
+ { "Hauppauge WinTV-Aero-M (spi-bulk)",
{ NULL },
{ &mxl111sf_table[29], &mxl111sf_table[31],
NULL },
@@ -977,14 +1536,14 @@ static struct dvb_usb_device_properties mxl111sf_atsc_bulk_properties = {
}
};
-static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
+static struct dvb_usb_device_properties mxl111sf_mercury_spi_isoc_properties = {
MXL111SF_DEFAULT_DEVICE_PROPERTIES,
.num_adapters = 1,
.adapter = {
{
.fe_ioctl_override = mxl111sf_fe_ioctl_override,
- .num_frontends = 2,
+ .num_frontends = 3,
.fe = {{
.frontend_attach = mxl111sf_lgdt3305_frontend_attach,
.tuner_attach = mxl111sf_attach_tuner,
@@ -996,34 +1555,111 @@ static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
.tuner_attach = mxl111sf_attach_tuner,
MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_ISOC_STREAMING_CONFIG,
}},
},
},
- .num_device_descs = 6,
+ .num_device_descs = 2,
.devices = {
- { "Hauppauge 126xxx ATSC (isoc)",
+ { "Hauppauge Mercury (spi-isoc)",
{ NULL },
- { &mxl111sf_table[1], &mxl111sf_table[5],
+ { &mxl111sf_table[19], &mxl111sf_table[21],
+ &mxl111sf_table[23], &mxl111sf_table[25],
NULL },
},
- { "Hauppauge 117xxx ATSC (isoc)",
+ { "Hauppauge WinTV-Aero-M (spi-isoc)",
{ NULL },
- { &mxl111sf_table[12],
+ { &mxl111sf_table[29], &mxl111sf_table[31],
NULL },
},
- { "Hauppauge 126xxx ATSC+ (isoc)",
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mercury_tp_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_ep6_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
+ { "Hauppauge Mercury (tp-bulk)",
{ NULL },
- { &mxl111sf_table[0], &mxl111sf_table[3],
- &mxl111sf_table[7], &mxl111sf_table[9],
- &mxl111sf_table[10], NULL },
+ { &mxl111sf_table[19], &mxl111sf_table[21],
+ &mxl111sf_table[23], &mxl111sf_table[25],
+ &mxl111sf_table[27], NULL },
},
- { "Hauppauge 117xxx ATSC+ (isoc)",
+ { "Hauppauge WinTV-Aero-M",
{ NULL },
- { &mxl111sf_table[11], &mxl111sf_table[14],
- &mxl111sf_table[16], &mxl111sf_table[17],
- &mxl111sf_table[32], &mxl111sf_table[33],
+ { &mxl111sf_table[29], &mxl111sf_table[31],
NULL },
},
+ }
+};
+
+static struct dvb_usb_device_properties mxl111sf_mercury_tp_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 3,
+ .fe = {{
+ .frontend_attach = mxl111sf_lgdt3305_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_ep6_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 2,
+ .devices = {
{ "Hauppauge Mercury (tp-isoc)",
{ NULL },
{ &mxl111sf_table[19], &mxl111sf_table[21],
@@ -1038,6 +1674,146 @@ static struct dvb_usb_device_properties mxl111sf_atsc_isoc_properties = {
}
};
+static
+struct dvb_usb_device_properties mxl111sf_mercury_mh_tp_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_ep6_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge 126xxx (tp-bulk)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
+static
+struct dvb_usb_device_properties mxl111sf_mercury_mh_tp_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_ep6_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP6_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge 126xxx (tp-isoc)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
+static
+struct dvb_usb_device_properties mxl111sf_mercury_mh_spi_bulk_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_BULK_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_BULK_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge 126xxx (spi-bulk)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
+static
+struct dvb_usb_device_properties mxl111sf_mercury_mh_spi_isoc_properties = {
+ MXL111SF_DEFAULT_DEVICE_PROPERTIES,
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .fe_ioctl_override = mxl111sf_fe_ioctl_override,
+ .num_frontends = 2,
+ .fe = {{
+ .frontend_attach = mxl111sf_attach_demod,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP4_ISOC_STREAMING_CONFIG,
+ },
+ {
+ .caps = DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD,
+
+ .frontend_attach = mxl111sf_lg2161_frontend_attach,
+ .tuner_attach = mxl111sf_attach_tuner,
+
+ MXL111SF_EP5_ISOC_STREAMING_CONFIG,
+ }},
+ },
+ },
+ .num_device_descs = 1,
+ .devices = {
+ { "Hauppauge 126xxx (spi-isoc)",
+ { NULL },
+ { &mxl111sf_table[28], &mxl111sf_table[30],
+ NULL },
+ },
+ }
+};
+
static struct usb_driver mxl111sf_driver = {
.name = "dvb_usb_mxl111sf",
.probe = mxl111sf_probe,
diff --git a/drivers/media/dvb/dvb-usb/rtl28xxu.c b/drivers/media/dvb/dvb-usb/rtl28xxu.c
index 8f4736a10fc8..41e1f5537f44 100644
--- a/drivers/media/dvb/dvb-usb/rtl28xxu.c
+++ b/drivers/media/dvb/dvb-usb/rtl28xxu.c
@@ -322,6 +322,9 @@ static int rtl2831u_frontend_attach(struct dvb_usb_adapter *adap)
* since there is some demod params needed to set according to tuner.
*/
+ /* demod needs some time to wake up */
+ msleep(20);
+
/* open demod I2C gate */
ret = rtl28xxu_ctrl_msg(adap->dev, &req_gate);
if (ret)
@@ -909,6 +912,8 @@ static int rtl28xxu_probe(struct usb_interface *intf,
int ret, i;
int properties_count = ARRAY_SIZE(rtl28xxu_properties);
struct dvb_usb_device *d;
+ struct usb_device *udev;
+ bool found;
deb_info("%s: interface=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
@@ -916,6 +921,29 @@ static int rtl28xxu_probe(struct usb_interface *intf,
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
return 0;
+ /* Dynamic USB ID support. Replaces the first device ID with the current one. */
+ udev = interface_to_usbdev(intf);
+
+ for (i = 0, found = false; i < ARRAY_SIZE(rtl28xxu_table) - 1; i++) {
+ if (rtl28xxu_table[i].idVendor ==
+ le16_to_cpu(udev->descriptor.idVendor) &&
+ rtl28xxu_table[i].idProduct ==
+ le16_to_cpu(udev->descriptor.idProduct)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ deb_info("%s: using dynamic ID %04x:%04x\n", __func__,
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+ rtl28xxu_properties[0].devices[0].warm_ids[0]->idVendor =
+ le16_to_cpu(udev->descriptor.idVendor);
+ rtl28xxu_properties[0].devices[0].warm_ids[0]->idProduct =
+ le16_to_cpu(udev->descriptor.idProduct);
+ }
+
for (i = 0; i < properties_count; i++) {
ret = dvb_usb_device_init(intf, &rtl28xxu_properties[i],
THIS_MODULE, &d, adapter_nr);
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 21246707fbfb..b98ebb264e29 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -531,6 +531,14 @@ config DVB_LGDT3305
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
+config DVB_LG2160
+ tristate "LG Electronics LG216x based"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ An ATSC/MH demodulator module. Say Y when you want
+ to support this frontend.
+
config DVB_S5H1409
tristate "Samsung S5H1409 based"
depends on DVB_CORE && I2C
@@ -540,12 +548,26 @@ config DVB_S5H1409
to support this frontend.
config DVB_AU8522
- tristate "Auvitek AU8522 based"
- depends on DVB_CORE && I2C && VIDEO_V4L2
+ depends on I2C
+ tristate
+
+config DVB_AU8522_DTV
+ tristate "Auvitek AU8522 based DTV demod"
+ depends on DVB_CORE && I2C
+ select DVB_AU8522
default m if DVB_FE_CUSTOMISE
help
- An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
- to support this frontend.
+ An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
+ you want to enable DTV demodulation support for this frontend.
+
+config DVB_AU8522_V4L
+ tristate "Auvitek AU8522 based ATV demod"
+ depends on VIDEO_V4L2 && I2C
+ select DVB_AU8522
+ default m if DVB_FE_CUSTOMISE
+ help
+ An ATSC 8VSB, QAM64/256 & NTSC demodulator module. Say Y when
+ you want to enable ATV demodulation support for this frontend.
config DVB_S5H1411
tristate "Samsung S5H1411 based"
@@ -713,6 +735,11 @@ config DVB_M88RS2000
A DVB-S tuner module.
Say Y when you want to support this frontend.
+config DVB_AF9033
+ tristate "Afatech AF9033 DVB-T demodulator"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 86fa808bf589..cd1ac2fd5774 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -7,7 +7,6 @@ ccflags-y += -I$(srctree)/drivers/media/common/tuners/
stb0899-objs = stb0899_drv.o stb0899_algo.o
stv0900-objs = stv0900_core.o stv0900_sw.o
-au8522-objs = au8522_dig.o au8522_decoder.o
drxd-objs = drxd_firm.o drxd_hard.o
cxd2820r-objs = cxd2820r_core.o cxd2820r_c.o cxd2820r_t.o cxd2820r_t2.o
drxk-objs := drxk_hard.o
@@ -50,6 +49,7 @@ obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
obj-$(CONFIG_DVB_LGDT3305) += lgdt3305.o
+obj-$(CONFIG_DVB_LG2160) += lg2160.o
obj-$(CONFIG_DVB_CX24123) += cx24123.o
obj-$(CONFIG_DVB_LNBP21) += lnbp21.o
obj-$(CONFIG_DVB_LNBP22) += lnbp22.o
@@ -63,7 +63,9 @@ obj-$(CONFIG_DVB_TUNER_DIB0090) += dib0090.o
obj-$(CONFIG_DVB_TUA6100) += tua6100.o
obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
-obj-$(CONFIG_DVB_AU8522) += au8522.o
+obj-$(CONFIG_DVB_AU8522) += au8522_common.o
+obj-$(CONFIG_DVB_AU8522_DTV) += au8522_dig.o
+obj-$(CONFIG_DVB_AU8522_V4L) += au8522_decoder.o
obj-$(CONFIG_DVB_TDA10048) += tda10048.o
obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
@@ -98,4 +100,5 @@ obj-$(CONFIG_DVB_A8293) += a8293.o
obj-$(CONFIG_DVB_TDA10071) += tda10071.o
obj-$(CONFIG_DVB_RTL2830) += rtl2830.o
obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
+obj-$(CONFIG_DVB_AF9033) += af9033.o
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index 6bcbcf543b38..5bc570d77846 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -514,7 +514,6 @@ err:
static void af9013_statistics_work(struct work_struct *work)
{
- int ret;
struct af9013_state *state = container_of(work,
struct af9013_state, statistics_work.work);
unsigned int next_msec;
@@ -530,27 +529,27 @@ static void af9013_statistics_work(struct work_struct *work)
default:
state->statistics_step = 0;
case 0:
- ret = af9013_statistics_signal_strength(&state->fe);
+ af9013_statistics_signal_strength(&state->fe);
state->statistics_step++;
next_msec = 300;
break;
case 1:
- ret = af9013_statistics_snr_start(&state->fe);
+ af9013_statistics_snr_start(&state->fe);
state->statistics_step++;
next_msec = 200;
break;
case 2:
- ret = af9013_statistics_ber_unc_start(&state->fe);
+ af9013_statistics_ber_unc_start(&state->fe);
state->statistics_step++;
next_msec = 1000;
break;
case 3:
- ret = af9013_statistics_snr_result(&state->fe);
+ af9013_statistics_snr_result(&state->fe);
state->statistics_step++;
next_msec = 400;
break;
case 4:
- ret = af9013_statistics_ber_unc_result(&state->fe);
+ af9013_statistics_ber_unc_result(&state->fe);
state->statistics_step++;
next_msec = 100;
break;
@@ -558,8 +557,6 @@ static void af9013_statistics_work(struct work_struct *work)
schedule_delayed_work(&state->statistics_work,
msecs_to_jiffies(next_msec));
-
- return;
}
static int af9013_get_tune_settings(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb/frontends/af9033.c b/drivers/media/dvb/frontends/af9033.c
new file mode 100644
index 000000000000..a38998286260
--- /dev/null
+++ b/drivers/media/dvb/frontends/af9033.c
@@ -0,0 +1,980 @@
+/*
+ * Afatech AF9033 demodulator driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "af9033_priv.h"
+
+struct af9033_state {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend fe;
+ struct af9033_config cfg;
+
+ u32 bandwidth_hz;
+ bool ts_mode_parallel;
+ bool ts_mode_serial;
+
+ u32 ber;
+ u32 ucb;
+ unsigned long last_stat_check;
+};
+
+/* write multiple registers */
+static int af9033_wr_regs(struct af9033_state *state, u32 reg, const u8 *val,
+ int len)
+{
+ int ret;
+ u8 buf[3 + len];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = state->cfg.i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ buf[0] = (reg >> 16) & 0xff;
+ buf[1] = (reg >> 8) & 0xff;
+ buf[2] = (reg >> 0) & 0xff;
+ memcpy(&buf[3], val, len);
+
+ ret = i2c_transfer(state->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ printk(KERN_WARNING "%s: i2c wr failed=%d reg=%06x len=%d\n",
+ __func__, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+/* read multiple registers */
+static int af9033_rd_regs(struct af9033_state *state, u32 reg, u8 *val, int len)
+{
+ int ret;
+ u8 buf[3] = { (reg >> 16) & 0xff, (reg >> 8) & 0xff,
+ (reg >> 0) & 0xff };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = state->cfg.i2c_addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf
+ }, {
+ .addr = state->cfg.i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val
+ }
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+ if (ret == 2) {
+ ret = 0;
+ } else {
+ printk(KERN_WARNING "%s: i2c rd failed=%d reg=%06x len=%d\n",
+ __func__, ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+
+ return ret;
+}
+
+
+/* write single register */
+static int af9033_wr_reg(struct af9033_state *state, u32 reg, u8 val)
+{
+ return af9033_wr_regs(state, reg, &val, 1);
+}
+
+/* read single register */
+static int af9033_rd_reg(struct af9033_state *state, u32 reg, u8 *val)
+{
+ return af9033_rd_regs(state, reg, val, 1);
+}
+
+/* write single register with mask */
+static int af9033_wr_reg_mask(struct af9033_state *state, u32 reg, u8 val,
+ u8 mask)
+{
+ int ret;
+ u8 tmp;
+
+ /* no need for read if whole reg is written */
+ if (mask != 0xff) {
+ ret = af9033_rd_regs(state, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ val &= mask;
+ tmp &= ~mask;
+ val |= tmp;
+ }
+
+ return af9033_wr_regs(state, reg, &val, 1);
+}
+
+/* read single register with mask */
+static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
+ u8 mask)
+{
+ int ret, i;
+ u8 tmp;
+
+ ret = af9033_rd_regs(state, reg, &tmp, 1);
+ if (ret)
+ return ret;
+
+ tmp &= mask;
+
+ /* find position of the first bit */
+ for (i = 0; i < 8; i++) {
+ if ((mask >> i) & 0x01)
+ break;
+ }
+ *val = tmp >> i;
+
+ return 0;
+}
+
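+/* fixed point division: returns a/b scaled by 2^x (x fractional bits) */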
+static u32 af9033_div(u32 a, u32 b, u32 x)
+{
+ u32 r = 0, c = 0, i;
+
+ pr_debug("%s: a=%d b=%d x=%d\n", __func__, a, b, x);
+
+ if (a > b) {
+ c = a / b;
+ a = a - c * b;
+ }
+
+ for (i = 0; i < x; i++) {
+ if (a >= b) {
+ r += 1;
+ a -= b;
+ }
+ a <<= 1;
+ r <<= 1;
+ }
+ r = (c << (u32)x) + r;
+
+ pr_debug("%s: a=%d b=%d x=%d r=%d r=%x\n", __func__, a, b, x, r, r);
+
+ return r;
+}
+
+static void af9033_release(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+
+ kfree(state);
+}
+
+static int af9033_init(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret, i, len;
+ const struct reg_val *init;
+ u8 buf[4];
+ u32 adc_cw, clock_cw;
+ struct reg_val_mask tab[] = {
+ { 0x80fb24, 0x00, 0x08 },
+ { 0x80004c, 0x00, 0xff },
+ { 0x00f641, state->cfg.tuner, 0xff },
+ { 0x80f5ca, 0x01, 0x01 },
+ { 0x80f715, 0x01, 0x01 },
+ { 0x00f41f, 0x04, 0x04 },
+ { 0x00f41a, 0x01, 0x01 },
+ { 0x80f731, 0x00, 0x01 },
+ { 0x00d91e, 0x00, 0x01 },
+ { 0x00d919, 0x00, 0x01 },
+ { 0x80f732, 0x00, 0x01 },
+ { 0x00d91f, 0x00, 0x01 },
+ { 0x00d91a, 0x00, 0x01 },
+ { 0x80f730, 0x00, 0x01 },
+ { 0x80f778, 0x00, 0xff },
+ { 0x80f73c, 0x01, 0x01 },
+ { 0x80f776, 0x00, 0x01 },
+ { 0x00d8fd, 0x01, 0xff },
+ { 0x00d830, 0x01, 0xff },
+ { 0x00d831, 0x00, 0xff },
+ { 0x00d832, 0x00, 0xff },
+ { 0x80f985, state->ts_mode_serial, 0x01 },
+ { 0x80f986, state->ts_mode_parallel, 0x01 },
+ { 0x00d827, 0x00, 0xff },
+ { 0x00d829, 0x00, 0xff },
+ };
+
+ /* program clock control */
+ clock_cw = af9033_div(state->cfg.clock, 1000000ul, 19ul);
+ buf[0] = (clock_cw >> 0) & 0xff;
+ buf[1] = (clock_cw >> 8) & 0xff;
+ buf[2] = (clock_cw >> 16) & 0xff;
+ buf[3] = (clock_cw >> 24) & 0xff;
+
+ pr_debug("%s: clock=%d clock_cw=%08x\n", __func__, state->cfg.clock,
+ clock_cw);
+
+ ret = af9033_wr_regs(state, 0x800025, buf, 4);
+ if (ret < 0)
+ goto err;
+
+ /* program ADC control */
+ for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
+ if (clock_adc_lut[i].clock == state->cfg.clock)
+ break;
+ }
+
+ adc_cw = af9033_div(clock_adc_lut[i].adc, 1000000ul, 19ul);
+ buf[0] = (adc_cw >> 0) & 0xff;
+ buf[1] = (adc_cw >> 8) & 0xff;
+ buf[2] = (adc_cw >> 16) & 0xff;
+
+ pr_debug("%s: adc=%d adc_cw=%06x\n", __func__, clock_adc_lut[i].adc,
+ adc_cw);
+
+ ret = af9033_wr_regs(state, 0x80f1cd, buf, 3);
+ if (ret < 0)
+ goto err;
+
+ /* program register table */
+ for (i = 0; i < ARRAY_SIZE(tab); i++) {
+ ret = af9033_wr_reg_mask(state, tab[i].reg, tab[i].val,
+ tab[i].mask);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* settings for TS interface */
+ if (state->cfg.ts_mode == AF9033_TS_MODE_USB) {
+ ret = af9033_wr_reg_mask(state, 0x80f9a5, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x80f9b5, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+ } else {
+ ret = af9033_wr_reg_mask(state, 0x80f990, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x80f9b5, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* load OFSM settings */
+ pr_debug("%s: load ofsm settings\n", __func__);
+ len = ARRAY_SIZE(ofsm_init);
+ init = ofsm_init;
+ for (i = 0; i < len; i++) {
+ ret = af9033_wr_reg(state, init[i].reg, init[i].val);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* load tuner specific settings */
+ pr_debug("%s: load tuner specific settings\n",
+ __func__);
+ switch (state->cfg.tuner) {
+ case AF9033_TUNER_TUA9001:
+ len = ARRAY_SIZE(tuner_init_tua9001);
+ init = tuner_init_tua9001;
+ break;
+ case AF9033_TUNER_FC0011:
+ len = ARRAY_SIZE(tuner_init_fc0011);
+ init = tuner_init_fc0011;
+ break;
+ case AF9033_TUNER_MXL5007T:
+ len = ARRAY_SIZE(tuner_init_mxl5007t);
+ init = tuner_init_mxl5007t;
+ break;
+ case AF9033_TUNER_TDA18218:
+ len = ARRAY_SIZE(tuner_init_tda18218);
+ init = tuner_init_tda18218;
+ break;
+ default:
+ pr_debug("%s: unsupported tuner ID=%d\n", __func__,
+ state->cfg.tuner);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ for (i = 0; i < len; i++) {
+ ret = af9033_wr_reg(state, init[i].reg, init[i].val);
+ if (ret < 0)
+ goto err;
+ }
+
+ state->bandwidth_hz = 0; /* force to program all parameters */
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_sleep(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret, i;
+ u8 tmp;
+
+ ret = af9033_wr_reg(state, 0x80004c, 1);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x800000, 0);
+ if (ret < 0)
+ goto err;
+
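+ /* poll until the demod clears reg 0x80004c, up to 100 tries */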
+ for (i = 100, tmp = 1; i && tmp; i--) {
+ ret = af9033_rd_reg(state, 0x80004c, &tmp);
+ if (ret < 0)
+ goto err;
+
+ usleep_range(200, 10000);
+ }
+
+ pr_debug("%s: loop=%d\n", __func__, i);
+
+ if (i == 0) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = af9033_wr_reg_mask(state, 0x80fb24, 0x08, 0x08);
+ if (ret < 0)
+ goto err;
+
+ /* prevent current leak (?) */
+ if (state->cfg.ts_mode == AF9033_TS_MODE_SERIAL) {
+ /* enable parallel TS */
+ ret = af9033_wr_reg_mask(state, 0x00d917, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x00d916, 0x01, 0x01);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *fesettings)
+{
+ fesettings->min_delay_ms = 800;
+ fesettings->step_size = 0;
+ fesettings->max_drift = 0;
+
+ return 0;
+}
+
+static int af9033_set_frontend(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i, spec_inv;
+ u8 tmp, buf[3], bandwidth_reg_val;
+ u32 if_frequency, freq_cw, adc_freq;
+
+ pr_debug("%s: frequency=%d bandwidth_hz=%d\n", __func__, c->frequency,
+ c->bandwidth_hz);
+
+ /* check bandwidth */
+ switch (c->bandwidth_hz) {
+ case 6000000:
+ bandwidth_reg_val = 0x00;
+ break;
+ case 7000000:
+ bandwidth_reg_val = 0x01;
+ break;
+ case 8000000:
+ bandwidth_reg_val = 0x02;
+ break;
+ default:
+ pr_debug("%s: invalid bandwidth_hz\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+
+ /* program CFOE coefficients */
+ if (c->bandwidth_hz != state->bandwidth_hz) {
+ for (i = 0; i < ARRAY_SIZE(coeff_lut); i++) {
+ if (coeff_lut[i].clock == state->cfg.clock &&
+ coeff_lut[i].bandwidth_hz == c->bandwidth_hz) {
+ break;
+ }
+ }
+ ret = af9033_wr_regs(state, 0x800001,
+ coeff_lut[i].val, sizeof(coeff_lut[i].val));
+ }
+
+ /* program frequency control */
+ if (c->bandwidth_hz != state->bandwidth_hz) {
+ spec_inv = state->cfg.spec_inv ? -1 : 1;
+
+ for (i = 0; i < ARRAY_SIZE(clock_adc_lut); i++) {
+ if (clock_adc_lut[i].clock == state->cfg.clock)
+ break;
+ }
+ adc_freq = clock_adc_lut[i].adc;
+
+ /* get used IF frequency */
+ if (fe->ops.tuner_ops.get_if_frequency)
+ fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
+ else
+ if_frequency = 0;
+
+ while (if_frequency > (adc_freq / 2))
+ if_frequency -= adc_freq;
+
+ if (if_frequency >= 0)
+ spec_inv *= -1;
+ else
+ if_frequency *= -1;
+
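+ /* frequency control word: IF frequency as a fraction of the ADC clock, 23 fractional bits */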
+ freq_cw = af9033_div(if_frequency, adc_freq, 23ul);
+
+ if (spec_inv == -1)
+ freq_cw *= -1;
+
+ /* get adc multiplies */
+ ret = af9033_rd_reg(state, 0x800045, &tmp);
+ if (ret < 0)
+ goto err;
+
+ if (tmp == 1)
+ freq_cw /= 2;
+
+ buf[0] = (freq_cw >> 0) & 0xff;
+ buf[1] = (freq_cw >> 8) & 0xff;
+ buf[2] = (freq_cw >> 16) & 0x7f;
+ ret = af9033_wr_regs(state, 0x800029, buf, 3);
+ if (ret < 0)
+ goto err;
+
+ state->bandwidth_hz = c->bandwidth_hz;
+ }
+
+ ret = af9033_wr_reg_mask(state, 0x80f904, bandwidth_reg_val, 0x03);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x800040, 0x00);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x800047, 0x00);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg_mask(state, 0x80f999, 0x00, 0x01);
+ if (ret < 0)
+ goto err;
+
+ if (c->frequency <= 230000000)
+ tmp = 0x00; /* VHF */
+ else
+ tmp = 0x01; /* UHF */
+
+ ret = af9033_wr_reg(state, 0x80004b, tmp);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_wr_reg(state, 0x800000, 0x00);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_get_frontend(struct dvb_frontend *fe)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 buf[8];
+
+ pr_debug("%s\n", __func__);
+
+ /* read all needed registers */
+ ret = af9033_rd_regs(state, 0x80f900, buf, sizeof(buf));
+ if (ret < 0)
+ goto err;
+
+ switch ((buf[0] >> 0) & 3) {
+ case 0:
+ c->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ c->transmission_mode = TRANSMISSION_MODE_8K;
+ break;
+ }
+
+ switch ((buf[1] >> 0) & 3) {
+ case 0:
+ c->guard_interval = GUARD_INTERVAL_1_32;
+ break;
+ case 1:
+ c->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ case 2:
+ c->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 3:
+ c->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ }
+
+ switch ((buf[2] >> 0) & 7) {
+ case 0:
+ c->hierarchy = HIERARCHY_NONE;
+ break;
+ case 1:
+ c->hierarchy = HIERARCHY_1;
+ break;
+ case 2:
+ c->hierarchy = HIERARCHY_2;
+ break;
+ case 3:
+ c->hierarchy = HIERARCHY_4;
+ break;
+ }
+
+ switch ((buf[3] >> 0) & 3) {
+ case 0:
+ c->modulation = QPSK;
+ break;
+ case 1:
+ c->modulation = QAM_16;
+ break;
+ case 2:
+ c->modulation = QAM_64;
+ break;
+ }
+
+ switch ((buf[4] >> 0) & 3) {
+ case 0:
+ c->bandwidth_hz = 6000000;
+ break;
+ case 1:
+ c->bandwidth_hz = 7000000;
+ break;
+ case 2:
+ c->bandwidth_hz = 8000000;
+ break;
+ }
+
+ switch ((buf[6] >> 0) & 7) {
+ case 0:
+ c->code_rate_HP = FEC_1_2;
+ break;
+ case 1:
+ c->code_rate_HP = FEC_2_3;
+ break;
+ case 2:
+ c->code_rate_HP = FEC_3_4;
+ break;
+ case 3:
+ c->code_rate_HP = FEC_5_6;
+ break;
+ case 4:
+ c->code_rate_HP = FEC_7_8;
+ break;
+ case 5:
+ c->code_rate_HP = FEC_NONE;
+ break;
+ }
+
+ switch ((buf[7] >> 0) & 7) {
+ case 0:
+ c->code_rate_LP = FEC_1_2;
+ break;
+ case 1:
+ c->code_rate_LP = FEC_2_3;
+ break;
+ case 2:
+ c->code_rate_LP = FEC_3_4;
+ break;
+ case 3:
+ c->code_rate_LP = FEC_5_6;
+ break;
+ case 4:
+ c->code_rate_LP = FEC_7_8;
+ break;
+ case 5:
+ c->code_rate_LP = FEC_NONE;
+ break;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+ u8 tmp;
+
+ *status = 0;
+
+ /* radio channel status, 0=no result, 1=has signal, 2=no signal */
+ ret = af9033_rd_reg(state, 0x800047, &tmp);
+ if (ret < 0)
+ goto err;
+
+ /* has signal */
+ if (tmp == 0x01)
+ *status |= FE_HAS_SIGNAL;
+
+ if (tmp != 0x02) {
+ /* TPS lock */
+ ret = af9033_rd_reg_mask(state, 0x80f5a9, &tmp, 0x01);
+ if (ret < 0)
+ goto err;
+
+ if (tmp)
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI;
+
+ /* full lock */
+ ret = af9033_rd_reg_mask(state, 0x80f999, &tmp, 0x01);
+ if (ret < 0)
+ goto err;
+
+ if (tmp)
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ }
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret, i, len;
+ u8 buf[3], tmp;
+ u32 snr_val;
+ const struct val_snr *uninitialized_var(snr_lut);
+
+ /* read value */
+ ret = af9033_rd_regs(state, 0x80002c, buf, 3);
+ if (ret < 0)
+ goto err;
+
+ snr_val = (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ /* read current modulation */
+ ret = af9033_rd_reg(state, 0x80f903, &tmp);
+ if (ret < 0)
+ goto err;
+
+ switch ((tmp >> 0) & 3) {
+ case 0:
+ len = ARRAY_SIZE(qpsk_snr_lut);
+ snr_lut = qpsk_snr_lut;
+ break;
+ case 1:
+ len = ARRAY_SIZE(qam16_snr_lut);
+ snr_lut = qam16_snr_lut;
+ break;
+ case 2:
+ len = ARRAY_SIZE(qam64_snr_lut);
+ snr_lut = qam64_snr_lut;
+ break;
+ default:
+ goto err;
+ }
+
+ for (i = 0; i < len; i++) {
+ tmp = snr_lut[i].snr;
+
+ if (snr_val < snr_lut[i].val)
+ break;
+ }
+
+ *snr = tmp * 10; /* 0.1 dB units */
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+ u8 strength2;
+
+ /* read signal strength of 0-100 scale */
+ ret = af9033_rd_reg(state, 0x800048, &strength2);
+ if (ret < 0)
+ goto err;
+
+ /* scale value to 0x0000-0xffff */
+ *strength = strength2 * 0xffff / 100;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int af9033_update_ch_stat(struct af9033_state *state)
+{
+ int ret = 0;
+ u32 err_cnt, bit_cnt;
+ u16 abort_cnt;
+ u8 buf[7];
+
+ /* only update data every half second */
+ if (time_after(jiffies, state->last_stat_check + msecs_to_jiffies(500))) {
+ ret = af9033_rd_regs(state, 0x800032, buf, sizeof(buf));
+ if (ret < 0)
+ goto err;
+ /* in 8 byte packets? */
+ abort_cnt = (buf[1] << 8) + buf[0];
+ /* in bits */
+ err_cnt = (buf[4] << 16) + (buf[3] << 8) + buf[2];
+ /* in 8 byte packets? always(?) 0x2710 = 10000 */
+ bit_cnt = (buf[6] << 8) + buf[5];
+
+ if (bit_cnt < abort_cnt) {
+ abort_cnt = 1000;
+ state->ber = 0xffffffff;
+ } else {
+ /* 8 byte packets that have not already been rejected */
+ bit_cnt -= (u32)abort_cnt;
+ if (bit_cnt == 0) {
+ state->ber = 0xffffffff;
+ } else {
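+ /* convert packet counts to bits (8 bytes * 8 bits per packet) */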
+ err_cnt -= (u32)abort_cnt * 8 * 8;
+ bit_cnt *= 8 * 8;
+ state->ber = err_cnt * (0xffffffff / bit_cnt);
+ }
+ }
+ state->ucb += abort_cnt;
+ state->last_stat_check = jiffies;
+ }
+
+ return 0;
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int af9033_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+
+ ret = af9033_update_ch_stat(state);
+ if (ret < 0)
+ return ret;
+
+ *ber = state->ber;
+
+ return 0;
+}
+
+static int af9033_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+
+ ret = af9033_update_ch_stat(state);
+ if (ret < 0)
+ return ret;
+
+ *ucblocks = state->ucb;
+
+ return 0;
+}
+
+static int af9033_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct af9033_state *state = fe->demodulator_priv;
+ int ret;
+
+ pr_debug("%s: enable=%d\n", __func__, enable);
+
+ ret = af9033_wr_reg_mask(state, 0x00fa04, enable, 0x01);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ pr_debug("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct dvb_frontend_ops af9033_ops;
+
+struct dvb_frontend *af9033_attach(const struct af9033_config *config,
+ struct i2c_adapter *i2c)
+{
+ int ret;
+ struct af9033_state *state;
+ u8 buf[8];
+
+ pr_debug("%s:\n", __func__);
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct af9033_state), GFP_KERNEL);
+ if (state == NULL)
+ goto err;
+
+ /* setup the state */
+ state->i2c = i2c;
+ memcpy(&state->cfg, config, sizeof(struct af9033_config));
+
+ if (state->cfg.clock != 12000000) {
+ printk(KERN_INFO "af9033: unsupported clock=%d, only " \
+ "12000000 Hz is supported currently\n",
+ state->cfg.clock);
+ goto err;
+ }
+
+ /* firmware version */
+ ret = af9033_rd_regs(state, 0x0083e9, &buf[0], 4);
+ if (ret < 0)
+ goto err;
+
+ ret = af9033_rd_regs(state, 0x804191, &buf[4], 4);
+ if (ret < 0)
+ goto err;
+
+ printk(KERN_INFO "af9033: firmware version: LINK=%d.%d.%d.%d " \
+ "OFDM=%d.%d.%d.%d\n", buf[0], buf[1], buf[2], buf[3],
+ buf[4], buf[5], buf[6], buf[7]);
+
+ /* configure internal TS mode */
+ switch (state->cfg.ts_mode) {
+ case AF9033_TS_MODE_PARALLEL:
+ state->ts_mode_parallel = true;
+ break;
+ case AF9033_TS_MODE_SERIAL:
+ state->ts_mode_serial = true;
+ break;
+ case AF9033_TS_MODE_USB:
+ /* usb mode for AF9035 */
+ default:
+ break;
+ }
+
+ /* create dvb_frontend */
+ memcpy(&state->fe.ops, &af9033_ops, sizeof(struct dvb_frontend_ops));
+ state->fe.demodulator_priv = state;
+
+ return &state->fe;
+
+err:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(af9033_attach);
+
+static struct dvb_frontend_ops af9033_ops = {
+ .delsys = { SYS_DVBT },
+ .info = {
+ .name = "Afatech AF9033 (DVB-T)",
+ .frequency_min = 174000000,
+ .frequency_max = 862000000,
+ .frequency_stepsize = 250000,
+ .frequency_tolerance = 0,
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_RECOVER |
+ FE_CAN_MUTE_TS
+ },
+
+ .release = af9033_release,
+
+ .init = af9033_init,
+ .sleep = af9033_sleep,
+
+ .get_tune_settings = af9033_get_tune_settings,
+ .set_frontend = af9033_set_frontend,
+ .get_frontend = af9033_get_frontend,
+
+ .read_status = af9033_read_status,
+ .read_snr = af9033_read_snr,
+ .read_signal_strength = af9033_read_signal_strength,
+ .read_ber = af9033_read_ber,
+ .read_ucblocks = af9033_read_ucblocks,
+
+ .i2c_gate_ctrl = af9033_i2c_gate_ctrl,
+};
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Afatech AF9033 DVB-T demodulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/af9033.h b/drivers/media/dvb/frontends/af9033.h
new file mode 100644
index 000000000000..9e302c3f0f7d
--- /dev/null
+++ b/drivers/media/dvb/frontends/af9033.h
@@ -0,0 +1,75 @@
+/*
+ * Afatech AF9033 demodulator driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AF9033_H
+#define AF9033_H
+
+struct af9033_config {
+ /*
+ * I2C address
+ */
+ u8 i2c_addr;
+
+ /*
+ * clock Hz
+ * 12000000, 22000000, 24000000, 34000000, 32000000, 28000000, 26000000,
+ * 30000000, 36000000, 20480000, 16384000
+ */
+ u32 clock;
+
+ /*
+ * tuner
+ */
+#define AF9033_TUNER_TUA9001 0x27 /* Infineon TUA 9001 */
+#define AF9033_TUNER_FC0011 0x28 /* Fitipower FC0011 */
+#define AF9033_TUNER_MXL5007T 0xa0 /* MaxLinear MxL5007T */
+#define AF9033_TUNER_TDA18218 0xa1 /* NXP TDA 18218HN */
+ u8 tuner;
+
+ /*
+ * TS settings
+ */
+#define AF9033_TS_MODE_USB 0
+#define AF9033_TS_MODE_PARALLEL 1
+#define AF9033_TS_MODE_SERIAL 2
+ u8 ts_mode:2;
+
+ /*
+ * input spectrum inversion
+ */
+ bool spec_inv;
+};
+
+
+#if defined(CONFIG_DVB_AF9033) || \
+ (defined(CONFIG_DVB_AF9033_MODULE) && defined(MODULE))
+extern struct dvb_frontend *af9033_attach(const struct af9033_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *af9033_attach(
+ const struct af9033_config *config, struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif /* AF9033_H */
diff --git a/drivers/media/dvb/frontends/af9033_priv.h b/drivers/media/dvb/frontends/af9033_priv.h
new file mode 100644
index 000000000000..0b783b9ed75e
--- /dev/null
+++ b/drivers/media/dvb/frontends/af9033_priv.h
@@ -0,0 +1,470 @@
+/*
+ * Afatech AF9033 demodulator driver
+ *
+ * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef AF9033_PRIV_H
+#define AF9033_PRIV_H
+
+#include "dvb_frontend.h"
+#include "af9033.h"
+
+struct reg_val {
+ u32 reg;
+ u8 val;
+};
+
+struct reg_val_mask {
+ u32 reg;
+ u8 val;
+ u8 mask;
+};
+
+struct coeff {
+ u32 clock;
+ u32 bandwidth_hz;
+ u8 val[36];
+};
+
+struct clock_adc {
+ u32 clock;
+ u32 adc;
+};
+
+struct val_snr {
+ u32 val;
+ u8 snr;
+};
+
+/* Xtal clock vs. ADC clock lookup table */
+static const struct clock_adc clock_adc_lut[] = {
+ { 16384000, 20480000 },
+ { 20480000, 20480000 },
+ { 36000000, 20250000 },
+ { 30000000, 20156250 },
+ { 26000000, 20583333 },
+ { 28000000, 20416667 },
+ { 32000000, 20500000 },
+ { 34000000, 20187500 },
+ { 24000000, 20500000 },
+ { 22000000, 20625000 },
+ { 12000000, 20250000 },
+};
+
+/* pre-calculated coeff lookup table */
+static const struct coeff coeff_lut[] = {
+ /* 12.000 MHz */
+ { 12000000, 8000000, {
+ 0x01, 0xce, 0x55, 0xc9, 0x00, 0xe7, 0x2a, 0xe4, 0x00, 0x73,
+ 0x99, 0x0f, 0x00, 0x73, 0x95, 0x72, 0x00, 0x73, 0x91, 0xd5,
+ 0x00, 0x39, 0xca, 0xb9, 0x00, 0xe7, 0x2a, 0xe4, 0x00, 0x73,
+ 0x95, 0x72, 0x37, 0x02, 0xce, 0x01 }
+ },
+ { 12000000, 7000000, {
+ 0x01, 0x94, 0x8b, 0x10, 0x00, 0xca, 0x45, 0x88, 0x00, 0x65,
+ 0x25, 0xed, 0x00, 0x65, 0x22, 0xc4, 0x00, 0x65, 0x1f, 0x9b,
+ 0x00, 0x32, 0x91, 0x62, 0x00, 0xca, 0x45, 0x88, 0x00, 0x65,
+ 0x22, 0xc4, 0x88, 0x02, 0x95, 0x01 }
+ },
+ { 12000000, 6000000, {
+ 0x01, 0x5a, 0xc0, 0x56, 0x00, 0xad, 0x60, 0x2b, 0x00, 0x56,
+ 0xb2, 0xcb, 0x00, 0x56, 0xb0, 0x15, 0x00, 0x56, 0xad, 0x60,
+ 0x00, 0x2b, 0x58, 0x0b, 0x00, 0xad, 0x60, 0x2b, 0x00, 0x56,
+ 0xb0, 0x15, 0xf4, 0x02, 0x5b, 0x01 }
+ },
+};
+
+/* QPSK SNR lookup table */
+static const struct val_snr qpsk_snr_lut[] = {
+ { 0x0b4771, 0 },
+ { 0x0c1aed, 1 },
+ { 0x0d0d27, 2 },
+ { 0x0e4d19, 3 },
+ { 0x0e5da8, 4 },
+ { 0x107097, 5 },
+ { 0x116975, 6 },
+ { 0x1252d9, 7 },
+ { 0x131fa4, 8 },
+ { 0x13d5e1, 9 },
+ { 0x148e53, 10 },
+ { 0x15358b, 11 },
+ { 0x15dd29, 12 },
+ { 0x168112, 13 },
+ { 0x170b61, 14 },
+ { 0x17a532, 15 },
+ { 0x180f94, 16 },
+ { 0x186ed2, 17 },
+ { 0x18b271, 18 },
+ { 0x18e118, 19 },
+ { 0x18ff4b, 20 },
+ { 0x190af1, 21 },
+ { 0x191451, 22 },
+ { 0xffffff, 23 },
+};
+
+/* QAM16 SNR lookup table */
+static const struct val_snr qam16_snr_lut[] = {
+ { 0x04f0d5, 0 },
+ { 0x05387a, 1 },
+ { 0x0573a4, 2 },
+ { 0x05a99e, 3 },
+ { 0x05cc80, 4 },
+ { 0x05eb62, 5 },
+ { 0x05fecf, 6 },
+ { 0x060b80, 7 },
+ { 0x062501, 8 },
+ { 0x064865, 9 },
+ { 0x069604, 10 },
+ { 0x06f356, 11 },
+ { 0x07706a, 12 },
+ { 0x0804d3, 13 },
+ { 0x089d1a, 14 },
+ { 0x093e3d, 15 },
+ { 0x09e35d, 16 },
+ { 0x0a7c3c, 17 },
+ { 0x0afaf8, 18 },
+ { 0x0b719d, 19 },
+ { 0x0bda6a, 20 },
+ { 0x0c0c75, 21 },
+ { 0x0c3f7d, 22 },
+ { 0x0c5e62, 23 },
+ { 0x0c6c31, 24 },
+ { 0x0c7925, 25 },
+ { 0xffffff, 26 },
+};
+
+/* QAM64 SNR lookup table */
+static const struct val_snr qam64_snr_lut[] = {
+ { 0x0256d0, 0 },
+ { 0x027a65, 1 },
+ { 0x029873, 2 },
+ { 0x02b7fe, 3 },
+ { 0x02cf1e, 4 },
+ { 0x02e234, 5 },
+ { 0x02f409, 6 },
+ { 0x030046, 7 },
+ { 0x030844, 8 },
+ { 0x030a02, 9 },
+ { 0x030cde, 10 },
+ { 0x031031, 11 },
+ { 0x03144c, 12 },
+ { 0x0315dd, 13 },
+ { 0x031920, 14 },
+ { 0x0322d0, 15 },
+ { 0x0339fc, 16 },
+ { 0x0364a1, 17 },
+ { 0x038bcc, 18 },
+ { 0x03c7d3, 19 },
+ { 0x0408cc, 20 },
+ { 0x043bed, 21 },
+ { 0x048061, 22 },
+ { 0x04be95, 23 },
+ { 0x04fa7d, 24 },
+ { 0x052405, 25 },
+ { 0x05570d, 26 },
+ { 0x059feb, 27 },
+ { 0x05bf38, 28 },
+ { 0xffffff, 29 },
+};
+
+static const struct reg_val ofsm_init[] = {
+ { 0x800051, 0x01 },
+ { 0x800070, 0x0a },
+ { 0x80007e, 0x04 },
+ { 0x800081, 0x0a },
+ { 0x80008a, 0x01 },
+ { 0x80008e, 0x01 },
+ { 0x800092, 0x06 },
+ { 0x800099, 0x01 },
+ { 0x80009f, 0xe1 },
+ { 0x8000a0, 0xcf },
+ { 0x8000a3, 0x01 },
+ { 0x8000a5, 0x01 },
+ { 0x8000a6, 0x01 },
+ { 0x8000a9, 0x00 },
+ { 0x8000aa, 0x01 },
+ { 0x8000ab, 0x01 },
+ { 0x8000b0, 0x01 },
+ { 0x8000c0, 0x05 },
+ { 0x8000c4, 0x19 },
+ { 0x80f000, 0x0f },
+ { 0x80f016, 0x10 },
+ { 0x80f017, 0x04 },
+ { 0x80f018, 0x05 },
+ { 0x80f019, 0x04 },
+ { 0x80f01a, 0x05 },
+ { 0x80f021, 0x03 },
+ { 0x80f022, 0x0a },
+ { 0x80f023, 0x0a },
+ { 0x80f02b, 0x00 },
+ { 0x80f02c, 0x01 },
+ { 0x80f064, 0x03 },
+ { 0x80f065, 0xf9 },
+ { 0x80f066, 0x03 },
+ { 0x80f067, 0x01 },
+ { 0x80f06f, 0xe0 },
+ { 0x80f070, 0x03 },
+ { 0x80f072, 0x0f },
+ { 0x80f073, 0x03 },
+ { 0x80f078, 0x00 },
+ { 0x80f087, 0x00 },
+ { 0x80f09b, 0x3f },
+ { 0x80f09c, 0x00 },
+ { 0x80f09d, 0x20 },
+ { 0x80f09e, 0x00 },
+ { 0x80f09f, 0x0c },
+ { 0x80f0a0, 0x00 },
+ { 0x80f130, 0x04 },
+ { 0x80f132, 0x04 },
+ { 0x80f144, 0x1a },
+ { 0x80f146, 0x00 },
+ { 0x80f14a, 0x01 },
+ { 0x80f14c, 0x00 },
+ { 0x80f14d, 0x00 },
+ { 0x80f14f, 0x04 },
+ { 0x80f158, 0x7f },
+ { 0x80f15a, 0x00 },
+ { 0x80f15b, 0x08 },
+ { 0x80f15d, 0x03 },
+ { 0x80f15e, 0x05 },
+ { 0x80f163, 0x05 },
+ { 0x80f166, 0x01 },
+ { 0x80f167, 0x40 },
+ { 0x80f168, 0x0f },
+ { 0x80f17a, 0x00 },
+ { 0x80f17b, 0x00 },
+ { 0x80f183, 0x01 },
+ { 0x80f19d, 0x40 },
+ { 0x80f1bc, 0x36 },
+ { 0x80f1bd, 0x00 },
+ { 0x80f1cb, 0xa0 },
+ { 0x80f1cc, 0x01 },
+ { 0x80f204, 0x10 },
+ { 0x80f214, 0x00 },
+ { 0x80f40e, 0x0a },
+ { 0x80f40f, 0x40 },
+ { 0x80f410, 0x08 },
+ { 0x80f55f, 0x0a },
+ { 0x80f561, 0x15 },
+ { 0x80f562, 0x20 },
+ { 0x80f5df, 0xfb },
+ { 0x80f5e0, 0x00 },
+ { 0x80f5e3, 0x09 },
+ { 0x80f5e4, 0x01 },
+ { 0x80f5e5, 0x01 },
+ { 0x80f5f8, 0x01 },
+ { 0x80f5fd, 0x01 },
+ { 0x80f600, 0x05 },
+ { 0x80f601, 0x08 },
+ { 0x80f602, 0x0b },
+ { 0x80f603, 0x0e },
+ { 0x80f604, 0x11 },
+ { 0x80f605, 0x14 },
+ { 0x80f606, 0x17 },
+ { 0x80f607, 0x1f },
+ { 0x80f60e, 0x00 },
+ { 0x80f60f, 0x04 },
+ { 0x80f610, 0x32 },
+ { 0x80f611, 0x10 },
+ { 0x80f707, 0xfc },
+ { 0x80f708, 0x00 },
+ { 0x80f709, 0x37 },
+ { 0x80f70a, 0x00 },
+ { 0x80f78b, 0x01 },
+ { 0x80f80f, 0x40 },
+ { 0x80f810, 0x54 },
+ { 0x80f811, 0x5a },
+ { 0x80f905, 0x01 },
+ { 0x80fb06, 0x03 },
+ { 0x80fd8b, 0x00 },
+};
+
+/* Infineon TUA 9001 tuner init
+ AF9033_TUNER_TUA9001 = 0x27 */
+static const struct reg_val tuner_init_tua9001[] = {
+ { 0x800046, 0x27 },
+ { 0x800057, 0x00 },
+ { 0x800058, 0x01 },
+ { 0x80005f, 0x00 },
+ { 0x800060, 0x00 },
+ { 0x80006d, 0x00 },
+ { 0x800071, 0x05 },
+ { 0x800072, 0x02 },
+ { 0x800074, 0x01 },
+ { 0x800075, 0x03 },
+ { 0x800076, 0x02 },
+ { 0x800077, 0x00 },
+ { 0x800078, 0x01 },
+ { 0x800079, 0x00 },
+ { 0x80007a, 0x7e },
+ { 0x80007b, 0x3e },
+ { 0x800093, 0x00 },
+ { 0x800094, 0x01 },
+ { 0x800095, 0x02 },
+ { 0x800096, 0x01 },
+ { 0x800098, 0x0a },
+ { 0x80009b, 0x05 },
+ { 0x80009c, 0x80 },
+ { 0x8000b3, 0x00 },
+ { 0x8000c1, 0x01 },
+ { 0x8000c2, 0x00 },
+ { 0x80f007, 0x00 },
+ { 0x80f01f, 0x82 },
+ { 0x80f020, 0x00 },
+ { 0x80f029, 0x82 },
+ { 0x80f02a, 0x00 },
+ { 0x80f047, 0x00 },
+ { 0x80f054, 0x00 },
+ { 0x80f055, 0x00 },
+ { 0x80f077, 0x01 },
+ { 0x80f1e6, 0x00 },
+};
+
+/* Fitipower fc0011 tuner init
+ AF9033_TUNER_FC0011 = 0x28 */
+static const struct reg_val tuner_init_fc0011[] = {
+ { 0x800046, AF9033_TUNER_FC0011 },
+ { 0x800057, 0x00 },
+ { 0x800058, 0x01 },
+ { 0x80005f, 0x00 },
+ { 0x800060, 0x00 },
+ { 0x800068, 0xa5 },
+ { 0x80006e, 0x01 },
+ { 0x800071, 0x0A },
+ { 0x800072, 0x02 },
+ { 0x800074, 0x01 },
+ { 0x800079, 0x01 },
+ { 0x800093, 0x00 },
+ { 0x800094, 0x00 },
+ { 0x800095, 0x00 },
+ { 0x800096, 0x00 },
+ { 0x80009b, 0x2D },
+ { 0x80009c, 0x60 },
+ { 0x80009d, 0x23 },
+ { 0x8000a4, 0x50 },
+ { 0x8000ad, 0x50 },
+ { 0x8000b3, 0x01 },
+ { 0x8000b7, 0x88 },
+ { 0x8000b8, 0xa6 },
+ { 0x8000c3, 0x01 },
+ { 0x8000c4, 0x01 },
+ { 0x8000c7, 0x69 },
+ { 0x80F007, 0x00 },
+ { 0x80F00A, 0x1B },
+ { 0x80F00B, 0x1B },
+ { 0x80F00C, 0x1B },
+ { 0x80F00D, 0x1B },
+ { 0x80F00E, 0xFF },
+ { 0x80F00F, 0x01 },
+ { 0x80F010, 0x00 },
+ { 0x80F011, 0x02 },
+ { 0x80F012, 0xFF },
+ { 0x80F013, 0x01 },
+ { 0x80F014, 0x00 },
+ { 0x80F015, 0x02 },
+ { 0x80F01B, 0xEF },
+ { 0x80F01C, 0x01 },
+ { 0x80F01D, 0x0f },
+ { 0x80F01E, 0x02 },
+ { 0x80F01F, 0x6E },
+ { 0x80F020, 0x00 },
+ { 0x80F025, 0xDE },
+ { 0x80F026, 0x00 },
+ { 0x80F027, 0x0A },
+ { 0x80F028, 0x03 },
+ { 0x80F029, 0x6E },
+ { 0x80F02A, 0x00 },
+ { 0x80F047, 0x00 },
+ { 0x80F054, 0x00 },
+ { 0x80F055, 0x00 },
+ { 0x80F077, 0x01 },
+ { 0x80F1E6, 0x00 },
+};
+
+/* MaxLinear MxL5007T tuner init
+ AF9033_TUNER_MXL5007T = 0xa0 */
+static const struct reg_val tuner_init_mxl5007t[] = {
+ { 0x800046, 0x1b },
+ { 0x800057, 0x01 },
+ { 0x800058, 0x01 },
+ { 0x80005f, 0x00 },
+ { 0x800060, 0x00 },
+ { 0x800068, 0x96 },
+ { 0x800071, 0x05 },
+ { 0x800072, 0x02 },
+ { 0x800074, 0x01 },
+ { 0x800079, 0x01 },
+ { 0x800093, 0x00 },
+ { 0x800094, 0x00 },
+ { 0x800095, 0x00 },
+ { 0x800096, 0x00 },
+ { 0x8000b3, 0x01 },
+ { 0x8000c1, 0x01 },
+ { 0x8000c2, 0x00 },
+ { 0x80f007, 0x00 },
+ { 0x80f00c, 0x19 },
+ { 0x80f00d, 0x1a },
+ { 0x80f012, 0xda },
+ { 0x80f013, 0x00 },
+ { 0x80f014, 0x00 },
+ { 0x80f015, 0x02 },
+ { 0x80f01f, 0x82 },
+ { 0x80f020, 0x00 },
+ { 0x80f029, 0x82 },
+ { 0x80f02a, 0x00 },
+ { 0x80f077, 0x02 },
+ { 0x80f1e6, 0x00 },
+};
+
+/* NXP TDA 18218HN tuner init
+ AF9033_TUNER_TDA18218 = 0xa1 */
+static const struct reg_val tuner_init_tda18218[] = {
+ {0x800046, 0xa1},
+ {0x800057, 0x01},
+ {0x800058, 0x01},
+ {0x80005f, 0x00},
+ {0x800060, 0x00},
+ {0x800071, 0x05},
+ {0x800072, 0x02},
+ {0x800074, 0x01},
+ {0x800079, 0x01},
+ {0x800093, 0x00},
+ {0x800094, 0x00},
+ {0x800095, 0x00},
+ {0x800096, 0x00},
+ {0x8000b3, 0x01},
+ {0x8000c3, 0x01},
+ {0x8000c4, 0x00},
+ {0x80f007, 0x00},
+ {0x80f00c, 0x19},
+ {0x80f00d, 0x1a},
+ {0x80f012, 0xda},
+ {0x80f013, 0x00},
+ {0x80f014, 0x00},
+ {0x80f015, 0x02},
+ {0x80f01f, 0x82},
+ {0x80f020, 0x00},
+ {0x80f029, 0x82},
+ {0x80f02a, 0x00},
+ {0x80f077, 0x02},
+ {0x80f1e6, 0x00},
+};
+
+#endif /* AF9033_PRIV_H */
+
diff --git a/drivers/media/dvb/frontends/au8522_common.c b/drivers/media/dvb/frontends/au8522_common.c
new file mode 100644
index 000000000000..5cfe151ee394
--- /dev/null
+++ b/drivers/media/dvb/frontends/au8522_common.c
@@ -0,0 +1,259 @@
+/*
+ Auvitek AU8522 QAM/8VSB demodulator driver
+
+ Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
+ Copyright (C) 2008 Devin Heitmueller <dheitmueller@linuxtv.org>
+ Copyright (C) 2005-2008 Auvitek International, Ltd.
+ Copyright (C) 2012 Michael Krufky <mkrufky@linuxtv.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+#include "au8522_priv.h"
+
+MODULE_LICENSE("GPL");
+
+static int debug;
+
+#define dprintk(arg...)\
+ do { if (debug)\
+ printk(arg);\
+ } while (0)
+
+/* Despite the name "hybrid_tuner", the framework works just as well for
+ hybrid demodulators... */
+static LIST_HEAD(hybrid_tuner_instance_list);
+static DEFINE_MUTEX(au8522_list_mutex);
+
+/* 16 bit registers, 8 bit values */
+int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
+{
+ int ret;
+ u8 buf[] = { (reg >> 8) | 0x80, reg & 0xff, data };
+
+ struct i2c_msg msg = { .addr = state->config->demod_address,
+ .flags = 0, .buf = buf, .len = 3 };
+
+ ret = i2c_transfer(state->i2c, &msg, 1);
+
+ if (ret != 1)
+ printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
+ "ret == %i)\n", __func__, reg, data, ret);
+
+ return (ret != 1) ? -1 : 0;
+}
+EXPORT_SYMBOL(au8522_writereg);
+
+u8 au8522_readreg(struct au8522_state *state, u16 reg)
+{
+ int ret;
+ u8 b0[] = { (reg >> 8) | 0x40, reg & 0xff };
+ u8 b1[] = { 0 };
+
+ struct i2c_msg msg[] = {
+ { .addr = state->config->demod_address, .flags = 0,
+ .buf = b0, .len = 2 },
+ { .addr = state->config->demod_address, .flags = I2C_M_RD,
+ .buf = b1, .len = 1 } };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+
+ if (ret != 2)
+ printk(KERN_ERR "%s: readreg error (ret == %i)\n",
+ __func__, ret);
+ return b1[0];
+}
+EXPORT_SYMBOL(au8522_readreg);
+
+int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct au8522_state *state = fe->demodulator_priv;
+
+ dprintk("%s(%d)\n", __func__, enable);
+
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're being asked to manage the gate even though we're
+ not in digital mode. This can occur if we get switched
+ over to analog mode before the dvb_frontend kernel thread
+ has completely shutdown */
+ return 0;
+ }
+
+ if (enable)
+ return au8522_writereg(state, 0x106, 1);
+ else
+ return au8522_writereg(state, 0x106, 0);
+}
+EXPORT_SYMBOL(au8522_i2c_gate_ctrl);
+
+/* Reset the demod hardware and reset all of the configuration registers
+ to a default state. */
+int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c,
+ u8 client_address)
+{
+ int ret;
+
+ mutex_lock(&au8522_list_mutex);
+ ret = hybrid_tuner_request_state(struct au8522_state, (*state),
+ hybrid_tuner_instance_list,
+ i2c, client_address, "au8522");
+ mutex_unlock(&au8522_list_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(au8522_get_state);
+
+void au8522_release_state(struct au8522_state *state)
+{
+ mutex_lock(&au8522_list_mutex);
+ if (state != NULL)
+ hybrid_tuner_release_state(state);
+ mutex_unlock(&au8522_list_mutex);
+}
+EXPORT_SYMBOL(au8522_release_state);
+
+static int au8522_led_gpio_enable(struct au8522_state *state, int onoff)
+{
+ struct au8522_led_config *led_config = state->config->led_cfg;
+ u8 val;
+
+ /* bail out if we can't control an LED */
+ if (!led_config || !led_config->gpio_output ||
+ !led_config->gpio_output_enable || !led_config->gpio_output_disable)
+ return 0;
+
+ val = au8522_readreg(state, 0x4000 |
+ (led_config->gpio_output & ~0xc000));
+ if (onoff) {
+ /* enable GPIO output */
+ val &= ~((led_config->gpio_output_enable >> 8) & 0xff);
+ val |= (led_config->gpio_output_enable & 0xff);
+ } else {
+ /* disable GPIO output */
+ val &= ~((led_config->gpio_output_disable >> 8) & 0xff);
+ val |= (led_config->gpio_output_disable & 0xff);
+ }
+ return au8522_writereg(state, 0x8000 |
+ (led_config->gpio_output & ~0xc000), val);
+}
+
+/* led = 0 | off
+ * led = 1 | signal ok
+ * led = 2 | signal strong
+ * led < 0 | only light led if leds are currently off
+ */
+int au8522_led_ctrl(struct au8522_state *state, int led)
+{
+ struct au8522_led_config *led_config = state->config->led_cfg;
+ int i, ret = 0;
+
+ /* bail out if we can't control an LED */
+ if (!led_config || !led_config->gpio_leds ||
+ !led_config->num_led_states || !led_config->led_states)
+ return 0;
+
+ if (led < 0) {
+ /* if LED is already lit, then leave it as-is */
+ if (state->led_state)
+ return 0;
+ else
+ led *= -1;
+ }
+
+ /* toggle LED if changing state */
+ if (state->led_state != led) {
+ u8 val;
+
+ dprintk("%s: %d\n", __func__, led);
+
+ au8522_led_gpio_enable(state, 1);
+
+ val = au8522_readreg(state, 0x4000 |
+ (led_config->gpio_leds & ~0xc000));
+
+ /* start with all leds off */
+ for (i = 0; i < led_config->num_led_states; i++)
+ val &= ~led_config->led_states[i];
+
+ /* set selected LED state */
+ if (led < led_config->num_led_states)
+ val |= led_config->led_states[led];
+ else if (led_config->num_led_states)
+ val |=
+ led_config->led_states[led_config->num_led_states - 1];
+
+ ret = au8522_writereg(state, 0x8000 |
+ (led_config->gpio_leds & ~0xc000), val);
+ if (ret < 0)
+ return ret;
+
+ state->led_state = led;
+
+ if (led == 0)
+ au8522_led_gpio_enable(state, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(au8522_led_ctrl);
+
+int au8522_init(struct dvb_frontend *fe)
+{
+ struct au8522_state *state = fe->demodulator_priv;
+ dprintk("%s()\n", __func__);
+
+ state->operational_mode = AU8522_DIGITAL_MODE;
+
+ /* Clear out any state associated with the digital side of the
+ chip, so that when it gets powered back up it won't think
+ that it is already tuned */
+ state->current_frequency = 0;
+
+ au8522_writereg(state, 0xa4, 1 << 5);
+
+ au8522_i2c_gate_ctrl(fe, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL(au8522_init);
+
+int au8522_sleep(struct dvb_frontend *fe)
+{
+ struct au8522_state *state = fe->demodulator_priv;
+ dprintk("%s()\n", __func__);
+
+ /* Only power down if the digital side is currently using the chip */
+ if (state->operational_mode == AU8522_ANALOG_MODE) {
+ /* We're not in one of the expected power modes, which means
+ that the DVB thread is probably telling us to go to sleep
+ even though the analog frontend has already started using
+ the chip. So ignore the request */
+ return 0;
+ }
+
+ /* turn off led */
+ au8522_led_ctrl(state, 0);
+
+ /* Power down the chip */
+ au8522_writereg(state, 0xa4, 1 << 5);
+
+ state->current_frequency = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(au8522_sleep);
diff --git a/drivers/media/dvb/frontends/au8522_dig.c b/drivers/media/dvb/frontends/au8522_dig.c
index 25f650934c73..5fc70d6cd04f 100644
--- a/drivers/media/dvb/frontends/au8522_dig.c
+++ b/drivers/media/dvb/frontends/au8522_dig.c
@@ -30,74 +30,11 @@
static int debug;
-/* Despite the name "hybrid_tuner", the framework works just as well for
- hybrid demodulators as well... */
-static LIST_HEAD(hybrid_tuner_instance_list);
-static DEFINE_MUTEX(au8522_list_mutex);
-
#define dprintk(arg...)\
do { if (debug)\
printk(arg);\
} while (0)
-/* 16 bit registers, 8 bit values */
-int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
-{
- int ret;
- u8 buf[] = { (reg >> 8) | 0x80, reg & 0xff, data };
-
- struct i2c_msg msg = { .addr = state->config->demod_address,
- .flags = 0, .buf = buf, .len = 3 };
-
- ret = i2c_transfer(state->i2c, &msg, 1);
-
- if (ret != 1)
- printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
- "ret == %i)\n", __func__, reg, data, ret);
-
- return (ret != 1) ? -1 : 0;
-}
-
-u8 au8522_readreg(struct au8522_state *state, u16 reg)
-{
- int ret;
- u8 b0[] = { (reg >> 8) | 0x40, reg & 0xff };
- u8 b1[] = { 0 };
-
- struct i2c_msg msg[] = {
- { .addr = state->config->demod_address, .flags = 0,
- .buf = b0, .len = 2 },
- { .addr = state->config->demod_address, .flags = I2C_M_RD,
- .buf = b1, .len = 1 } };
-
- ret = i2c_transfer(state->i2c, msg, 2);
-
- if (ret != 2)
- printk(KERN_ERR "%s: readreg error (ret == %i)\n",
- __func__, ret);
- return b1[0];
-}
-
-static int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
- struct au8522_state *state = fe->demodulator_priv;
-
- dprintk("%s(%d)\n", __func__, enable);
-
- if (state->operational_mode == AU8522_ANALOG_MODE) {
- /* We're being asked to manage the gate even though we're
- not in digital mode. This can occur if we get switched
- over to analog mode before the dvb_frontend kernel thread
- has completely shutdown */
- return 0;
- }
-
- if (enable)
- return au8522_writereg(state, 0x106, 1);
- else
- return au8522_writereg(state, 0x106, 0);
-}
-
struct mse2snr_tab {
u16 val;
u16 data;
@@ -609,136 +546,6 @@ static int au8522_set_frontend(struct dvb_frontend *fe)
return 0;
}
-/* Reset the demod hardware and reset all of the configuration registers
- to a default state. */
-int au8522_init(struct dvb_frontend *fe)
-{
- struct au8522_state *state = fe->demodulator_priv;
- dprintk("%s()\n", __func__);
-
- state->operational_mode = AU8522_DIGITAL_MODE;
-
- /* Clear out any state associated with the digital side of the
- chip, so that when it gets powered back up it won't think
- that it is already tuned */
- state->current_frequency = 0;
-
- au8522_writereg(state, 0xa4, 1 << 5);
-
- au8522_i2c_gate_ctrl(fe, 1);
-
- return 0;
-}
-
-static int au8522_led_gpio_enable(struct au8522_state *state, int onoff)
-{
- struct au8522_led_config *led_config = state->config->led_cfg;
- u8 val;
-
- /* bail out if we can't control an LED */
- if (!led_config || !led_config->gpio_output ||
- !led_config->gpio_output_enable || !led_config->gpio_output_disable)
- return 0;
-
- val = au8522_readreg(state, 0x4000 |
- (led_config->gpio_output & ~0xc000));
- if (onoff) {
- /* enable GPIO output */
- val &= ~((led_config->gpio_output_enable >> 8) & 0xff);
- val |= (led_config->gpio_output_enable & 0xff);
- } else {
- /* disable GPIO output */
- val &= ~((led_config->gpio_output_disable >> 8) & 0xff);
- val |= (led_config->gpio_output_disable & 0xff);
- }
- return au8522_writereg(state, 0x8000 |
- (led_config->gpio_output & ~0xc000), val);
-}
-
-/* led = 0 | off
- * led = 1 | signal ok
- * led = 2 | signal strong
- * led < 0 | only light led if leds are currently off
- */
-static int au8522_led_ctrl(struct au8522_state *state, int led)
-{
- struct au8522_led_config *led_config = state->config->led_cfg;
- int i, ret = 0;
-
- /* bail out if we can't control an LED */
- if (!led_config || !led_config->gpio_leds ||
- !led_config->num_led_states || !led_config->led_states)
- return 0;
-
- if (led < 0) {
- /* if LED is already lit, then leave it as-is */
- if (state->led_state)
- return 0;
- else
- led *= -1;
- }
-
- /* toggle LED if changing state */
- if (state->led_state != led) {
- u8 val;
-
- dprintk("%s: %d\n", __func__, led);
-
- au8522_led_gpio_enable(state, 1);
-
- val = au8522_readreg(state, 0x4000 |
- (led_config->gpio_leds & ~0xc000));
-
- /* start with all leds off */
- for (i = 0; i < led_config->num_led_states; i++)
- val &= ~led_config->led_states[i];
-
- /* set selected LED state */
- if (led < led_config->num_led_states)
- val |= led_config->led_states[led];
- else if (led_config->num_led_states)
- val |=
- led_config->led_states[led_config->num_led_states - 1];
-
- ret = au8522_writereg(state, 0x8000 |
- (led_config->gpio_leds & ~0xc000), val);
- if (ret < 0)
- return ret;
-
- state->led_state = led;
-
- if (led == 0)
- au8522_led_gpio_enable(state, 0);
- }
-
- return 0;
-}
-
-int au8522_sleep(struct dvb_frontend *fe)
-{
- struct au8522_state *state = fe->demodulator_priv;
- dprintk("%s()\n", __func__);
-
- /* Only power down if the digital side is currently using the chip */
- if (state->operational_mode == AU8522_ANALOG_MODE) {
- /* We're not in one of the expected power modes, which means
- that the DVB thread is probably telling us to go to sleep
- even though the analog frontend has already started using
- the chip. So ignore the request */
- return 0;
- }
-
- /* turn off led */
- au8522_led_ctrl(state, 0);
-
- /* Power down the chip */
- au8522_writereg(state, 0xa4, 1 << 5);
-
- state->current_frequency = 0;
-
- return 0;
-}
-
static int au8522_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct au8522_state *state = fe->demodulator_priv;
@@ -931,28 +738,6 @@ static int au8522_get_tune_settings(struct dvb_frontend *fe,
static struct dvb_frontend_ops au8522_ops;
-int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c,
- u8 client_address)
-{
- int ret;
-
- mutex_lock(&au8522_list_mutex);
- ret = hybrid_tuner_request_state(struct au8522_state, (*state),
- hybrid_tuner_instance_list,
- i2c, client_address, "au8522");
- mutex_unlock(&au8522_list_mutex);
-
- return ret;
-}
-
-void au8522_release_state(struct au8522_state *state)
-{
- mutex_lock(&au8522_list_mutex);
- if (state != NULL)
- hybrid_tuner_release_state(state);
- mutex_unlock(&au8522_list_mutex);
-}
-
static void au8522_release(struct dvb_frontend *fe)
{
diff --git a/drivers/media/dvb/frontends/au8522_priv.h b/drivers/media/dvb/frontends/au8522_priv.h
index 751e17d692a9..6e4a438732b5 100644
--- a/drivers/media/dvb/frontends/au8522_priv.h
+++ b/drivers/media/dvb/frontends/au8522_priv.h
@@ -81,6 +81,8 @@ int au8522_sleep(struct dvb_frontend *fe);
int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c,
u8 client_address);
void au8522_release_state(struct au8522_state *state);
+int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable);
+int au8522_led_ctrl(struct au8522_state *state, int led);
/* REGISTERS */
#define AU8522_INPUT_CONTROL_REG081H 0x081
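With au8522_i2c_gate_ctrl() and au8522_led_ctrl() now visible through au8522_priv.h, other compilation units of the driver can reuse them. A hedged sketch of such a caller — tune_and_indicate() and the LED level chosen here are assumptions, not part of the patch:

/* Hedged sketch: bracket a tuner access with the gate control and
 * refresh the LED afterwards.  Illustrative only. */
#include "au8522_priv.h"

static int tune_and_indicate(struct dvb_frontend *fe, struct au8522_state *state)
{
        int ret = 0;

        au8522_i2c_gate_ctrl(fe, 1);            /* open gate to reach the tuner */
        if (fe->ops.tuner_ops.set_params)
                ret = fe->ops.tuner_ops.set_params(fe);
        au8522_i2c_gate_ctrl(fe, 0);            /* close gate again */

        if (ret == 0)
                au8522_led_ctrl(state, 2);      /* light "signal strong" LED */
        return ret;
}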
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 5101f10f2d7a..3180f5b2a6a6 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -512,14 +512,13 @@ static int cx24110_read_snr(struct dvb_frontend* fe, u16* snr)
static int cx24110_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
struct cx24110_state *state = fe->demodulator_priv;
- u32 lastbyer;
if(cx24110_readreg(state,0x10)&0x40) {
/* the RS error counter has finished one counting window */
cx24110_writereg(state,0x10,0x60); /* select the byer reg */
- lastbyer=cx24110_readreg(state,0x12)|
- (cx24110_readreg(state,0x13)<<8)|
- (cx24110_readreg(state,0x14)<<16);
+ (void)(cx24110_readreg(state, 0x12) |
+ (cx24110_readreg(state, 0x13) << 8) |
+ (cx24110_readreg(state, 0x14) << 16));
cx24110_writereg(state,0x10,0x70); /* select the bler reg */
state->lastbler=cx24110_readreg(state,0x12)|
(cx24110_readreg(state,0x13)<<8)|
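The cx24110 change keeps the three register reads (they advance the demod's error-counter window) but discards the combined value with a (void) cast instead of storing it in the unused lastbyer local. A standalone illustration of the idiom, with read_reg() standing in for cx24110_readreg():

/* The reads must still be issued for their side effect, but the value is
 * deliberately unused; the (void) cast documents that and silences
 * unused-variable warnings. */
#include <stdio.h>

static int read_reg(int reg)
{
        printf("read reg 0x%02x\n", reg);       /* side effect we must keep */
        return 0;
}

int main(void)
{
        (void)(read_reg(0x12) | (read_reg(0x13) << 8) | (read_reg(0x14) << 16));
        return 0;
}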
diff --git a/drivers/media/dvb/frontends/cxd2820r_c.c b/drivers/media/dvb/frontends/cxd2820r_c.c
index 945404991529..ed3b0ba624de 100644
--- a/drivers/media/dvb/frontends/cxd2820r_c.c
+++ b/drivers/media/dvb/frontends/cxd2820r_c.c
@@ -121,7 +121,7 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe)
if (ret)
goto error;
- switch ((buf[0] >> 0) & 0x03) {
+ switch ((buf[0] >> 0) & 0x07) {
case 0:
c->modulation = QAM_16;
break;
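The cxd2820r_c fix widens the modulation-field mask from two bits to three, so field values above 3 no longer alias onto the low cases. A standalone illustration with a made-up readback byte:

/* With a 0x03 mask, field values 4..7 collapse onto 0..3; 0x07 keeps
 * them distinct so the switch can reach the higher cases. */
#include <stdio.h>

int main(void)
{
        unsigned char buf0 = 0x04;      /* hypothetical readback value */

        printf("old mask: %u\n", (buf0 >> 0) & 0x03);   /* 0 - wrong bucket */
        printf("new mask: %u\n", (buf0 >> 0) & 0x07);   /* 4 - distinct case */
        return 0;
}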
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 5c7c2aaf9bf5..3bba37d74f57 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -526,12 +526,12 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
if (ret)
goto error;
- if (status & FE_HAS_SIGNAL)
+ if (status & FE_HAS_LOCK)
break;
}
/* check if we have a valid signal */
- if (status) {
+ if (status & FE_HAS_LOCK) {
priv->last_tune_failed = 0;
return DVBFE_ALGO_SEARCH_SUCCESS;
} else {
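The cxd2820r_core change makes the search loop wait for, and report success only on, a full FE_HAS_LOCK rather than any non-zero status bit. A hedged standalone sketch of that tightened loop, with poll_status() as a stand-in for the driver's status read:

/* Keep polling until the full lock bit is set; only FE_HAS_LOCK counts
 * as a successful search. */
#include <stdio.h>

#define FE_HAS_SIGNAL 0x01
#define FE_HAS_LOCK   0x10

static unsigned int poll_status(int attempt)
{
        /* pretend the demod reports signal first, then full lock */
        return attempt < 2 ? FE_HAS_SIGNAL : (FE_HAS_SIGNAL | FE_HAS_LOCK);
}

int main(void)
{
        unsigned int status = 0;
        int i;

        for (i = 0; i < 5; i++) {
                status = poll_status(i);
                if (status & FE_HAS_LOCK)       /* was: any status bit */
                        break;
        }

        printf(status & FE_HAS_LOCK ? "search success\n" : "search failed\n");
        return 0;
}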
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 5ceadc285b3a..3e1eefada0e8 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -2396,11 +2396,6 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
more common) */
st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
- /* FIXME: make sure the dev.parent field is initialized, or else
- request_firmware() will hit an OOPS (this should be moved somewhere
- more common) */
- st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
-
dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
/* init 7090 tuner adapter */
diff --git a/drivers/media/dvb/frontends/dib9000.c b/drivers/media/dvb/frontends/dib9000.c
index 80848b4c15d4..6201c59a78dd 100644
--- a/drivers/media/dvb/frontends/dib9000.c
+++ b/drivers/media/dvb/frontends/dib9000.c
@@ -31,13 +31,6 @@ struct i2c_device {
u8 *i2c_write_buffer;
};
-/* lock */
-#define DIB_LOCK struct mutex
-#define DibAcquireLock(lock) mutex_lock_interruptible(lock)
-#define DibReleaseLock(lock) mutex_unlock(lock)
-#define DibInitLock(lock) mutex_init(lock)
-#define DibFreeLock(lock)
-
struct dib9000_pid_ctrl {
#define DIB9000_PID_FILTER_CTRL 0
#define DIB9000_PID_FILTER 1
@@ -82,11 +75,11 @@ struct dib9000_state {
} fe_mm[18];
u8 memcmd;
- DIB_LOCK mbx_if_lock; /* to protect read/write operations */
- DIB_LOCK mbx_lock; /* to protect the whole mailbox handling */
+ struct mutex mbx_if_lock; /* to protect read/write operations */
+ struct mutex mbx_lock; /* to protect the whole mailbox handling */
- DIB_LOCK mem_lock; /* to protect the memory accesses */
- DIB_LOCK mem_mbx_lock; /* to protect the memory-based mailbox */
+ struct mutex mem_lock; /* to protect the memory accesses */
+ struct mutex mem_mbx_lock; /* to protect the memory-based mailbox */
#define MBX_MAX_WORDS (256 - 200 - 2)
#define DIB9000_MSG_CACHE_SIZE 2
@@ -108,7 +101,7 @@ struct dib9000_state {
struct i2c_msg msg[2];
u8 i2c_write_buffer[255];
u8 i2c_read_buffer[255];
- DIB_LOCK demod_lock;
+ struct mutex demod_lock;
u8 get_frontend_internal;
struct dib9000_pid_ctrl pid_ctrl[10];
s8 pid_ctrl_index; /* -1: empty list; -2: do not use the list */
@@ -446,13 +439,13 @@ static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u1
if (!state->platform.risc.fw_is_running)
return -EIO;
- if (DibAcquireLock(&state->platform.risc.mem_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
dib9000_risc_mem_setup(state, cmd | 0x80);
dib9000_risc_mem_read_chunks(state, b, len);
- DibReleaseLock(&state->platform.risc.mem_lock);
+ mutex_unlock(&state->platform.risc.mem_lock);
return 0;
}
@@ -462,13 +455,13 @@ static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8
if (!state->platform.risc.fw_is_running)
return -EIO;
- if (DibAcquireLock(&state->platform.risc.mem_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
dib9000_risc_mem_setup(state, cmd);
dib9000_risc_mem_write_chunks(state, b, m->size);
- DibReleaseLock(&state->platform.risc.mem_lock);
+ mutex_unlock(&state->platform.risc.mem_lock);
return 0;
}
@@ -537,7 +530,7 @@ static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data,
if (!state->platform.risc.fw_is_running)
return -EINVAL;
- if (DibAcquireLock(&state->platform.risc.mbx_if_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -584,7 +577,7 @@ static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data,
ret = (u8) dib9000_write_word_attr(state, 1043, 1 << 14, attr);
out:
- DibReleaseLock(&state->platform.risc.mbx_if_lock);
+ mutex_unlock(&state->platform.risc.mbx_if_lock);
return ret;
}
@@ -602,7 +595,7 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
if (!state->platform.risc.fw_is_running)
return 0;
- if (DibAcquireLock(&state->platform.risc.mbx_if_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
dprintk("could not get the lock");
return 0;
}
@@ -643,7 +636,7 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
/* Update register nb_mes_in_TX */
dib9000_write_word_attr(state, 1028 + mc_base, 1 << 14, attr);
- DibReleaseLock(&state->platform.risc.mbx_if_lock);
+ mutex_unlock(&state->platform.risc.mbx_if_lock);
return size + 1;
}
@@ -708,12 +701,11 @@ static u8 dib9000_mbx_count(struct dib9000_state *state, u8 risc_id, u16 attr)
static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
{
int ret = 0;
- u16 tmp;
if (!state->platform.risc.fw_is_running)
return -1;
- if (DibAcquireLock(&state->platform.risc.mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mbx_lock) < 0) {
dprintk("could not get the lock");
return -1;
}
@@ -721,10 +713,10 @@ static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
if (dib9000_mbx_count(state, 1, attr)) /* 1=RiscB */
ret = dib9000_mbx_fetch_to_cache(state, attr);
- tmp = dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */
+ dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */
/* if (tmp) */
/* dprintk( "cleared IRQ: %x", tmp); */
- DibReleaseLock(&state->platform.risc.mbx_lock);
+ mutex_unlock(&state->platform.risc.mbx_lock);
return ret;
}
@@ -1193,7 +1185,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe)
struct dibDVBTChannel *ch;
int ret = 0;
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -1323,7 +1315,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe)
}
error:
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
return ret;
}
@@ -1678,7 +1670,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
p[12] = 0;
}
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
return 0;
}
@@ -1692,7 +1684,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
/* do the transaction */
if (dib9000_fw_memmbx_sync(state, FE_SYNC_COMPONENT_ACCESS) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
return 0;
}
@@ -1700,7 +1692,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
if ((num > 1) && (msg[1].flags & I2C_M_RD))
dib9000_risc_mem_read(state, FE_MM_RW_COMPONENT_ACCESS_BUFFER, msg[1].buf, msg[1].len);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
return num;
}
@@ -1789,7 +1781,7 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
return 0;
}
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -1799,7 +1791,7 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
dprintk("PID filter enabled %d", onoff);
ret = dib9000_write_word(state, 294 + 1, val);
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -1824,14 +1816,14 @@ int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
return 0;
}
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
ret = dib9000_write_word(state, 300 + 1 + id,
onoff ? (1 << 13) | pid : 0);
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
EXPORT_SYMBOL(dib9000_fw_pid_filter);
@@ -1851,11 +1843,6 @@ static void dib9000_release(struct dvb_frontend *demod)
for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (st->fe[index_frontend] != NULL); index_frontend++)
dvb_frontend_detach(st->fe[index_frontend]);
- DibFreeLock(&state->platform.risc.mbx_if_lock);
- DibFreeLock(&state->platform.risc.mbx_lock);
- DibFreeLock(&state->platform.risc.mem_lock);
- DibFreeLock(&state->platform.risc.mem_mbx_lock);
- DibFreeLock(&state->demod_lock);
dibx000_exit_i2c_master(&st->i2c_master);
i2c_del_adapter(&st->tuner_adap);
@@ -1875,7 +1862,7 @@ static int dib9000_sleep(struct dvb_frontend *fe)
u8 index_frontend;
int ret = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -1887,7 +1874,7 @@ static int dib9000_sleep(struct dvb_frontend *fe)
ret = dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);
error:
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -1905,7 +1892,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe)
int ret = 0;
if (state->get_frontend_internal == 0) {
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -1964,7 +1951,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe)
return_value:
if (state->get_frontend_internal == 0)
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -2012,7 +1999,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
}
state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return 0;
}
@@ -2081,7 +2068,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* check the tune result */
if (exit_condition == 1) { /* tune failed */
dprintk("tune failed");
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
/* tune failed; put all the pid filtering cmd to junk */
state->pid_ctrl_index = -1;
return 0;
@@ -2137,7 +2124,7 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
/* turn off the diversity for the last frontend */
dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0);
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
if (state->pid_ctrl_index >= 0) {
u8 index_pid_filter_cmd;
u8 pid_ctrl_index = state->pid_ctrl_index;
@@ -2175,7 +2162,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
u8 index_frontend;
u16 lock = 0, lock_slave = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -2197,7 +2184,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
if ((lock & 0x0008) || (lock_slave & 0x0008))
*stat |= FE_HAS_LOCK;
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return 0;
}
@@ -2208,30 +2195,30 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
u16 *c;
int ret = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
ret = -EINTR;
goto error;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
state->i2c_read_buffer, 16 * 2);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
c = (u16 *)state->i2c_read_buffer;
*ber = c[10] << 16 | c[11];
error:
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -2243,7 +2230,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
u16 val;
int ret = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -2256,18 +2243,18 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
*strength += val;
}
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
ret = -EINTR;
goto error;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
val = 65535 - c[4];
if (val > 65535 - *strength)
@@ -2276,7 +2263,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
*strength += val;
error:
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -2287,16 +2274,16 @@ static u32 dib9000_get_snr(struct dvb_frontend *fe)
u32 n, s, exp;
u16 val;
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
return 0;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
return 0;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
val = c[7];
n = (val >> 4) & 0xff;
@@ -2326,7 +2313,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
u8 index_frontend;
u32 snr_master;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
@@ -2340,7 +2327,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
} else
*snr = 0;
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return 0;
}
@@ -2351,27 +2338,27 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
u16 *c = (u16 *)state->i2c_read_buffer;
int ret = 0;
- if (DibAcquireLock(&state->demod_lock) < 0) {
+ if (mutex_lock_interruptible(&state->demod_lock) < 0) {
dprintk("could not get the lock");
return -EINTR;
}
- if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) {
+ if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
dprintk("could not get the lock");
ret = -EINTR;
goto error;
}
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
ret = -EIO;
goto error;
}
dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
- DibReleaseLock(&state->platform.risc.mem_mbx_lock);
+ mutex_unlock(&state->platform.risc.mem_mbx_lock);
*unc = c[12];
error:
- DibReleaseLock(&state->demod_lock);
+ mutex_unlock(&state->demod_lock);
return ret;
}
@@ -2514,11 +2501,11 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
st->gpio_val = DIB9000_GPIO_DEFAULT_VALUES;
st->gpio_pwm_pos = DIB9000_GPIO_DEFAULT_PWM_POS;
- DibInitLock(&st->platform.risc.mbx_if_lock);
- DibInitLock(&st->platform.risc.mbx_lock);
- DibInitLock(&st->platform.risc.mem_lock);
- DibInitLock(&st->platform.risc.mem_mbx_lock);
- DibInitLock(&st->demod_lock);
+ mutex_init(&st->platform.risc.mbx_if_lock);
+ mutex_init(&st->platform.risc.mbx_lock);
+ mutex_init(&st->platform.risc.mem_lock);
+ mutex_init(&st->platform.risc.mem_mbx_lock);
+ mutex_init(&st->demod_lock);
st->get_frontend_internal = 0;
st->pid_ctrl_index = -2;
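The dib9000 conversion above replaces the DIB_LOCK wrapper macros with plain struct mutex and direct mutex_init()/mutex_lock_interruptible()/mutex_unlock() calls; since DibFreeLock() was already empty, the release path simply loses those lines. A minimal hedged sketch of the resulting pattern — demo_state and demo_op() are illustrative names, not dib9000 code:

/* Hedged sketch of the open-coded mutex pattern the conversion settles on. */
#include <linux/mutex.h>
#include <linux/errno.h>

struct demo_state {
        struct mutex demod_lock;        /* was: DIB_LOCK demod_lock */
};

static void demo_init(struct demo_state *st)
{
        mutex_init(&st->demod_lock);    /* was: DibInitLock() */
}

static int demo_op(struct demo_state *st)
{
        int ret;

        if (mutex_lock_interruptible(&st->demod_lock) < 0)
                return -EINTR;          /* interrupted while waiting */

        ret = 0;                        /* ... protected work ... */

        mutex_unlock(&st->demod_lock);  /* was: DibReleaseLock() */
        return ret;
}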
diff --git a/drivers/media/dvb/frontends/drxd.h b/drivers/media/dvb/frontends/drxd.h
index 34398738f9bc..216c8c3702f8 100644
--- a/drivers/media/dvb/frontends/drxd.h
+++ b/drivers/media/dvb/frontends/drxd.h
@@ -51,9 +51,23 @@ struct drxd_config {
s16(*osc_deviation) (void *priv, s16 dev, int flag);
};
+#if defined(CONFIG_DVB_DRXD) || \
+ (defined(CONFIG_DVB_DRXD_MODULE) && defined(MODULE))
extern
struct dvb_frontend *drxd_attach(const struct drxd_config *config,
void *priv, struct i2c_adapter *i2c,
struct device *dev);
+#else
+static inline
+struct dvb_frontend *drxd_attach(const struct drxd_config *config,
+ void *priv, struct i2c_adapter *i2c,
+ struct device *dev)
+{
+ printk(KERN_INFO "%s: not probed - driver disabled by Kconfig\n",
+ __func__);
+ return NULL;
+}
+#endif
+
extern int drxd_config_i2c(struct dvb_frontend *, int);
#endif
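The drxd.h hunk adds the usual Kconfig-guarded attach stub so bridge drivers can call drxd_attach() unconditionally and just test for NULL. The same pattern, applied to a made-up "foo" demod for illustration — CONFIG_DVB_FOO, foo_config and foo_attach() are assumptions, not real symbols:

/* Hedged sketch of the attach-stub pattern for a hypothetical "foo" demod:
 * when the driver is not reachable from this build, the inline stub logs
 * and returns NULL instead of causing a link failure. */
#if defined(CONFIG_DVB_FOO) || \
        (defined(CONFIG_DVB_FOO_MODULE) && defined(MODULE))
extern struct dvb_frontend *foo_attach(const struct foo_config *config,
                                       struct i2c_adapter *i2c);
#else
static inline
struct dvb_frontend *foo_attach(const struct foo_config *config,
                                struct i2c_adapter *i2c)
{
        printk(KERN_INFO "%s: driver disabled by Kconfig\n", __func__);
        return NULL;    /* caller checks for NULL and skips this demod */
}
#endif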
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
index a414b1f2b6a5..60b868faeacf 100644
--- a/drivers/media/dvb/frontends/drxk_hard.c
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -1380,20 +1380,20 @@ static int DownloadMicrocode(struct drxk_state *state,
const u8 pMCImage[], u32 Length)
{
const u8 *pSrc = pMCImage;
- u16 Flags;
- u16 Drain;
u32 Address;
u16 nBlocks;
u16 BlockSize;
- u16 BlockCRC;
u32 offset = 0;
u32 i;
int status = 0;
dprintk(1, "\n");
- /* down the drain (we don care about MAGIC_WORD) */
+ /* down the drain (we don't care about MAGIC_WORD) */
+#if 0
+ /* For future reference */
Drain = (pSrc[0] << 8) | pSrc[1];
+#endif
pSrc += sizeof(u16);
offset += sizeof(u16);
nBlocks = (pSrc[0] << 8) | pSrc[1];
@@ -1410,11 +1410,17 @@ static int DownloadMicrocode(struct drxk_state *state,
pSrc += sizeof(u16);
offset += sizeof(u16);
+#if 0
+ /* For future reference */
Flags = (pSrc[0] << 8) | pSrc[1];
+#endif
pSrc += sizeof(u16);
offset += sizeof(u16);
+#if 0
+ /* For future reference */
BlockCRC = (pSrc[0] << 8) | pSrc[1];
+#endif
pSrc += sizeof(u16);
offset += sizeof(u16);
@@ -5829,7 +5835,7 @@ static int WriteGPIO(struct drxk_state *state)
}
if (state->UIO_mask & 0x0002) { /* UIO-2 */
/* write to io pad configuration register - output mode */
- status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ status = write16(state, SIO_PDR_SMA_RX_CFG__A, state->m_GPIOCfg);
if (status < 0)
goto error;
@@ -5848,7 +5854,7 @@ static int WriteGPIO(struct drxk_state *state)
}
if (state->UIO_mask & 0x0004) { /* UIO-3 */
/* write to io pad configuration register - output mode */
- status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ status = write16(state, SIO_PDR_GPIO_CFG__A, state->m_GPIOCfg);
if (status < 0)
goto error;
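In DownloadMicrocode() the drain word, flags and block CRC are no longer stored; the parser just advances pSrc and offset past each two-byte big-endian field it does not need. A standalone illustration of that header walk with made-up bytes:

/* Each field is two big-endian bytes; unneeded fields are skipped by
 * advancing the cursor without decoding them. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        const uint8_t hdr[] = { 0xde, 0xad,     /* drain word (ignored) */
                                0x00, 0x02 };   /* number of blocks */
        const uint8_t *src = hdr;
        size_t offset = 0;

        src += sizeof(uint16_t);                /* skip drain, keep cursor in sync */
        offset += sizeof(uint16_t);

        printf("nBlocks = %u (offset %zu)\n", (unsigned)be16(src), offset);
        return 0;
}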
diff --git a/drivers/media/dvb/frontends/drxk_map.h b/drivers/media/dvb/frontends/drxk_map.h
index 9b11a8328869..23e16c12f234 100644
--- a/drivers/media/dvb/frontends/drxk_map.h
+++ b/drivers/media/dvb/frontends/drxk_map.h
@@ -432,6 +432,7 @@
#define SIO_PDR_UIO_OUT_LO__A 0x7F0016
#define SIO_PDR_OHW_CFG__A 0x7F001F
#define SIO_PDR_OHW_CFG_FREF_SEL__M 0x3
+#define SIO_PDR_GPIO_CFG__A 0x7F0021
#define SIO_PDR_MSTRT_CFG__A 0x7F0025
#define SIO_PDR_MERR_CFG__A 0x7F0026
#define SIO_PDR_MCLK_CFG__A 0x7F0028
@@ -446,4 +447,5 @@
#define SIO_PDR_MD5_CFG__A 0x7F0030
#define SIO_PDR_MD6_CFG__A 0x7F0031
#define SIO_PDR_MD7_CFG__A 0x7F0032
+#define SIO_PDR_SMA_RX_CFG__A 0x7F0037
#define SIO_PDR_SMA_TX_CFG__A 0x7F0038
diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
index af65d013db11..4c8ac2657c4a 100644
--- a/drivers/media/dvb/frontends/ds3000.c
+++ b/drivers/media/dvb/frontends/ds3000.c
@@ -1114,7 +1114,10 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
ds3000_writereg(state,
ds3000_dvbs2_init_tab[i],
ds3000_dvbs2_init_tab[i + 1]);
- ds3000_writereg(state, 0xfe, 0x98);
+ if (c->symbol_rate >= 30000000)
+ ds3000_writereg(state, 0xfe, 0x54);
+ else
+ ds3000_writereg(state, 0xfe, 0x98);
break;
default:
return 1;
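The ds3000 change makes the value written to register 0xfe depend on the requested DVB-S2 symbol rate, with 30 MS/s as the cut-over. A standalone illustration; choose_0xfe() is a stand-in name:

/* Pick the register 0xfe value from the symbol rate, as in the hunk above. */
#include <stdio.h>
#include <stdint.h>

static uint8_t choose_0xfe(uint32_t symbol_rate)
{
        return symbol_rate >= 30000000 ? 0x54 : 0x98;
}

int main(void)
{
        printf("27.5 MS/s -> 0x%02x\n", (unsigned)choose_0xfe(27500000));  /* 0x98 */
        printf("45.0 MS/s -> 0x%02x\n", (unsigned)choose_0xfe(45000000));  /* 0x54 */
        return 0;
}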
diff --git a/drivers/media/dvb/frontends/it913x-fe.c b/drivers/media/dvb/frontends/it913x-fe.c
index 84df03c29179..708cbf197913 100644
--- a/drivers/media/dvb/frontends/it913x-fe.c
+++ b/drivers/media/dvb/frontends/it913x-fe.c
@@ -633,10 +633,9 @@ static int it913x_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
static int it913x_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct it913x_fe_state *state = fe->demodulator_priv;
- int ret;
u8 reg[5];
/* Read Aborted Packets and Pre-Viterbi error rate 5 bytes */
- ret = it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg));
+ it913x_read_reg(state, RSD_ABORT_PKT_LSB, reg, sizeof(reg));
state->ucblocks += (u32)(reg[1] << 8) | reg[0];
*ber = (u32)(reg[4] << 16) | (reg[3] << 8) | reg[2];
return 0;
@@ -658,10 +657,9 @@ static int it913x_fe_get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct it913x_fe_state *state = fe->demodulator_priv;
- int ret;
u8 reg[8];
- ret = it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg));
+ it913x_read_reg(state, REG_TPSD_TX_MODE, reg, sizeof(reg));
if (reg[3] < 3)
p->modulation = fe_con[reg[3]];
@@ -691,25 +689,25 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct it913x_fe_state *state = fe->demodulator_priv;
- int ret, i;
+ int i;
u8 empty_ch, last_ch;
state->it913x_status = 0;
/* Set bw*/
- ret = it913x_fe_select_bw(state, p->bandwidth_hz,
+ it913x_fe_select_bw(state, p->bandwidth_hz,
state->adcFrequency);
/* Training Mode Off */
- ret = it913x_write_reg(state, PRO_LINK, TRAINING_MODE, 0x0);
+ it913x_write_reg(state, PRO_LINK, TRAINING_MODE, 0x0);
/* Clear Empty Channel */
- ret = it913x_write_reg(state, PRO_DMOD, EMPTY_CHANNEL_STATUS, 0x0);
+ it913x_write_reg(state, PRO_DMOD, EMPTY_CHANNEL_STATUS, 0x0);
/* Clear bits */
- ret = it913x_write_reg(state, PRO_DMOD, MP2IF_SYNC_LK, 0x0);
+ it913x_write_reg(state, PRO_DMOD, MP2IF_SYNC_LK, 0x0);
/* LED on */
- ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1);
+ it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x1);
/* Select Band*/
if ((p->frequency >= 51000000) && (p->frequency <= 230000000))
i = 0;
@@ -720,7 +718,7 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe)
else
return -EOPNOTSUPP;
- ret = it913x_write_reg(state, PRO_DMOD, FREE_BAND, i);
+ it913x_write_reg(state, PRO_DMOD, FREE_BAND, i);
deb_info("Frontend Set Tuner Type %02x", state->tuner_type);
switch (state->tuner_type) {
@@ -730,7 +728,7 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe)
case IT9135_60:
case IT9135_61:
case IT9135_62:
- ret = it9137_set_tuner(state,
+ it9137_set_tuner(state,
p->bandwidth_hz, p->frequency);
break;
default:
@@ -742,9 +740,9 @@ static int it913x_fe_set_frontend(struct dvb_frontend *fe)
break;
}
/* LED off */
- ret = it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0);
+ it913x_write_reg(state, PRO_LINK, GPIOH3_O, 0x0);
/* Trigger ofsm */
- ret = it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0);
+ it913x_write_reg(state, PRO_DMOD, TRIGGER_OFSM, 0x0);
last_ch = 2;
for (i = 0; i < 40; ++i) {
empty_ch = it913x_read_reg_u8(state, EMPTY_CHANNEL_STATUS);
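The surrounding context shows how it913x_fe_set_frontend() buckets the tuning frequency into a band index that is then written to FREE_BAND. A hedged sketch of that pattern — only the 51-230 MHz bucket is taken from the patch; the other cut-off is a placeholder, not the driver's real value:

/* Hedged sketch of frequency-to-band selection; placeholder limits. */
#include <stdio.h>
#include <stdint.h>

static int select_band(uint32_t freq_hz)
{
        if (freq_hz >= 51000000 && freq_hz <= 230000000)
                return 0;               /* VHF bucket (from the patch) */
        if (freq_hz > 230000000 && freq_hz <= 860000000)
                return 1;               /* placeholder UHF bucket */
        return -1;                      /* out of range: -EOPNOTSUPP */
}

int main(void)
{
        printf("177.5 MHz -> band %d\n", select_band(177500000));
        printf("650.0 MHz -> band %d\n", select_band(650000000));
        return 0;
}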
diff --git a/drivers/media/dvb/frontends/lg2160.c b/drivers/media/dvb/frontends/lg2160.c
new file mode 100644
index 000000000000..cc11260e99df
--- /dev/null
+++ b/drivers/media/dvb/frontends/lg2160.c
@@ -0,0 +1,1468 @@
+/*
+ * Support for LG2160 - ATSC/MH
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@linuxtv.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/jiffies.h>
+#include <linux/dvb/frontend.h>
+#include "lg2160.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "set debug level (info=1, reg=2 (or-able))");
+
+#define DBG_INFO 1
+#define DBG_REG 2
+
+#define lg_printk(kern, fmt, arg...) \
+ printk(kern "%s: " fmt, __func__, ##arg)
+
+#define lg_info(fmt, arg...) printk(KERN_INFO "lg2160: " fmt, ##arg)
+#define lg_warn(fmt, arg...) lg_printk(KERN_WARNING, fmt, ##arg)
+#define lg_err(fmt, arg...) lg_printk(KERN_ERR, fmt, ##arg)
+#define lg_dbg(fmt, arg...) if (debug & DBG_INFO) \
+ lg_printk(KERN_DEBUG, fmt, ##arg)
+#define lg_reg(fmt, arg...) if (debug & DBG_REG) \
+ lg_printk(KERN_DEBUG, fmt, ##arg)
+
+#define lg_fail(ret) \
+({ \
+ int __ret; \
+ __ret = (ret < 0); \
+ if (__ret) \
+ lg_err("error %d on line %d\n", ret, __LINE__); \
+ __ret; \
+})
+
+struct lg216x_state {
+ struct i2c_adapter *i2c_adap;
+ const struct lg2160_config *cfg;
+
+ struct dvb_frontend frontend;
+
+ u32 current_frequency;
+ u8 parade_id;
+ u8 fic_ver;
+ unsigned int last_reset;
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_write_reg(struct lg216x_state *state, u16 reg, u8 val)
+{
+ int ret;
+ u8 buf[] = { reg >> 8, reg & 0xff, val };
+ struct i2c_msg msg = {
+ .addr = state->cfg->i2c_addr, .flags = 0,
+ .buf = buf, .len = 3,
+ };
+
+ lg_reg("reg: 0x%04x, val: 0x%02x\n", reg, val);
+
+ ret = i2c_transfer(state->i2c_adap, &msg, 1);
+
+ if (ret != 1) {
+ lg_err("error (addr %02x %02x <- %02x, err = %i)\n",
+ msg.buf[0], msg.buf[1], msg.buf[2], ret);
+ if (ret < 0)
+ return ret;
+ else
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int lg216x_read_reg(struct lg216x_state *state, u16 reg, u8 *val)
+{
+ int ret;
+ u8 reg_buf[] = { reg >> 8, reg & 0xff };
+ struct i2c_msg msg[] = {
+ { .addr = state->cfg->i2c_addr,
+ .flags = 0, .buf = reg_buf, .len = 2 },
+ { .addr = state->cfg->i2c_addr,
+ .flags = I2C_M_RD, .buf = val, .len = 1 },
+ };
+
+ lg_reg("reg: 0x%04x\n", reg);
+
+ ret = i2c_transfer(state->i2c_adap, msg, 2);
+
+ if (ret != 2) {
+ lg_err("error (addr %02x reg %04x error (ret == %i)\n",
+ state->cfg->i2c_addr, reg, ret);
+ if (ret < 0)
+ return ret;
+ else
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+struct lg216x_reg {
+ u16 reg;
+ u8 val;
+};
+
+static int lg216x_write_regs(struct lg216x_state *state,
+ struct lg216x_reg *regs, int len)
+{
+ int i, ret;
+
+ lg_reg("writing %d registers...\n", len);
+
+ for (i = 0; i < len; i++) {
+ ret = lg216x_write_reg(state, regs[i].reg, regs[i].val);
+ if (lg_fail(ret))
+ return ret;
+ }
+ return 0;
+}
+
+static int lg216x_set_reg_bit(struct lg216x_state *state,
+ u16 reg, int bit, int onoff)
+{
+ u8 val;
+ int ret;
+
+ lg_reg("reg: 0x%04x, bit: %d, level: %d\n", reg, bit, onoff);
+
+ ret = lg216x_read_reg(state, reg, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= ~(1 << bit);
+ val |= (onoff & 1) << bit;
+
+ ret = lg216x_write_reg(state, reg, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret;
+
+ if (state->cfg->deny_i2c_rptr)
+ return 0;
+
+ lg_dbg("(%d)\n", enable);
+
+ ret = lg216x_set_reg_bit(state, 0x0000, 0, enable ? 0 : 1);
+
+ msleep(1);
+
+ return ret;
+}
+
+static int lg216x_soft_reset(struct lg216x_state *state)
+{
+ int ret;
+
+ lg_dbg("\n");
+
+ ret = lg216x_write_reg(state, 0x0002, 0x00);
+ if (lg_fail(ret))
+ goto fail;
+
+ msleep(20);
+ ret = lg216x_write_reg(state, 0x0002, 0x01);
+ if (lg_fail(ret))
+ goto fail;
+
+ state->last_reset = jiffies_to_msecs(jiffies);
+fail:
+ return ret;
+}
+
+static int lg216x_initialize(struct lg216x_state *state)
+{
+ int ret;
+
+ static struct lg216x_reg lg2160_init[] = {
+#if 0
+ { .reg = 0x0015, .val = 0xe6 },
+#else
+ { .reg = 0x0015, .val = 0xf7 },
+ { .reg = 0x001b, .val = 0x52 },
+ { .reg = 0x0208, .val = 0x00 },
+ { .reg = 0x0209, .val = 0x82 },
+ { .reg = 0x0210, .val = 0xf9 },
+ { .reg = 0x020a, .val = 0x00 },
+ { .reg = 0x020b, .val = 0x82 },
+ { .reg = 0x020d, .val = 0x28 },
+ { .reg = 0x020f, .val = 0x14 },
+#endif
+ };
+
+ static struct lg216x_reg lg2161_init[] = {
+ { .reg = 0x0000, .val = 0x41 },
+ { .reg = 0x0001, .val = 0xfb },
+ { .reg = 0x0216, .val = 0x00 },
+ { .reg = 0x0219, .val = 0x00 },
+ { .reg = 0x021b, .val = 0x55 },
+ { .reg = 0x0606, .val = 0x0a },
+ };
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_write_regs(state,
+ lg2160_init, ARRAY_SIZE(lg2160_init));
+ break;
+ case LG2161:
+ ret = lg216x_write_regs(state,
+ lg2161_init, ARRAY_SIZE(lg2161_init));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_soft_reset(state);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_set_if(struct lg216x_state *state)
+{
+ u8 val;
+ int ret;
+
+ lg_dbg("%d KHz\n", state->cfg->if_khz);
+
+ ret = lg216x_read_reg(state, 0x0132, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfb;
+ val |= (0 == state->cfg->if_khz) ? 0x04 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0132, val);
+ lg_fail(ret);
+
+ /* if NOT zero IF, 6 MHz is the default */
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg2160_agc_fix(struct lg216x_state *state,
+ int if_agc_fix, int rf_agc_fix)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0100, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xf3;
+ val |= (if_agc_fix) ? 0x08 : 0x00;
+ val |= (rf_agc_fix) ? 0x04 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0100, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+#if 0
+static int lg2160_agc_freeze(struct lg216x_state *state,
+ int if_agc_freeze, int rf_agc_freeze)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0100, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xcf;
+ val |= (if_agc_freeze) ? 0x20 : 0x00;
+ val |= (rf_agc_freeze) ? 0x10 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0100, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+#endif
+
+static int lg2160_agc_polarity(struct lg216x_state *state,
+ int if_agc_polarity, int rf_agc_polarity)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0100, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfc;
+ val |= (if_agc_polarity) ? 0x02 : 0x00;
+ val |= (rf_agc_polarity) ? 0x01 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0100, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg2160_tuner_pwr_save_polarity(struct lg216x_state *state,
+ int polarity)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0008, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfe;
+ val |= (polarity) ? 0x01 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0008, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg2160_spectrum_polarity(struct lg216x_state *state,
+ int inverted)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0132, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfd;
+ val |= (inverted) ? 0x02 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0132, val);
+ lg_fail(ret);
+fail:
+ return lg216x_soft_reset(state);
+}
+
+static int lg2160_tuner_pwr_save(struct lg216x_state *state, int onoff)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0007, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xbf;
+ val |= (onoff) ? 0x40 : 0x00;
+
+ ret = lg216x_write_reg(state, 0x0007, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg216x_set_parade(struct lg216x_state *state, int id)
+{
+ int ret;
+
+ ret = lg216x_write_reg(state, 0x013e, id & 0x7f);
+ if (lg_fail(ret))
+ goto fail;
+
+ state->parade_id = id & 0x7f;
+fail:
+ return ret;
+}
+
+static int lg216x_set_ensemble(struct lg216x_state *state, int id)
+{
+ int ret;
+ u16 reg;
+ u8 val;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ reg = 0x0400;
+ break;
+ case LG2161:
+ default:
+ reg = 0x0500;
+ break;
+ }
+
+ ret = lg216x_read_reg(state, reg, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xfe;
+ val |= (id) ? 0x01 : 0x00;
+
+ ret = lg216x_write_reg(state, reg, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg2160_set_spi_clock(struct lg216x_state *state)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0014, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0xf3;
+ val |= (state->cfg->spi_clock << 2);
+
+ ret = lg216x_write_reg(state, 0x0014, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg2161_set_output_interface(struct lg216x_state *state)
+{
+ u8 val;
+ int ret;
+
+ ret = lg216x_read_reg(state, 0x0014, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= ~0x07;
+ val |= state->cfg->output_if; /* FIXME: needs sanity check */
+
+ ret = lg216x_write_reg(state, 0x0014, val);
+ lg_fail(ret);
+fail:
+ return ret;
+}
+
+static int lg216x_enable_fic(struct lg216x_state *state, int onoff)
+{
+ int ret;
+
+ ret = lg216x_write_reg(state, 0x0017, 0x23);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_write_reg(state, 0x0016, 0xfc);
+ if (lg_fail(ret))
+ goto fail;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_write_reg(state, 0x0016,
+ 0xfc | ((onoff) ? 0x02 : 0x00));
+ break;
+ case LG2161:
+ ret = lg216x_write_reg(state, 0x0016, (onoff) ? 0x10 : 0x00);
+ break;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_initialize(state);
+ if (lg_fail(ret))
+ goto fail;
+
+ if (onoff) {
+ ret = lg216x_write_reg(state, 0x0017, 0x03);
+ lg_fail(ret);
+ }
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_get_fic_version(struct lg216x_state *state, u8 *ficver)
+{
+ u8 val;
+ int ret;
+
+ *ficver = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0128, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *ficver = (val >> 3) & 0x1f;
+fail:
+ return ret;
+}
+
+#if 0
+static int lg2160_get_parade_id(struct lg216x_state *state, u8 *id)
+{
+ u8 val;
+ int ret;
+
+ *id = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0123, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *id = val & 0x7f;
+fail:
+ return ret;
+}
+#endif
+
+static int lg216x_get_nog(struct lg216x_state *state, u8 *nog)
+{
+ u8 val;
+ int ret;
+
+ *nog = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0124, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *nog = ((val >> 4) & 0x07) + 1;
+fail:
+ return ret;
+}
+
+static int lg216x_get_tnog(struct lg216x_state *state, u8 *tnog)
+{
+ u8 val;
+ int ret;
+
+ *tnog = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0125, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *tnog = val & 0x1f;
+fail:
+ return ret;
+}
+
+static int lg216x_get_sgn(struct lg216x_state *state, u8 *sgn)
+{
+ u8 val;
+ int ret;
+
+ *sgn = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0124, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *sgn = val & 0x0f;
+fail:
+ return ret;
+}
+
+static int lg216x_get_prc(struct lg216x_state *state, u8 *prc)
+{
+ u8 val;
+ int ret;
+
+ *prc = 0xff; /* invalid value */
+
+ ret = lg216x_read_reg(state, 0x0125, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *prc = ((val >> 5) & 0x07) + 1;
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_get_rs_frame_mode(struct lg216x_state *state,
+ enum atscmh_rs_frame_mode *rs_framemode)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0410, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0513, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ switch ((val >> 4) & 0x03) {
+#if 1
+ default:
+#endif
+ case 0x00:
+ *rs_framemode = ATSCMH_RSFRAME_PRI_ONLY;
+ break;
+ case 0x01:
+ *rs_framemode = ATSCMH_RSFRAME_PRI_SEC;
+ break;
+#if 0
+ default:
+ *rs_framemode = ATSCMH_RSFRAME_RES;
+ break;
+#endif
+ }
+fail:
+ return ret;
+}
+
+static
+int lg216x_get_rs_frame_ensemble(struct lg216x_state *state,
+ enum atscmh_rs_frame_ensemble *rs_frame_ens)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0400, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0500, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ val &= 0x01;
+ *rs_frame_ens = (enum atscmh_rs_frame_ensemble) val;
+fail:
+ return ret;
+}
+
+static int lg216x_get_rs_code_mode(struct lg216x_state *state,
+ enum atscmh_rs_code_mode *rs_code_pri,
+ enum atscmh_rs_code_mode *rs_code_sec)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0410, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0513, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ *rs_code_pri = (enum atscmh_rs_code_mode) ((val >> 2) & 0x03);
+ *rs_code_sec = (enum atscmh_rs_code_mode) (val & 0x03);
+fail:
+ return ret;
+}
+
+static int lg216x_get_sccc_block_mode(struct lg216x_state *state,
+ enum atscmh_sccc_block_mode *sccc_block)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0315, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0511, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ switch (val & 0x03) {
+ case 0x00:
+ *sccc_block = ATSCMH_SCCC_BLK_SEP;
+ break;
+ case 0x01:
+ *sccc_block = ATSCMH_SCCC_BLK_COMB;
+ break;
+ default:
+ *sccc_block = ATSCMH_SCCC_BLK_RES;
+ break;
+ }
+fail:
+ return ret;
+}
+
+static int lg216x_get_sccc_code_mode(struct lg216x_state *state,
+ enum atscmh_sccc_code_mode *mode_a,
+ enum atscmh_sccc_code_mode *mode_b,
+ enum atscmh_sccc_code_mode *mode_c,
+ enum atscmh_sccc_code_mode *mode_d)
+{
+ u8 val;
+ int ret;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0316, &val);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x0512, &val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ switch ((val >> 6) & 0x03) {
+ case 0x00:
+ *mode_a = ATSCMH_SCCC_CODE_HLF;
+ break;
+ case 0x01:
+ *mode_a = ATSCMH_SCCC_CODE_QTR;
+ break;
+ default:
+ *mode_a = ATSCMH_SCCC_CODE_RES;
+ break;
+ }
+
+ switch ((val >> 4) & 0x03) {
+ case 0x00:
+ *mode_b = ATSCMH_SCCC_CODE_HLF;
+ break;
+ case 0x01:
+ *mode_b = ATSCMH_SCCC_CODE_QTR;
+ break;
+ default:
+ *mode_b = ATSCMH_SCCC_CODE_RES;
+ break;
+ }
+
+ switch ((val >> 2) & 0x03) {
+ case 0x00:
+ *mode_c = ATSCMH_SCCC_CODE_HLF;
+ break;
+ case 0x01:
+ *mode_c = ATSCMH_SCCC_CODE_QTR;
+ break;
+ default:
+ *mode_c = ATSCMH_SCCC_CODE_RES;
+ break;
+ }
+
+ switch (val & 0x03) {
+ case 0x00:
+ *mode_d = ATSCMH_SCCC_CODE_HLF;
+ break;
+ case 0x01:
+ *mode_d = ATSCMH_SCCC_CODE_QTR;
+ break;
+ default:
+ *mode_d = ATSCMH_SCCC_CODE_RES;
+ break;
+ }
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+#if 0
+static int lg216x_read_fic_err_count(struct lg216x_state *state, u8 *err)
+{
+ u8 fic_err;
+ int ret;
+
+ *err = 0;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg216x_read_reg(state, 0x0012, &fic_err);
+ break;
+ case LG2161:
+ ret = lg216x_read_reg(state, 0x001e, &fic_err);
+ break;
+ }
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = fic_err;
+fail:
+ return ret;
+}
+
+static int lg2160_read_crc_err_count(struct lg216x_state *state, u16 *err)
+{
+ u8 crc_err1, crc_err2;
+ int ret;
+
+ *err = 0;
+
+ ret = lg216x_read_reg(state, 0x0411, &crc_err1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0412, &crc_err2);
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = (u16)(((crc_err2 & 0x0f) << 8) | crc_err1);
+fail:
+ return ret;
+}
+
+static int lg2161_read_crc_err_count(struct lg216x_state *state, u16 *err)
+{
+ u8 crc_err;
+ int ret;
+
+ *err = 0;
+
+ ret = lg216x_read_reg(state, 0x0612, &crc_err);
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = (u16)crc_err;
+fail:
+ return ret;
+}
+
+static int lg216x_read_crc_err_count(struct lg216x_state *state, u16 *err)
+{
+ int ret;
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg2160_read_crc_err_count(state, err);
+ break;
+ case LG2161:
+ ret = lg2161_read_crc_err_count(state, err);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int lg2160_read_rs_err_count(struct lg216x_state *state, u16 *err)
+{
+ u8 rs_err1, rs_err2;
+ int ret;
+
+ *err = 0;
+
+ ret = lg216x_read_reg(state, 0x0413, &rs_err1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0414, &rs_err2);
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = (u16)(((rs_err2 & 0x0f) << 8) | rs_err1);
+fail:
+ return ret;
+}
+
+static int lg2161_read_rs_err_count(struct lg216x_state *state, u16 *err)
+{
+ u8 rs_err1, rs_err2;
+ int ret;
+
+ *err = 0;
+
+ ret = lg216x_read_reg(state, 0x0613, &rs_err1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0614, &rs_err2);
+ if (lg_fail(ret))
+ goto fail;
+
+ *err = (u16)((rs_err1 << 8) | rs_err2);
+fail:
+ return ret;
+}
+
+static int lg216x_read_rs_err_count(struct lg216x_state *state, u16 *err)
+{
+ int ret;
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg2160_read_rs_err_count(state, err);
+ break;
+ case LG2161:
+ ret = lg2161_read_rs_err_count(state, err);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+#endif
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_get_frontend(struct dvb_frontend *fe)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret;
+
+ lg_dbg("\n");
+
+ fe->dtv_property_cache.modulation = VSB_8;
+ fe->dtv_property_cache.frequency = state->current_frequency;
+ fe->dtv_property_cache.delivery_system = SYS_ATSCMH;
+
+ ret = lg216x_get_fic_version(state,
+ &fe->dtv_property_cache.atscmh_fic_ver);
+ if (lg_fail(ret))
+ goto fail;
+ if (state->fic_ver != fe->dtv_property_cache.atscmh_fic_ver) {
+ state->fic_ver = fe->dtv_property_cache.atscmh_fic_ver;
+
+#if 0
+ ret = lg2160_get_parade_id(state,
+ &fe->dtv_property_cache.atscmh_parade_id);
+ if (lg_fail(ret))
+ goto fail;
+/* #else */
+ fe->dtv_property_cache.atscmh_parade_id = state->parade_id;
+#endif
+ ret = lg216x_get_nog(state,
+ &fe->dtv_property_cache.atscmh_nog);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_tnog(state,
+ &fe->dtv_property_cache.atscmh_tnog);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_sgn(state,
+ &fe->dtv_property_cache.atscmh_sgn);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_prc(state,
+ &fe->dtv_property_cache.atscmh_prc);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_get_rs_frame_mode(state,
+ (enum atscmh_rs_frame_mode *)
+ &fe->dtv_property_cache.atscmh_rs_frame_mode);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_rs_frame_ensemble(state,
+ (enum atscmh_rs_frame_ensemble *)
+ &fe->dtv_property_cache.atscmh_rs_frame_ensemble);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_rs_code_mode(state,
+ (enum atscmh_rs_code_mode *)
+ &fe->dtv_property_cache.atscmh_rs_code_mode_pri,
+ (enum atscmh_rs_code_mode *)
+ &fe->dtv_property_cache.atscmh_rs_code_mode_sec);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_sccc_block_mode(state,
+ (enum atscmh_sccc_block_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_block_mode);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_get_sccc_code_mode(state,
+ (enum atscmh_sccc_code_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_code_mode_a,
+ (enum atscmh_sccc_code_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_code_mode_b,
+ (enum atscmh_sccc_code_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_code_mode_c,
+ (enum atscmh_sccc_code_mode *)
+ &fe->dtv_property_cache.atscmh_sccc_code_mode_d);
+ if (lg_fail(ret))
+ goto fail;
+ }
+#if 0
+ ret = lg216x_read_fic_err_count(state,
+ (u8 *)&fe->dtv_property_cache.atscmh_fic_err);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_read_crc_err_count(state,
+ &fe->dtv_property_cache.atscmh_crc_err);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_read_rs_err_count(state,
+ &fe->dtv_property_cache.atscmh_rs_err);
+ if (lg_fail(ret))
+ goto fail;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ if (((fe->dtv_property_cache.atscmh_rs_err >= 240) &&
+ (fe->dtv_property_cache.atscmh_crc_err >= 240)) &&
+ ((jiffies_to_msecs(jiffies) - state->last_reset) > 6000))
+ ret = lg216x_soft_reset(state);
+ break;
+ case LG2161:
+ /* no fix needed here (as far as we know) */
+ ret = 0;
+ break;
+ }
+ lg_fail(ret);
+#endif
+fail:
+ return ret;
+}
+
+static int lg216x_get_property(struct dvb_frontend *fe,
+ struct dtv_property *tvp)
+{
+ return (DTV_ATSCMH_FIC_VER == tvp->cmd) ?
+ lg216x_get_frontend(fe) : 0;
+}
+
+
+static int lg2160_set_frontend(struct dvb_frontend *fe)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret;
+
+ lg_dbg("(%d)\n", fe->dtv_property_cache.frequency);
+
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ if (lg_fail(ret))
+ goto fail;
+ state->current_frequency = fe->dtv_property_cache.frequency;
+ }
+
+ ret = lg2160_agc_fix(state, 0, 0);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg2160_agc_polarity(state, 0, 0);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg2160_tuner_pwr_save_polarity(state, 1);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg216x_set_if(state);
+ if (lg_fail(ret))
+ goto fail;
+ ret = lg2160_spectrum_polarity(state, state->cfg->spectral_inversion);
+ if (lg_fail(ret))
+ goto fail;
+
+ /* be tuned before this point */
+ ret = lg216x_soft_reset(state);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg2160_tuner_pwr_save(state, 0);
+ if (lg_fail(ret))
+ goto fail;
+
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg2160_set_spi_clock(state);
+ if (lg_fail(ret))
+ goto fail;
+ break;
+ case LG2161:
+ ret = lg2161_set_output_interface(state);
+ if (lg_fail(ret))
+ goto fail;
+ break;
+ }
+
+ ret = lg216x_set_parade(state, fe->dtv_property_cache.atscmh_parade_id);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_set_ensemble(state,
+ fe->dtv_property_cache.atscmh_rs_frame_ensemble);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_initialize(state);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_enable_fic(state, 1);
+ lg_fail(ret);
+
+ lg216x_get_frontend(fe);
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg2160_read_lock_status(struct lg216x_state *state,
+ int *acq_lock, int *sync_lock)
+{
+ u8 val;
+ int ret;
+
+ *acq_lock = 0;
+ *sync_lock = 0;
+
+ ret = lg216x_read_reg(state, 0x011b, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *sync_lock = (val & 0x20) ? 0 : 1;
+ *acq_lock = (val & 0x40) ? 0 : 1;
+fail:
+ return ret;
+}
+
+#ifdef USE_LG2161_LOCK_BITS
+static int lg2161_read_lock_status(struct lg216x_state *state,
+ int *acq_lock, int *sync_lock)
+{
+ u8 val;
+ int ret;
+
+ *acq_lock = 0;
+ *sync_lock = 0;
+
+ ret = lg216x_read_reg(state, 0x0304, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *sync_lock = (val & 0x80) ? 0 : 1;
+
+ ret = lg216x_read_reg(state, 0x011b, &val);
+ if (lg_fail(ret))
+ goto fail;
+
+ *acq_lock = (val & 0x40) ? 0 : 1;
+fail:
+ return ret;
+}
+#endif
+
+static int lg216x_read_lock_status(struct lg216x_state *state,
+ int *acq_lock, int *sync_lock)
+{
+#ifdef USE_LG2161_LOCK_BITS
+ int ret;
+ switch (state->cfg->lg_chip) {
+ case LG2160:
+ ret = lg2160_read_lock_status(state, acq_lock, sync_lock);
+ break;
+ case LG2161:
+ ret = lg2161_read_lock_status(state, acq_lock, sync_lock);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+#else
+ return lg2160_read_lock_status(state, acq_lock, sync_lock);
+#endif
+}
+
+static int lg216x_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret, acq_lock, sync_lock;
+
+ *status = 0;
+
+ ret = lg216x_read_lock_status(state, &acq_lock, &sync_lock);
+ if (lg_fail(ret))
+ goto fail;
+
+ lg_dbg("%s%s\n",
+ acq_lock ? "SIGNALEXIST " : "",
+ sync_lock ? "SYNCLOCK" : "");
+
+ if (acq_lock)
+ *status |= FE_HAS_SIGNAL;
+ if (sync_lock)
+ *status |= FE_HAS_SYNC;
+
+ if (*status)
+ *status |= FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_LOCK;
+
+fail:
+ return ret;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg2160_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ u8 snr1, snr2;
+ int ret;
+
+ *snr = 0;
+
+ ret = lg216x_read_reg(state, 0x0202, &snr1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0203, &snr2);
+ if (lg_fail(ret))
+ goto fail;
+
+ if ((snr1 == 0xba) || (snr2 == 0xdf))
+ *snr = 0;
+ else
+#if 1
+ *snr = ((snr1 >> 4) * 100) + ((snr1 & 0x0f) * 10) + (snr2 >> 4);
+#else /* BCD */
+ *snr = (snr2 | (snr1 << 8));
+#endif
+fail:
+ return ret;
+}
+
+static int lg2161_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ u8 snr1, snr2;
+ int ret;
+
+ *snr = 0;
+
+ ret = lg216x_read_reg(state, 0x0302, &snr1);
+ if (lg_fail(ret))
+ goto fail;
+
+ ret = lg216x_read_reg(state, 0x0303, &snr2);
+ if (lg_fail(ret))
+ goto fail;
+
+ if ((snr1 == 0xba) || (snr2 == 0xfd))
+ *snr = 0;
+ else
+
+ *snr = ((snr1 >> 4) * 100) + ((snr1 & 0x0f) * 10) + (snr2 & 0x0f);
+fail:
+ return ret;
+}
+
+static int lg216x_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+#if 0
+ /* borrowed from lgdt330x.c
+ *
+ * Calculate strength from SNR up to 35dB
+ * Even though the SNR can go higher than 35dB,
+ * there is some comfort factor in having a range of
+ * strong signals that can show at 100%
+ */
+ struct lg216x_state *state = fe->demodulator_priv;
+ u16 snr;
+ int ret;
+#endif
+ *strength = 0;
+#if 0
+ ret = fe->ops.read_snr(fe, &snr);
+ if (lg_fail(ret))
+ goto fail;
+ /* Rather than use the 8.8 value snr, use state->snr which is 8.24 */
+ /* scale the range 0 - 35*2^24 into 0 - 65535 */
+ if (state->snr >= 8960 * 0x10000)
+ *strength = 0xffff;
+ else
+ *strength = state->snr / 8960;
+fail:
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int lg216x_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+#if 0
+ struct lg216x_state *state = fe->demodulator_priv;
+ int ret;
+
+ ret = lg216x_read_rs_err_count(state,
+ &fe->dtv_property_cache.atscmh_rs_err);
+ if (lg_fail(ret))
+ goto fail;
+
+ *ucblocks = fe->dtv_property_cache.atscmh_rs_err;
+fail:
+#else
+ *ucblocks = 0;
+#endif
+ return 0;
+}
+
+static int lg216x_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings
+ *fe_tune_settings)
+{
+ fe_tune_settings->min_delay_ms = 500;
+ lg_dbg("\n");
+ return 0;
+}
+
+static void lg216x_release(struct dvb_frontend *fe)
+{
+ struct lg216x_state *state = fe->demodulator_priv;
+ lg_dbg("\n");
+ kfree(state);
+}
+
+static struct dvb_frontend_ops lg2160_ops = {
+ .delsys = { SYS_ATSCMH },
+ .info = {
+ .name = "LG Electronics LG2160 ATSC/MH Frontend",
+ .frequency_min = 54000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 62500,
+ },
+ .i2c_gate_ctrl = lg216x_i2c_gate_ctrl,
+#if 0
+ .init = lg216x_init,
+ .sleep = lg216x_sleep,
+#endif
+ .get_property = lg216x_get_property,
+
+ .set_frontend = lg2160_set_frontend,
+ .get_frontend = lg216x_get_frontend,
+ .get_tune_settings = lg216x_get_tune_settings,
+ .read_status = lg216x_read_status,
+#if 0
+ .read_ber = lg216x_read_ber,
+#endif
+ .read_signal_strength = lg216x_read_signal_strength,
+ .read_snr = lg2160_read_snr,
+ .read_ucblocks = lg216x_read_ucblocks,
+ .release = lg216x_release,
+};
+
+static struct dvb_frontend_ops lg2161_ops = {
+ .delsys = { SYS_ATSCMH },
+ .info = {
+ .name = "LG Electronics LG2161 ATSC/MH Frontend",
+ .frequency_min = 54000000,
+ .frequency_max = 858000000,
+ .frequency_stepsize = 62500,
+ },
+ .i2c_gate_ctrl = lg216x_i2c_gate_ctrl,
+#if 0
+ .init = lg216x_init,
+ .sleep = lg216x_sleep,
+#endif
+ .get_property = lg216x_get_property,
+
+ .set_frontend = lg2160_set_frontend,
+ .get_frontend = lg216x_get_frontend,
+ .get_tune_settings = lg216x_get_tune_settings,
+ .read_status = lg216x_read_status,
+#if 0
+ .read_ber = lg216x_read_ber,
+#endif
+ .read_signal_strength = lg216x_read_signal_strength,
+ .read_snr = lg2161_read_snr,
+ .read_ucblocks = lg216x_read_ucblocks,
+ .release = lg216x_release,
+};
+
+struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ struct lg216x_state *state = NULL;
+
+ lg_dbg("(%d-%04x)\n",
+ i2c_adap ? i2c_adapter_id(i2c_adap) : 0,
+ config ? config->i2c_addr : 0);
+
+ state = kzalloc(sizeof(struct lg216x_state), GFP_KERNEL);
+ if (state == NULL)
+ goto fail;
+
+ state->cfg = config;
+ state->i2c_adap = i2c_adap;
+ state->fic_ver = 0xff;
+ state->parade_id = 0xff;
+
+ switch (config->lg_chip) {
+ default:
+ lg_warn("invalid chip requested, defaulting to LG2160");
+ /* fall-thru */
+ case LG2160:
+ memcpy(&state->frontend.ops, &lg2160_ops,
+ sizeof(struct dvb_frontend_ops));
+ break;
+ case LG2161:
+ memcpy(&state->frontend.ops, &lg2161_ops,
+ sizeof(struct dvb_frontend_ops));
+ break;
+ }
+
+ state->frontend.demodulator_priv = state;
+ state->current_frequency = -1;
+ /* parade 1 by default */
+ state->frontend.dtv_property_cache.atscmh_parade_id = 1;
+
+ return &state->frontend;
+fail:
+ lg_warn("unable to detect LG216x hardware\n");
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(lg2160_attach);
+
+MODULE_DESCRIPTION("LG Electronics LG216x ATSC/MH Demodulator Driver");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.3");
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
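lg2160_read_snr() above packs the SNR as decimal digits spread over two registers, with 0xba/0xdf acting as "no measurement" sentinels. A standalone restatement of that decode with made-up register values:

/* Decode the packed decimal digits from the two SNR registers; e.g.
 * snr1=0x27, snr2=0x5x yields 275.  Values here are invented for the demo. */
#include <stdio.h>
#include <stdint.h>

static uint16_t decode_snr(uint8_t snr1, uint8_t snr2)
{
        if (snr1 == 0xba || snr2 == 0xdf)       /* sentinel: no measurement */
                return 0;
        return ((snr1 >> 4) * 100) + ((snr1 & 0x0f) * 10) + (snr2 >> 4);
}

int main(void)
{
        printf("snr = %u\n", (unsigned)decode_snr(0x27, 0x50));  /* 275 */
        return 0;
}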
diff --git a/drivers/media/dvb/frontends/lg2160.h b/drivers/media/dvb/frontends/lg2160.h
new file mode 100644
index 000000000000..9e2c0f41199a
--- /dev/null
+++ b/drivers/media/dvb/frontends/lg2160.h
@@ -0,0 +1,84 @@
+/*
+ * Support for LG2160 - ATSC/MH
+ *
+ * Copyright (C) 2010 Michael Krufky <mkrufky@linuxtv.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _LG2160_H_
+#define _LG2160_H_
+
+#include <linux/i2c.h>
+#include "dvb_frontend.h"
+
+enum lg_chip_type {
+ LG2160 = 0,
+ LG2161 = 1,
+};
+
+#define LG2161_1019 LG2161
+#define LG2161_1040 LG2161
+
+enum lg2160_spi_clock {
+ LG2160_SPI_3_125_MHZ = 0,
+ LG2160_SPI_6_25_MHZ = 1,
+ LG2160_SPI_12_5_MHZ = 2,
+};
+
+#if 0
+enum lg2161_oif {
+ LG2161_OIF_EBI2_SLA = 1,
+ LG2161_OIF_SDIO_SLA = 2,
+ LG2161_OIF_SPI_SLA = 3,
+ LG2161_OIF_SPI_MAS = 4,
+ LG2161_OIF_SERIAL_TS = 7,
+};
+#endif
+
+struct lg2160_config {
+ u8 i2c_addr;
+
+ /* user defined IF frequency in KHz */
+ u16 if_khz;
+
+ /* disable i2c repeater - 0:repeater enabled 1:repeater disabled */
+ int deny_i2c_rptr:1;
+
+ /* spectral inversion - 0:disabled 1:enabled */
+ int spectral_inversion:1;
+
+ unsigned int output_if;
+ enum lg2160_spi_clock spi_clock;
+ enum lg_chip_type lg_chip;
+};
+
+#if defined(CONFIG_DVB_LG2160) || (defined(CONFIG_DVB_LG2160_MODULE) && \
+ defined(MODULE))
+extern
+struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
+ struct i2c_adapter *i2c_adap);
+#else
+static inline
+struct dvb_frontend *lg2160_attach(const struct lg2160_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif /* CONFIG_DVB_LG2160 */
+
+#endif /* _LG2160_H_ */
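The attach-plus-Kconfig-stub arrangement above is the standard DVB pattern: a bridge driver can call lg2160_attach() unconditionally, and when the demod is compiled out the inline stub just warns and returns NULL. A minimal bridge-side sketch, with a made-up I2C address and IF value rather than real board data:

    #include "lg2160.h"

    /* Sketch only: the config values are placeholders, not board data. */
    static struct dvb_frontend *bridge_attach_lg2160(struct i2c_adapter *i2c)
    {
            static const struct lg2160_config cfg = {
                    .lg_chip   = LG2160,
                    .i2c_addr  = 0x1c,              /* placeholder address */
                    .spi_clock = LG2160_SPI_3_125_MHZ,
                    .if_khz    = 4000,              /* placeholder IF */
            };

            /* dvb_attach() wraps the call and grabs a module reference. */
            return dvb_attach(lg2160_attach, &cfg, i2c);
    }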
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index 4de1d3520cd2..568363a10a31 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -262,7 +262,6 @@ static int lgs8gxx_set_mode_auto(struct lgs8gxx_state *priv)
static int lgs8gxx_set_mode_manual(struct lgs8gxx_state *priv)
{
- int ret = 0;
u8 t;
if (priv->config->prod == LGS8GXX_PROD_LGS8G75) {
@@ -296,7 +295,7 @@ static int lgs8gxx_set_mode_manual(struct lgs8gxx_state *priv)
if (priv->config->prod == LGS8GXX_PROD_LGS8913)
lgs8gxx_write_reg(priv, 0xC1, 0);
- ret = lgs8gxx_read_reg(priv, 0xC5, &t);
+ lgs8gxx_read_reg(priv, 0xC5, &t);
t = (t & 0xE0) | 0x06;
lgs8gxx_write_reg(priv, 0xC5, t);
diff --git a/drivers/media/dvb/frontends/m88rs2000.c b/drivers/media/dvb/frontends/m88rs2000.c
index 045ee5a6f7ae..312588e84dae 100644
--- a/drivers/media/dvb/frontends/m88rs2000.c
+++ b/drivers/media/dvb/frontends/m88rs2000.c
@@ -416,9 +416,25 @@ static int m88rs2000_tab_set(struct m88rs2000_state *state,
static int m88rs2000_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t volt)
{
- deb_info("%s: %s\n", __func__,
- volt == SEC_VOLTAGE_13 ? "SEC_VOLTAGE_13" :
- volt == SEC_VOLTAGE_18 ? "SEC_VOLTAGE_18" : "??");
+ struct m88rs2000_state *state = fe->demodulator_priv;
+ u8 data;
+
+ data = m88rs2000_demod_read(state, 0xb2);
+ data |= 0x03; /* bit0 V/H, bit1 off/on */
+
+ switch (volt) {
+ case SEC_VOLTAGE_18:
+ data &= ~0x03;
+ break;
+ case SEC_VOLTAGE_13:
+ data &= ~0x03;
+ data |= 0x01;
+ break;
+ case SEC_VOLTAGE_OFF:
+ break;
+ }
+
+ m88rs2000_demod_write(state, 0xb2, data);
return 0;
}
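As the new handler reads, the low two bits of demod register 0xb2 carry the LNB supply state; both bits are forced high first and then cleared as required. The mapping below follows the inline comment only and is not confirmed by a datasheet:

    /*
     * Register 0xb2, low two bits (per the "bit0 V/H, bit1 off/on" comment,
     * unverified):
     *
     *   request            bit1  bit0
     *   SEC_VOLTAGE_18      0     0
     *   SEC_VOLTAGE_13      0     1
     *   SEC_VOLTAGE_OFF     1     1    (left at the forced 0x03)
     */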
@@ -654,7 +670,6 @@ static int m88rs2000_set_tuner(struct dvb_frontend *fe, u16 *offset)
static int m88rs2000_set_fec(struct m88rs2000_state *state,
fe_code_rate_t fec)
{
- int ret;
u16 fec_set;
switch (fec) {
/* This is not confirmed kept for reference */
@@ -677,7 +692,7 @@ static int m88rs2000_set_fec(struct m88rs2000_state *state,
default:
fec_set = 0x08;
}
- ret = m88rs2000_demod_write(state, 0x76, fec_set);
+ m88rs2000_demod_write(state, 0x76, fec_set);
return 0;
}
@@ -772,13 +787,13 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
return -ENODEV;
for (i = 0; i < 25; i++) {
- u8 reg = m88rs2000_demod_read(state, 0x8c);
+ reg = m88rs2000_demod_read(state, 0x8c);
if ((reg & 0x7) == 0x7) {
status = FE_HAS_LOCK;
break;
}
state->no_lock_count++;
- if (state->no_lock_count > 15) {
+ if (state->no_lock_count == 15) {
reg = m88rs2000_demod_read(state, 0x70);
reg ^= 0x4;
m88rs2000_demod_write(state, 0x70, reg);
diff --git a/drivers/media/dvb/frontends/rtl2830.c b/drivers/media/dvb/frontends/rtl2830.c
index 45196c5b0736..93612ebac519 100644
--- a/drivers/media/dvb/frontends/rtl2830.c
+++ b/drivers/media/dvb/frontends/rtl2830.c
@@ -374,6 +374,118 @@ err:
return ret;
}
+static int rtl2830_get_frontend(struct dvb_frontend *fe)
+{
+ struct rtl2830_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ u8 buf[3];
+
+ if (priv->sleeping)
+ return 0;
+
+ ret = rtl2830_rd_regs(priv, 0x33c, buf, 2);
+ if (ret)
+ goto err;
+
+ ret = rtl2830_rd_reg(priv, 0x351, &buf[2]);
+ if (ret)
+ goto err;
+
+ dbg("%s: TPS=%02x %02x %02x", __func__, buf[0], buf[1], buf[2]);
+
+ switch ((buf[0] >> 2) & 3) {
+ case 0:
+ c->modulation = QPSK;
+ break;
+ case 1:
+ c->modulation = QAM_16;
+ break;
+ case 2:
+ c->modulation = QAM_64;
+ break;
+ }
+
+ switch ((buf[2] >> 2) & 1) {
+ case 0:
+ c->transmission_mode = TRANSMISSION_MODE_2K;
+ break;
+ case 1:
+ c->transmission_mode = TRANSMISSION_MODE_8K;
+ }
+
+ switch ((buf[2] >> 0) & 3) {
+ case 0:
+ c->guard_interval = GUARD_INTERVAL_1_32;
+ break;
+ case 1:
+ c->guard_interval = GUARD_INTERVAL_1_16;
+ break;
+ case 2:
+ c->guard_interval = GUARD_INTERVAL_1_8;
+ break;
+ case 3:
+ c->guard_interval = GUARD_INTERVAL_1_4;
+ break;
+ }
+
+ switch ((buf[0] >> 4) & 7) {
+ case 0:
+ c->hierarchy = HIERARCHY_NONE;
+ break;
+ case 1:
+ c->hierarchy = HIERARCHY_1;
+ break;
+ case 2:
+ c->hierarchy = HIERARCHY_2;
+ break;
+ case 3:
+ c->hierarchy = HIERARCHY_4;
+ break;
+ }
+
+ switch ((buf[1] >> 3) & 7) {
+ case 0:
+ c->code_rate_HP = FEC_1_2;
+ break;
+ case 1:
+ c->code_rate_HP = FEC_2_3;
+ break;
+ case 2:
+ c->code_rate_HP = FEC_3_4;
+ break;
+ case 3:
+ c->code_rate_HP = FEC_5_6;
+ break;
+ case 4:
+ c->code_rate_HP = FEC_7_8;
+ break;
+ }
+
+ switch ((buf[1] >> 0) & 7) {
+ case 0:
+ c->code_rate_LP = FEC_1_2;
+ break;
+ case 1:
+ c->code_rate_LP = FEC_2_3;
+ break;
+ case 2:
+ c->code_rate_LP = FEC_3_4;
+ break;
+ case 3:
+ c->code_rate_LP = FEC_5_6;
+ break;
+ case 4:
+ c->code_rate_LP = FEC_7_8;
+ break;
+ }
+
+ return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
static int rtl2830_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct rtl2830_priv *priv = fe->demodulator_priv;
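The new get_frontend pulls two bytes starting at 0x33c plus one byte from 0x351 and unpacks the received TPS fields into the property cache; reserved patterns (modulation code 3, FEC codes 5 and up) simply leave the cached value untouched because the switches have no default arm. An invented decode, just to exercise each field:

    /*
     * Example (values invented for illustration):
     *   buf[] = { 0x09, 0x1a, 0x06 }
     *   buf[0] bits [3:2] = 2  ->  modulation        = QAM_64
     *   buf[0] bits [6:4] = 0  ->  hierarchy         = HIERARCHY_NONE
     *   buf[1] bits [5:3] = 3  ->  code_rate_HP      = FEC_5_6
     *   buf[1] bits [2:0] = 2  ->  code_rate_LP      = FEC_3_4
     *   buf[2] bit    2   = 1  ->  transmission_mode = TRANSMISSION_MODE_8K
     *   buf[2] bits [1:0] = 2  ->  guard_interval    = GUARD_INTERVAL_1_8
     */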
@@ -404,14 +516,72 @@ err:
static int rtl2830_read_snr(struct dvb_frontend *fe, u16 *snr)
{
- *snr = 0;
+ struct rtl2830_priv *priv = fe->demodulator_priv;
+ int ret, hierarchy, constellation;
+ u8 buf[2], tmp;
+ u16 tmp16;
+#define CONSTELLATION_NUM 3
+#define HIERARCHY_NUM 4
+ static const u32 snr_constant[CONSTELLATION_NUM][HIERARCHY_NUM] = {
+ { 70705899, 70705899, 70705899, 70705899 },
+ { 82433173, 82433173, 87483115, 94445660 },
+ { 92888734, 92888734, 95487525, 99770748 },
+ };
+
+ if (priv->sleeping)
+ return 0;
+
+ /* reports SNR in resolution of 0.1 dB */
+
+ ret = rtl2830_rd_reg(priv, 0x33c, &tmp);
+ if (ret)
+ goto err;
+
+ constellation = (tmp >> 2) & 0x03; /* [3:2] */
+ if (constellation > CONSTELLATION_NUM - 1)
+ goto err;
+
+ hierarchy = (tmp >> 4) & 0x07; /* [6:4] */
+ if (hierarchy > HIERARCHY_NUM - 1)
+ goto err;
+
+ ret = rtl2830_rd_regs(priv, 0x40c, buf, 2);
+ if (ret)
+ goto err;
+
+ tmp16 = buf[0] << 8 | buf[1];
+
+ if (tmp16)
+ *snr = (snr_constant[constellation][hierarchy] -
+ intlog10(tmp16)) / ((1 << 24) / 100);
+ else
+ *snr = 0;
+
return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
}
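intlog10() from dvb_math.h (hence the new include added to rtl2830_priv.h further down) returns log10(x) scaled by 2^24, so the expression above is a decibel difference against a per-modulation constant; presumably tmp16 is a noise or error measure and each table entry is intlog10() of the matching full-scale reference. A worked example under that reading:

    /*
     * With intlog10(x) = log10(x) * 2^24:
     *
     *   *snr = (K - intlog10(tmp16)) / (2^24 / 100)
     *        = 100 * (log10(k) - log10(tmp16))   [0.1 dB units], K = intlog10(k)
     *
     * QPSK: k = 10^(70705899 / 2^24) ~= 16384, i.e. about 2^14.
     * tmp16 = 164:  100 * (4.214 - 2.215) ~= 200  ->  20.0 dB.
     */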
static int rtl2830_read_ber(struct dvb_frontend *fe, u32 *ber)
{
- *ber = 0;
+ struct rtl2830_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 buf[2];
+
+ if (priv->sleeping)
+ return 0;
+
+ ret = rtl2830_rd_regs(priv, 0x34e, buf, 2);
+ if (ret)
+ goto err;
+
+ *ber = buf[0] << 8 | buf[1];
+
return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
}
static int rtl2830_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
@@ -422,8 +592,32 @@ static int rtl2830_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
- *strength = 0;
+ struct rtl2830_priv *priv = fe->demodulator_priv;
+ int ret;
+ u8 buf[2];
+ u16 if_agc_raw, if_agc;
+
+ if (priv->sleeping)
+ return 0;
+
+ ret = rtl2830_rd_regs(priv, 0x359, buf, 2);
+ if (ret)
+ goto err;
+
+ if_agc_raw = (buf[0] << 8 | buf[1]) & 0x3fff;
+
+ if (if_agc_raw & (1 << 9))
+ if_agc = -(~(if_agc_raw - 1) & 0x1ff);
+ else
+ if_agc = if_agc_raw;
+
+ *strength = (u8) (55 - if_agc / 182);
+ *strength |= *strength << 8;
+
return 0;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
}
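Registers 0x359/0x35a supply a 14-bit IF AGC reading; values with bit 9 set are folded to a negative quantity by the open-coded sign handling, and the result is squeezed into one byte via 55 - if_agc/182 and mirrored into both halves of the 16-bit strength value. The 55 and 182 look like vendor-derived magic numbers; a simple positive-branch example:

    /*
     * Example: buf = { 0x01, 0x23 }
     *   if_agc_raw = 0x0123 & 0x3fff = 291, bit 9 clear  ->  if_agc = 291
     *   55 - 291 / 182 = 55 - 1 = 54 (0x36)
     *   *strength = 0x3636
     */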
static struct dvb_frontend_ops rtl2830_ops;
@@ -549,6 +743,7 @@ static struct dvb_frontend_ops rtl2830_ops = {
.get_tune_settings = rtl2830_get_tune_settings,
.set_frontend = rtl2830_set_frontend,
+ .get_frontend = rtl2830_get_frontend,
.read_status = rtl2830_read_status,
.read_snr = rtl2830_read_snr,
diff --git a/drivers/media/dvb/frontends/rtl2830_priv.h b/drivers/media/dvb/frontends/rtl2830_priv.h
index 4a464761b5b8..9b20557ccf6c 100644
--- a/drivers/media/dvb/frontends/rtl2830_priv.h
+++ b/drivers/media/dvb/frontends/rtl2830_priv.h
@@ -22,6 +22,7 @@
#define RTL2830_PRIV_H
#include "dvb_frontend.h"
+#include "dvb_math.h"
#include "rtl2830.h"
#define LOG_PREFIX "rtl2830"
diff --git a/drivers/media/dvb/frontends/stb0899_drv.c b/drivers/media/dvb/frontends/stb0899_drv.c
index dd08f4ac64a8..8b0dc74a3298 100644
--- a/drivers/media/dvb/frontends/stb0899_drv.c
+++ b/drivers/media/dvb/frontends/stb0899_drv.c
@@ -637,11 +637,9 @@ static void stb0899_init_calc(struct stb0899_state *state)
struct stb0899_internal *internal = &state->internal;
int master_clk;
u8 agc[2];
- u8 agc1cn;
u32 reg;
/* Read registers (in burst mode) */
- agc1cn = stb0899_read_reg(state, STB0899_AGC1CN);
stb0899_read_regs(state, STB0899_AGC1REF, agc, 2); /* AGC1R and AGC2O */
/* Initial calculations */
@@ -823,15 +821,12 @@ static int stb0899_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t
static int stb0899_diseqc_init(struct stb0899_state *state)
{
- struct dvb_diseqc_master_cmd tx_data;
/*
struct dvb_diseqc_slave_reply rx_data;
*/
- u8 f22_tx, f22_rx, reg;
+ u8 f22_tx, reg;
u32 mclk, tx_freq = 22000;/* count = 0, i; */
- tx_data.msg[0] = 0xe2;
- tx_data.msg_len = 3;
reg = stb0899_read_reg(state, STB0899_DISCNTRL2);
STB0899_SETFIELD_VAL(ONECHIP_TRX, reg, 0);
stb0899_write_reg(state, STB0899_DISCNTRL2, reg);
@@ -849,7 +844,6 @@ static int stb0899_diseqc_init(struct stb0899_state *state)
f22_tx = mclk / (tx_freq * 32);
stb0899_write_reg(state, STB0899_DISF22, f22_tx); /* DiSEqC Tx freq */
state->rx_freq = 20000;
- f22_rx = mclk / (state->rx_freq * 32);
return 0;
}
diff --git a/drivers/media/dvb/frontends/stb6100.c b/drivers/media/dvb/frontends/stb6100.c
index def88abb30bf..2e93e65d2cdb 100644
--- a/drivers/media/dvb/frontends/stb6100.c
+++ b/drivers/media/dvb/frontends/stb6100.c
@@ -158,7 +158,6 @@ static int stb6100_read_regs(struct stb6100_state *state, u8 regs[])
static int stb6100_read_reg(struct stb6100_state *state, u8 reg)
{
u8 regs[STB6100_NUMREGS];
- int rc;
struct i2c_msg msg = {
.addr = state->config->tuner_address + reg,
@@ -167,7 +166,7 @@ static int stb6100_read_reg(struct stb6100_state *state, u8 reg)
.len = 1
};
- rc = i2c_transfer(state->i2c, &msg, 1);
+ i2c_transfer(state->i2c, &msg, 1);
if (unlikely(reg >= STB6100_NUMREGS)) {
dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg);
diff --git a/drivers/media/dvb/frontends/stv0297.c b/drivers/media/dvb/frontends/stv0297.c
index 85c157a1fe5e..d40f226160ef 100644
--- a/drivers/media/dvb/frontends/stv0297.c
+++ b/drivers/media/dvb/frontends/stv0297.c
@@ -414,7 +414,6 @@ static int stv0297_set_frontend(struct dvb_frontend *fe)
int delay;
int sweeprate;
int carrieroffset;
- unsigned long starttime;
unsigned long timeout;
fe_spectral_inversion_t inversion;
@@ -543,7 +542,6 @@ static int stv0297_set_frontend(struct dvb_frontend *fe)
stv0297_writereg_mask(state, 0x43, 0x10, 0x10);
/* wait for WGAGC lock */
- starttime = jiffies;
timeout = jiffies + msecs_to_jiffies(2000);
while (time_before(jiffies, timeout)) {
msleep(10);
diff --git a/drivers/media/dvb/frontends/stv0900_sw.c b/drivers/media/dvb/frontends/stv0900_sw.c
index ba0709b2d433..4af20780fb9c 100644
--- a/drivers/media/dvb/frontends/stv0900_sw.c
+++ b/drivers/media/dvb/frontends/stv0900_sw.c
@@ -835,7 +835,6 @@ static void stv0900_track_optimization(struct dvb_frontend *fe)
blind_tun_sw = 0,
modulation;
- enum fe_stv0900_rolloff rolloff;
enum fe_stv0900_modcode foundModcod;
dprintk("%s\n", __func__);
@@ -940,7 +939,6 @@ static void stv0900_track_optimization(struct dvb_frontend *fe)
freq1 = stv0900_read_reg(intp, CFR2);
freq0 = stv0900_read_reg(intp, CFR1);
- rolloff = stv0900_get_bits(intp, ROLLOFF_STATUS);
if (intp->srch_algo[demod] == STV0900_BLIND_SEARCH) {
stv0900_write_reg(intp, SFRSTEP, 0x00);
stv0900_write_bits(intp, SCAN_ENABLE, 0);
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 4aef1877ed42..d79e69f65cbb 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -2842,7 +2842,6 @@ static int stv090x_optimize_track(struct stv090x_state *state)
{
struct dvb_frontend *fe = &state->frontend;
- enum stv090x_rolloff rolloff;
enum stv090x_modcod modcod;
s32 srate, pilots, aclc, f_1, f_0, i = 0, blind_tune = 0;
@@ -2966,7 +2965,6 @@ static int stv090x_optimize_track(struct stv090x_state *state)
f_1 = STV090x_READ_DEMOD(state, CFR2);
f_0 = STV090x_READ_DEMOD(state, CFR1);
reg = STV090x_READ_DEMOD(state, TMGOBS);
- rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);
if (state->algo == STV090x_BLIND_SEARCH) {
STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00);
diff --git a/drivers/media/dvb/frontends/zl10353.c b/drivers/media/dvb/frontends/zl10353.c
index ac7237891374..82946cd517f5 100644
--- a/drivers/media/dvb/frontends/zl10353.c
+++ b/drivers/media/dvb/frontends/zl10353.c
@@ -525,7 +525,7 @@ static int zl10353_read_snr(struct dvb_frontend *fe, u16 *snr)
zl10353_dump_regs(fe);
_snr = zl10353_read_register(state, SNR);
- *snr = (_snr << 8) | _snr;
+ *snr = 10 * _snr / 8;
return 0;
}
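The old code mirrored the raw SNR register byte into both halves of *snr; the replacement reports the 0.1 dB units the DVB API expects, which implies the register counts in 1/8 dB steps (an inference from the 10/8 factor, not from a datasheet). A register reading of 184, for instance, now yields 10 * 184 / 8 = 230, i.e. 23.0 dB.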
@@ -559,7 +559,6 @@ static int zl10353_init(struct dvb_frontend *fe)
{
struct zl10353_state *state = fe->demodulator_priv;
u8 zl10353_reset_attach[6] = { 0x50, 0x03, 0x64, 0x46, 0x15, 0x0F };
- int rc = 0;
if (debug_regs)
zl10353_dump_regs(fe);
@@ -573,7 +572,7 @@ static int zl10353_init(struct dvb_frontend *fe)
/* Do a "hard" reset if not already done */
if (zl10353_read_register(state, 0x50) != zl10353_reset_attach[1] ||
zl10353_read_register(state, 0x51) != zl10353_reset_attach[2]) {
- rc = zl10353_write(fe, zl10353_reset_attach,
+ zl10353_write(fe, zl10353_reset_attach,
sizeof(zl10353_reset_attach));
if (debug_regs)
zl10353_dump_regs(fe);
diff --git a/drivers/media/dvb/mantis/hopper_cards.c b/drivers/media/dvb/mantis/hopper_cards.c
index 71622f65c037..cc0251e01077 100644
--- a/drivers/media/dvb/mantis/hopper_cards.c
+++ b/drivers/media/dvb/mantis/hopper_cards.c
@@ -65,7 +65,7 @@ static int devs;
static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
{
- u32 stat = 0, mask = 0, lstat = 0;
+ u32 stat = 0, mask = 0;
u32 rst_stat = 0, rst_mask = 0;
struct mantis_pci *mantis;
@@ -80,7 +80,6 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
stat = mmread(MANTIS_INT_STAT);
mask = mmread(MANTIS_INT_MASK);
- lstat = stat & ~MANTIS_INT_RISCSTAT;
if (!(stat & mask))
return IRQ_NONE;
diff --git a/drivers/media/dvb/mantis/mantis_cards.c b/drivers/media/dvb/mantis/mantis_cards.c
index c2bb90b3e529..095cf3a994e2 100644
--- a/drivers/media/dvb/mantis/mantis_cards.c
+++ b/drivers/media/dvb/mantis/mantis_cards.c
@@ -73,7 +73,7 @@ static char *label[10] = {
static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
{
- u32 stat = 0, mask = 0, lstat = 0;
+ u32 stat = 0, mask = 0;
u32 rst_stat = 0, rst_mask = 0;
struct mantis_pci *mantis;
@@ -88,7 +88,6 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
stat = mmread(MANTIS_INT_STAT);
mask = mmread(MANTIS_INT_MASK);
- lstat = stat & ~MANTIS_INT_RISCSTAT;
if (!(stat & mask))
return IRQ_NONE;
diff --git a/drivers/media/dvb/mantis/mantis_dma.c b/drivers/media/dvb/mantis/mantis_dma.c
index c61ca7d3daea..566c407175a4 100644
--- a/drivers/media/dvb/mantis/mantis_dma.c
+++ b/drivers/media/dvb/mantis/mantis_dma.c
@@ -199,10 +199,6 @@ void mantis_dma_start(struct mantis_pci *mantis)
void mantis_dma_stop(struct mantis_pci *mantis)
{
- u32 stat = 0, mask = 0;
-
- stat = mmread(MANTIS_INT_STAT);
- mask = mmread(MANTIS_INT_MASK);
dprintk(MANTIS_DEBUG, 1, "Mantis Stop DMA engine");
mmwrite((mmread(MANTIS_GPIF_ADDR) & (~(MANTIS_GPIF_HIFRDWRN))), MANTIS_GPIF_ADDR);
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
index 36f2256ebb0e..71ce52875c38 100644
--- a/drivers/media/dvb/mantis/mantis_evm.c
+++ b/drivers/media/dvb/mantis/mantis_evm.c
@@ -41,10 +41,9 @@ static void mantis_hifevm_work(struct work_struct *work)
struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work);
struct mantis_pci *mantis = ca->ca_priv;
- u32 gpif_stat, gpif_mask;
+ u32 gpif_stat;
gpif_stat = mmread(MANTIS_GPIF_STATUS);
- gpif_mask = mmread(MANTIS_GPIF_IRQCFG);
if (gpif_stat & MANTIS_GPIF_DETSTAT) {
if (gpif_stat & MANTIS_CARD_PLUGIN) {
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index f129a9303f80..39857384af10 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -1409,10 +1409,8 @@ static int ngene_start(struct ngene *dev)
if (stat < 0)
goto fail;
- if (!stat)
- return stat;
+ return 0;
- /* otherwise error: fall through */
fail:
ngwritel(0, NGENE_INT_ENABLE);
free_irq(dev->pci_dev->irq, dev);
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index e1f20c236989..f148b19a206a 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -481,14 +481,6 @@ static int lg_tdtpe001p_tuner_set_params(struct dvb_frontend *fe)
if (p->bandwidth_hz == 8000000)
buf[3] |= 0x08;
- if (sizeof(buf) == 6) {
- buf[4] = buf[2];
- buf[4] &= ~0x1c;
- buf[4] |= 0x18;
-
- buf[5] = (0 << 7) | (2 << 4);
- }
-
msg.addr = I2C_ADDR_TUA6034 >> 1;
msg.flags = 0;
msg.buf = buf;
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
index 91f8c8291e2b..d6f3f100699a 100644
--- a/drivers/media/dvb/siano/smssdio.c
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -114,7 +114,7 @@ out:
static void smssdio_interrupt(struct sdio_func *func)
{
- int ret, isr;
+ int ret;
struct smssdio_device *smsdev;
struct smscore_buffer_t *cb;
@@ -127,7 +127,7 @@ static void smssdio_interrupt(struct sdio_func *func)
* The interrupt register has no defined meaning. It is just
* a way of turning off the level triggered interrupt.
*/
- isr = sdio_readb(func, SMSSDIO_INT, &ret);
+ (void)sdio_readb(func, SMSSDIO_INT, &ret);
if (ret) {
sms_err("Unable to read interrupt register!\n");
return;
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index b1fe5137df09..664e460f247b 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -542,6 +542,10 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xc090),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xc0a0),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xf5a0),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ } /* Terminating entry */
};
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index ee8ee1d481fa..1b2d15140a1d 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -107,7 +107,7 @@ static struct v4l2_input inputs[4] = {
.index = 1,
.name = "Television",
.type = V4L2_INPUT_TYPE_TUNER,
- .audioset = 2,
+ .audioset = 1,
.tuner = 0,
.std = V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
.status = 0,
@@ -494,7 +494,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
dprintk(2, "VIDIOC_S_INPUT: %d\n", input);
if (!av7110->analog_tuner_flags)
- return 0;
+ return input ? -EINVAL : 0;
if (input >= 4)
return -EINVAL;
@@ -503,19 +503,38 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
return av7110_dvb_c_switch(fh);
}
+static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
+{
+ dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
+ if (a->index != 0)
+ return -EINVAL;
+ *a = msp3400_v4l2_audio;
+ return 0;
+}
+
static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
if (a->index != 0)
return -EINVAL;
- memcpy(a, &msp3400_v4l2_audio, sizeof(struct v4l2_audio));
+ if (av7110->current_input >= 2)
+ return -EINVAL;
+ *a = msp3400_v4l2_audio;
return 0;
}
static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
+
dprintk(2, "VIDIOC_S_AUDIO: %d\n", a->index);
- return 0;
+ if (av7110->current_input >= 2)
+ return -EINVAL;
+ return a->index ? -EINVAL : 0;
}
static int vidioc_g_sliced_vbi_cap(struct file *file, void *fh,
@@ -802,26 +821,39 @@ int av7110_init_v4l(struct av7110 *av7110)
ERR("cannot init capture device. skipping\n");
return -ENODEV;
}
- vv_data->ops.vidioc_enum_input = vidioc_enum_input;
- vv_data->ops.vidioc_g_input = vidioc_g_input;
- vv_data->ops.vidioc_s_input = vidioc_s_input;
- vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
- vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
- vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
- vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
- vv_data->ops.vidioc_g_audio = vidioc_g_audio;
- vv_data->ops.vidioc_s_audio = vidioc_s_audio;
- vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
- vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
- vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
+ vv_data->vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data->vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data->vid_ops.vidioc_s_input = vidioc_s_input;
+ vv_data->vid_ops.vidioc_g_tuner = vidioc_g_tuner;
+ vv_data->vid_ops.vidioc_s_tuner = vidioc_s_tuner;
+ vv_data->vid_ops.vidioc_g_frequency = vidioc_g_frequency;
+ vv_data->vid_ops.vidioc_s_frequency = vidioc_s_frequency;
+ vv_data->vid_ops.vidioc_enumaudio = vidioc_enumaudio;
+ vv_data->vid_ops.vidioc_g_audio = vidioc_g_audio;
+ vv_data->vid_ops.vidioc_s_audio = vidioc_s_audio;
+ vv_data->vid_ops.vidioc_g_fmt_vbi_cap = NULL;
+
+ vv_data->vbi_ops.vidioc_g_tuner = vidioc_g_tuner;
+ vv_data->vbi_ops.vidioc_s_tuner = vidioc_s_tuner;
+ vv_data->vbi_ops.vidioc_g_frequency = vidioc_g_frequency;
+ vv_data->vbi_ops.vidioc_s_frequency = vidioc_s_frequency;
+ vv_data->vbi_ops.vidioc_g_fmt_vbi_cap = NULL;
+ vv_data->vbi_ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
+ vv_data->vbi_ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
+ vv_data->vbi_ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
+
+ if (FW_VERSION(av7110->arm_app) < 0x2623)
+ vv_data->capabilities &= ~V4L2_CAP_SLICED_VBI_OUTPUT;
if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
ERR("cannot register capture device. skipping\n");
saa7146_vv_release(dev);
return -ENODEV;
}
- if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI))
- ERR("cannot register vbi v4l2 device. skipping\n");
+ if (FW_VERSION(av7110->arm_app) >= 0x2623) {
+ if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI))
+ ERR("cannot register vbi v4l2 device. skipping\n");
+ }
return 0;
}
@@ -905,7 +937,7 @@ static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std)
static struct saa7146_ext_vv av7110_vv_data_st = {
.inputs = 1,
.audios = 1,
- .capabilities = V4L2_CAP_SLICED_VBI_OUTPUT,
+ .capabilities = V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO,
.flags = 0,
.stds = &standard[0],
@@ -920,7 +952,7 @@ static struct saa7146_ext_vv av7110_vv_data_st = {
static struct saa7146_ext_vv av7110_vv_data_c = {
.inputs = 1,
.audios = 1,
- .capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT,
+ .capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO,
.flags = SAA7146_USE_PORT_B_FOR_VBI,
.stds = &standard[0],
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 8b32e282bf5d..12ddb53c58dc 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -1483,9 +1483,9 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
ERR("cannot init vv subsystem\n");
return err;
}
- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
- vv_data.ops.vidioc_g_input = vidioc_g_input;
- vv_data.ops.vidioc_s_input = vidioc_s_input;
+ vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
/* fixme: proper cleanup here */
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 056138f63c7d..e1cd13283407 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -214,23 +214,76 @@ EXPORT_SYMBOL_GPL(media_entity_graph_walk_next);
* pipeline pointer must be identical for all nested calls to
* media_entity_pipeline_start().
*/
-void media_entity_pipeline_start(struct media_entity *entity,
- struct media_pipeline *pipe)
+__must_check int media_entity_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe)
{
struct media_device *mdev = entity->parent;
struct media_entity_graph graph;
+ struct media_entity *entity_err = entity;
+ int ret;
mutex_lock(&mdev->graph_mutex);
media_entity_graph_walk_start(&graph, entity);
while ((entity = media_entity_graph_walk_next(&graph))) {
+ unsigned int i;
+
entity->stream_count++;
WARN_ON(entity->pipe && entity->pipe != pipe);
entity->pipe = pipe;
+
+ /* Already streaming --- no need to check. */
+ if (entity->stream_count > 1)
+ continue;
+
+ if (!entity->ops || !entity->ops->link_validate)
+ continue;
+
+ for (i = 0; i < entity->num_links; i++) {
+ struct media_link *link = &entity->links[i];
+
+ /* Is this pad part of an enabled link? */
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ /* Are we the sink or not? */
+ if (link->sink->entity != entity)
+ continue;
+
+ ret = entity->ops->link_validate(link);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto error;
+ }
}
mutex_unlock(&mdev->graph_mutex);
+
+ return 0;
+
+error:
+ /*
+ * Link validation on graph failed. We revert what we did and
+ * return the error.
+ */
+ media_entity_graph_walk_start(&graph, entity_err);
+
+ while ((entity_err = media_entity_graph_walk_next(&graph))) {
+ entity_err->stream_count--;
+ if (entity_err->stream_count == 0)
+ entity_err->pipe = NULL;
+
+ /*
+ * We haven't increased stream_count further than this
+ * so we quit here.
+ */
+ if (entity_err == entity)
+ break;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(media_entity_pipeline_start);
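Because media_entity_pipeline_start() now returns an int and is tagged __must_check, every caller has to move off the old void-returning form and unwind its own setup when link validation fails. A minimal sketch of an updated call site; my_video_device and my_start_hw() are placeholders, only the two media_entity_pipeline_*() calls are real API:

    static int my_start_streaming(struct my_video_device *dev)
    {
            int ret;

            /* Validates every enabled link reachable from the entity. */
            ret = media_entity_pipeline_start(&dev->vdev.entity, &dev->pipe);
            if (ret < 0)
                    return ret;

            ret = my_start_hw(dev);        /* placeholder for the driver's own start */
            if (ret < 0)
                    media_entity_pipeline_stop(&dev->vdev.entity);
            return ret;
    }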
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 8db2d7f4b52a..c257da13d766 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -320,7 +320,7 @@ config RADIO_MIROPCM20
module will be called radio-miropcm20.
config RADIO_SF16FMI
- tristate "SF16-FMI/SF16-FMP Radio"
+ tristate "SF16-FMI/SF16-FMP/SF16-FMD Radio"
depends on ISA && VIDEO_V4L2
---help---
Choose Y here if you have one of these FM radio cards.
@@ -329,7 +329,7 @@ config RADIO_SF16FMI
module will be called radio-sf16fmi.
config RADIO_SF16FMR2
- tristate "SF16FMR2 Radio"
+ tristate "SF16-FMR2/SF16-FMD2 Radio"
depends on ISA && VIDEO_V4L2 && SND
---help---
Choose Y here if you have one of these FM radio cards.
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index f36905b63645..63b112b555b2 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -1,92 +1,37 @@
/* A driver for the D-Link DSB-R100 USB radio and Gemtek USB Radio 21.
- The device plugs into both the USB and an analog audio input, so this thing
- only deals with initialisation and frequency setting, the
- audio data has to be handled by a sound driver.
-
- Major issue: I can't find out where the device reports the signal
- strength, and indeed the windows software appearantly just looks
- at the stereo indicator as well. So, scanning will only find
- stereo stations. Sad, but I can't help it.
-
- Also, the windows program sends oodles of messages over to the
- device, and I couldn't figure out their meaning. My suspicion
- is that they don't have any:-)
-
- You might find some interesting stuff about this module at
- http://unimut.fsk.uni-heidelberg.de/unimut/demi/dsbr
-
- Copyright (c) 2000 Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- History:
-
- Version 0.46:
- Removed usb_dsbr100_open/close calls and radio->users counter. Also,
- radio->muted changed to radio->status and suspend/resume calls updated.
-
- Version 0.45:
- Converted to v4l2_device.
-
- Version 0.44:
- Add suspend/resume functions, fix unplug of device,
- a lot of cleanups and fixes by Alexey Klimov <klimov.linux@gmail.com>
-
- Version 0.43:
- Oliver Neukum: avoided DMA coherency issue
-
- Version 0.42:
- Converted dsbr100 to use video_ioctl2
- by Douglas Landgraf <dougsland@gmail.com>
-
- Version 0.41-ac1:
- Alan Cox: Some cleanups and fixes
-
- Version 0.41:
- Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
-
- Version 0.40:
- Markus: Updates for 2.6.x kernels, code layout changes, name sanitizing
-
- Version 0.30:
- Markus: Updates for 2.5.x kernel and more ISO compliant source
-
- Version 0.25:
- PSL and Markus: Cleanup, radio now doesn't stop on device close
-
- Version 0.24:
- Markus: Hope I got these silly VIDEO_TUNER_LOW issues finally
- right. Some minor cleanup, improved standalone compilation
-
- Version 0.23:
- Markus: Sign extension bug fixed by declaring transfer_buffer unsigned
-
- Version 0.22:
- Markus: Some (brown bag) cleanup in what VIDIOCSTUNER returns,
- thanks to Mike Cox for pointing the problem out.
-
- Version 0.21:
- Markus: Minor cleanup, warnings if something goes wrong, lame attempt
- to adhere to Documentation/CodingStyle
-
- Version 0.2:
- Brad Hards <bradh@dynamite.com.au>: Fixes to make it work as non-module
- Markus: Copyright clarification
-
- Version 0.01: Markus: initial release
-
+ * The device plugs into both the USB and an analog audio input, so this thing
+ * only deals with initialisation and frequency setting, the
+ * audio data has to be handled by a sound driver.
+ *
+ * Major issue: I can't find out where the device reports the signal
+ * strength, and indeed the windows software apparently just looks
+ * at the stereo indicator as well. So, scanning will only find
+ * stereo stations. Sad, but I can't help it.
+ *
+ * Also, the windows program sends oodles of messages over to the
+ * device, and I couldn't figure out their meaning. My suspicion
+ * is that they don't have any:-)
+ *
+ * You might find some interesting stuff about this module at
+ * http://unimut.fsk.uni-heidelberg.de/unimut/demi/dsbr
+ *
+ * Fully tested with the Keene USB FM Transmitter and the v4l2-compliance tool.
+ *
+ * Copyright (c) 2000 Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
@@ -95,17 +40,19 @@
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/videodev2.h>
+#include <linux/usb.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
-#include <linux/usb.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
/*
* Version Information
*/
-#define DRIVER_VERSION "0.4.7"
-
-#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
-#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
+MODULE_AUTHOR("Markus Demleitner <msdemlei@tucana.harvard.edu>");
+MODULE_DESCRIPTION("D-Link DSB-R100 USB FM radio driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1.0");
#define DSB100_VENDOR 0x04b4
#define DSB100_PRODUCT 0x1002
@@ -122,19 +69,8 @@ devices, that would be 76 and 91. */
#define FREQ_MAX 108.0
#define FREQ_MUL 16000
-/* defines for radio->status */
-#define STARTED 0
-#define STOPPED 1
-
#define v4l2_dev_to_radio(d) container_of(d, struct dsbr100_device, v4l2_dev)
-static int usb_dsbr100_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
-static void usb_dsbr100_disconnect(struct usb_interface *intf);
-static int usb_dsbr100_suspend(struct usb_interface *intf,
- pm_message_t message);
-static int usb_dsbr100_resume(struct usb_interface *intf);
-
static int radio_nr = -1;
module_param(radio_nr, int, 0);
@@ -143,179 +79,92 @@ struct dsbr100_device {
struct usb_device *usbdev;
struct video_device videodev;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
u8 *transfer_buffer;
struct mutex v4l2_lock;
int curfreq;
- int stereo;
- int status;
-};
-
-static struct usb_device_id usb_dsbr100_device_table [] = {
- { USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, usb_dsbr100_device_table);
-
-/* USB subsystem interface */
-static struct usb_driver usb_dsbr100_driver = {
- .name = "dsbr100",
- .probe = usb_dsbr100_probe,
- .disconnect = usb_dsbr100_disconnect,
- .id_table = usb_dsbr100_device_table,
- .suspend = usb_dsbr100_suspend,
- .resume = usb_dsbr100_resume,
- .reset_resume = usb_dsbr100_resume,
- .supports_autosuspend = 0,
+ bool stereo;
+ bool muted;
};
/* Low-level device interface begins here */
-/* switch on radio */
-static int dsbr100_start(struct dsbr100_device *radio)
+/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
+static int dsbr100_setfreq(struct dsbr100_device *radio, unsigned freq)
{
- int retval;
- int request;
-
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- USB_REQ_GET_STATUS,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0xC7, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = USB_REQ_GET_STATUS;
- goto usb_control_msg_failed;
+ unsigned f = (freq / 16 * 80) / 1000 + 856;
+ int retval = 0;
+
+ if (!radio->muted) {
+ retval = usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ DSB100_TUNE,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ (f >> 8) & 0x00ff, f & 0xff,
+ radio->transfer_buffer, 8, 300);
+ if (retval >= 0)
+ mdelay(1);
}
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- DSB100_ONOFF,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x01, 0x00, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = DSB100_ONOFF;
- goto usb_control_msg_failed;
+ if (retval >= 0) {
+ radio->curfreq = freq;
+ return 0;
}
-
- radio->status = STARTED;
- return (radio->transfer_buffer)[0];
-
-usb_control_msg_failed:
dev_err(&radio->usbdev->dev,
"%s - usb_control_msg returned %i, request %i\n",
- __func__, retval, request);
+ __func__, retval, DSB100_TUNE);
return retval;
-
}
-/* switch off radio */
-static int dsbr100_stop(struct dsbr100_device *radio)
+/* switch on radio */
+static int dsbr100_start(struct dsbr100_device *radio)
{
- int retval;
- int request;
-
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- USB_REQ_GET_STATUS,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x16, 0x1C, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = USB_REQ_GET_STATUS;
- goto usb_control_msg_failed;
- }
-
- retval = usb_control_msg(radio->usbdev,
+ int retval = usb_control_msg(radio->usbdev,
usb_rcvctrlpipe(radio->usbdev, 0),
DSB100_ONOFF,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0x00, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = DSB100_ONOFF;
- goto usb_control_msg_failed;
- }
-
- radio->status = STOPPED;
- return (radio->transfer_buffer)[0];
+ 0x01, 0x00, radio->transfer_buffer, 8, 300);
-usb_control_msg_failed:
+ if (retval >= 0)
+ return dsbr100_setfreq(radio, radio->curfreq);
dev_err(&radio->usbdev->dev,
"%s - usb_control_msg returned %i, request %i\n",
- __func__, retval, request);
+ __func__, retval, DSB100_ONOFF);
return retval;
}
-/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
-static int dsbr100_setfreq(struct dsbr100_device *radio)
+/* switch off radio */
+static int dsbr100_stop(struct dsbr100_device *radio)
{
- int retval;
- int request;
- int freq = (radio->curfreq / 16 * 80) / 1000 + 856;
-
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- DSB100_TUNE,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- (freq >> 8) & 0x00ff, freq & 0xff,
- radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = DSB100_TUNE;
- goto usb_control_msg_failed;
- }
-
- retval = usb_control_msg(radio->usbdev,
+ int retval = usb_control_msg(radio->usbdev,
usb_rcvctrlpipe(radio->usbdev, 0),
- USB_REQ_GET_STATUS,
+ DSB100_ONOFF,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x96, 0xB7, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = USB_REQ_GET_STATUS;
- goto usb_control_msg_failed;
- }
-
- retval = usb_control_msg(radio->usbdev,
- usb_rcvctrlpipe(radio->usbdev, 0),
- USB_REQ_GET_STATUS,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00, 0x24, radio->transfer_buffer, 8, 300);
-
- if (retval < 0) {
- request = USB_REQ_GET_STATUS;
- goto usb_control_msg_failed;
- }
-
- radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
- return (radio->transfer_buffer)[0];
+ 0x00, 0x00, radio->transfer_buffer, 8, 300);
-usb_control_msg_failed:
- radio->stereo = -1;
+ if (retval >= 0)
+ return 0;
dev_err(&radio->usbdev->dev,
"%s - usb_control_msg returned %i, request %i\n",
- __func__, retval, request);
+ __func__, retval, DSB100_ONOFF);
return retval;
+
}
/* return the device status. This is, in effect, just whether it
sees a stereo signal or not. Pity. */
static void dsbr100_getstat(struct dsbr100_device *radio)
{
- int retval;
-
- retval = usb_control_msg(radio->usbdev,
+ int retval = usb_control_msg(radio->usbdev,
usb_rcvctrlpipe(radio->usbdev, 0),
USB_REQ_GET_STATUS,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x00 , 0x24, radio->transfer_buffer, 8, 300);
+ 0x00, 0x24, radio->transfer_buffer, 8, 300);
if (retval < 0) {
- radio->stereo = -1;
+ radio->stereo = false;
dev_err(&radio->usbdev->dev,
"%s - usb_control_msg returned %i, request %i\n",
__func__, retval, USB_REQ_GET_STATUS);
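The reworked dsbr100_setfreq() above takes the V4L2 frequency in 1/16 kHz steps (the driver advertises V4L2_TUNER_CAP_LOW) and converts it to the device word f = 0.08 * f_kHz + 856; the offset and scale are carried over from the old code rather than documented anywhere. Worked example:

    /*
     * 87.5 MHz -> 87500 kHz -> freq = 87500 * 16 = 1400000
     * f = 1400000 / 16 * 80 / 1000 + 856 = 7000 + 856 = 7856
     *
     * The register write is skipped while muted; dsbr100_start() replays the
     * cached curfreq once the radio is unmuted.
     */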
@@ -332,7 +181,8 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "dsbr100", sizeof(v->driver));
strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->capabilities = V4L2_CAP_TUNER;
+ v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -349,13 +199,11 @@ static int vidioc_g_tuner(struct file *file, void *priv,
v->type = V4L2_TUNER_RADIO;
v->rangelow = FREQ_MIN * FREQ_MUL;
v->rangehigh = FREQ_MAX * FREQ_MUL;
- v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- v->capability = V4L2_TUNER_CAP_LOW;
- if(radio->stereo)
- v->audmode = V4L2_TUNER_MODE_STEREO;
- else
- v->audmode = V4L2_TUNER_MODE_MONO;
- v->signal = 0xffff; /* We can't get the signal strength */
+ v->rxsubchans = radio->stereo ? V4L2_TUNER_SUB_STEREO :
+ V4L2_TUNER_SUB_MONO;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ v->audmode = V4L2_TUNER_MODE_STEREO;
+ v->signal = radio->stereo ? 0xffff : 0; /* We can't get the signal strength */
return 0;
}
@@ -369,14 +217,12 @@ static int vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct dsbr100_device *radio = video_drvdata(file);
- int retval;
- radio->curfreq = f->frequency;
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
- retval = dsbr100_setfreq(radio);
- if (retval < 0)
- dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
- return 0;
+ return dsbr100_setfreq(radio, clamp_t(unsigned, f->frequency,
+ FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL));
}
static int vidioc_g_frequency(struct file *file, void *priv,
@@ -384,90 +230,26 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct dsbr100_device *radio = video_drvdata(file);
+ if (f->tuner)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = radio->curfreq;
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- }
-
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int usb_dsbr100_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct dsbr100_device *radio = video_drvdata(file);
+ struct dsbr100_device *radio =
+ container_of(ctrl->handler, struct dsbr100_device, hdl);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- ctrl->value = radio->status;
- return 0;
+ radio->muted = ctrl->val;
+ return radio->muted ? dsbr100_stop(radio) : dsbr100_start(radio);
}
return -EINVAL;
}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct dsbr100_device *radio = video_drvdata(file);
- int retval;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value) {
- retval = dsbr100_stop(radio);
- if (retval < 0) {
- dev_warn(&radio->usbdev->dev,
- "Radio did not respond properly\n");
- return -EBUSY;
- }
- } else {
- retval = dsbr100_start(radio);
- if (retval < 0) {
- dev_warn(&radio->usbdev->dev,
- "Radio did not respond properly\n");
- return -EBUSY;
- }
- }
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- if (a->index > 1)
- return -EINVAL;
-
- strcpy(a->name, "Radio");
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- return a->index ? -EINVAL : 0;
-}
/* USB subsystem interface begins here */
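The hand-rolled queryctrl/g_ctrl/s_ctrl ioctls can go because the control framework now implements them (plus control events and log_status) from the handler set up in probe; the driver only supplies the s_ctrl callback above. Stripped of the dsbr100 specifics, the conversion has roughly this shape (the names here are placeholders, not part of the patch):

    static const struct v4l2_ctrl_ops my_ctrl_ops = {
            .s_ctrl = my_s_ctrl,
    };

    /* in probe(), before video_register_device(): */
    v4l2_ctrl_handler_init(&dev->hdl, 1);
    v4l2_ctrl_new_std(&dev->hdl, &my_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
    if (dev->hdl.error)
            return dev->hdl.error;         /* real code unwinds earlier steps */
    dev->vdev.ctrl_handler = &dev->hdl;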
@@ -481,8 +263,17 @@ static void usb_dsbr100_disconnect(struct usb_interface *intf)
{
struct dsbr100_device *radio = usb_get_intfdata(intf);
- v4l2_device_get(&radio->v4l2_dev);
mutex_lock(&radio->v4l2_lock);
+ /*
+ * Disconnect is also called on unload, and in that case we need to
+ * mute the device. This call will silently fail if it is called
+ * after a physical disconnect.
+ */
+ usb_control_msg(radio->usbdev,
+ usb_rcvctrlpipe(radio->usbdev, 0),
+ DSB100_ONOFF,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x00, 0x00, radio->transfer_buffer, 8, 300);
usb_set_intfdata(intf, NULL);
video_unregister_device(&radio->videodev);
v4l2_device_disconnect(&radio->v4l2_dev);
@@ -495,25 +286,13 @@ static void usb_dsbr100_disconnect(struct usb_interface *intf)
static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
{
struct dsbr100_device *radio = usb_get_intfdata(intf);
- int retval;
mutex_lock(&radio->v4l2_lock);
- if (radio->status == STARTED) {
- retval = dsbr100_stop(radio);
- if (retval < 0)
- dev_warn(&intf->dev, "dsbr100_stop failed\n");
-
- /* After dsbr100_stop() status set to STOPPED.
- * If we want driver to start radio on resume
- * we set status equal to STARTED.
- * On resume we will check status and run radio if needed.
- */
- radio->status = STARTED;
- }
+ if (!radio->muted && dsbr100_stop(radio) < 0)
+ dev_warn(&intf->dev, "dsbr100_stop failed\n");
mutex_unlock(&radio->v4l2_lock);
dev_info(&intf->dev, "going into suspend..\n");
-
return 0;
}
@@ -521,18 +300,13 @@ static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
static int usb_dsbr100_resume(struct usb_interface *intf)
{
struct dsbr100_device *radio = usb_get_intfdata(intf);
- int retval;
mutex_lock(&radio->v4l2_lock);
- if (radio->status == STARTED) {
- retval = dsbr100_start(radio);
- if (retval < 0)
- dev_warn(&intf->dev, "dsbr100_start failed\n");
- }
+ if (!radio->muted && dsbr100_start(radio) < 0)
+ dev_warn(&intf->dev, "dsbr100_start failed\n");
mutex_unlock(&radio->v4l2_lock);
dev_info(&intf->dev, "coming out of suspend..\n");
-
return 0;
}
@@ -541,15 +315,23 @@ static void usb_dsbr100_release(struct v4l2_device *v4l2_dev)
{
struct dsbr100_device *radio = v4l2_dev_to_radio(v4l2_dev);
+ v4l2_ctrl_handler_free(&radio->hdl);
v4l2_device_unregister(&radio->v4l2_dev);
kfree(radio->transfer_buffer);
kfree(radio);
}
+static const struct v4l2_ctrl_ops usb_dsbr100_ctrl_ops = {
+ .s_ctrl = usb_dsbr100_s_ctrl,
+};
+
/* File system interface */
static const struct v4l2_file_operations usb_dsbr100_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
};
static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = {
@@ -558,13 +340,9 @@ static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = {
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/* check if the device is present and register with v4l and usb if it is */
@@ -593,11 +371,17 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
retval = v4l2_device_register(&intf->dev, v4l2_dev);
if (retval < 0) {
v4l2_err(v4l2_dev, "couldn't register v4l2_device\n");
- kfree(radio->transfer_buffer);
- kfree(radio);
- return retval;
+ goto err_reg_dev;
}
+ v4l2_ctrl_handler_init(&radio->hdl, 1);
+ v4l2_ctrl_new_std(&radio->hdl, &usb_dsbr100_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ if (radio->hdl.error) {
+ retval = radio->hdl.error;
+ v4l2_err(v4l2_dev, "couldn't register control\n");
+ goto err_reg_ctrl;
+ }
mutex_init(&radio->v4l2_lock);
strlcpy(radio->videodev.name, v4l2_dev->name, sizeof(radio->videodev.name));
radio->videodev.v4l2_dev = v4l2_dev;
@@ -605,28 +389,46 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
radio->videodev.ioctl_ops = &usb_dsbr100_ioctl_ops;
radio->videodev.release = video_device_release_empty;
radio->videodev.lock = &radio->v4l2_lock;
+ radio->videodev.ctrl_handler = &radio->hdl;
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->videodev.flags);
radio->usbdev = interface_to_usbdev(intf);
radio->curfreq = FREQ_MIN * FREQ_MUL;
- radio->status = STOPPED;
+ radio->muted = true;
video_set_drvdata(&radio->videodev, radio);
+ usb_set_intfdata(intf, radio);
retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr);
- if (retval < 0) {
- v4l2_err(v4l2_dev, "couldn't register video device\n");
- v4l2_device_unregister(v4l2_dev);
- kfree(radio->transfer_buffer);
- kfree(radio);
- return -EIO;
- }
- usb_set_intfdata(intf, radio);
- return 0;
+ if (retval == 0)
+ return 0;
+ v4l2_err(v4l2_dev, "couldn't register video device\n");
+
+err_reg_ctrl:
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(v4l2_dev);
+err_reg_dev:
+ kfree(radio->transfer_buffer);
+ kfree(radio);
+ return retval;
}
-module_usb_driver(usb_dsbr100_driver);
+static struct usb_device_id usb_dsbr100_device_table[] = {
+ { USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
+ { } /* Terminating entry */
+};
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
+MODULE_DEVICE_TABLE(usb, usb_dsbr100_device_table);
+
+/* USB subsystem interface */
+static struct usb_driver usb_dsbr100_driver = {
+ .name = "dsbr100",
+ .probe = usb_dsbr100_probe,
+ .disconnect = usb_dsbr100_disconnect,
+ .id_table = usb_dsbr100_device_table,
+ .suspend = usb_dsbr100_suspend,
+ .resume = usb_dsbr100_resume,
+ .reset_resume = usb_dsbr100_resume,
+};
+
+module_usb_driver(usb_dsbr100_driver);
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 2e639ce6f256..235c0e349820 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -29,6 +29,7 @@
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
#include <linux/io.h> /* outb, outb_p */
+#include <linux/pnp.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
@@ -283,6 +284,16 @@ static const struct radio_isa_ops gemtek_ops = {
static const int gemtek_ioports[] = { 0x20c, 0x30c, 0x24c, 0x34c, 0x248, 0x28c };
+#ifdef CONFIG_PNP
+static struct pnp_device_id gemtek_pnp_devices[] = {
+ /* AOpen FX-3D/Pro Radio */
+ {.id = "ADS7183", .driver_data = 0},
+ {.id = ""}
+};
+
+MODULE_DEVICE_TABLE(pnp, gemtek_pnp_devices);
+#endif
+
static struct radio_isa_driver gemtek_driver = {
.driver = {
.match = radio_isa_match,
@@ -292,6 +303,14 @@ static struct radio_isa_driver gemtek_driver = {
.name = "radio-gemtek",
},
},
+#ifdef CONFIG_PNP
+ .pnp_driver = {
+ .name = "radio-gemtek",
+ .id_table = gemtek_pnp_devices,
+ .probe = radio_isa_pnp_probe,
+ .remove = radio_isa_pnp_remove,
+ },
+#endif
.io_params = io,
.radio_nr_params = radio_nr,
.io_ports = gemtek_ioports,
@@ -305,12 +324,18 @@ static struct radio_isa_driver gemtek_driver = {
static int __init gemtek_init(void)
{
gemtek_driver.probe = probe;
+#ifdef CONFIG_PNP
+ pnp_register_driver(&gemtek_driver.pnp_driver);
+#endif
return isa_register_driver(&gemtek_driver.driver, GEMTEK_MAX);
}
static void __exit gemtek_exit(void)
{
hardmute = 1; /* Turn off PLL */
+#ifdef CONFIG_PNP
+ pnp_unregister_driver(&gemtek_driver.pnp_driver);
+#endif
isa_unregister_driver(&gemtek_driver.driver);
}
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index 06f906351fad..3c0067de4324 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -150,14 +150,6 @@ static int radio_isa_log_status(struct file *file, void *priv)
return 0;
}
-static int radio_isa_subscribe_event(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- if (sub->type == V4L2_EVENT_CTRL)
- return v4l2_event_subscribe(fh, sub, 0);
- return -EINVAL;
-}
-
static const struct v4l2_ctrl_ops radio_isa_ctrl_ops = {
.s_ctrl = radio_isa_s_ctrl,
};
@@ -177,7 +169,7 @@ static const struct v4l2_ioctl_ops radio_isa_ioctl_ops = {
.vidioc_g_frequency = radio_isa_g_frequency,
.vidioc_s_frequency = radio_isa_s_frequency,
.vidioc_log_status = radio_isa_log_status,
- .vidioc_subscribe_event = radio_isa_subscribe_event,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -199,56 +191,31 @@ static bool radio_isa_valid_io(const struct radio_isa_driver *drv, int io)
return false;
}
-int radio_isa_probe(struct device *pdev, unsigned int dev)
+struct radio_isa_card *radio_isa_alloc(struct radio_isa_driver *drv,
+ struct device *pdev)
{
- struct radio_isa_driver *drv = pdev->platform_data;
- const struct radio_isa_ops *ops = drv->ops;
struct v4l2_device *v4l2_dev;
- struct radio_isa_card *isa;
- int res;
+ struct radio_isa_card *isa = drv->ops->alloc();
+ if (!isa)
+ return NULL;
- isa = drv->ops->alloc();
- if (isa == NULL)
- return -ENOMEM;
dev_set_drvdata(pdev, isa);
isa->drv = drv;
- isa->io = drv->io_params[dev];
v4l2_dev = &isa->v4l2_dev;
strlcpy(v4l2_dev->name, dev_name(pdev), sizeof(v4l2_dev->name));
- if (drv->probe && ops->probe) {
- int i;
-
- for (i = 0; i < drv->num_of_io_ports; ++i) {
- int io = drv->io_ports[i];
-
- if (request_region(io, drv->region_size, v4l2_dev->name)) {
- bool found = ops->probe(isa, io);
-
- release_region(io, drv->region_size);
- if (found) {
- isa->io = io;
- break;
- }
- }
- }
- }
-
- if (!radio_isa_valid_io(drv, isa->io)) {
- int i;
+ return isa;
+}
- if (isa->io < 0)
- return -ENODEV;
- v4l2_err(v4l2_dev, "you must set an I/O address with io=0x%03x",
- drv->io_ports[0]);
- for (i = 1; i < drv->num_of_io_ports; i++)
- printk(KERN_CONT "/0x%03x", drv->io_ports[i]);
- printk(KERN_CONT ".\n");
- kfree(isa);
- return -EINVAL;
- }
+int radio_isa_common_probe(struct radio_isa_card *isa, struct device *pdev,
+ int radio_nr, unsigned region_size)
+{
+ const struct radio_isa_driver *drv = isa->drv;
+ const struct radio_isa_ops *ops = drv->ops;
+ struct v4l2_device *v4l2_dev = &isa->v4l2_dev;
+ int res;
- if (!request_region(isa->io, drv->region_size, v4l2_dev->name)) {
+ if (!request_region(isa->io, region_size, v4l2_dev->name)) {
v4l2_err(v4l2_dev, "port 0x%x already in use\n", isa->io);
kfree(isa);
return -EBUSY;
@@ -299,42 +266,126 @@ int radio_isa_probe(struct device *pdev, unsigned int dev)
res = ops->s_stereo(isa, isa->stereo);
if (res < 0) {
v4l2_err(v4l2_dev, "Could not setup card\n");
- goto err_node_reg;
+ goto err_hdl;
}
- res = video_register_device(&isa->vdev, VFL_TYPE_RADIO,
- drv->radio_nr_params[dev]);
+ res = video_register_device(&isa->vdev, VFL_TYPE_RADIO, radio_nr);
+
if (res < 0) {
v4l2_err(v4l2_dev, "Could not register device node\n");
- goto err_node_reg;
+ goto err_hdl;
}
v4l2_info(v4l2_dev, "Initialized radio card %s on port 0x%03x\n",
drv->card, isa->io);
return 0;
-err_node_reg:
- v4l2_ctrl_handler_free(&isa->hdl);
err_hdl:
- v4l2_device_unregister(&isa->v4l2_dev);
+ v4l2_ctrl_handler_free(&isa->hdl);
err_dev_reg:
- release_region(isa->io, drv->region_size);
+ release_region(isa->io, region_size);
kfree(isa);
return res;
}
-EXPORT_SYMBOL_GPL(radio_isa_probe);
-int radio_isa_remove(struct device *pdev, unsigned int dev)
+int radio_isa_common_remove(struct radio_isa_card *isa, unsigned region_size)
{
- struct radio_isa_card *isa = dev_get_drvdata(pdev);
const struct radio_isa_ops *ops = isa->drv->ops;
ops->s_mute_volume(isa, true, isa->volume ? isa->volume->cur.val : 0);
video_unregister_device(&isa->vdev);
v4l2_ctrl_handler_free(&isa->hdl);
v4l2_device_unregister(&isa->v4l2_dev);
- release_region(isa->io, isa->drv->region_size);
+ release_region(isa->io, region_size);
v4l2_info(&isa->v4l2_dev, "Removed radio card %s\n", isa->drv->card);
kfree(isa);
return 0;
}
+
+int radio_isa_probe(struct device *pdev, unsigned int dev)
+{
+ struct radio_isa_driver *drv = pdev->platform_data;
+ const struct radio_isa_ops *ops = drv->ops;
+ struct v4l2_device *v4l2_dev;
+ struct radio_isa_card *isa;
+
+ isa = radio_isa_alloc(drv, pdev);
+ if (!isa)
+ return -ENOMEM;
+ isa->io = drv->io_params[dev];
+ v4l2_dev = &isa->v4l2_dev;
+
+ if (drv->probe && ops->probe) {
+ int i;
+
+ for (i = 0; i < drv->num_of_io_ports; ++i) {
+ int io = drv->io_ports[i];
+
+ if (request_region(io, drv->region_size, v4l2_dev->name)) {
+ bool found = ops->probe(isa, io);
+
+ release_region(io, drv->region_size);
+ if (found) {
+ isa->io = io;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!radio_isa_valid_io(drv, isa->io)) {
+ int i;
+
+ if (isa->io < 0)
+ return -ENODEV;
+ v4l2_err(v4l2_dev, "you must set an I/O address with io=0x%03x",
+ drv->io_ports[0]);
+ for (i = 1; i < drv->num_of_io_ports; i++)
+ printk(KERN_CONT "/0x%03x", drv->io_ports[i]);
+ printk(KERN_CONT ".\n");
+ kfree(isa);
+ return -EINVAL;
+ }
+
+ return radio_isa_common_probe(isa, pdev, drv->radio_nr_params[dev],
+ drv->region_size);
+}
+EXPORT_SYMBOL_GPL(radio_isa_probe);
+
+int radio_isa_remove(struct device *pdev, unsigned int dev)
+{
+ struct radio_isa_card *isa = dev_get_drvdata(pdev);
+
+ return radio_isa_common_remove(isa, isa->drv->region_size);
+}
EXPORT_SYMBOL_GPL(radio_isa_remove);
+
+#ifdef CONFIG_PNP
+int radio_isa_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
+{
+ struct pnp_driver *pnp_drv = to_pnp_driver(dev->dev.driver);
+ struct radio_isa_driver *drv = container_of(pnp_drv,
+ struct radio_isa_driver, pnp_driver);
+ struct radio_isa_card *isa;
+
+ if (!pnp_port_valid(dev, 0))
+ return -ENODEV;
+
+ isa = radio_isa_alloc(drv, &dev->dev);
+ if (!isa)
+ return -ENOMEM;
+
+ isa->io = pnp_port_start(dev, 0);
+
+ return radio_isa_common_probe(isa, &dev->dev, drv->radio_nr_params[0],
+ pnp_port_len(dev, 0));
+}
+EXPORT_SYMBOL_GPL(radio_isa_pnp_probe);
+
+void radio_isa_pnp_remove(struct pnp_dev *dev)
+{
+ struct radio_isa_card *isa = dev_get_drvdata(&dev->dev);
+
+ radio_isa_common_remove(isa, pnp_port_len(dev, 0));
+}
+EXPORT_SYMBOL_GPL(radio_isa_pnp_remove);
+#endif
diff --git a/drivers/media/radio/radio-isa.h b/drivers/media/radio/radio-isa.h
index 8a0ea84d86de..ba4c01f1bd0c 100644
--- a/drivers/media/radio/radio-isa.h
+++ b/drivers/media/radio/radio-isa.h
@@ -24,6 +24,7 @@
#define _RADIO_ISA_H_
#include <linux/isa.h>
+#include <linux/pnp.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -76,6 +77,9 @@ struct radio_isa_ops {
/* Top level structure needed to instantiate the cards */
struct radio_isa_driver {
struct isa_driver driver;
+#ifdef CONFIG_PNP
+ struct pnp_driver pnp_driver;
+#endif
const struct radio_isa_ops *ops;
/* The module_param_array with the specified I/O ports */
int *io_params;
@@ -101,5 +105,10 @@ struct radio_isa_driver {
int radio_isa_match(struct device *pdev, unsigned int dev);
int radio_isa_probe(struct device *pdev, unsigned int dev);
int radio_isa_remove(struct device *pdev, unsigned int dev);
+#ifdef CONFIG_PNP
+int radio_isa_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id);
+void radio_isa_pnp_remove(struct pnp_dev *dev);
+#endif
#endif
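
Aside, not part of the patch: with radio_isa_alloc() and radio_isa_common_probe() factored out, a single card driver can now serve both the ISA and the PnP bus by pointing its isa_driver at radio_isa_probe/radio_isa_remove and its pnp_driver at radio_isa_pnp_probe/radio_isa_pnp_remove. A minimal sketch is below; the card name "foocard" and its PnP ID are hypothetical, only the radio_isa_* entry points and the struct fields come from the code above.

#include "radio-isa.h"

static const struct pnp_device_id foocard_pnp_ids[] = {
	{ .id = "FOO0001" },		/* hypothetical PnP ID, not from the patch */
	{ .id = "" }
};

static struct radio_isa_driver foocard_driver = {
	.driver = {
		.match		= radio_isa_match,
		.probe		= radio_isa_probe,
		.remove		= radio_isa_remove,
		.driver		= {
			.name	= "radio-foocard",
		},
	},
#ifdef CONFIG_PNP
	.pnp_driver = {
		.name		= "radio-foocard",
		.id_table	= foocard_pnp_ids,
		.probe		= radio_isa_pnp_probe,
		.remove		= radio_isa_pnp_remove,
	},
#endif
	/* .ops, .io_params, .io_ports, .radio_nr_params etc. as before */
};
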
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index 55bd1d2937c8..79adf3e654e5 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -28,7 +28,6 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <linux/usb.h>
-#include <linux/version.h>
#include <linux/mutex.h>
/* driver and module definitions */
@@ -149,7 +148,6 @@ static void usb_keene_disconnect(struct usb_interface *intf)
{
struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf));
- v4l2_device_get(&radio->v4l2_dev);
mutex_lock(&radio->lock);
usb_set_intfdata(intf, NULL);
video_unregister_device(&radio->vdev);
@@ -158,6 +156,23 @@ static void usb_keene_disconnect(struct usb_interface *intf)
v4l2_device_put(&radio->v4l2_dev);
}
+static int usb_keene_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf));
+
+ return keene_cmd_main(radio, 0, false);
+}
+
+static int usb_keene_resume(struct usb_interface *intf)
+{
+ struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf));
+
+ mdelay(50);
+ keene_cmd_set(radio);
+ keene_cmd_main(radio, radio->curfreq, true);
+ return 0;
+}
+
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
@@ -256,18 +271,6 @@ static int keene_s_ctrl(struct v4l2_ctrl *ctrl)
return -EINVAL;
}
-static int vidioc_subscribe_event(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- switch (sub->type) {
- case V4L2_EVENT_CTRL:
- return v4l2_event_subscribe(fh, sub, 0);
- default:
- return -EINVAL;
- }
-}
-
-
/* File system interface */
static const struct v4l2_file_operations usb_keene_fops = {
.owner = THIS_MODULE,
@@ -288,7 +291,7 @@ static const struct v4l2_ioctl_ops usb_keene_ioctl_ops = {
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
.vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -404,6 +407,9 @@ static struct usb_driver usb_keene_driver = {
.probe = usb_keene_probe,
.disconnect = usb_keene_disconnect,
.id_table = usb_keene_device_table,
+ .suspend = usb_keene_suspend,
+ .resume = usb_keene_resume,
+ .reset_resume = usb_keene_resume,
};
static int __init keene_init(void)
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 740a3d5520c7..b415211d0c4b 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -157,7 +157,7 @@ static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_devi
goto err_out_free_region;
dev->io = pci_resource_start(pdev, 0);
- if (snd_tea575x_init(&dev->tea)) {
+ if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n");
goto err_out_free_region;
}
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index a860a72a58ec..94cb6bc690f5 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -62,6 +62,8 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <linux/usb.h>
#include <linux/mutex.h>
@@ -101,12 +103,17 @@ devices, that would be 76 and 91. */
* List isn't full and will be updated with implementation of new functions
*/
#define AMRADIO_SET_FREQ 0xa4
+#define AMRADIO_GET_READY_FLAG 0xa5
+#define AMRADIO_GET_SIGNAL 0xa7
+#define AMRADIO_GET_FREQ 0xa8
+#define AMRADIO_SET_SEARCH_UP 0xa9
+#define AMRADIO_SET_SEARCH_DOWN 0xaa
#define AMRADIO_SET_MUTE 0xab
+#define AMRADIO_SET_RIGHT_MUTE 0xac
+#define AMRADIO_SET_LEFT_MUTE 0xad
#define AMRADIO_SET_MONO 0xae
-
-/* Comfortable defines for amradio_set_mute */
-#define AMRADIO_START 0x00
-#define AMRADIO_STOP 0x01
+#define AMRADIO_SET_SEARCH_LVL 0xb0
+#define AMRADIO_STOP_SEARCH 0xb1
/* Comfortable defines for amradio_set_stereo */
#define WANT_STEREO 0x00
@@ -117,29 +124,20 @@ static int radio_nr = -1;
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr, "Radio Nr");
-static int usb_amradio_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
-static void usb_amradio_disconnect(struct usb_interface *intf);
-static int usb_amradio_open(struct file *file);
-static int usb_amradio_close(struct file *file);
-static int usb_amradio_suspend(struct usb_interface *intf,
- pm_message_t message);
-static int usb_amradio_resume(struct usb_interface *intf);
-
/* Data for one (physical) device */
struct amradio_device {
/* reference to USB and video device */
struct usb_device *usbdev;
struct usb_interface *intf;
- struct video_device videodev;
+ struct video_device vdev;
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
- unsigned char *buffer;
+ u8 *buffer;
struct mutex lock; /* buffer locking */
int curfreq;
int stereo;
int muted;
- int initialized;
};
static inline struct amradio_device *to_amradio_dev(struct v4l2_device *v4l2_dev)
@@ -147,29 +145,8 @@ static inline struct amradio_device *to_amradio_dev(struct v4l2_device *v4l2_dev
return container_of(v4l2_dev, struct amradio_device, v4l2_dev);
}
-/* USB Device ID List */
-static struct usb_device_id usb_amradio_device_table[] = {
- {USB_DEVICE_AND_INTERFACE_INFO(USB_AMRADIO_VENDOR, USB_AMRADIO_PRODUCT,
- USB_CLASS_HID, 0, 0) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, usb_amradio_device_table);
-
-/* USB subsystem interface */
-static struct usb_driver usb_amradio_driver = {
- .name = MR800_DRIVER_NAME,
- .probe = usb_amradio_probe,
- .disconnect = usb_amradio_disconnect,
- .suspend = usb_amradio_suspend,
- .resume = usb_amradio_resume,
- .reset_resume = usb_amradio_resume,
- .id_table = usb_amradio_device_table,
- .supports_autosuspend = 1,
-};
-
-/* switch on/off the radio. Send 8 bytes to device */
-static int amradio_set_mute(struct amradio_device *radio, char argument)
+static int amradio_send_cmd(struct amradio_device *radio, u8 cmd, u8 arg,
+ u8 *extra, u8 extralen, bool reply)
{
int retval;
int size;
@@ -177,99 +154,92 @@ static int amradio_set_mute(struct amradio_device *radio, char argument)
radio->buffer[0] = 0x00;
radio->buffer[1] = 0x55;
radio->buffer[2] = 0xaa;
- radio->buffer[3] = 0x00;
- radio->buffer[4] = AMRADIO_SET_MUTE;
- radio->buffer[5] = argument;
+ radio->buffer[3] = extralen;
+ radio->buffer[4] = cmd;
+ radio->buffer[5] = arg;
radio->buffer[6] = 0x00;
- radio->buffer[7] = 0x00;
+ radio->buffer[7] = extra || reply ? 8 : 0;
retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
- (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT);
+ radio->buffer, BUFFER_LENGTH, &size, USB_TIMEOUT);
if (retval < 0 || size != BUFFER_LENGTH) {
- amradio_dev_warn(&radio->videodev.dev, "set mute failed\n");
- return retval;
+ if (video_is_registered(&radio->vdev))
+ amradio_dev_warn(&radio->vdev.dev,
+ "cmd %02x failed\n", cmd);
+ return retval ? retval : -EIO;
}
+ if (!extra && !reply)
+ return 0;
- radio->muted = argument;
+ if (extra) {
+ memcpy(radio->buffer, extra, extralen);
+ memset(radio->buffer + extralen, 0, 8 - extralen);
+ retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
+ radio->buffer, BUFFER_LENGTH, &size, USB_TIMEOUT);
+ } else {
+ memset(radio->buffer, 0, 8);
+ retval = usb_bulk_msg(radio->usbdev, usb_rcvbulkpipe(radio->usbdev, 0x81),
+ radio->buffer, BUFFER_LENGTH, &size, USB_TIMEOUT);
+ }
+ if (retval == 0 && size == BUFFER_LENGTH)
+ return 0;
+ if (video_is_registered(&radio->vdev) && cmd != AMRADIO_GET_READY_FLAG)
+ amradio_dev_warn(&radio->vdev.dev, "follow-up to cmd %02x failed\n", cmd);
+ return retval ? retval : -EIO;
+}
- return retval;
+/* switch on/off the radio. Send 8 bytes to device */
+static int amradio_set_mute(struct amradio_device *radio, bool mute)
+{
+ int ret = amradio_send_cmd(radio,
+ AMRADIO_SET_MUTE, mute, NULL, 0, false);
+
+ if (!ret)
+ radio->muted = mute;
+ return ret;
}
/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
-static int amradio_setfreq(struct amradio_device *radio, int freq)
+static int amradio_set_freq(struct amradio_device *radio, int freq)
{
- int retval;
- int size;
unsigned short freq_send = 0x10 + (freq >> 3) / 25;
-
- radio->buffer[0] = 0x00;
- radio->buffer[1] = 0x55;
- radio->buffer[2] = 0xaa;
- radio->buffer[3] = 0x03;
- radio->buffer[4] = AMRADIO_SET_FREQ;
- radio->buffer[5] = 0x00;
- radio->buffer[6] = 0x00;
- radio->buffer[7] = 0x08;
-
- retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
- (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT);
-
- if (retval < 0 || size != BUFFER_LENGTH)
- goto out_err;
+ u8 buf[3];
+ int retval;
/* frequency is calculated from freq_send and placed in first 2 bytes */
- radio->buffer[0] = (freq_send >> 8) & 0xff;
- radio->buffer[1] = freq_send & 0xff;
- radio->buffer[2] = 0x01;
- radio->buffer[3] = 0x00;
- radio->buffer[4] = 0x00;
- /* 5 and 6 bytes of buffer already = 0x00 */
- radio->buffer[7] = 0x00;
-
- retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
- (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT);
-
- if (retval < 0 || size != BUFFER_LENGTH)
- goto out_err;
+ buf[0] = (freq_send >> 8) & 0xff;
+ buf[1] = freq_send & 0xff;
+ buf[2] = 0x01;
+ retval = amradio_send_cmd(radio, AMRADIO_SET_FREQ, 0, buf, 3, false);
+ if (retval)
+ return retval;
radio->curfreq = freq;
- goto out;
-
-out_err:
- amradio_dev_warn(&radio->videodev.dev, "set frequency failed\n");
-out:
- return retval;
+ msleep(40);
+ return 0;
}
-static int amradio_set_stereo(struct amradio_device *radio, char argument)
+static int amradio_set_stereo(struct amradio_device *radio, bool stereo)
{
- int retval;
- int size;
+ int ret = amradio_send_cmd(radio,
+ AMRADIO_SET_MONO, !stereo, NULL, 0, false);
- radio->buffer[0] = 0x00;
- radio->buffer[1] = 0x55;
- radio->buffer[2] = 0xaa;
- radio->buffer[3] = 0x00;
- radio->buffer[4] = AMRADIO_SET_MONO;
- radio->buffer[5] = argument;
- radio->buffer[6] = 0x00;
- radio->buffer[7] = 0x00;
-
- retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2),
- (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT);
-
- if (retval < 0 || size != BUFFER_LENGTH) {
- amradio_dev_warn(&radio->videodev.dev, "set stereo failed\n");
- return retval;
- }
+ if (!ret)
+ radio->stereo = stereo;
+ return ret;
+}
- if (argument == WANT_STEREO)
- radio->stereo = 1;
- else
- radio->stereo = 0;
+static int amradio_get_stat(struct amradio_device *radio, bool *is_stereo, u32 *signal)
+{
+ int ret = amradio_send_cmd(radio,
+ AMRADIO_GET_SIGNAL, 0, NULL, 0, true);
- return retval;
+ if (ret)
+ return ret;
+ *is_stereo = radio->buffer[2] >> 7;
+ *signal = (radio->buffer[3] & 0xf0) << 8;
+ return 0;
}
/* Handle unplugging the device.
@@ -282,25 +252,26 @@ static void usb_amradio_disconnect(struct usb_interface *intf)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
- /* increase the device node's refcount */
- get_device(&radio->videodev.dev);
+ video_unregister_device(&radio->vdev);
+ amradio_set_mute(radio, true);
+ usb_set_intfdata(intf, NULL);
v4l2_device_disconnect(&radio->v4l2_dev);
- video_unregister_device(&radio->videodev);
mutex_unlock(&radio->lock);
- /* decrease the device node's refcount, allowing it to be released */
- put_device(&radio->videodev.dev);
+ v4l2_device_put(&radio->v4l2_dev);
}
/* vidioc_querycap - query device capabilities */
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
- struct amradio_device *radio = file->private_data;
+ struct amradio_device *radio = video_drvdata(file);
strlcpy(v->driver, "radio-mr800", sizeof(v->driver));
strlcpy(v->card, "AverMedia MR 800 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->capabilities = V4L2_CAP_TUNER;
+ v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER |
+ V4L2_CAP_HW_FREQ_SEEK;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -308,44 +279,34 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- struct amradio_device *radio = file->private_data;
+ struct amradio_device *radio = video_drvdata(file);
+ bool is_stereo = false;
int retval;
if (v->index > 0)
return -EINVAL;
-/* TODO: Add function which look is signal stereo or not
- * amradio_getstat(radio);
- */
-
-/* we call amradio_set_stereo to set radio->stereo
- * Honestly, amradio_getstat should cover this in future and
- * amradio_set_stereo shouldn't be here
- */
- retval = amradio_set_stereo(radio, WANT_STEREO);
+ v->signal = 0;
+ retval = amradio_get_stat(radio, &is_stereo, &v->signal);
+ if (retval)
+ return retval;
strcpy(v->name, "FM");
v->type = V4L2_TUNER_RADIO;
v->rangelow = FREQ_MIN * FREQ_MUL;
v->rangehigh = FREQ_MAX * FREQ_MUL;
- v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- v->capability = V4L2_TUNER_CAP_LOW;
- if (radio->stereo)
- v->audmode = V4L2_TUNER_MODE_STEREO;
- else
- v->audmode = V4L2_TUNER_MODE_MONO;
- v->signal = 0xffff; /* Can't get the signal strength, sad.. */
- v->afc = 0; /* Don't know what is this */
-
- return retval;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ v->rxsubchans = is_stereo ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
+ v->audmode = radio->stereo ?
+ V4L2_TUNER_MODE_STEREO : V4L2_TUNER_MODE_MONO;
+ return 0;
}
/* vidioc_s_tuner - set tuner attributes */
static int vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- struct amradio_device *radio = file->private_data;
- int retval = -EINVAL;
+ struct amradio_device *radio = video_drvdata(file);
if (v->index > 0)
return -EINVAL;
@@ -353,34 +314,31 @@ static int vidioc_s_tuner(struct file *file, void *priv,
/* mono/stereo selector */
switch (v->audmode) {
case V4L2_TUNER_MODE_MONO:
- retval = amradio_set_stereo(radio, WANT_MONO);
- break;
- case V4L2_TUNER_MODE_STEREO:
- retval = amradio_set_stereo(radio, WANT_STEREO);
- break;
+ return amradio_set_stereo(radio, WANT_MONO);
+ default:
+ return amradio_set_stereo(radio, WANT_STEREO);
}
-
- return retval;
}
/* vidioc_s_frequency - set tuner radio frequency */
static int vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct amradio_device *radio = file->private_data;
+ struct amradio_device *radio = video_drvdata(file);
- if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ if (f->tuner != 0)
return -EINVAL;
- return amradio_setfreq(radio, f->frequency);
+ return amradio_set_freq(radio, clamp_t(unsigned, f->frequency,
+ FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL));
}
/* vidioc_g_frequency - get tuner radio frequency */
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct amradio_device *radio = file->private_data;
+ struct amradio_device *radio = video_drvdata(file);
- if (f->tuner != 0)
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = radio->curfreq;
@@ -388,148 +346,101 @@ static int vidioc_g_frequency(struct file *file, void *priv,
return 0;
}
-/* vidioc_queryctrl - enumerate control items */
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
+static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
+ struct v4l2_hw_freq_seek *seek)
{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- }
-
- return -EINVAL;
-}
+ static u8 buf[8] = {
+ 0x3d, 0x32, 0x0f, 0x08, 0x3d, 0x32, 0x0f, 0x08
+ };
+ struct amradio_device *radio = video_drvdata(file);
+ unsigned long timeout;
+ int retval;
-/* vidioc_g_ctrl - get the value of a control */
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct amradio_device *radio = file->private_data;
+ if (seek->tuner != 0 || !seek->wrap_around)
+ return -EINVAL;
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = radio->muted;
- return 0;
+ retval = amradio_send_cmd(radio,
+ AMRADIO_SET_SEARCH_LVL, 0, buf, 8, false);
+ if (retval)
+ return retval;
+ amradio_set_freq(radio, radio->curfreq);
+ retval = amradio_send_cmd(radio,
+ seek->seek_upward ? AMRADIO_SET_SEARCH_UP : AMRADIO_SET_SEARCH_DOWN,
+ 0, NULL, 0, false);
+ if (retval)
+ return retval;
+ timeout = jiffies + msecs_to_jiffies(30000);
+ for (;;) {
+ if (time_after(jiffies, timeout)) {
+ retval = -EAGAIN;
+ break;
+ }
+ if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ retval = amradio_send_cmd(radio, AMRADIO_GET_READY_FLAG,
+ 0, NULL, 0, true);
+ if (retval)
+ continue;
+ amradio_send_cmd(radio, AMRADIO_GET_FREQ, 0, NULL, 0, true);
+ if (radio->buffer[1] || radio->buffer[2]) {
+ radio->curfreq = (radio->buffer[1] << 8) | radio->buffer[2];
+ radio->curfreq = (radio->curfreq - 0x10) * 200;
+ amradio_send_cmd(radio, AMRADIO_STOP_SEARCH,
+ 0, NULL, 0, false);
+ amradio_set_freq(radio, radio->curfreq);
+ retval = 0;
+ break;
+ }
}
-
- return -EINVAL;
+ amradio_send_cmd(radio, AMRADIO_STOP_SEARCH, 0, NULL, 0, false);
+ amradio_set_freq(radio, radio->curfreq);
+ return retval;
}
-/* vidioc_s_ctrl - set the value of a control */
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int usb_amradio_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct amradio_device *radio = file->private_data;
- int retval = -EINVAL;
+ struct amradio_device *radio =
+ container_of(ctrl->handler, struct amradio_device, hdl);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value)
- retval = amradio_set_mute(radio, AMRADIO_STOP);
- else
- retval = amradio_set_mute(radio, AMRADIO_START);
-
- break;
+ return amradio_set_mute(radio, ctrl->val);
}
- return retval;
-}
-
-/* vidioc_g_audio - get audio attributes */
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- if (a->index > 1)
- return -EINVAL;
-
- strcpy(a->name, "Radio");
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-/* vidioc_s_audio - set audio attributes */
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- if (a->index != 0)
- return -EINVAL;
- return 0;
-}
-
-/* vidioc_g_input - get input */
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-/* vidioc_s_input - set input */
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- if (i != 0)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
static int usb_amradio_init(struct amradio_device *radio)
{
int retval;
- retval = amradio_set_mute(radio, AMRADIO_STOP);
+ retval = amradio_set_mute(radio, true);
if (retval)
goto out_err;
-
- retval = amradio_set_stereo(radio, WANT_STEREO);
+ retval = amradio_set_stereo(radio, true);
if (retval)
goto out_err;
-
- radio->initialized = 1;
- goto out;
-
-out_err:
- amradio_dev_err(&radio->videodev.dev, "initialization failed\n");
-out:
- return retval;
-}
-
-/* open device - amradio_start() and amradio_setfreq() */
-static int usb_amradio_open(struct file *file)
-{
- struct amradio_device *radio = video_drvdata(file);
- int retval;
-
- file->private_data = radio;
- retval = usb_autopm_get_interface(radio->intf);
+ retval = amradio_set_freq(radio, radio->curfreq);
if (retval)
- return retval;
+ goto out_err;
+ return 0;
- if (unlikely(!radio->initialized)) {
- retval = usb_amradio_init(radio);
- if (retval)
- usb_autopm_put_interface(radio->intf);
- }
+out_err:
+ amradio_dev_err(&radio->vdev.dev, "initialization failed\n");
return retval;
}
-/*close device */
-static int usb_amradio_close(struct file *file)
-{
- struct amradio_device *radio = file->private_data;
-
- if (video_is_registered(&radio->videodev))
- usb_autopm_put_interface(radio->intf);
- return 0;
-}
-
/* Suspend device - stop device. Need to be checked and fixed */
static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message)
{
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
- if (!radio->muted && radio->initialized) {
- amradio_set_mute(radio, AMRADIO_STOP);
- radio->muted = 0;
+ if (!radio->muted) {
+ amradio_set_mute(radio, true);
+ radio->muted = false;
}
mutex_unlock(&radio->lock);
@@ -543,31 +454,28 @@ static int usb_amradio_resume(struct usb_interface *intf)
struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf));
mutex_lock(&radio->lock);
- if (unlikely(!radio->initialized))
- goto unlock;
-
- if (radio->stereo)
- amradio_set_stereo(radio, WANT_STEREO);
- else
- amradio_set_stereo(radio, WANT_MONO);
-
- amradio_setfreq(radio, radio->curfreq);
+ amradio_set_stereo(radio, radio->stereo);
+ amradio_set_freq(radio, radio->curfreq);
if (!radio->muted)
- amradio_set_mute(radio, AMRADIO_START);
+ amradio_set_mute(radio, false);
-unlock:
mutex_unlock(&radio->lock);
dev_info(&intf->dev, "coming out of suspend..\n");
return 0;
}
+static const struct v4l2_ctrl_ops usb_amradio_ctrl_ops = {
+ .s_ctrl = usb_amradio_s_ctrl,
+};
+
/* File system interface */
static const struct v4l2_file_operations usb_amradio_fops = {
.owner = THIS_MODULE,
- .open = usb_amradio_open,
- .release = usb_amradio_close,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
.unlocked_ioctl = video_ioctl2,
};
@@ -577,20 +485,19 @@ static const struct v4l2_ioctl_ops usb_amradio_ioctl_ops = {
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
+ .vidioc_s_hw_freq_seek = vidioc_s_hw_freq_seek,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static void usb_amradio_video_device_release(struct video_device *videodev)
+static void usb_amradio_release(struct v4l2_device *v4l2_dev)
{
- struct amradio_device *radio = video_get_drvdata(videodev);
+ struct amradio_device *radio = to_amradio_dev(v4l2_dev);
/* free rest memory */
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
kfree(radio->buffer);
kfree(radio);
}
@@ -624,23 +531,38 @@ static int usb_amradio_probe(struct usb_interface *intf,
goto err_v4l2;
}
+ v4l2_ctrl_handler_init(&radio->hdl, 1);
+ v4l2_ctrl_new_std(&radio->hdl, &usb_amradio_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ if (radio->hdl.error) {
+ retval = radio->hdl.error;
+ dev_err(&intf->dev, "couldn't register control\n");
+ goto err_ctrl;
+ }
mutex_init(&radio->lock);
- strlcpy(radio->videodev.name, radio->v4l2_dev.name,
- sizeof(radio->videodev.name));
- radio->videodev.v4l2_dev = &radio->v4l2_dev;
- radio->videodev.fops = &usb_amradio_fops;
- radio->videodev.ioctl_ops = &usb_amradio_ioctl_ops;
- radio->videodev.release = usb_amradio_video_device_release;
- radio->videodev.lock = &radio->lock;
+ radio->v4l2_dev.ctrl_handler = &radio->hdl;
+ radio->v4l2_dev.release = usb_amradio_release;
+ strlcpy(radio->vdev.name, radio->v4l2_dev.name,
+ sizeof(radio->vdev.name));
+ radio->vdev.v4l2_dev = &radio->v4l2_dev;
+ radio->vdev.fops = &usb_amradio_fops;
+ radio->vdev.ioctl_ops = &usb_amradio_ioctl_ops;
+ radio->vdev.release = video_device_release_empty;
+ radio->vdev.lock = &radio->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags);
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
+ usb_set_intfdata(intf, &radio->v4l2_dev);
radio->curfreq = 95.16 * FREQ_MUL;
- video_set_drvdata(&radio->videodev, radio);
+ video_set_drvdata(&radio->vdev, radio);
+ retval = usb_amradio_init(radio);
+ if (retval)
+ goto err_vdev;
- retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
+ retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO,
radio_nr);
if (retval < 0) {
dev_err(&intf->dev, "could not register video device\n");
@@ -650,6 +572,8 @@ static int usb_amradio_probe(struct usb_interface *intf,
return 0;
err_vdev:
+ v4l2_ctrl_handler_free(&radio->hdl);
+err_ctrl:
v4l2_device_unregister(&radio->v4l2_dev);
err_v4l2:
kfree(radio->buffer);
@@ -659,4 +583,24 @@ err:
return retval;
}
+/* USB Device ID List */
+static struct usb_device_id usb_amradio_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(USB_AMRADIO_VENDOR, USB_AMRADIO_PRODUCT,
+ USB_CLASS_HID, 0, 0) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, usb_amradio_device_table);
+
+/* USB subsystem interface */
+static struct usb_driver usb_amradio_driver = {
+ .name = MR800_DRIVER_NAME,
+ .probe = usb_amradio_probe,
+ .disconnect = usb_amradio_disconnect,
+ .suspend = usb_amradio_suspend,
+ .resume = usb_amradio_resume,
+ .reset_resume = usb_amradio_resume,
+ .id_table = usb_amradio_device_table,
+};
+
module_usb_driver(usb_amradio_driver);
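
Aside, not part of the patch: amradio_set_freq() and the seek handler relate the V4L2 frequency (in 1/16 kHz units, since the tuner reports V4L2_TUNER_CAP_LOW) to the MR800 register value through the two expressions used above; one register step is 200 units, i.e. 12.5 kHz. A self-contained round trip with a worked value, using made-up helper names:

#include <stdio.h>

/* V4L2 frequency (1/16 kHz units) -> MR800 register value, as in amradio_set_freq() */
static unsigned short mr800_encode_freq(unsigned freq)
{
	return 0x10 + (freq >> 3) / 25;		/* step = 200 * 1/16 kHz = 12.5 kHz */
}

/* MR800 register value -> V4L2 frequency, as in vidioc_s_hw_freq_seek() */
static unsigned mr800_decode_freq(unsigned short reg)
{
	return (reg - 0x10) * 200;
}

int main(void)
{
	unsigned freq = 1522560;		/* 95.16 MHz in 1/16 kHz units, the probe default */
	unsigned short reg = mr800_encode_freq(freq);

	/* prints reg=0x1dcc back=1522400 (95.15 MHz): the 12.5 kHz step drops the remainder */
	printf("reg=0x%04x back=%u\n", reg, mr800_decode_freq(reg));
	return 0;
}
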
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index b275c5d0fe9a..b1f844c64fde 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -17,6 +17,7 @@
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
#include <linux/io.h> /* outb, outb_p */
+#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include "radio-isa.h"
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 22c5743bf9db..a81d723b8c77 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -1,4 +1,4 @@
-/* SF16-FMI and SF16-FMP radio driver for Linux radio support
+/* SF16-FMI, SF16-FMP and SF16-FMD radio driver for Linux radio support
* heavily based on rtrack driver...
* (c) 1997 M. Kirkwood
* (c) 1998 Petr Vandrovec, vandrove@vc.cvut.cz
@@ -11,7 +11,7 @@
*
* Frequency control is done digitally -- ie out(port,encodefreq(95.8));
* No volume control - only mute/unmute - you have to use line volume
- * control on SB-part of SF16-FMI/SF16-FMP
+ * control on SB-part of SF16-FMI/SF16-FMP/SF16-FMD
*
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
*/
@@ -29,7 +29,7 @@
#include <media/v4l2-ioctl.h>
MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood");
-MODULE_DESCRIPTION("A driver for the SF16-FMI and SF16-FMP radio.");
+MODULE_DESCRIPTION("A driver for the SF16-FMI, SF16-FMP and SF16-FMD radio.");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.3");
@@ -37,7 +37,7 @@ static int io = -1;
static int radio_nr = -1;
module_param(io, int, 0);
-MODULE_PARM_DESC(io, "I/O address of the SF16-FMI or SF16-FMP card (0x284 or 0x384)");
+MODULE_PARM_DESC(io, "I/O address of the SF16-FMI/SF16-FMP/SF16-FMD card (0x284 or 0x384)");
module_param(radio_nr, int, 0);
struct fmi
@@ -130,7 +130,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *v)
{
strlcpy(v->driver, "radio-sf16fmi", sizeof(v->driver));
- strlcpy(v->card, "SF16-FMx radio", sizeof(v->card));
+ strlcpy(v->card, "SF16-FMI/FMP/FMD radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
@@ -277,8 +277,12 @@ static const struct v4l2_ioctl_ops fmi_ioctl_ops = {
/* ladis: this is my card. does any other types exist? */
static struct isapnp_device_id id_table[] __devinitdata = {
+ /* SF16-FMI */
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('M','F','R'), ISAPNP_FUNCTION(0xad10), 0},
+ /* SF16-FMD */
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('M','F','R'), ISAPNP_FUNCTION(0xad12), 0},
{ ISAPNP_CARD_END, },
};
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 7c69214334bf..4efcbec74c52 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -1,4 +1,4 @@
-/* SF16-FMR2 radio driver for Linux
+/* SF16-FMR2 and SF16-FMD2 radio driver for Linux
* Copyright (c) 2011 Ondrej Zary
*
* Original driver was (c) 2000-2002 Ziglio Frediano, freddy77@angelfire.com
@@ -13,15 +13,19 @@
#include <linux/ioport.h> /* request_region */
#include <linux/io.h> /* outb, outb_p */
#include <linux/isa.h>
+#include <linux/pnp.h>
#include <sound/tea575x-tuner.h>
MODULE_AUTHOR("Ondrej Zary");
-MODULE_DESCRIPTION("MediaForte SF16-FMR2 FM radio card driver");
+MODULE_DESCRIPTION("MediaForte SF16-FMR2 and SF16-FMD2 FM radio card driver");
MODULE_LICENSE("GPL");
-static int radio_nr = -1;
-module_param(radio_nr, int, 0444);
-MODULE_PARM_DESC(radio_nr, "Radio device number");
+/* these cards can only use two different ports (0x384 and 0x284) */
+#define FMR2_MAX 2
+
+static int radio_nr[FMR2_MAX] = { [0 ... (FMR2_MAX - 1)] = -1 };
+module_param_array(radio_nr, int, NULL, 0444);
+MODULE_PARM_DESC(radio_nr, "Radio device numbers");
struct fmr2 {
int io;
@@ -29,9 +33,15 @@ struct fmr2 {
struct snd_tea575x tea;
struct v4l2_ctrl *volume;
struct v4l2_ctrl *balance;
+ bool is_fmd2;
};
-/* the port is hardwired so no need to support multiple cards */
+static int num_fmr2_cards;
+static struct fmr2 *fmr2_cards[FMR2_MAX];
+static bool isa_registered;
+static bool pnp_registered;
+
+/* the port is hardwired on SF16-FMR2 */
#define FMR2_PORT 0x384
/* TEA575x tuner pins */
@@ -174,7 +184,8 @@ static int fmr2_tea_ext_init(struct snd_tea575x *tea)
{
struct fmr2 *fmr2 = tea->private_data;
- if (inb(fmr2->io) & FMR2_HASVOL) {
+ /* FMR2 can have volume control, FMD2 can't (uses SB16 mixer) */
+ if (!fmr2->is_fmd2 && inb(fmr2->io) & FMR2_HASVOL) {
fmr2->volume = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 68, 2, 56);
fmr2->balance = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_BALANCE, -68, 68, 2, 0);
if (tea->ctrl_handler.error) {
@@ -186,22 +197,28 @@ static int fmr2_tea_ext_init(struct snd_tea575x *tea)
return 0;
}
-static int __devinit fmr2_probe(struct device *pdev, unsigned int dev)
+static struct pnp_device_id fmr2_pnp_ids[] __devinitdata = {
+ { .id = "MFRad13" }, /* tuner subdevice of SF16-FMD2 */
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, fmr2_pnp_ids);
+
+static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
{
- struct fmr2 *fmr2;
- int err;
+ int err, i;
+ char *card_name = fmr2->is_fmd2 ? "SF16-FMD2" : "SF16-FMR2";
- fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
- if (fmr2 == NULL)
- return -ENOMEM;
+ /* avoid errors if a card was already registered at the given port */
+ for (i = 0; i < num_fmr2_cards; i++)
+ if (io == fmr2_cards[i]->io)
+ return -EBUSY;
- strlcpy(fmr2->v4l2_dev.name, dev_name(pdev),
- sizeof(fmr2->v4l2_dev.name));
- fmr2->io = FMR2_PORT;
+ strlcpy(fmr2->v4l2_dev.name, "radio-sf16fmr2",
+ sizeof(fmr2->v4l2_dev.name)),
+ fmr2->io = io;
if (!request_region(fmr2->io, 2, fmr2->v4l2_dev.name)) {
printk(KERN_ERR "radio-sf16fmr2: I/O port 0x%x already in use\n", fmr2->io);
- kfree(fmr2);
return -EBUSY;
}
@@ -210,56 +227,121 @@ static int __devinit fmr2_probe(struct device *pdev, unsigned int dev)
if (err < 0) {
v4l2_err(&fmr2->v4l2_dev, "Could not register v4l2_device\n");
release_region(fmr2->io, 2);
- kfree(fmr2);
return err;
}
fmr2->tea.v4l2_dev = &fmr2->v4l2_dev;
fmr2->tea.private_data = fmr2;
- fmr2->tea.radio_nr = radio_nr;
+ fmr2->tea.radio_nr = radio_nr[num_fmr2_cards];
fmr2->tea.ops = &fmr2_tea_ops;
fmr2->tea.ext_init = fmr2_tea_ext_init;
- strlcpy(fmr2->tea.card, "SF16-FMR2", sizeof(fmr2->tea.card));
- snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "ISA:%s",
- fmr2->v4l2_dev.name);
+ strlcpy(fmr2->tea.card, card_name, sizeof(fmr2->tea.card));
+ snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "%s:%s",
+ fmr2->is_fmd2 ? "PnP" : "ISA", dev_name(pdev));
- if (snd_tea575x_init(&fmr2->tea)) {
+ if (snd_tea575x_init(&fmr2->tea, THIS_MODULE)) {
printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
release_region(fmr2->io, 2);
- kfree(fmr2);
return -ENODEV;
}
- printk(KERN_INFO "radio-sf16fmr2: SF16-FMR2 radio card at 0x%x.\n", fmr2->io);
+ printk(KERN_INFO "radio-sf16fmr2: %s radio card at 0x%x.\n",
+ card_name, fmr2->io);
return 0;
}
-static int __exit fmr2_remove(struct device *pdev, unsigned int dev)
+static int __devinit fmr2_isa_match(struct device *pdev, unsigned int ndev)
+{
+ struct fmr2 *fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
+ if (!fmr2)
+ return 0;
+
+ if (fmr2_probe(fmr2, pdev, FMR2_PORT)) {
+ kfree(fmr2);
+ return 0;
+ }
+ dev_set_drvdata(pdev, fmr2);
+ fmr2_cards[num_fmr2_cards++] = fmr2;
+
+ return 1;
+}
+
+static int __devinit fmr2_pnp_probe(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
{
- struct fmr2 *fmr2 = dev_get_drvdata(pdev);
+ int ret;
+ struct fmr2 *fmr2 = kzalloc(sizeof(*fmr2), GFP_KERNEL);
+ if (!fmr2)
+ return -ENOMEM;
+ fmr2->is_fmd2 = true;
+ ret = fmr2_probe(fmr2, &pdev->dev, pnp_port_start(pdev, 0));
+ if (ret) {
+ kfree(fmr2);
+ return ret;
+ }
+ pnp_set_drvdata(pdev, fmr2);
+ fmr2_cards[num_fmr2_cards++] = fmr2;
+
+ return 0;
+}
+
+static void __devexit fmr2_remove(struct fmr2 *fmr2)
+{
snd_tea575x_exit(&fmr2->tea);
release_region(fmr2->io, 2);
v4l2_device_unregister(&fmr2->v4l2_dev);
kfree(fmr2);
+}
+
+static int __devexit fmr2_isa_remove(struct device *pdev, unsigned int ndev)
+{
+ fmr2_remove(dev_get_drvdata(pdev));
+ dev_set_drvdata(pdev, NULL);
+
return 0;
}
-struct isa_driver fmr2_driver = {
- .probe = fmr2_probe,
- .remove = fmr2_remove,
+static void __devexit fmr2_pnp_remove(struct pnp_dev *pdev)
+{
+ fmr2_remove(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+struct isa_driver fmr2_isa_driver = {
+ .match = fmr2_isa_match,
+ .remove = __devexit_p(fmr2_isa_remove),
.driver = {
.name = "radio-sf16fmr2",
},
};
+struct pnp_driver fmr2_pnp_driver = {
+ .name = "radio-sf16fmr2",
+ .id_table = fmr2_pnp_ids,
+ .probe = fmr2_pnp_probe,
+ .remove = __devexit_p(fmr2_pnp_remove),
+};
+
static int __init fmr2_init(void)
{
- return isa_register_driver(&fmr2_driver, 1);
+ int ret;
+
+ ret = pnp_register_driver(&fmr2_pnp_driver);
+ if (!ret)
+ pnp_registered = true;
+ ret = isa_register_driver(&fmr2_isa_driver, 1);
+ if (!ret)
+ isa_registered = true;
+
+ return (pnp_registered || isa_registered) ? 0 : ret;
}
static void __exit fmr2_exit(void)
{
- isa_unregister_driver(&fmr2_driver);
+ if (pnp_registered)
+ pnp_unregister_driver(&fmr2_pnp_driver);
+ if (isa_registered)
+ isa_unregister_driver(&fmr2_isa_driver);
}
module_init(fmr2_init);
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index 5d9a90ac3a1c..7052adc0c0b0 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -223,7 +223,7 @@ static struct platform_driver timbradio_platform_driver = {
.owner = THIS_MODULE,
},
.probe = timbradio_probe,
- .remove = timbradio_remove,
+ .remove = __devexit_p(timbradio_remove),
};
module_platform_driver(timbradio_platform_driver);
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index 9474706350f8..bb953ef75f61 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -430,7 +430,7 @@ static struct i2c_driver saa7706h_driver = {
.name = DRIVER_NAME,
},
.probe = saa7706h_probe,
- .remove = saa7706h_remove,
+ .remove = __devexit_p(saa7706h_remove),
.id_table = saa7706h_id,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 0e740c98786c..969cf494d85b 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -196,9 +196,9 @@ static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
}
if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
- dev_warn(&radio->videodev->dev, "tune does not complete\n");
+ dev_warn(&radio->videodev.dev, "tune does not complete\n");
if (timed_out)
- dev_warn(&radio->videodev->dev,
+ dev_warn(&radio->videodev.dev,
"tune timed out after %u ms\n", tune_timeout);
stop:
@@ -262,7 +262,7 @@ static int si470x_get_freq(struct si470x_device *radio, unsigned int *freq)
*/
int si470x_set_freq(struct si470x_device *radio, unsigned int freq)
{
- unsigned int spacing, band_bottom;
+ unsigned int spacing, band_bottom, band_top;
unsigned short chan;
/* Spacing (kHz) */
@@ -278,19 +278,26 @@ int si470x_set_freq(struct si470x_device *radio, unsigned int freq)
spacing = 0.050 * FREQ_MUL; break;
};
- /* Bottom of Band (MHz) */
+ /* Bottom/Top of Band (MHz) */
switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
/* 0: 87.5 - 108 MHz (USA, Europe) */
case 0:
- band_bottom = 87.5 * FREQ_MUL; break;
+ band_bottom = 87.5 * FREQ_MUL;
+ band_top = 108 * FREQ_MUL;
+ break;
/* 1: 76 - 108 MHz (Japan wide band) */
default:
- band_bottom = 76 * FREQ_MUL; break;
+ band_bottom = 76 * FREQ_MUL;
+ band_top = 108 * FREQ_MUL;
+ break;
/* 2: 76 - 90 MHz (Japan) */
case 2:
- band_bottom = 76 * FREQ_MUL; break;
+ band_bottom = 76 * FREQ_MUL;
+ band_top = 90 * FREQ_MUL;
+ break;
};
+ freq = clamp(freq, band_bottom, band_top);
/* Chan = [ Freq (Mhz) - Bottom of Band (MHz) ] / Spacing (kHz) */
chan = (freq - band_bottom) / spacing;
@@ -320,7 +327,7 @@ static int si470x_set_seek(struct si470x_device *radio,
radio->registers[POWERCFG] &= ~POWERCFG_SEEKUP;
retval = si470x_set_register(radio, POWERCFG);
if (retval < 0)
- goto done;
+ return retval;
/* currently I2C driver only uses interrupt way to seek */
if (radio->stci_enabled) {
@@ -344,24 +351,19 @@ static int si470x_set_seek(struct si470x_device *radio,
}
if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
- dev_warn(&radio->videodev->dev, "seek does not complete\n");
+ dev_warn(&radio->videodev.dev, "seek does not complete\n");
if (radio->registers[STATUSRSSI] & STATUSRSSI_SF)
- dev_warn(&radio->videodev->dev,
+ dev_warn(&radio->videodev.dev,
"seek failed / band limit reached\n");
- if (timed_out)
- dev_warn(&radio->videodev->dev,
- "seek timed out after %u ms\n", seek_timeout);
stop:
/* stop seeking */
radio->registers[POWERCFG] &= ~POWERCFG_SEEK;
retval = si470x_set_register(radio, POWERCFG);
-done:
/* try again, if timed out */
- if ((retval == 0) && timed_out)
- retval = -EAGAIN;
-
+ if (retval == 0 && timed_out)
+ return -EAGAIN;
return retval;
}
@@ -463,7 +465,6 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
unsigned int block_count = 0;
/* switch on rds reception */
- mutex_lock(&radio->lock);
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
si470x_rds_on(radio);
@@ -505,7 +506,6 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
}
done:
- mutex_unlock(&radio->lock);
return retval;
}
@@ -517,19 +517,19 @@ static unsigned int si470x_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
+ unsigned long req_events = poll_requested_events(pts);
+ int retval = v4l2_ctrl_poll(file, pts);
- /* switch on rds reception */
-
- mutex_lock(&radio->lock);
- if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
- si470x_rds_on(radio);
- mutex_unlock(&radio->lock);
+ if (req_events & (POLLIN | POLLRDNORM)) {
+ /* switch on rds reception */
+ if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
+ si470x_rds_on(radio);
- poll_wait(file, &radio->read_queue, pts);
+ poll_wait(file, &radio->read_queue, pts);
- if (radio->rd_index != radio->wr_index)
- retval = POLLIN | POLLRDNORM;
+ if (radio->rd_index != radio->wr_index)
+ retval |= POLLIN | POLLRDNORM;
+ }
return retval;
}
@@ -553,134 +553,26 @@ static const struct v4l2_file_operations si470x_fops = {
* Video4Linux Interface
**************************************************************************/
-/*
- * si470x_vidioc_queryctrl - enumerate control items
- */
-static int si470x_vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct si470x_device *radio = video_drvdata(file);
- int retval = -EINVAL;
-
- /* abort if qc->id is below V4L2_CID_BASE */
- if (qc->id < V4L2_CID_BASE)
- goto done;
-
- /* search video control */
- switch (qc->id) {
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 15, 1, 15);
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- }
-
- /* disable unsupported base controls */
- /* to satisfy kradio and such apps */
- if ((retval == -EINVAL) && (qc->id < V4L2_CID_LASTP1)) {
- qc->flags = V4L2_CTRL_FLAG_DISABLED;
- retval = 0;
- }
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "query controls failed with %d\n", retval);
- return retval;
-}
-
-
-/*
- * si470x_vidioc_g_ctrl - get the value of a control
- */
-static int si470x_vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = radio->registers[SYSCONFIG2] &
- SYSCONFIG2_VOLUME;
- break;
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = ((radio->registers[POWERCFG] &
- POWERCFG_DMUTE) == 0) ? 1 : 0;
- break;
- default:
- retval = -EINVAL;
- }
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "get control failed with %d\n", retval);
-
- mutex_unlock(&radio->lock);
- return retval;
-}
-
-
-/*
- * si470x_vidioc_s_ctrl - set the value of a control
- */
-static int si470x_vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int si470x_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
+ struct si470x_device *radio =
+ container_of(ctrl->handler, struct si470x_device, hdl);
switch (ctrl->id) {
case V4L2_CID_AUDIO_VOLUME:
radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_VOLUME;
- radio->registers[SYSCONFIG2] |= ctrl->value;
- retval = si470x_set_register(radio, SYSCONFIG2);
- break;
+ radio->registers[SYSCONFIG2] |= ctrl->val;
+ return si470x_set_register(radio, SYSCONFIG2);
case V4L2_CID_AUDIO_MUTE:
- if (ctrl->value == 1)
+ if (ctrl->val)
radio->registers[POWERCFG] &= ~POWERCFG_DMUTE;
else
radio->registers[POWERCFG] |= POWERCFG_DMUTE;
- retval = si470x_set_register(radio, POWERCFG);
- break;
+ return si470x_set_register(radio, POWERCFG);
default:
- retval = -EINVAL;
+ return -EINVAL;
}
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "set control failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
-}
-
-
-/*
- * si470x_vidioc_g_audio - get audio attributes
- */
-static int si470x_vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *audio)
-{
- /* driver constants */
- audio->index = 0;
- strcpy(audio->name, "Radio");
- audio->capability = V4L2_AUDCAP_STEREO;
- audio->mode = 0;
-
- return 0;
}
@@ -691,22 +583,14 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *tuner)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
+ int retval;
- if (tuner->index != 0) {
- retval = -EINVAL;
- goto done;
- }
+ if (tuner->index != 0)
+ return -EINVAL;
retval = si470x_get_register(radio, STATUSRSSI);
if (retval < 0)
- goto done;
+ return retval;
/* driver constants */
strcpy(tuner->name, "FM");
@@ -737,7 +621,7 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
if ((radio->registers[STATUSRSSI] & STATUSRSSI_ST) == 0)
tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
else
- tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+ tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
/* If there is a reliable method of detecting an RDS channel,
then this code should check for that before setting this
RDS subchannel. */
@@ -754,16 +638,13 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
tuner->signal = (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI);
/* the ideal factor is 0xffff/75 = 873,8 */
tuner->signal = (tuner->signal * 873) + (8 * tuner->signal / 10);
+ if (tuner->signal > 0xffff)
+ tuner->signal = 0xffff;
/* automatic frequency control: -1: freq to low, 1 freq to high */
/* AFCRL does only indicate that freq. differs, not if too low/high */
tuner->afc = (radio->registers[STATUSRSSI] & STATUSRSSI_AFCRL) ? 1 : 0;
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "get tuner failed with %d\n", retval);
- mutex_unlock(&radio->lock);
return retval;
}
@@ -775,16 +656,9 @@ static int si470x_vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *tuner)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
if (tuner->index != 0)
- goto done;
+ return -EINVAL;
/* mono/stereo selector */
switch (tuner->audmode) {
@@ -792,20 +666,12 @@ static int si470x_vidioc_s_tuner(struct file *file, void *priv,
radio->registers[POWERCFG] |= POWERCFG_MONO; /* force mono */
break;
case V4L2_TUNER_MODE_STEREO:
+ default:
radio->registers[POWERCFG] &= ~POWERCFG_MONO; /* try stereo */
break;
- default:
- goto done;
}
- retval = si470x_set_register(radio, POWERCFG);
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "set tuner failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
+ return si470x_set_register(radio, POWERCFG);
}
@@ -816,28 +682,12 @@ static int si470x_vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *freq)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- /* safety checks */
- mutex_lock(&radio->lock);
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
- if (freq->tuner != 0) {
- retval = -EINVAL;
- goto done;
- }
+ if (freq->tuner != 0)
+ return -EINVAL;
freq->type = V4L2_TUNER_RADIO;
- retval = si470x_get_freq(radio, &freq->frequency);
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "get frequency failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
+ return si470x_get_freq(radio, &freq->frequency);
}
@@ -848,27 +698,11 @@ static int si470x_vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *freq)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
- if (freq->tuner != 0) {
- retval = -EINVAL;
- goto done;
- }
+ if (freq->tuner != 0)
+ return -EINVAL;
- retval = si470x_set_freq(radio, freq->frequency);
-
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "set frequency failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
+ return si470x_set_freq(radio, freq->frequency);
}
@@ -879,44 +713,29 @@ static int si470x_vidioc_s_hw_freq_seek(struct file *file, void *priv,
struct v4l2_hw_freq_seek *seek)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- mutex_lock(&radio->lock);
- /* safety checks */
- retval = si470x_disconnect_check(radio);
- if (retval)
- goto done;
-
- if (seek->tuner != 0) {
- retval = -EINVAL;
- goto done;
- }
- retval = si470x_set_seek(radio, seek->wrap_around, seek->seek_upward);
+ if (seek->tuner != 0)
+ return -EINVAL;
-done:
- if (retval < 0)
- dev_warn(&radio->videodev->dev,
- "set hardware frequency seek failed with %d\n", retval);
- mutex_unlock(&radio->lock);
- return retval;
+ return si470x_set_seek(radio, seek->wrap_around, seek->seek_upward);
}
+const struct v4l2_ctrl_ops si470x_ctrl_ops = {
+ .s_ctrl = si470x_s_ctrl,
+};
/*
* si470x_ioctl_ops - video device ioctl operations
*/
static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
.vidioc_querycap = si470x_vidioc_querycap,
- .vidioc_queryctrl = si470x_vidioc_queryctrl,
- .vidioc_g_ctrl = si470x_vidioc_g_ctrl,
- .vidioc_s_ctrl = si470x_vidioc_s_ctrl,
- .vidioc_g_audio = si470x_vidioc_g_audio,
.vidioc_g_tuner = si470x_vidioc_g_tuner,
.vidioc_s_tuner = si470x_vidioc_s_tuner,
.vidioc_g_frequency = si470x_vidioc_g_frequency,
.vidioc_s_frequency = si470x_vidioc_s_frequency,
.vidioc_s_hw_freq_seek = si470x_vidioc_s_hw_freq_seek,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -926,6 +745,6 @@ static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
struct video_device si470x_viddev_template = {
.fops = &si470x_fops,
.name = DRIVER_NAME,
- .release = video_device_release,
+ .release = video_device_release_empty,
.ioctl_ops = &si470x_ioctl_ops,
};
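
Aside, not part of the patch: si470x_set_freq() now clamps the requested frequency to the selected band before computing the channel number, so a request outside the band tunes to the nearest band edge instead of underflowing the "Chan = (Freq - Bottom of Band) / Spacing" calculation. A small sketch of the same arithmetic in plain kHz (the driver itself works in 1/16 kHz units); the 87.5-108 MHz band comes from the switch above, the 100 kHz spacing is just an example value:

#include <stdio.h>

/* clamp to the band, then Chan = (Freq - Bottom of Band) / Spacing */
static unsigned si470x_chan(unsigned freq_khz, unsigned bottom_khz,
			    unsigned top_khz, unsigned spacing_khz)
{
	if (freq_khz < bottom_khz)
		freq_khz = bottom_khz;
	if (freq_khz > top_khz)
		freq_khz = top_khz;
	return (freq_khz - bottom_khz) / spacing_khz;
}

int main(void)
{
	/* 99.9 MHz in the 87.5-108 MHz band, 100 kHz spacing -> channel 124 */
	printf("%u\n", si470x_chan(99900, 87500, 108000, 100));
	/* 76 MHz is below the band edge, so it is clamped to 87.5 MHz -> channel 0 */
	printf("%u\n", si470x_chan(76000, 87500, 108000, 100));
	return 0;
}
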
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 9b546a5523f3..a80044c5874e 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -162,20 +162,6 @@ static int si470x_get_all_registers(struct si470x_device *radio)
/**************************************************************************
- * General Driver Functions - DISCONNECT_CHECK
- **************************************************************************/
-
-/*
- * si470x_disconnect_check - check whether radio disconnects
- */
-int si470x_disconnect_check(struct si470x_device *radio)
-{
- return 0;
-}
-
-
-
-/**************************************************************************
* File Operations Interface
**************************************************************************/
@@ -185,12 +171,12 @@ int si470x_disconnect_check(struct si470x_device *radio)
int si470x_fops_open(struct file *file)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
+ int retval = v4l2_fh_open(file);
- mutex_lock(&radio->lock);
- radio->users++;
+ if (retval)
+ return retval;
- if (radio->users == 1) {
+ if (v4l2_fh_is_singular_file(file)) {
/* start radio */
retval = si470x_start(radio);
if (retval < 0)
@@ -205,7 +191,8 @@ int si470x_fops_open(struct file *file)
}
done:
- mutex_unlock(&radio->lock);
+ if (retval)
+ v4l2_fh_release(file);
return retval;
}
@@ -216,21 +203,12 @@ done:
int si470x_fops_release(struct file *file)
{
struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- /* safety check */
- if (!radio)
- return -ENODEV;
- mutex_lock(&radio->lock);
- radio->users--;
- if (radio->users == 0)
+ if (v4l2_fh_is_singular_file(file))
/* stop radio */
- retval = si470x_stop(radio);
+ si470x_stop(radio);
- mutex_unlock(&radio->lock);
-
- return retval;
+ return v4l2_fh_release(file);
}
@@ -371,32 +349,25 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
goto err_initial;
}
- radio->users = 0;
radio->client = client;
mutex_init(&radio->lock);
- /* video device allocation and initialization */
- radio->videodev = video_device_alloc();
- if (!radio->videodev) {
- retval = -ENOMEM;
- goto err_radio;
- }
- memcpy(radio->videodev, &si470x_viddev_template,
- sizeof(si470x_viddev_template));
- video_set_drvdata(radio->videodev, radio);
+ /* video device initialization */
+ radio->videodev = si470x_viddev_template;
+ video_set_drvdata(&radio->videodev, radio);
/* power up : need 110ms */
radio->registers[POWERCFG] = POWERCFG_ENABLE;
if (si470x_set_register(radio, POWERCFG) < 0) {
retval = -EIO;
- goto err_video;
+ goto err_radio;
}
msleep(110);
/* get device and chip versions */
if (si470x_get_all_registers(radio) < 0) {
retval = -EIO;
- goto err_video;
+ goto err_radio;
}
dev_info(&client->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
radio->registers[DEVICEID], radio->registers[CHIPID]);
@@ -427,7 +398,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
if (!radio->buffer) {
retval = -EIO;
- goto err_video;
+ goto err_radio;
}
/* rds buffer configuration */
@@ -447,7 +418,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
}
/* register video device */
- retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
+ retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
radio_nr);
if (retval) {
dev_warn(&client->dev, "Could not register video device\n");
@@ -460,8 +431,6 @@ err_all:
free_irq(client->irq, radio);
err_rds:
kfree(radio->buffer);
-err_video:
- video_device_release(radio->videodev);
err_radio:
kfree(radio);
err_initial:
@@ -477,7 +446,7 @@ static __devexit int si470x_i2c_remove(struct i2c_client *client)
struct si470x_device *radio = i2c_get_clientdata(client);
free_irq(client->irq, radio);
- video_unregister_device(radio->videodev);
+ video_unregister_device(&radio->videodev);
kfree(radio);
return 0;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index b7debb67932a..f412f7ab270b 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -51,6 +51,8 @@ static struct usb_device_id si470x_usb_driver_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) },
/* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) },
+ /* Axentia ALERT FM USB Receiver */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) },
/* Terminating entry */
{ }
};
@@ -367,23 +369,6 @@ static int si470x_get_scratch_page_versions(struct si470x_device *radio)
/**************************************************************************
- * General Driver Functions - DISCONNECT_CHECK
- **************************************************************************/
-
-/*
- * si470x_disconnect_check - check whether radio disconnects
- */
-int si470x_disconnect_check(struct si470x_device *radio)
-{
- if (radio->disconnected)
- return -EIO;
- else
- return 0;
-}
-
-
-
-/**************************************************************************
* RDS Driver Functions
**************************************************************************/
@@ -414,9 +399,6 @@ static void si470x_int_in_callback(struct urb *urb)
}
}
- /* safety checks */
- if (radio->disconnected)
- return;
if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
goto resubmit;
@@ -501,112 +483,30 @@ resubmit:
}
-
-/**************************************************************************
- * File Operations Interface
- **************************************************************************/
-
-/*
- * si470x_fops_open - file open
- */
int si470x_fops_open(struct file *file)
{
- struct si470x_device *radio = video_drvdata(file);
- int retval;
-
- mutex_lock(&radio->lock);
- radio->users++;
-
- retval = usb_autopm_get_interface(radio->intf);
- if (retval < 0) {
- radio->users--;
- retval = -EIO;
- goto done;
- }
-
- if (radio->users == 1) {
- /* start radio */
- retval = si470x_start(radio);
- if (retval < 0) {
- usb_autopm_put_interface(radio->intf);
- goto done;
- }
-
- /* initialize interrupt urb */
- usb_fill_int_urb(radio->int_in_urb, radio->usbdev,
- usb_rcvintpipe(radio->usbdev,
- radio->int_in_endpoint->bEndpointAddress),
- radio->int_in_buffer,
- le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize),
- si470x_int_in_callback,
- radio,
- radio->int_in_endpoint->bInterval);
-
- radio->int_in_running = 1;
- mb();
-
- retval = usb_submit_urb(radio->int_in_urb, GFP_KERNEL);
- if (retval) {
- dev_info(&radio->intf->dev,
- "submitting int urb failed (%d)\n", retval);
- radio->int_in_running = 0;
- usb_autopm_put_interface(radio->intf);
- }
- }
-
-done:
- mutex_unlock(&radio->lock);
- return retval;
+ return v4l2_fh_open(file);
}
-
-/*
- * si470x_fops_release - file release
- */
int si470x_fops_release(struct file *file)
{
- struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- /* safety check */
- if (!radio) {
- retval = -ENODEV;
- goto done;
- }
-
- mutex_lock(&radio->lock);
- radio->users--;
- if (radio->users == 0) {
- /* shutdown interrupt handler */
- if (radio->int_in_running) {
- radio->int_in_running = 0;
- if (radio->int_in_urb)
- usb_kill_urb(radio->int_in_urb);
- }
-
- if (radio->disconnected) {
- video_unregister_device(radio->videodev);
- kfree(radio->int_in_buffer);
- kfree(radio->buffer);
- mutex_unlock(&radio->lock);
- kfree(radio);
- goto done;
- }
+ return v4l2_fh_release(file);
+}
- /* cancel read processes */
- wake_up_interruptible(&radio->read_queue);
+static void si470x_usb_release(struct v4l2_device *v4l2_dev)
+{
+ struct si470x_device *radio =
+ container_of(v4l2_dev, struct si470x_device, v4l2_dev);
- /* stop radio */
- retval = si470x_stop(radio);
- usb_autopm_put_interface(radio->intf);
- }
- mutex_unlock(&radio->lock);
-done:
- return retval;
+ usb_free_urb(radio->int_in_urb);
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
+ kfree(radio->int_in_buffer);
+ kfree(radio->buffer);
+ kfree(radio);
}
-
/**************************************************************************
* Video4Linux Interface
**************************************************************************/
@@ -623,13 +523,45 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
usb_make_path(radio->usbdev, capability->bus_info,
sizeof(capability->bus_info));
- capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
+ capability->device_caps = V4L2_CAP_HW_FREQ_SEEK |
V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
-
+ capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
+static int si470x_start_usb(struct si470x_device *radio)
+{
+ int retval;
+
+ /* start radio */
+ retval = si470x_start(radio);
+ if (retval < 0)
+ return retval;
+
+ v4l2_ctrl_handler_setup(&radio->hdl);
+
+ /* initialize interrupt urb */
+ usb_fill_int_urb(radio->int_in_urb, radio->usbdev,
+ usb_rcvintpipe(radio->usbdev,
+ radio->int_in_endpoint->bEndpointAddress),
+ radio->int_in_buffer,
+ le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize),
+ si470x_int_in_callback,
+ radio,
+ radio->int_in_endpoint->bInterval);
+
+ radio->int_in_running = 1;
+ mb();
+
+ retval = usb_submit_urb(radio->int_in_urb, GFP_KERNEL);
+ if (retval) {
+ dev_info(&radio->intf->dev,
+ "submitting int urb failed (%d)\n", retval);
+ radio->int_in_running = 0;
+ }
+ return retval;
+}
/**************************************************************************
* USB Interface
@@ -653,8 +585,6 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
retval = -ENOMEM;
goto err_initial;
}
- radio->users = 0;
- radio->disconnected = 0;
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
mutex_init(&radio->lock);
@@ -691,20 +621,35 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
goto err_intbuffer;
}
- /* video device allocation and initialization */
- radio->videodev = video_device_alloc();
- if (!radio->videodev) {
- retval = -ENOMEM;
+ radio->v4l2_dev.release = si470x_usb_release;
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
goto err_urb;
}
- memcpy(radio->videodev, &si470x_viddev_template,
- sizeof(si470x_viddev_template));
- video_set_drvdata(radio->videodev, radio);
+
+ v4l2_ctrl_handler_init(&radio->hdl, 2);
+ v4l2_ctrl_new_std(&radio->hdl, &si470x_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(&radio->hdl, &si470x_ctrl_ops,
+ V4L2_CID_AUDIO_VOLUME, 0, 15, 1, 15);
+ if (radio->hdl.error) {
+ retval = radio->hdl.error;
+ dev_err(&intf->dev, "couldn't register control\n");
+ goto err_dev;
+ }
+ radio->videodev = si470x_viddev_template;
+ radio->videodev.ctrl_handler = &radio->hdl;
+ radio->videodev.lock = &radio->lock;
+ radio->videodev.v4l2_dev = &radio->v4l2_dev;
+ radio->videodev.release = video_device_release_empty;
+ set_bit(V4L2_FL_USE_FH_PRIO, &radio->videodev.flags);
+ video_set_drvdata(&radio->videodev, radio);
/* get device and chip versions */
if (si470x_get_all_registers(radio) < 0) {
retval = -EIO;
- goto err_video;
+ goto err_ctrl;
}
dev_info(&intf->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
radio->registers[DEVICEID], radio->registers[CHIPID]);
@@ -721,7 +666,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
/* get software and hardware versions */
if (si470x_get_scratch_page_versions(radio) < 0) {
retval = -EIO;
- goto err_video;
+ goto err_ctrl;
}
dev_info(&intf->dev, "software version %d, hardware version %d\n",
radio->software_version, radio->hardware_version);
@@ -764,28 +709,35 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
if (!radio->buffer) {
retval = -EIO;
- goto err_video;
+ goto err_ctrl;
}
/* rds buffer configuration */
radio->wr_index = 0;
radio->rd_index = 0;
init_waitqueue_head(&radio->read_queue);
+ usb_set_intfdata(intf, radio);
+
+ /* start radio */
+ retval = si470x_start_usb(radio);
+ if (retval < 0)
+ goto err_all;
/* register video device */
- retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
+ retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
radio_nr);
if (retval) {
- dev_warn(&intf->dev, "Could not register video device\n");
+ dev_err(&intf->dev, "Could not register video device\n");
goto err_all;
}
- usb_set_intfdata(intf, radio);
return 0;
err_all:
kfree(radio->buffer);
-err_video:
- video_device_release(radio->videodev);
+err_ctrl:
+ v4l2_ctrl_handler_free(&radio->hdl);
+err_dev:
+ v4l2_device_unregister(&radio->v4l2_dev);
err_urb:
usb_free_urb(radio->int_in_urb);
err_intbuffer:
@@ -803,8 +755,22 @@ err_initial:
static int si470x_usb_driver_suspend(struct usb_interface *intf,
pm_message_t message)
{
+ struct si470x_device *radio = usb_get_intfdata(intf);
+
dev_info(&intf->dev, "suspending now...\n");
+ /* shutdown interrupt handler */
+ if (radio->int_in_running) {
+ radio->int_in_running = 0;
+ if (radio->int_in_urb)
+ usb_kill_urb(radio->int_in_urb);
+ }
+
+ /* cancel read processes */
+ wake_up_interruptible(&radio->read_queue);
+
+ /* stop radio */
+ si470x_stop(radio);
return 0;
}
@@ -814,9 +780,12 @@ static int si470x_usb_driver_suspend(struct usb_interface *intf,
*/
static int si470x_usb_driver_resume(struct usb_interface *intf)
{
+ struct si470x_device *radio = usb_get_intfdata(intf);
+
dev_info(&intf->dev, "resuming now...\n");
- return 0;
+ /* start radio */
+ return si470x_start_usb(radio);
}
@@ -828,28 +797,22 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
struct si470x_device *radio = usb_get_intfdata(intf);
mutex_lock(&radio->lock);
- radio->disconnected = 1;
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ video_unregister_device(&radio->videodev);
usb_set_intfdata(intf, NULL);
- if (radio->users == 0) {
- /* set led to disconnect state */
- si470x_set_led_state(radio, BLINK_ORANGE_LED);
-
- /* Free data structures. */
- usb_free_urb(radio->int_in_urb);
-
- kfree(radio->int_in_buffer);
- video_unregister_device(radio->videodev);
- kfree(radio->buffer);
- mutex_unlock(&radio->lock);
- kfree(radio);
- } else {
- mutex_unlock(&radio->lock);
- }
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
}
/*
* si470x_usb_driver - usb driver interface
+ *
+ * A note on suspend/resume: this driver had only empty suspend/resume
+ * functions, and when I tried to test suspend/resume it always disconnected
+ * instead of resuming (using my ADS InstantFM stick). So I've decided to
+ * remove these callbacks until someone else with better hardware can
+ * implement and test this.
*/
static struct usb_driver si470x_usb_driver = {
.name = DRIVER_NAME,
@@ -857,8 +820,8 @@ static struct usb_driver si470x_usb_driver = {
.disconnect = si470x_usb_driver_disconnect,
.suspend = si470x_usb_driver_suspend,
.resume = si470x_usb_driver_resume,
+ .reset_resume = si470x_usb_driver_resume,
.id_table = si470x_usb_driver_id_table,
- .supports_autosuspend = 1,
};
module_usb_driver(si470x_usb_driver);
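
The teardown ordering that replaces the old "disconnected" flag is easiest to read as one piece. A condensed sketch, assuming the V4L2 core behaviour that the v4l2_device release callback only runs after the last file handle is closed and the last reference is dropped (disconnect_sketch is a placeholder name, the body mirrors si470x_usb_driver_disconnect above):

static void disconnect_sketch(struct usb_interface *intf)
{
	struct si470x_device *radio = usb_get_intfdata(intf);

	mutex_lock(&radio->lock);
	v4l2_device_disconnect(&radio->v4l2_dev);	/* mark hardware gone */
	video_unregister_device(&radio->videodev);	/* no new opens */
	usb_set_intfdata(intf, NULL);
	mutex_unlock(&radio->lock);
	v4l2_device_put(&radio->v4l2_dev);		/* drop our reference */
}

si470x_usb_release() above then frees the urb, the control handler, the buffers and the device structure, whether that happens immediately at unplug or later when the last user closes the device node.
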
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index f300a55ed85c..4921cab8e0fa 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -36,6 +36,9 @@
#include <linux/mutex.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
#include <asm/unaligned.h>
@@ -141,10 +144,9 @@
* si470x_device - private data
*/
struct si470x_device {
- struct video_device *videodev;
-
- /* driver management */
- unsigned int users;
+ struct v4l2_device v4l2_dev;
+ struct video_device videodev;
+ struct v4l2_ctrl_handler hdl;
/* Silabs internal registers (0..15) */
unsigned short registers[RADIO_REGISTER_NUM];
@@ -174,9 +176,6 @@ struct si470x_device {
/* scratch page */
unsigned char software_version;
unsigned char hardware_version;
-
- /* driver management */
- unsigned char disconnected;
#endif
#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE)
@@ -213,6 +212,7 @@ struct si470x_device {
* Common Functions
**************************************************************************/
extern struct video_device si470x_viddev_template;
+extern const struct v4l2_ctrl_ops si470x_ctrl_ops;
int si470x_get_register(struct si470x_device *radio, int regnr);
int si470x_set_register(struct si470x_device *radio, int regnr);
int si470x_disconnect_check(struct si470x_device *radio);
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 6418c4c9faf1..06d47e5cce9f 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -211,7 +211,7 @@ static struct i2c_driver tef6862_driver = {
.name = DRIVER_NAME,
},
.probe = tef6862_probe,
- .remove = tef6862_remove,
+ .remove = __devexit_p(tef6862_remove),
.id_table = tef6862_id,
};
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 077d369a0173..080b96a61f1a 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -518,6 +518,10 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
video_set_drvdata(gradio_dev, fmdev);
gradio_dev->lock = &fmdev->mutex;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &gradio_dev->flags);
/* Register with V4L2 subsystem as RADIO device */
if (video_register_device(gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index a3fbb21350e9..f97eeb870455 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -69,6 +69,7 @@ config IR_JVC_DECODER
config IR_SONY_DECODER
tristate "Enable IR raw decoder for the Sony protocol"
depends on RC_CORE
+ select BITREVERSE
default y
---help---
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index baf907b3ce76..7be377fc1be8 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -1,7 +1,7 @@
/*
* USB ATI Remote support
*
- * Copyright (c) 2011 Anssi Hannula <anssi.hannula@iki.fi>
+ * Copyright (c) 2011, 2012 Anssi Hannula <anssi.hannula@iki.fi>
* Version 2.2.0 Copyright (c) 2004 Torrey Hoffman <thoffman@arnor.net>
* Version 2.1.1 Copyright (c) 2002 Vladimir Dergachev
*
@@ -151,13 +151,57 @@ MODULE_PARM_DESC(mouse, "Enable mouse device, default = yes");
#undef err
#define err(format, arg...) printk(KERN_ERR format , ## arg)
+struct ati_receiver_type {
+ /* either default_keymap or get_default_keymap should be set */
+ const char *default_keymap;
+ const char *(*get_default_keymap)(struct usb_interface *interface);
+};
+
+static const char *get_medion_keymap(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+
+ /*
+ * There are many different Medion remotes shipped with a receiver
+ * with the same usb id, but the receivers have subtle differences
+ * in the USB descriptors allowing us to detect them.
+ */
+
+ if (udev->manufacturer && udev->product) {
+ if (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP) {
+
+ if (!strcmp(udev->manufacturer, "X10 Wireless Technology Inc")
+ && !strcmp(udev->product, "USB Receiver"))
+ return RC_MAP_MEDION_X10_DIGITAINER;
+
+ if (!strcmp(udev->manufacturer, "X10 WTI")
+ && !strcmp(udev->product, "RF receiver"))
+ return RC_MAP_MEDION_X10_OR2X;
+ } else {
+
+ if (!strcmp(udev->manufacturer, "X10 Wireless Technology Inc")
+ && !strcmp(udev->product, "USB Receiver"))
+ return RC_MAP_MEDION_X10;
+ }
+ }
+
+ dev_info(&interface->dev,
+ "Unknown Medion X10 receiver, using default ati_remote Medion keymap\n");
+
+ return RC_MAP_MEDION_X10;
+}
+
+static const struct ati_receiver_type type_ati = { .default_keymap = RC_MAP_ATI_X10 };
+static const struct ati_receiver_type type_medion = { .get_default_keymap = get_medion_keymap };
+static const struct ati_receiver_type type_firefly = { .default_keymap = RC_MAP_SNAPSTREAM_FIREFLY };
+
static struct usb_device_id ati_remote_table[] = {
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_ATI_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_MEDION_X10 },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, FIREFLY_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)RC_MAP_SNAPSTREAM_FIREFLY },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_medion },
+ { USB_DEVICE(ATI_REMOTE_VENDOR_ID, FIREFLY_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_firefly },
{} /* Terminating entry */
};
@@ -445,6 +489,7 @@ static void ati_remote_input_report(struct urb *urb)
int acc;
int remote_num;
unsigned char scancode;
+ u32 wheel_keycode = KEY_RESERVED;
int i;
/*
@@ -484,26 +529,33 @@ static void ati_remote_input_report(struct urb *urb)
*/
scancode = data[2] & 0x7f;
- /* Look up event code index in the mouse translation table. */
- for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
- if (scancode == ati_remote_tbl[i].data) {
- index = i;
- break;
+ dbginfo(&ati_remote->interface->dev,
+ "channel 0x%02x; key data %02x, scancode %02x\n",
+ remote_num, data[2], scancode);
+
+ if (scancode >= 0x70) {
+ /*
+ * This is either a mouse or scrollwheel event, depending on
+ * the remote/keymap.
+ * Get the keycode assigned to scancode 0x78/0x70. If it is
+ * set, assume this is a scrollwheel up/down event.
+ */
+ wheel_keycode = rc_g_keycode_from_table(ati_remote->rdev,
+ scancode & 0x78);
+
+ if (wheel_keycode == KEY_RESERVED) {
+ /* scrollwheel was not mapped, assume mouse */
+
+ /* Look up event code index in the mouse translation table. */
+ for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+ if (scancode == ati_remote_tbl[i].data) {
+ index = i;
+ break;
+ }
+ }
}
}
- if (index >= 0) {
- dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
- remote_num, data[2], index, ati_remote_tbl[index].code);
- if (!dev)
- return; /* no mouse device */
- } else
- dbginfo(&ati_remote->interface->dev,
- "channel 0x%02x; key data %02x, scancode %02x\n",
- remote_num, data[2], scancode);
-
-
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
input_event(dev, ati_remote_tbl[index].type,
ati_remote_tbl[index].code,
@@ -542,15 +594,29 @@ static void ati_remote_input_report(struct urb *urb)
if (index < 0) {
/* Not a mouse event, hand it to rc-core. */
-
- /*
- * We don't use the rc-core repeat handling yet as
- * it would cause ghost repeats which would be a
- * regression for this driver.
- */
- rc_keydown_notimeout(ati_remote->rdev, scancode,
- data[2]);
- rc_keyup(ati_remote->rdev);
+ int count = 1;
+
+ if (wheel_keycode != KEY_RESERVED) {
+ /*
+ * This is a scrollwheel event, send the
+ * scroll up (0x78) / down (0x70) scancode
+ * repeatedly as many times as indicated by
+ * rest of the scancode.
+ */
+ count = (scancode & 0x07) + 1;
+ scancode &= 0x78;
+ }
+
+ while (count--) {
+ /*
+ * We don't use the rc-core repeat handling yet as
+ * it would cause ghost repeats which would be a
+ * regression for this driver.
+ */
+ rc_keydown_notimeout(ati_remote->rdev, scancode,
+ data[2]);
+ rc_keyup(ati_remote->rdev);
+ }
return;
}
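
The scrollwheel encoding described in the comments above packs the direction into bit 3 and the notch count minus one into the low three bits. A small standalone illustration; the scancode value is made up:

#include <stdio.h>

int main(void)
{
	unsigned char scancode = 0x73;		/* hypothetical report byte */
	unsigned char base = scancode & 0x78;	/* 0x78 = up, 0x70 = down */
	int notches = (scancode & 0x07) + 1;	/* 1..8 notches per report */

	printf("scroll %s, %d notch(es)\n",
	       base == 0x78 ? "up" : "down", notches);
	return 0;
}

With keymaps that map only the base scancodes, such as rc-medion-x10-digitainer added elsewhere in this diff, the driver then reports that base code "notches" times, which is exactly what the while (count--) loop above does.
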
@@ -766,6 +832,7 @@ static int ati_remote_probe(struct usb_interface *interface, const struct usb_de
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *iface_host = interface->cur_altsetting;
struct usb_endpoint_descriptor *endpoint_in, *endpoint_out;
+ struct ati_receiver_type *type = (struct ati_receiver_type *)id->driver_info;
struct ati_remote *ati_remote;
struct input_dev *input_dev;
struct rc_dev *rc_dev;
@@ -827,10 +894,15 @@ static int ati_remote_probe(struct usb_interface *interface, const struct usb_de
snprintf(ati_remote->mouse_name, sizeof(ati_remote->mouse_name),
"%s mouse", ati_remote->rc_name);
- if (id->driver_info)
- rc_dev->map_name = (const char *)id->driver_info;
- else
- rc_dev->map_name = RC_MAP_ATI_X10;
+ rc_dev->map_name = RC_MAP_ATI_X10; /* default map */
+
+ /* set default keymap according to receiver model */
+ if (type) {
+ if (type->default_keymap)
+ rc_dev->map_name = type->default_keymap;
+ else if (type->get_default_keymap)
+ rc_dev->map_name = type->get_default_keymap(interface);
+ }
ati_remote_rc_init(ati_remote);
mutex_init(&ati_remote->open_mutex);
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 4a3a238bcfbc..6aabf7ae3a31 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -556,11 +556,11 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
FINTEK_DRIVER_NAME, (void *)fintek))
- goto failure;
+ goto failure2;
ret = rc_register_device(rdev);
if (ret)
- goto failure;
+ goto failure3;
device_init_wakeup(&pdev->dev, true);
fintek->rdev = rdev;
@@ -570,12 +570,11 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
return 0;
+failure3:
+ free_irq(fintek->cir_irq, fintek);
+failure2:
+ release_region(fintek->cir_addr, fintek->cir_port_len);
failure:
- if (fintek->cir_irq)
- free_irq(fintek->cir_irq, fintek);
- if (fintek->cir_addr)
- release_region(fintek->cir_addr, fintek->cir_port_len);
-
rc_free_device(rdev);
kfree(fintek);
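
The same cleanup restructuring is applied to the ite-cir and nuvoton-cir probes below: instead of one catch-all label guarded by if-checks, each failure jumps to a label that releases exactly what was already acquired, in reverse order. A minimal standalone sketch of the idiom; all names here are illustrative stubs, not driver functions:

#include <stdio.h>
#include <errno.h>

static int grab_region(void)	{ return 0; }
static int grab_irq(void)	{ return 0; }
static int register_dev(void)	{ return -1; }	/* pretend the last step fails */
static void drop_irq(void)	{ puts("freeing irq"); }
static void drop_region(void)	{ puts("releasing region"); }

static int probe_sketch(void)
{
	int ret = -ENODEV;

	if (grab_region())
		goto err_nothing;
	if (grab_irq())
		goto err_region;
	if (register_dev())
		goto err_irq;
	return 0;

err_irq:		/* unwind strictly in reverse order of acquisition */
	drop_irq();
err_region:
	drop_region();
err_nothing:
	return ret;
}

int main(void)
{
	return probe_sketch() ? 1 : 0;
}
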
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 7f26fdf2e54e..5dd0386604f0 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -255,7 +255,7 @@ static struct usb_device_id imon_usb_id_table[] = {
static struct usb_driver imon_driver = {
.name = MOD_NAME,
.probe = imon_probe,
- .disconnect = imon_disconnect,
+ .disconnect = __devexit_p(imon_disconnect),
.suspend = imon_suspend,
.resume = imon_resume,
.id_table = imon_usb_id_table,
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 95e630998aaf..a82025121345 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -46,9 +46,9 @@ static int ir_raw_event_thread(void *data)
while (!kthread_should_stop()) {
spin_lock_irq(&raw->lock);
- retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
+ retval = kfifo_len(&raw->kfifo);
- if (!retval) {
+ if (retval < sizeof(ev)) {
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop())
@@ -59,11 +59,9 @@ static int ir_raw_event_thread(void *data)
continue;
}
+ retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
spin_unlock_irq(&raw->lock);
-
- BUG_ON(retval != sizeof(ev));
-
mutex_lock(&ir_raw_handler_lock);
list_for_each_entry(handler, &ir_raw_handler_list, list)
handler->decode(raw->dev, ev);
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index d38fbdd0b25a..7e54ec57bcf9 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -56,7 +56,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
struct sanyo_dec *data = &dev->raw->sanyo;
u32 scancode;
- u8 address, not_address, command, not_command;
+ u8 address, command, not_command;
if (!(dev->raw->enabled_protocols & RC_TYPE_SANYO))
return 0;
@@ -154,7 +154,7 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
break;
address = bitrev16((data->bits >> 29) & 0x1fff) >> 3;
- not_address = bitrev16((data->bits >> 16) & 0x1fff) >> 3;
+ /* not_address = bitrev16((data->bits >> 16) & 0x1fff) >> 3; */
command = bitrev8((data->bits >> 8) & 0xff);
not_command = bitrev8((data->bits >> 0) & 0xff);
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 0e49c99abf68..36fe5a349b95 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1598,24 +1598,22 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
ITE_DRIVER_NAME, (void *)itdev))
- goto failure;
+ goto failure2;
ret = rc_register_device(rdev);
if (ret)
- goto failure;
+ goto failure3;
itdev->rdev = rdev;
ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
return 0;
+failure3:
+ free_irq(itdev->cir_irq, itdev);
+failure2:
+ release_region(itdev->cir_addr, itdev->params.io_region_size);
failure:
- if (itdev->cir_irq)
- free_irq(itdev->cir_irq, itdev);
-
- if (itdev->cir_addr)
- release_region(itdev->cir_addr, itdev->params.io_region_size);
-
rc_free_device(rdev);
kfree(itdev);
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 49ce2662f56b..ab84d66c67c1 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-anysee.o \
rc-apac-viewcomp.o \
rc-asus-pc39.o \
+ rc-asus-ps3-100.o \
rc-ati-tv-wonder-hd-600.o \
rc-ati-x10.o \
rc-avermedia-a16d.o \
@@ -52,6 +53,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-lme2510.o \
rc-manli.o \
rc-medion-x10.o \
+ rc-medion-x10-digitainer.o \
+ rc-medion-x10-or2x.o \
rc-msi-digivox-ii.o \
rc-msi-digivox-iii.o \
rc-msi-tvanywhere.o \
diff --git a/drivers/media/rc/keymaps/rc-asus-ps3-100.c b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
new file mode 100644
index 000000000000..ba76609c5936
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-asus-ps3-100.c
@@ -0,0 +1,91 @@
+/* asus-ps3-100.h - Keytable for asus_ps3_100 Remote Controller
+ *
+ * Copyright (c) 2012 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Based on a previous patch from Remi Schwartz <remi.schwartz@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table asus_ps3_100[] = {
+ { 0x081c, KEY_HOME }, /* home */
+ { 0x081e, KEY_TV }, /* tv */
+ { 0x0803, KEY_TEXT }, /* teletext */
+ { 0x0829, KEY_POWER }, /* close */
+
+ { 0x080b, KEY_RED }, /* red */
+ { 0x080d, KEY_YELLOW }, /* yellow */
+ { 0x0806, KEY_BLUE }, /* blue */
+ { 0x0807, KEY_GREEN }, /* green */
+
+ /* Keys 0 to 9 */
+ { 0x082a, KEY_0 },
+ { 0x0816, KEY_1 },
+ { 0x0812, KEY_2 },
+ { 0x0814, KEY_3 },
+ { 0x0836, KEY_4 },
+ { 0x0832, KEY_5 },
+ { 0x0834, KEY_6 },
+ { 0x080e, KEY_7 },
+ { 0x080a, KEY_8 },
+ { 0x080c, KEY_9 },
+
+ { 0x0815, KEY_VOLUMEUP },
+ { 0x0826, KEY_VOLUMEDOWN },
+ { 0x0835, KEY_CHANNELUP }, /* channel / program + */
+ { 0x0824, KEY_CHANNELDOWN }, /* channel / program - */
+
+ { 0x0808, KEY_UP },
+ { 0x0804, KEY_DOWN },
+ { 0x0818, KEY_LEFT },
+ { 0x0810, KEY_RIGHT },
+ { 0x0825, KEY_ENTER }, /* enter */
+
+ { 0x0822, KEY_EXIT }, /* back */
+ { 0x082c, KEY_AB }, /* recall */
+
+ { 0x0820, KEY_AUDIO }, /* TV audio */
+ { 0x0837, KEY_SCREEN }, /* snapshot */
+ { 0x082e, KEY_ZOOM }, /* full screen */
+ { 0x0802, KEY_MUTE }, /* mute */
+
+ { 0x0831, KEY_REWIND }, /* backward << */
+ { 0x0811, KEY_RECORD }, /* recording */
+ { 0x0809, KEY_STOP },
+ { 0x0805, KEY_FASTFORWARD }, /* forward >> */
+ { 0x0821, KEY_PREVIOUS }, /* rew */
+ { 0x081a, KEY_PAUSE }, /* pause */
+ { 0x0839, KEY_PLAY }, /* play */
+ { 0x0819, KEY_NEXT }, /* forward */
+};
+
+static struct rc_map_list asus_ps3_100_map = {
+.map = {
+ .scan = asus_ps3_100,
+ .size = ARRAY_SIZE(asus_ps3_100),
+ .rc_type = RC_TYPE_RC5,
+ .name = RC_MAP_ASUS_PS3_100,
+}
+};
+
+static int __init init_rc_map_asus_ps3_100(void)
+{
+return rc_map_register(&asus_ps3_100_map);
+}
+
+static void __exit exit_rc_map_asus_ps3_100(void)
+{
+rc_map_unregister(&asus_ps3_100_map);
+}
+
+module_init(init_rc_map_asus_ps3_100)
+module_exit(exit_rc_map_asus_ps3_100)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
diff --git a/drivers/media/rc/keymaps/rc-it913x-v2.c b/drivers/media/rc/keymaps/rc-it913x-v2.c
index 28e376e18b99..bd42a30ec06f 100644
--- a/drivers/media/rc/keymaps/rc-it913x-v2.c
+++ b/drivers/media/rc/keymaps/rc-it913x-v2.c
@@ -40,7 +40,7 @@ static struct rc_map_table it913x_v2_rc[] = {
/* Type 2 */
/* keys stereo, snapshot unassigned */
{ 0x866b00, KEY_0 },
- { 0x866b1b, KEY_1 },
+ { 0x866b01, KEY_1 },
{ 0x866b02, KEY_2 },
{ 0x866b03, KEY_3 },
{ 0x866b04, KEY_4 },
diff --git a/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c b/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
new file mode 100644
index 000000000000..966f9b3c71da
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-medion-x10-digitainer.c
@@ -0,0 +1,123 @@
+/*
+ * Medion X10 RF remote keytable (Digitainer variant)
+ *
+ * Copyright (C) 2012 Anssi Hannula <anssi.hannula@iki.fi>
+ *
+ * This keymap is for a variant that has a distinctive scrollwheel instead of
+ * up/down buttons (tested with P/N 40009936 / 20018268), reportedly
+ * originally shipped with Medion Digitainer but now sold separately simply as
+ * an "X10" remote.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table medion_x10_digitainer[] = {
+ { 0x02, KEY_POWER },
+
+ { 0x2c, KEY_TV },
+ { 0x2d, KEY_VIDEO },
+ { 0x04, KEY_DVD }, /* CD/DVD */
+ { 0x16, KEY_TEXT }, /* "teletext" icon, i.e. a screen with lines */
+ { 0x06, KEY_AUDIO },
+ { 0x2e, KEY_RADIO },
+ { 0x31, KEY_EPG }, /* a screen with an open book */
+ { 0x05, KEY_IMAGES }, /* Photo */
+ { 0x2f, KEY_INFO },
+
+ { 0x78, KEY_UP }, /* scrollwheel up 1 notch */
+ /* 0x79..0x7f: 2-8 notches, driver repeats 0x78 entry */
+
+ { 0x70, KEY_DOWN }, /* scrollwheel down 1 notch */
+ /* 0x71..0x77: 2-8 notches, driver repeats 0x70 entry */
+
+ { 0x19, KEY_MENU },
+ { 0x1d, KEY_LEFT },
+ { 0x1e, KEY_OK }, /* scrollwheel press */
+ { 0x1f, KEY_RIGHT },
+ { 0x20, KEY_BACK },
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x00, KEY_MUTE },
+
+ { 0x1b, KEY_SELECT }, /* also has "U" rotated 90 degrees CCW */
+
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+ { 0x1c, KEY_LAST },
+
+ { 0x32, KEY_RED }, /* also Audio */
+ { 0x33, KEY_GREEN }, /* also Subtitle */
+ { 0x34, KEY_YELLOW }, /* also Angle */
+ { 0x35, KEY_BLUE }, /* also Title */
+
+ { 0x28, KEY_STOP },
+ { 0x29, KEY_PAUSE },
+ { 0x25, KEY_PLAY },
+ { 0x21, KEY_PREVIOUS },
+ { 0x18, KEY_CAMERA },
+ { 0x23, KEY_NEXT },
+ { 0x24, KEY_REWIND },
+ { 0x27, KEY_RECORD },
+ { 0x26, KEY_FORWARD },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+
+ /* these do not actually exist on this remote, but these scancodes
+ * exist on all other Medion X10 remotes and adding them here allows
+ * such remotes to be adequately usable with this keymap in case
+ * this keymap is wrongly used with them (which is quite possible as
+ * there are lots of different Medion X10 remotes): */
+ { 0x1a, KEY_UP },
+ { 0x22, KEY_DOWN },
+};
+
+static struct rc_map_list medion_x10_digitainer_map = {
+ .map = {
+ .scan = medion_x10_digitainer,
+ .size = ARRAY_SIZE(medion_x10_digitainer),
+ .rc_type = RC_TYPE_OTHER,
+ .name = RC_MAP_MEDION_X10_DIGITAINER,
+ }
+};
+
+static int __init init_rc_map_medion_x10_digitainer(void)
+{
+ return rc_map_register(&medion_x10_digitainer_map);
+}
+
+static void __exit exit_rc_map_medion_x10_digitainer(void)
+{
+ rc_map_unregister(&medion_x10_digitainer_map);
+}
+
+module_init(init_rc_map_medion_x10_digitainer)
+module_exit(exit_rc_map_medion_x10_digitainer)
+
+MODULE_DESCRIPTION("Medion X10 RF remote keytable (Digitainer variant)");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/keymaps/rc-medion-x10-or2x.c b/drivers/media/rc/keymaps/rc-medion-x10-or2x.c
new file mode 100644
index 000000000000..b077300ecb5c
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-medion-x10-or2x.c
@@ -0,0 +1,108 @@
+/*
+ * Medion X10 OR22/OR24 RF remote keytable
+ *
+ * Copyright (C) 2012 Anssi Hannula <anssi.hannula@iki.fi>
+ *
+ * This keymap is for several Medion X10 remotes that have the Windows MCE
+ * button. This has been tested with a "RF VISTA Remote Control", OR24V,
+ * P/N 20035335, but should work with other variants that have the same
+ * buttons, such as OR22V and OR24E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table medion_x10_or2x[] = {
+ { 0x02, KEY_POWER },
+ { 0x16, KEY_TEXT }, /* "T" in a box, for teletext */
+
+ { 0x09, KEY_VOLUMEUP },
+ { 0x08, KEY_VOLUMEDOWN },
+ { 0x00, KEY_MUTE },
+ { 0x0b, KEY_CHANNELUP },
+ { 0x0c, KEY_CHANNELDOWN },
+
+ { 0x32, KEY_RED },
+ { 0x33, KEY_GREEN },
+ { 0x34, KEY_YELLOW },
+ { 0x35, KEY_BLUE },
+
+ { 0x18, KEY_PVR }, /* record symbol inside a tv symbol */
+ { 0x04, KEY_DVD }, /* disc symbol */
+ { 0x31, KEY_EPG }, /* a tv schedule symbol */
+ { 0x1c, KEY_TV }, /* play symbol inside a tv symbol */
+ { 0x20, KEY_BACK },
+ { 0x2f, KEY_INFO },
+
+ { 0x1a, KEY_UP },
+ { 0x22, KEY_DOWN },
+ { 0x1d, KEY_LEFT },
+ { 0x1f, KEY_RIGHT },
+ { 0x1e, KEY_OK },
+
+ { 0x1b, KEY_MEDIA }, /* Windows MCE button */
+
+ { 0x21, KEY_PREVIOUS },
+ { 0x23, KEY_NEXT },
+ { 0x24, KEY_REWIND },
+ { 0x26, KEY_FORWARD },
+ { 0x25, KEY_PLAY },
+ { 0x28, KEY_STOP },
+ { 0x29, KEY_PAUSE },
+ { 0x27, KEY_RECORD },
+
+ { 0x0d, KEY_1 },
+ { 0x0e, KEY_2 },
+ { 0x0f, KEY_3 },
+ { 0x10, KEY_4 },
+ { 0x11, KEY_5 },
+ { 0x12, KEY_6 },
+ { 0x13, KEY_7 },
+ { 0x14, KEY_8 },
+ { 0x15, KEY_9 },
+ { 0x17, KEY_0 },
+ { 0x30, KEY_CLEAR },
+ { 0x36, KEY_ENTER },
+ { 0x37, KEY_NUMERIC_STAR },
+ { 0x38, KEY_NUMERIC_POUND },
+};
+
+static struct rc_map_list medion_x10_or2x_map = {
+ .map = {
+ .scan = medion_x10_or2x,
+ .size = ARRAY_SIZE(medion_x10_or2x),
+ .rc_type = RC_TYPE_OTHER,
+ .name = RC_MAP_MEDION_X10_OR2X,
+ }
+};
+
+static int __init init_rc_map_medion_x10_or2x(void)
+{
+ return rc_map_register(&medion_x10_or2x_map);
+}
+
+static void __exit exit_rc_map_medion_x10_or2x(void)
+{
+ rc_map_unregister(&medion_x10_or2x_map);
+}
+
+module_init(init_rc_map_medion_x10_or2x)
+module_exit(exit_rc_map_medion_x10_or2x)
+
+MODULE_DESCRIPTION("Medion X10 OR22/OR24 RF remote keytable");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index e150a2e29a4b..84e06d3aa696 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -520,7 +520,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
{
char codes[USB_BUFLEN * 3 + 1];
char inout[9];
- u8 cmd, subcmd, data1, data2, data3, data4, data5;
+ u8 cmd, subcmd, data1, data2, data3, data4;
struct device *dev = ir->dev;
int i, start, skip = 0;
u32 carrier, period;
@@ -553,7 +553,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
data2 = buf[start + 3] & 0xff;
data3 = buf[start + 4] & 0xff;
data4 = buf[start + 5] & 0xff;
- data5 = buf[start + 6] & 0xff;
switch (cmd) {
case MCE_CMD_NULL:
@@ -1443,7 +1442,7 @@ static int mceusb_dev_resume(struct usb_interface *intf)
static struct usb_driver mceusb_dev_driver = {
.name = DRIVER_NAME,
.probe = mceusb_dev_probe,
- .disconnect = mceusb_dev_disconnect,
+ .disconnect = __devexit_p(mceusb_dev_disconnect),
.suspend = mceusb_dev_suspend,
.resume = mceusb_dev_resume,
.reset_resume = mceusb_dev_resume,
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 8b2c071ac0ab..dc8a7dddccd4 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -1075,19 +1075,19 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
- goto failure;
+ goto failure2;
if (!request_region(nvt->cir_wake_addr,
CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
- goto failure;
+ goto failure3;
if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
NVT_DRIVER_NAME, (void *)nvt))
- goto failure;
+ goto failure4;
ret = rc_register_device(rdev);
if (ret)
- goto failure;
+ goto failure5;
device_init_wakeup(&pdev->dev, true);
nvt->rdev = rdev;
@@ -1099,17 +1099,15 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
return 0;
+failure5:
+ free_irq(nvt->cir_wake_irq, nvt);
+failure4:
+ release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
+failure3:
+ free_irq(nvt->cir_irq, nvt);
+failure2:
+ release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
failure:
- if (nvt->cir_irq)
- free_irq(nvt->cir_irq, nvt);
- if (nvt->cir_addr)
- release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
-
- if (nvt->cir_wake_irq)
- free_irq(nvt->cir_wake_irq, nvt);
- if (nvt->cir_wake_addr)
- release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
-
rc_free_device(rdev);
kfree(nvt);
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index efc6a514348a..fae1615e0ff2 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -221,7 +221,6 @@ static int __init loop_init(void)
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
- rc->priv = &loopdev;
loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index ad95c67a4dba..2878b0ed9741 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1277,7 +1277,7 @@ static int redrat3_dev_resume(struct usb_interface *intf)
static struct usb_driver redrat3_dev_driver = {
.name = DRIVER_NAME,
.probe = redrat3_dev_probe,
- .disconnect = redrat3_dev_disconnect,
+ .disconnect = __devexit_p(redrat3_dev_disconnect),
.suspend = redrat3_dev_suspend,
.resume = redrat3_dev_resume,
.reset_resume = redrat3_dev_resume,
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 342c2c8c1ddf..54ee34872d14 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -232,7 +232,7 @@ MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
static bool txandrx; /* default = 0 */
module_param(txandrx, bool, 0444);
-MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
+MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
static unsigned int wake_sc = 0x800F040C;
module_param(wake_sc, uint, 0644);
@@ -1032,6 +1032,8 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->tx_ir = wbcir_tx;
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
+ data->dev->timeout = MS_TO_NS(100);
+ data->dev->allowed_protos = RC_TYPE_ALL;
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index ce1e7ba940f6..99937c94d7df 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -472,6 +472,9 @@ comment "Camera sensor devices"
config VIDEO_APTINA_PLL
tristate
+config VIDEO_SMIAPP_PLL
+ tristate
+
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
depends on I2C && VIDEO_V4L2
@@ -556,6 +559,8 @@ config VIDEO_S5K6AA
This is a V4L2 sensor-level driver for Samsung S5K6AA(FX) 1.3M
camera sensor with an embedded SoC image signal processor.
+source "drivers/media/video/smiapp/Kconfig"
+
comment "Flash devices"
config VIDEO_ADP1653
@@ -644,6 +649,8 @@ menuconfig V4L_USB_DRIVERS
if V4L_USB_DRIVERS
+source "drivers/media/video/au0828/Kconfig"
+
source "drivers/media/video/uvc/Kconfig"
source "drivers/media/video/gspca/Kconfig"
@@ -662,8 +669,6 @@ source "drivers/media/video/tm6000/Kconfig"
source "drivers/media/video/usbvision/Kconfig"
-source "drivers/media/video/et61x251/Kconfig"
-
source "drivers/media/video/sn9c102/Kconfig"
source "drivers/media/video/pwc/Kconfig"
@@ -721,8 +726,6 @@ menuconfig V4L_PCI_DRIVERS
if V4L_PCI_DRIVERS
-source "drivers/media/video/au0828/Kconfig"
-
source "drivers/media/video/bt8xx/Kconfig"
source "drivers/media/video/cx18/Kconfig"
@@ -794,6 +797,19 @@ source "drivers/media/video/saa7164/Kconfig"
source "drivers/media/video/zoran/Kconfig"
+config STA2X11_VIP
+ tristate "STA2X11 VIP Video For Linux"
+ depends on STA2X11
+ select VIDEO_ADV7180 if VIDEO_HELPER_CHIPS_AUTO
+ select VIDEOBUF_DMA_CONTIG
+ depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS
+ help
+ Say Y for support for STA2X11 VIP (Video Input Port) capture
+ device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sta2x11_vip.
+
endif # V4L_PCI_DRIVERS
#
@@ -1127,19 +1143,6 @@ config VIDEO_MX2
This is a v4l2 driver for the i.MX27 and the i.MX25 Camera Sensor
Interface
-config VIDEO_SAMSUNG_S5P_FIMC
- tristate "Samsung S5P and EXYNOS4 camera interface driver (EXPERIMENTAL)"
- depends on VIDEO_V4L2 && I2C && PLAT_S5P && PM_RUNTIME && \
- VIDEO_V4L2_SUBDEV_API && EXPERIMENTAL
- select VIDEOBUF2_DMA_CONTIG
- select V4L2_MEM2MEM_DEV
- ---help---
- This is a v4l2 driver for Samsung S5P and EXYNOS4 camera
- host interface and video postprocessor.
-
- To compile this driver as a module, choose M here: the
- module will be called s5p-fimc.
-
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
depends on VIDEO_DEV && SOC_CAMERA && ARCH_AT91
@@ -1148,16 +1151,7 @@ config VIDEO_ATMEL_ISI
This module makes the ATMEL Image Sensor Interface available
as a v4l2 device.
-config VIDEO_S5P_MIPI_CSIS
- tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
- depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P
- depends on VIDEO_V4L2_SUBDEV_API && REGULATOR
- ---help---
- This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver.
-
- To compile this driver as a module, choose M here: the
- module will be called s5p-csis.
-
+source "drivers/media/video/s5p-fimc/Kconfig"
source "drivers/media/video/s5p-tv/Kconfig"
endif # V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index a6282a3a6a82..d209de0e0ca8 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -79,9 +79,12 @@ obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
obj-$(CONFIG_VIDEO_S5K6AA) += s5k6aa.o
+obj-$(CONFIG_VIDEO_SMIAPP) += smiapp/
obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_VIDEO_AS3645A) += as3645a.o
+obj-$(CONFIG_VIDEO_SMIAPP_PLL) += smiapp-pll.o
+
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
@@ -120,6 +123,7 @@ obj-$(CONFIG_VIDEO_TM6000) += tm6000/
obj-$(CONFIG_VIDEO_MXB) += mxb.o
obj-$(CONFIG_VIDEO_HEXIUM_ORION) += hexium_orion.o
obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o
+obj-$(CONFIG_STA2X11_VIP) += sta2x11_vip.o
obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
@@ -152,7 +156,6 @@ obj-$(CONFIG_USB_ZR364XX) += zr364xx.o
obj-$(CONFIG_USB_STKWEBCAM) += stkwebcam.o
obj-$(CONFIG_USB_SN9C102) += sn9c102/
-obj-$(CONFIG_USB_ET61X251) += et61x251/
obj-$(CONFIG_USB_PWC) += pwc/
obj-$(CONFIG_USB_GSPCA) += gspca/
diff --git a/drivers/media/video/adp1653.c b/drivers/media/video/adp1653.c
index 5b045b4a66fe..57e87090388d 100644
--- a/drivers/media/video/adp1653.c
+++ b/drivers/media/video/adp1653.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/adp1653.h>
#include <media/v4l2-device.h>
@@ -282,19 +281,19 @@ adp1653_init_device(struct adp1653_flash *flash)
return -EIO;
}
- mutex_lock(&flash->ctrls.lock);
+ mutex_lock(flash->ctrls.lock);
/* Reset faults before reading new ones. */
flash->fault = 0;
rval = adp1653_get_fault(flash);
- mutex_unlock(&flash->ctrls.lock);
+ mutex_unlock(flash->ctrls.lock);
if (rval > 0) {
dev_err(&client->dev, "faults detected: 0x%1.1x\n", rval);
return -EIO;
}
- mutex_lock(&flash->ctrls.lock);
+ mutex_lock(flash->ctrls.lock);
rval = adp1653_update_hw(flash);
- mutex_unlock(&flash->ctrls.lock);
+ mutex_unlock(flash->ctrls.lock);
if (rval) {
dev_err(&client->dev,
"adp1653_update_hw failed at %s\n", __func__);
diff --git a/drivers/media/video/adv7180.c b/drivers/media/video/adv7180.c
index b8b6c4b0cad4..174bffacf117 100644
--- a/drivers/media/video/adv7180.c
+++ b/drivers/media/video/adv7180.c
@@ -48,6 +48,7 @@
#define ADV7180_INPUT_CONTROL_PAL_COMB_N_PED 0xd0
#define ADV7180_INPUT_CONTROL_PAL_SECAM 0xe0
#define ADV7180_INPUT_CONTROL_PAL_SECAM_PED 0xf0
+#define ADV7180_INPUT_CONTROL_INSEL_MASK 0x0f
#define ADV7180_EXTENDED_OUTPUT_CONTROL_REG 0x04
#define ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS 0xC5
@@ -55,9 +56,29 @@
#define ADV7180_AUTODETECT_ENABLE_REG 0x07
#define ADV7180_AUTODETECT_DEFAULT 0x7f
+#define ADV7180_CON_REG 0x08 /*Unsigned */
+#define CON_REG_MIN 0
+#define CON_REG_DEF 128
+#define CON_REG_MAX 255
+
+#define ADV7180_BRI_REG 0x0a /*Signed */
+#define BRI_REG_MIN -128
+#define BRI_REG_DEF 0
+#define BRI_REG_MAX 127
+
+#define ADV7180_HUE_REG 0x0b /*Signed, inverted */
+#define HUE_REG_MIN -127
+#define HUE_REG_DEF 0
+#define HUE_REG_MAX 128
+
#define ADV7180_ADI_CTRL_REG 0x0e
#define ADV7180_ADI_CTRL_IRQ_SPACE 0x20
+#define ADV7180_PWR_MAN_REG 0x0f
+#define ADV7180_PWR_MAN_ON 0x04
+#define ADV7180_PWR_MAN_OFF 0x24
+#define ADV7180_PWR_MAN_RES 0x80
+
#define ADV7180_STATUS1_REG 0x10
#define ADV7180_STATUS1_IN_LOCK 0x01
#define ADV7180_STATUS1_AUTOD_MASK 0x70
@@ -78,6 +99,12 @@
#define ADV7180_ICONF1_PSYNC_ONLY 0x10
#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
+#define ADV7180_SD_SAT_CB_REG 0xe3 /*Unsigned */
+#define ADV7180_SD_SAT_CR_REG 0xe4 /*Unsigned */
+#define SAT_REG_MIN 0
+#define SAT_REG_DEF 128
+#define SAT_REG_MAX 255
+
#define ADV7180_IRQ1_LOCK 0x01
#define ADV7180_IRQ1_UNLOCK 0x02
#define ADV7180_ISR1_ADI 0x42
@@ -90,6 +117,9 @@
#define ADV7180_IMR3_ADI 0x4C
#define ADV7180_IMR4_ADI 0x50
+#define ADV7180_NTSC_V_BIT_END_REG 0xE6
+#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
+
struct adv7180_state {
struct v4l2_subdev sd;
struct work_struct work;
@@ -97,6 +127,11 @@ struct adv7180_state {
int irq;
v4l2_std_id curr_norm;
bool autodetect;
+ s8 brightness;
+ s16 hue;
+ u8 contrast;
+ u8 saturation;
+ u8 input;
};
static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
@@ -155,7 +190,7 @@ static u32 adv7180_status_to_v4l2(u8 status1)
}
static int __adv7180_status(struct i2c_client *client, u32 *status,
- v4l2_std_id *std)
+ v4l2_std_id *std)
{
int status1 = i2c_smbus_read_byte_data(client, ADV7180_STATUS1_REG);
@@ -192,6 +227,36 @@ static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
return err;
}
+static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input,
+ u32 output, u32 config)
+{
+ struct adv7180_state *state = to_state(sd);
+ int ret = mutex_lock_interruptible(&state->mutex);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (ret)
+ return ret;
+
+ /*We cannot discriminate between LQFP and 40-pin LFCSP, so accept
+ * all inputs and let the card driver take care of validation
+ */
+ if ((input & ADV7180_INPUT_CONTROL_INSEL_MASK) != input)
+ goto out;
+
+ ret = i2c_smbus_read_byte_data(client, ADV7180_INPUT_CONTROL_REG);
+
+ if (ret < 0)
+ goto out;
+
+ ret &= ~ADV7180_INPUT_CONTROL_INSEL_MASK;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_INPUT_CONTROL_REG, ret | input);
+ state->input = input;
+out:
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
static int adv7180_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
struct adv7180_state *state = to_state(sd);
@@ -205,7 +270,7 @@ static int adv7180_g_input_status(struct v4l2_subdev *sd, u32 *status)
}
static int adv7180_g_chip_ident(struct v4l2_subdev *sd,
- struct v4l2_dbg_chip_ident *chip)
+ struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -222,9 +287,10 @@ static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
/* all standards -> autodetect */
if (std == V4L2_STD_ALL) {
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_INPUT_CONTROL_REG,
- ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM);
+ ret =
+ i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
+ ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM
+ | state->input);
if (ret < 0)
goto out;
@@ -236,7 +302,8 @@ static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
goto out;
ret = i2c_smbus_write_byte_data(client,
- ADV7180_INPUT_CONTROL_REG, ret);
+ ADV7180_INPUT_CONTROL_REG,
+ ret | state->input);
if (ret < 0)
goto out;
@@ -249,14 +316,138 @@ out:
return ret;
}
+static int adv7180_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
+{
+ switch (qc->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return v4l2_ctrl_query_fill(qc, BRI_REG_MIN, BRI_REG_MAX,
+ 1, BRI_REG_DEF);
+ case V4L2_CID_HUE:
+ return v4l2_ctrl_query_fill(qc, HUE_REG_MIN, HUE_REG_MAX,
+ 1, HUE_REG_DEF);
+ case V4L2_CID_CONTRAST:
+ return v4l2_ctrl_query_fill(qc, CON_REG_MIN, CON_REG_MAX,
+ 1, CON_REG_DEF);
+ case V4L2_CID_SATURATION:
+ return v4l2_ctrl_query_fill(qc, SAT_REG_MIN, SAT_REG_MAX,
+ 1, SAT_REG_DEF);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int adv7180_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct adv7180_state *state = to_state(sd);
+ int ret = mutex_lock_interruptible(&state->mutex);
+ if (ret)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrl->value = state->brightness;
+ break;
+ case V4L2_CID_HUE:
+ ctrl->value = state->hue;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrl->value = state->contrast;
+ break;
+ case V4L2_CID_SATURATION:
+ ctrl->value = state->saturation;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
+static int adv7180_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct adv7180_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = mutex_lock_interruptible(&state->mutex);
+ if (ret)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ if ((ctrl->value > BRI_REG_MAX)
+ || (ctrl->value < BRI_REG_MIN)) {
+ ret = -ERANGE;
+ break;
+ }
+ state->brightness = ctrl->value;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_BRI_REG,
+ state->brightness);
+ break;
+ case V4L2_CID_HUE:
+ if ((ctrl->value > HUE_REG_MAX)
+ || (ctrl->value < HUE_REG_MIN)) {
+ ret = -ERANGE;
+ break;
+ }
+ state->hue = ctrl->value;
+ /*Hue is inverted according to HSL chart */
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_HUE_REG, -state->hue);
+ break;
+ case V4L2_CID_CONTRAST:
+ if ((ctrl->value > CON_REG_MAX)
+ || (ctrl->value < CON_REG_MIN)) {
+ ret = -ERANGE;
+ break;
+ }
+ state->contrast = ctrl->value;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_CON_REG,
+ state->contrast);
+ break;
+ case V4L2_CID_SATURATION:
+ if ((ctrl->value > SAT_REG_MAX)
+ || (ctrl->value < SAT_REG_MIN)) {
+ ret = -ERANGE;
+ break;
+ }
+ /*
+ *This could be V4L2_CID_BLUE_BALANCE/V4L2_CID_RED_BALANCE
+ *Let's not confuse the user, everybody understands saturation
+ */
+ state->saturation = ctrl->value;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_SD_SAT_CB_REG,
+ state->saturation);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_SD_SAT_CR_REG,
+ state->saturation);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
+ .s_routing = adv7180_s_routing,
};
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.g_chip_ident = adv7180_g_chip_ident,
.s_std = adv7180_s_std,
+ .queryctrl = adv7180_queryctrl,
+ .g_ctrl = adv7180_g_ctrl,
+ .s_ctrl = adv7180_s_ctrl,
};
static const struct v4l2_subdev_ops adv7180_ops = {
@@ -267,13 +458,13 @@ static const struct v4l2_subdev_ops adv7180_ops = {
static void adv7180_work(struct work_struct *work)
{
struct adv7180_state *state = container_of(work, struct adv7180_state,
- work);
+ work);
struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
u8 isr3;
mutex_lock(&state->mutex);
i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- ADV7180_ADI_CTRL_IRQ_SPACE);
+ ADV7180_ADI_CTRL_IRQ_SPACE);
isr3 = i2c_smbus_read_byte_data(client, ADV7180_ISR3_ADI);
/* clear */
i2c_smbus_write_byte_data(client, ADV7180_ICR3_ADI, isr3);
@@ -297,56 +488,51 @@ static irqreturn_t adv7180_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-/*
- * Generic i2c probe
- * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
- */
-
-static __devinit int adv7180_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int init_device(struct i2c_client *client, struct adv7180_state *state)
{
- struct adv7180_state *state;
- struct v4l2_subdev *sd;
int ret;
- /* Check if the adapter supports the needed features */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -EIO;
-
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr << 1, client->adapter->name);
-
- state = kzalloc(sizeof(struct adv7180_state), GFP_KERNEL);
- if (state == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- state->irq = client->irq;
- INIT_WORK(&state->work, adv7180_work);
- mutex_init(&state->mutex);
- state->autodetect = true;
- sd = &state->sd;
- v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
-
/* Initialize adv7180 */
/* Enable autodetection */
- ret = i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
- ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM);
- if (ret < 0)
- goto err_unreg_subdev;
+ if (state->autodetect) {
+ ret =
+ i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
+ ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM
+ | state->input);
+ if (ret < 0)
+ return ret;
- ret = i2c_smbus_write_byte_data(client, ADV7180_AUTODETECT_ENABLE_REG,
- ADV7180_AUTODETECT_DEFAULT);
- if (ret < 0)
- goto err_unreg_subdev;
+ ret =
+ i2c_smbus_write_byte_data(client,
+ ADV7180_AUTODETECT_ENABLE_REG,
+ ADV7180_AUTODETECT_DEFAULT);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = v4l2_std_to_adv7180(state->curr_norm);
+ if (ret < 0)
+ return ret;
+ ret =
+ i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
+ ret | state->input);
+ if (ret < 0)
+ return ret;
+
+ }
/* ITU-R BT.656-4 compatible */
ret = i2c_smbus_write_byte_data(client,
- ADV7180_EXTENDED_OUTPUT_CONTROL_REG,
- ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
+ ADV7180_EXTENDED_OUTPUT_CONTROL_REG,
+ ADV7180_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
+
+ /* Manually set V bit end position in NTSC mode */
+ ret = i2c_smbus_write_byte_data(client,
+ ADV7180_NTSC_V_BIT_END_REG,
+ ADV7180_NTSC_V_BIT_END_MANUAL_NVEND);
+ if (ret < 0)
+ return ret;
/* read current norm */
__adv7180_status(client, NULL, &state->curr_norm);
@@ -354,45 +540,109 @@ static __devinit int adv7180_probe(struct i2c_client *client,
/* register for interrupts */
if (state->irq > 0) {
ret = request_irq(state->irq, adv7180_irq, 0, DRIVER_NAME,
- state);
+ state);
if (ret)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- ADV7180_ADI_CTRL_IRQ_SPACE);
+ ADV7180_ADI_CTRL_IRQ_SPACE);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
/* config the Interrupt pin to be active low */
ret = i2c_smbus_write_byte_data(client, ADV7180_ICONF1_ADI,
- ADV7180_ICONF1_ACTIVE_LOW | ADV7180_ICONF1_PSYNC_ONLY);
+ ADV7180_ICONF1_ACTIVE_LOW |
+ ADV7180_ICONF1_PSYNC_ONLY);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR1_ADI, 0);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR2_ADI, 0);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
/* enable AD change interrupts */
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR3_ADI,
- ADV7180_IRQ3_AD_CHANGE);
+ ADV7180_IRQ3_AD_CHANGE);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_IMR4_ADI, 0);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
ret = i2c_smbus_write_byte_data(client, ADV7180_ADI_CTRL_REG,
- 0);
+ 0);
if (ret < 0)
- goto err_unreg_subdev;
+ return ret;
}
+ /* Set default values for controls */
+ ret = i2c_smbus_write_byte_data(client, ADV7180_BRI_REG,
+ state->brightness);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_HUE_REG, state->hue);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_CON_REG,
+ state->contrast);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CB_REG,
+ state->saturation);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CR_REG,
+ state->saturation);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static __devinit int adv7180_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adv7180_state *state;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr, client->adapter->name);
+
+ state = kzalloc(sizeof(struct adv7180_state), GFP_KERNEL);
+ if (state == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ state->irq = client->irq;
+ INIT_WORK(&state->work, adv7180_work);
+ mutex_init(&state->mutex);
+ state->autodetect = true;
+ state->brightness = BRI_REG_DEF;
+ state->hue = HUE_REG_DEF;
+ state->contrast = CON_REG_DEF;
+ state->saturation = SAT_REG_DEF;
+ state->input = 0;
+ sd = &state->sd;
+ v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
+
+ ret = init_device(client, state);
+ if (ret)
+ goto err_unreg_subdev;
return 0;
err_unreg_subdev:
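The probe message now logs client->addr as-is: the I2C core already stores the 7-bit address, so the old << 1 printed the bus-level write byte rather than the address itself. A minimal sketch of the relationship (the 0x21 address is a made-up example, not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned char addr7 = 0x21;         /* what i2c_client->addr holds: the 7-bit address */
	unsigned char wr_byte = addr7 << 1; /* 0x42: address shifted up, r/w bit cleared */

	printf("7-bit 0x%02x -> write byte 0x%02x\n", addr7, wr_byte);
	return 0;
}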
@@ -432,16 +682,49 @@ static const struct i2c_device_id adv7180_id[] = {
{},
};
+#ifdef CONFIG_PM
+static int adv7180_suspend(struct i2c_client *client, pm_message_t state)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG,
+ ADV7180_PWR_MAN_OFF);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int adv7180_resume(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7180_state *state = to_state(sd);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, ADV7180_PWR_MAN_REG,
+ ADV7180_PWR_MAN_ON);
+ if (ret < 0)
+ return ret;
+ ret = init_device(client, state);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+#endif
+
MODULE_DEVICE_TABLE(i2c, adv7180_id);
static struct i2c_driver adv7180_driver = {
.driver = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- },
- .probe = adv7180_probe,
- .remove = __devexit_p(adv7180_remove),
- .id_table = adv7180_id,
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .probe = adv7180_probe,
+ .remove = __devexit_p(adv7180_remove),
+#ifdef CONFIG_PM
+ .suspend = adv7180_suspend,
+ .resume = adv7180_resume,
+#endif
+ .id_table = adv7180_id,
};
module_i2c_driver(adv7180_driver);
diff --git a/drivers/media/video/adv7343.c b/drivers/media/video/adv7343.c
index 119b60401bf3..2b5aa676a84e 100644
--- a/drivers/media/video/adv7343.c
+++ b/drivers/media/video/adv7343.c
@@ -130,14 +130,12 @@ static int adv7343_setstd(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7343_state *state = to_state(sd);
struct adv7343_std_info *std_info;
- int output_idx, num_std;
+ int num_std;
char *fsc_ptr;
u8 reg, val;
int err = 0;
int i = 0;
- output_idx = state->output;
-
std_info = (struct adv7343_std_info *)stdinfo;
num_std = ARRAY_SIZE(stdinfo);
diff --git a/drivers/media/video/aptina-pll.c b/drivers/media/video/aptina-pll.c
index 0bd3813bb59d..8153a449846b 100644
--- a/drivers/media/video/aptina-pll.c
+++ b/drivers/media/video/aptina-pll.c
@@ -148,9 +148,8 @@ int aptina_pll_calculate(struct device *dev,
unsigned int mf_high;
unsigned int mf_low;
- mf_low = max(roundup(mf_min, mf_inc),
- DIV_ROUND_UP(pll->ext_clock * p1,
- limits->int_clock_max * div));
+ mf_low = roundup(max(mf_min, DIV_ROUND_UP(pll->ext_clock * p1,
+ limits->int_clock_max * div)), mf_inc);
mf_high = min(mf_max, pll->ext_clock * p1 /
(limits->int_clock_min * div));
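The reordering matters when the clock-derived lower bound is larger than mf_min: rounding up after taking the max keeps mf_low on the mf_inc grid, whereas the old expression could hand back the raw quotient. A small standalone sketch with made-up numbers (the macros mirror the kernel's DIV_ROUND_UP()/roundup() semantics):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define ROUNDUP(x, y)       (DIV_ROUND_UP(x, y) * (y))
#define MAX(a, b)           ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int mf_min = 4, mf_inc = 4;
	unsigned int bound = 9; /* stands for DIV_ROUND_UP(ext_clock * p1, int_clock_max * div) */

	unsigned int old_low = MAX(ROUNDUP(mf_min, mf_inc), bound); /* 9: off the mf_inc grid */
	unsigned int new_low = ROUNDUP(MAX(mf_min, bound), mf_inc); /* 12: a multiple of mf_inc */

	printf("old %u, new %u\n", old_low, new_low);
	return 0;
}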
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index b6ed44aebe30..e346d32d08ce 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -31,6 +31,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -403,7 +404,8 @@ static int ar_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
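Reporting device_caps alongside capabilities lets applications tell what this particular node supports apart from what the device as a whole exposes. A rough userspace check, assuming an example node path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR); /* example device node */

	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
		return 1;
	/* Prefer device_caps whenever the driver sets V4L2_CAP_DEVICE_CAPS */
	if (cap.capabilities & V4L2_CAP_DEVICE_CAPS)
		printf("node caps 0x%08x\n", cap.device_caps);
	else
		printf("caps 0x%08x\n", cap.capabilities);
	return 0;
}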
@@ -709,6 +711,8 @@ static int ar_initialize(struct ar *ar)
static const struct v4l2_file_operations ar_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
.read = ar_read,
.unlocked_ioctl = video_ioctl2,
};
@@ -769,6 +773,7 @@ static int __init ar_init(void)
ar->vdev.fops = &ar_fops;
ar->vdev.ioctl_ops = &ar_ioctl_ops;
ar->vdev.release = video_device_release_empty;
+ set_bit(V4L2_FL_USE_FH_PRIO, &ar->vdev.flags);
video_set_drvdata(&ar->vdev, ar);
if (vga) {
diff --git a/drivers/media/video/as3645a.c b/drivers/media/video/as3645a.c
index 7a3371f044fc..c4b03572dce8 100644
--- a/drivers/media/video/as3645a.c
+++ b/drivers/media/video/as3645a.c
@@ -713,7 +713,7 @@ static int as3645a_resume(struct device *dev)
* The number of LEDs reported in platform data is used to compute default
* limits. Parameters passed through platform data can override those limits.
*/
-static int as3645a_init_controls(struct as3645a *flash)
+static int __devinit as3645a_init_controls(struct as3645a *flash)
{
const struct as3645a_platform_data *pdata = flash->pdata;
struct v4l2_ctrl *ctrl;
@@ -804,8 +804,8 @@ static int as3645a_init_controls(struct as3645a *flash)
return flash->ctrls.error;
}
-static int as3645a_probe(struct i2c_client *client,
- const struct i2c_device_id *devid)
+static int __devinit as3645a_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
{
struct as3645a *flash;
int ret;
@@ -846,7 +846,7 @@ done:
return ret;
}
-static int __exit as3645a_remove(struct i2c_client *client)
+static int __devexit as3645a_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct as3645a *flash = to_as3645a(subdev);
@@ -877,7 +877,7 @@ static struct i2c_driver as3645a_i2c_driver = {
.pm = &as3645a_pm_ops,
},
.probe = as3645a_probe,
- .remove = __exit_p(as3645a_remove),
+ .remove = __devexit_p(as3645a_remove),
.id_table = as3645a_id_table,
};
diff --git a/drivers/media/video/atmel-isi.c b/drivers/media/video/atmel-isi.c
index ec3f6a06f9c3..6274a91c25c7 100644
--- a/drivers/media/video/atmel-isi.c
+++ b/drivers/media/video/atmel-isi.c
@@ -260,7 +260,7 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct atmel_isi *isi = ici->priv;
unsigned long size;
- int ret, bytes_per_line;
+ int ret;
/* Reset ISI */
ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
@@ -271,13 +271,7 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
/* Disable all interrupts */
isi_writel(isi, ISI_INTDIS, ~0UL);
- bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- size = bytes_per_line * icd->user_height;
+ size = icd->sizeimage;
if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM)
*nbuffers = MAX_BUFFER_NUM;
@@ -316,13 +310,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
struct atmel_isi *isi = ici->priv;
unsigned long size;
struct isi_dma_desc *desc;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
- size = bytes_per_line * icd->user_height;
+ size = icd->sizeimage;
if (vb2_plane_size(vb, 0) < size) {
dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n",
@@ -638,6 +627,7 @@ static const struct soc_mbus_pixelfmt isi_camera_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
};
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index 81ba9d9d1b52..23f7fd22f0eb 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -6,7 +6,8 @@ config VIDEO_AU0828
select I2C_ALGOBIT
select VIDEO_TVEEPROM
select VIDEOBUF_VMALLOC
- select DVB_AU8522 if !DVB_FE_CUSTOMISE
+ select DVB_AU8522_DTV if !DVB_FE_CUSTOMISE
+ select DVB_AU8522_V4L if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMISE
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 1c6015a04f96..e3fe9a6637f6 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -325,6 +325,8 @@ struct usb_device_id au0828_usb_id_table[] = {
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
{ USB_DEVICE(0x2040, 0x7281),
.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
+ { USB_DEVICE(0x05e1, 0x0480),
+ .driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
{ USB_DEVICE(0x2040, 0x8200),
.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
{ USB_DEVICE(0x2040, 0x7260),
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 518216743c9c..39ece8e24985 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -25,6 +25,7 @@
#include <linux/device.h>
#include <linux/suspend.h>
#include <media/v4l2-common.h>
+#include <media/tuner.h>
#include "au0828.h"
#include "au8522.h"
@@ -79,9 +80,16 @@ static struct au8522_config hauppauge_woodbury_config = {
.vsb_if = AU8522_IF_3_25MHZ,
};
-static struct xc5000_config hauppauge_hvr950q_tunerconfig = {
+static struct xc5000_config hauppauge_xc5000a_config = {
.i2c_address = 0x61,
.if_khz = 6000,
+ .chip_id = XC5000A,
+};
+
+static struct xc5000_config hauppauge_xc5000c_config = {
+ .i2c_address = 0x61,
+ .if_khz = 6000,
+ .chip_id = XC5000C,
};
static struct mxl5007t_config mxl5007t_hvr950q_config = {
@@ -383,8 +391,19 @@ int au0828_dvb_register(struct au0828_dev *dev)
&hauppauge_hvr950q_config,
&dev->i2c_adap);
if (dvb->frontend != NULL)
- dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap,
- &hauppauge_hvr950q_tunerconfig);
+ switch (dev->board.tuner_type) {
+ default:
+ case TUNER_XC5000:
+ dvb_attach(xc5000_attach, dvb->frontend,
+ &dev->i2c_adap,
+ &hauppauge_xc5000a_config);
+ break;
+ case TUNER_XC5000C:
+ dvb_attach(xc5000_attach, dvb->frontend,
+ &dev->i2c_adap,
+ &hauppauge_xc5000c_config);
+ break;
+ }
break;
case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
dvb->frontend = dvb_attach(au8522_attach,
@@ -411,7 +430,7 @@ int au0828_dvb_register(struct au0828_dev *dev)
if (dvb->frontend != NULL) {
dvb_attach(xc5000_attach, dvb->frontend,
&dev->i2c_adap,
- &hauppauge_hvr950q_tunerconfig);
+ &hauppauge_xc5000a_config);
}
break;
default:
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 0b3e481ffe8c..ac3dd733ab81 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -120,7 +120,7 @@ static void au0828_irq_callback(struct urb *urb)
struct au0828_dmaqueue *dma_q = urb->context;
struct au0828_dev *dev = container_of(dma_q, struct au0828_dev, vidq);
unsigned long flags = 0;
- int rc, i;
+ int i;
switch (urb->status) {
case 0: /* success */
@@ -138,7 +138,7 @@ static void au0828_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock_irqsave(&dev->slock, flags);
- rc = dev->isoc_ctl.isoc_copy(dev, urb);
+ dev->isoc_ctl.isoc_copy(dev, urb);
spin_unlock_irqrestore(&dev->slock, flags);
/* Reset urb buffers */
@@ -1881,7 +1881,7 @@ int au0828_analog_register(struct au0828_dev *dev,
int retval = -ENOMEM;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
- int i;
+ int i, ret;
dprintk(1, "au0828_analog_register called!\n");
@@ -1951,8 +1951,8 @@ int au0828_analog_register(struct au0828_dev *dev,
dev->vbi_dev = video_device_alloc();
if (NULL == dev->vbi_dev) {
dprintk(1, "Can't allocate vbi_device.\n");
- kfree(dev->vdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_vdev;
}
/* Fill the video capture device struct */
@@ -1971,8 +1971,8 @@ int au0828_analog_register(struct au0828_dev *dev,
if (retval != 0) {
dprintk(1, "unable to register video device (error = %d).\n",
retval);
- video_device_release(dev->vdev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_vbi_dev;
}
/* Register the vbi device */
@@ -1981,13 +1981,18 @@ int au0828_analog_register(struct au0828_dev *dev,
if (retval != 0) {
dprintk(1, "unable to register vbi device (error = %d).\n",
retval);
- video_device_release(dev->vbi_dev);
- video_device_release(dev->vdev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_vbi_dev;
}
dprintk(1, "%s completed!\n", __func__);
return 0;
+
+err_vbi_dev:
+ video_device_release(dev->vbi_dev);
+err_vdev:
+ video_device_release(dev->vdev);
+ return ret;
}
diff --git a/drivers/media/video/blackfin/bfin_capture.c b/drivers/media/video/blackfin/bfin_capture.c
index 514fcf742f5a..0aba45e34f70 100644
--- a/drivers/media/video/blackfin/bfin_capture.c
+++ b/drivers/media/video/blackfin/bfin_capture.c
@@ -942,6 +942,10 @@ static int __devinit bcap_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&bcap_dev->dma_queue);
vfd->lock = &bcap_dev->mutex;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
/* register video device */
ret = video_register_device(bcap_dev->video_dev, VFL_TYPE_GRABBER, -1);
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index ff2933ab705f..856ab962cd63 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -371,7 +371,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -384,7 +383,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -398,7 +396,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 4, 0, 2, 3 },
.gpiomute = 1,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -414,7 +411,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -427,7 +423,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0, 1, 0, 1 },
.gpiomute = 3,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -440,7 +435,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x0f,
.gpiomux = { 0x0c, 0x04, 0x08, 0x04 },
/* 0x04 for some cards ?? */
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= avermedia_tvphone_audio,
@@ -454,7 +448,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -469,7 +462,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0xc00, 0x800, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -482,7 +474,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 3,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 1, 2, 3 },
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
@@ -496,7 +487,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 0, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -510,7 +500,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20001,0x10001, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -524,7 +513,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 14, 11, 7 },
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -536,7 +524,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 14, 11, 7 },
- .needs_tvaudio = 1,
.msp34xx_alt = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -553,7 +540,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -567,7 +553,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 1, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -583,7 +568,6 @@ struct tvcard bttv_tvcards[] = {
/* 2003-10-20 by "Anton A. Arapov" <arapov@mail.ru> */
.gpiomux = { 0x001e00, 0, 0x018000, 0x014000 },
.gpiomute = 0x002000,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -597,7 +581,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1, 0),
.gpiomux = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
.gpiomute = 0xcfa007,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.volume_gpio = winview_volume,
@@ -611,7 +594,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 0, 0, 0 },
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -660,7 +642,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 0x800, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -691,7 +672,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = {0x400, 0x400, 0x400, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -706,7 +686,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0 },
.gpiomute = 0x40000,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= terratv_audio,
@@ -720,7 +699,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 0, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -748,7 +726,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0x00000 },
.gpiomute = 0x40000,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= terratv_audio,
@@ -793,7 +770,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.muxsel_hook = PXC200_muxsel,
@@ -834,7 +810,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -847,7 +822,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x500, 0, 0x300, 0x900 },
.gpiomute = 0x900,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -874,7 +848,6 @@ struct tvcard bttv_tvcards[] = {
Note: There exists another variant "Winfast 2000" with tv stereo !?
Note: eeprom only contains FF and pci subsystem id 107d:6606
*/
- .needs_tvaudio = 0,
.pll = PLL_28,
.has_radio = 1,
.tuner_type = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
@@ -934,7 +907,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0x551400, 0x551200, 0, 0 },
.gpiomute = 0x551c00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -949,7 +921,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0xd0001, 0, 0 },
.gpiomute = 1,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -966,7 +937,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 4, 0, 2, 3 },
.gpiomute = 1,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -980,7 +950,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 4, 11, 7 },
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -995,7 +964,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0, 0},
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
@@ -1066,7 +1034,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0 },
.gpiomute = 0x40000,
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_35,
.tuner_type = TUNER_PHILIPS_PAL_I,
@@ -1084,7 +1051,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = {2,0,0,0 },
.gpiomute = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1163,7 +1129,6 @@ struct tvcard bttv_tvcards[] = {
MUX2 (mask 0x30000):
0,2,3= from MSP34xx
1= FM stereo Radio from Tuner */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1179,7 +1144,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0x10, 8 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1218,7 +1182,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1250,7 +1213,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(3, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_35,
.tuner_type = TUNER_ABSENT,
@@ -1266,7 +1228,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x400, 0x400, 0x400, 0x400 },
.gpiomute = 0x800,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_4036FY5_NTSC,
.tuner_addr = ADDR_UNSET,
@@ -1312,7 +1273,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2),
.gpiomux = { },
.no_msp34xx = 1,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -1329,7 +1289,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 1, 0, 4, 4 },
.gpiomute = 9,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1379,7 +1338,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x1800,
.audio_mode_gpio= fv2000s_audio,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1393,7 +1351,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x500, 0x500, 0x300, 0x900 },
.gpiomute = 0x900,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1477,7 +1434,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */
.gpiomute = 13,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_LG_PAL_I_FM,
.tuner_addr = ADDR_UNSET,
@@ -1514,7 +1470,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x01, 0x00, 0x03, 0x03 },
.gpiomute = 0x09,
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1540,7 +1495,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -1567,7 +1521,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 4,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -1597,7 +1550,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -1619,7 +1571,6 @@ struct tvcard bttv_tvcards[] = {
* btwincap uses 0x80000/0x80003
*/
.gpiomute = 4,
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1655,7 +1606,6 @@ struct tvcard bttv_tvcards[] = {
/* .audio_inputs= 1, */
.svhs = 2,
.muxsel = MUXSEL(2, 0, 1, 1),
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1875,7 +1825,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 3},
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -1902,7 +1851,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -1920,7 +1868,6 @@ struct tvcard bttv_tvcards[] = {
/* Tuner, Radio, external, internal, off, on */
.gpiomux = { 0x08, 0x0f, 0x0a, 0x08 },
.gpiomute = 0x0f,
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_NTSC,
@@ -1936,7 +1883,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 1),
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -2034,7 +1980,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -2049,7 +1994,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2062,7 +2006,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2079,7 +2022,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0),
.muxsel_hook = phytec_muxsel,
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2094,7 +2036,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1),
.muxsel_hook = phytec_muxsel,
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2118,7 +2059,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.svhs = NO_SVHS, /* card has no svhs */
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.no_tda7432 = 1,
.gpiomask = 0x00,
@@ -2168,7 +2108,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 3,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 1, 1, 1 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_35,
@@ -2210,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.no_msp34xx = 1,
.no_tda7432 = 1,
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -2222,7 +2160,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.svhs = 2,
- .needs_tvaudio = 0,
.gpiomask = 0x68,
.muxsel = MUXSEL(2, 3, 1),
.gpiomux = { 0x68, 0x68, 0x61, 0x61 },
@@ -2241,7 +2178,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 3,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -2265,7 +2201,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
.pll = PLL_28,
- .needs_tvaudio = 0,
.muxsel_hook = picolo_tetra_muxsel, /* Required as it doesn't follow the classic input selection policy */
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2358,7 +2293,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -2405,7 +2339,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_addr = ADDR_UNSET,
.gpiomask = 0x008007,
.gpiomux = { 0, 0x000001,0,0 },
- .needs_tvaudio = 1,
.has_radio = 1,
},
[BTTV_BOARD_TIBET_CS16] = {
@@ -2518,7 +2451,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x001e00, 0, 0x018000, 0x014000 },
.gpiomute = 0x002000,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_YMEC_TVF66T5_B_DFF,
.tuner_addr = 0xc1 >>1,
@@ -2534,7 +2466,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 3,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TENA_9533_DI,
.tuner_addr = ADDR_UNSET,
@@ -2615,7 +2546,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
@@ -2714,7 +2644,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20001,0x10001, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -2746,7 +2675,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 }, /* CONTVFMi */
.gpiomute = 3, /* CONTVFMi */
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -2785,7 +2713,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(0, 2, 3, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2799,7 +2726,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2813,7 +2739,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(3, 2, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2877,7 +2802,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -3649,7 +3573,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
struct tuner_setup tun_setup;
/* Load tuner module before issuing tuner config call! */
- if (bttv_tvcards[btv->c.type].has_radio)
+ if (btv->has_radio)
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
&btv->c.i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
@@ -3664,7 +3588,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
tun_setup.type = btv->tuner_type;
tun_setup.addr = addr;
- if (bttv_tvcards[btv->c.type].has_radio)
+ if (btv->has_radio)
tun_setup.mode_mask |= T_RADIO;
bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
@@ -3724,6 +3648,10 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
}
+
+ /* The 61334 needs the msp3410 to do the radio demod to get sound */
+ if (tv.model == 61334)
+ btv->radio_uses_msp_demodulator = 1;
}
static int terratec_active_radio_upgrade(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index e581b37be789..ff7a589d8e0f 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -663,7 +663,7 @@ static const struct v4l2_queryctrl bttv_ctls[] = {
.minimum = 0,
.maximum = 65535,
.step = 128,
- .default_value = 32768,
+ .default_value = 27648,
.type = V4L2_CTRL_TYPE_INTEGER,
},{
.id = V4L2_CID_SATURATION,
@@ -1218,6 +1218,11 @@ audio_mux(struct bttv *btv, int input, int mute)
For now this is sufficient. */
switch (input) {
case TVAUDIO_INPUT_RADIO:
+ /* Some boards need the msp to do the radio demod */
+ if (btv->radio_uses_msp_demodulator) {
+ in = MSP_INPUT_DEFAULT;
+ break;
+ }
in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
break;
@@ -4394,7 +4399,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
if (!bttv_tvcards[btv->c.type].no_video) {
bttv_register_video(btv);
bt848_bright(btv,32768);
- bt848_contrast(btv,32768);
+ bt848_contrast(btv, 27648);
bt848_hue(btv,32768);
bt848_sat(btv,32768);
audio_mute(btv, 1);
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index c5171619ac79..acfe2f3b92d9 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -236,7 +236,6 @@ struct tvcard {
/* i2c audio flags */
unsigned int no_msp34xx:1;
unsigned int no_tda7432:1;
- unsigned int needs_tvaudio:1;
unsigned int msp34xx_alt:1;
/* Note: currently no card definition needs to mark the presence
of a RDS saa6588 chip. If this is ever needed, then add a new
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index db943a8d580d..70fd4f23f605 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -440,6 +440,7 @@ struct bttv {
/* radio data/state */
int has_radio;
int radio_user;
+ int radio_uses_msp_demodulator;
/* miro/pinnacle + Aimslab VHX
philips matchbox (tea5757 radio tuner) support */
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index f09df9dffaae..5b75a64b199b 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -77,6 +77,9 @@ OTHER DEALINGS IN THE SOFTWARE.
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
/* One from column A... */
#define QC_NOTSET 0
@@ -103,6 +106,7 @@ OTHER DEALINGS IN THE SOFTWARE.
struct qcam {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
struct pardevice *pdev;
struct parport *pport;
struct mutex lock;
@@ -603,8 +607,9 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
}
o = i * pixels_per_line + pixels_read + k;
if (o < len) {
+ u8 ch = invert - buffer[k];
got++;
- put_user((invert - buffer[k]) << shift, buf + o);
+ put_user(ch << shift, buf + o);
}
}
pixels_read += bytes;
@@ -644,9 +649,10 @@ static int qcam_querycap(struct file *file, void *priv,
struct qcam *qcam = video_drvdata(file);
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
- strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
- strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -674,72 +680,6 @@ static int qcam_s_input(struct file *file, void *fh, unsigned int inp)
return (inp > 0) ? -EINVAL : 0;
}
-static int qcam_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 180);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 192);
- case V4L2_CID_GAMMA:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 105);
- }
- return -EINVAL;
-}
-
-static int qcam_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct qcam *qcam = video_drvdata(file);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = qcam->brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = qcam->contrast;
- break;
- case V4L2_CID_GAMMA:
- ctrl->value = qcam->whitebal;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int qcam_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct qcam *qcam = video_drvdata(file);
- int ret = 0;
-
- mutex_lock(&qcam->lock);
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- qcam->brightness = ctrl->value;
- break;
- case V4L2_CID_CONTRAST:
- qcam->contrast = ctrl->value;
- break;
- case V4L2_CID_GAMMA:
- qcam->whitebal = ctrl->value;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret == 0) {
- qc_setscanmode(qcam);
- qcam->status |= QC_PARAM_CHANGE;
- }
- mutex_unlock(&qcam->lock);
- return ret;
-}
-
static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct qcam *qcam = video_drvdata(file);
@@ -749,8 +689,8 @@ static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
pix->height = qcam->height / qcam->transfer_scale;
pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = qcam->width;
- pix->sizeimage = qcam->width * qcam->height;
+ pix->bytesperline = pix->width;
+ pix->sizeimage = pix->width * pix->height;
/* Just a guess */
pix->colorspace = V4L2_COLORSPACE_SRGB;
return 0;
@@ -818,7 +758,7 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
"4-Bit Monochrome", V4L2_PIX_FMT_Y4,
{ 0, 0, 0, 0 }
},
- { 0, 0, 0,
+ { 1, 0, 0,
"6-Bit Monochrome", V4L2_PIX_FMT_Y6,
{ 0, 0, 0, 0 }
},
@@ -833,6 +773,25 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
return 0;
}
+static int qcam_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ static const struct v4l2_frmsize_discrete sizes[] = {
+ { 80, 60 },
+ { 160, 120 },
+ { 320, 240 },
+ };
+
+ if (fsize->index > 2)
+ return -EINVAL;
+ if (fsize->pixel_format != V4L2_PIX_FMT_Y4 &&
+ fsize->pixel_format != V4L2_PIX_FMT_Y6)
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete = sizes[fsize->index];
+ return 0;
+}
+
static ssize_t qcam_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
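With enum_framesizes wired up, applications can query the three supported geometries instead of hard-coding them; a short userspace sketch (the node path is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frmsizeenum fsize = { .pixel_format = V4L2_PIX_FMT_Y4 };
	int fd = open("/dev/video0", O_RDWR); /* placeholder node */

	if (fd < 0)
		return 1;
	for (fsize.index = 0; !ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize); fsize.index++)
		printf("%ux%u\n", fsize.discrete.width, fsize.discrete.height);
	return 0;
}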
@@ -856,8 +815,45 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
return len;
}
+static unsigned int qcam_poll(struct file *filp, poll_table *wait)
+{
+ return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
+}
+
+static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct qcam *qcam =
+ container_of(ctrl->handler, struct qcam, hdl);
+ int ret = 0;
+
+ mutex_lock(&qcam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ qcam->brightness = ctrl->val;
+ break;
+ case V4L2_CID_CONTRAST:
+ qcam->contrast = ctrl->val;
+ break;
+ case V4L2_CID_GAMMA:
+ qcam->whitebal = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ qc_setscanmode(qcam);
+ qcam->status |= QC_PARAM_CHANGE;
+ }
+ mutex_unlock(&qcam->lock);
+ return ret;
+}
+
static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = qcam_poll,
.unlocked_ioctl = video_ioctl2,
.read = qcam_read,
};
@@ -867,13 +863,18 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
.vidioc_g_input = qcam_g_input,
.vidioc_s_input = qcam_s_input,
.vidioc_enum_input = qcam_enum_input,
- .vidioc_queryctrl = qcam_queryctrl,
- .vidioc_g_ctrl = qcam_g_ctrl,
- .vidioc_s_ctrl = qcam_s_ctrl,
.vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = qcam_enum_framesizes,
.vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops qcam_ctrl_ops = {
+ .s_ctrl = qcam_s_ctrl,
};
/* Initialize the QuickCam driver control structure. This is where
@@ -889,27 +890,43 @@ static struct qcam *qcam_init(struct parport *port)
return NULL;
v4l2_dev = &qcam->v4l2_dev;
- strlcpy(v4l2_dev->name, "bw-qcam", sizeof(v4l2_dev->name));
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "bw-qcam%d", num_cams);
- if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+ if (v4l2_device_register(port->dev, v4l2_dev) < 0) {
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
kfree(qcam);
return NULL;
}
+ v4l2_ctrl_handler_init(&qcam->hdl, 3);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 180);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 192);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 255, 1, 105);
+ if (qcam->hdl.error) {
+ v4l2_err(v4l2_dev, "couldn't register controls\n");
+ v4l2_ctrl_handler_free(&qcam->hdl);
+ kfree(qcam);
+ return NULL;
+ }
qcam->pport = port;
- qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
+ qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
NULL, 0, NULL);
if (qcam->pdev == NULL) {
v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
+ v4l2_ctrl_handler_free(&qcam->hdl);
kfree(qcam);
return NULL;
}
strlcpy(qcam->vdev.name, "Connectix QuickCam", sizeof(qcam->vdev.name));
qcam->vdev.v4l2_dev = v4l2_dev;
+ qcam->vdev.ctrl_handler = &qcam->hdl;
qcam->vdev.fops = &qcam_fops;
qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
+ set_bit(V4L2_FL_USE_FH_PRIO, &qcam->vdev.flags);
qcam->vdev.release = video_device_release_empty;
video_set_drvdata(&qcam->vdev, qcam);
@@ -984,6 +1001,7 @@ static int init_bwqcam(struct parport *port)
return -ENODEV;
}
qc_calibrate(qcam);
+ v4l2_ctrl_handler_setup(&qcam->hdl);
parport_release(qcam->pdev);
@@ -1003,6 +1021,7 @@ static int init_bwqcam(struct parport *port)
static void close_bwqcam(struct qcam *qcam)
{
video_unregister_device(&qcam->vdev);
+ v4l2_ctrl_handler_free(&qcam->hdl);
parport_unregister_device(qcam->pdev);
kfree(qcam);
}
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index fda32f52554a..ec51e1f12e82 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -40,10 +40,14 @@
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
struct qcam {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
struct pardevice *pdev;
struct parport *pport;
int width, height;
@@ -378,7 +382,7 @@ get_fragment:
static long qc_capture(struct qcam *qcam, char __user *buf, unsigned long len)
{
struct v4l2_device *v4l2_dev = &qcam->v4l2_dev;
- unsigned lines, pixelsperline, bitsperxfer;
+ unsigned lines, pixelsperline;
unsigned int is_bi_dir = qcam->bidirectional;
size_t wantlen, outptr = 0;
char tmpbuf[BUFSZ];
@@ -404,7 +408,6 @@ static long qc_capture(struct qcam *qcam, char __user *buf, unsigned long len)
lines = qcam->height;
pixelsperline = qcam->width;
- bitsperxfer = (is_bi_dir) ? 24 : 8;
if (is_bi_dir) {
/* Turn the port around */
@@ -516,7 +519,8 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -544,73 +548,6 @@ static int qcam_s_input(struct file *file, void *fh, unsigned int inp)
return (inp > 0) ? -EINVAL : 0;
}
-static int qcam_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 240);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 192);
- case V4L2_CID_GAMMA:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- }
- return -EINVAL;
-}
-
-static int qcam_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct qcam *qcam = video_drvdata(file);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = qcam->brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = qcam->contrast;
- break;
- case V4L2_CID_GAMMA:
- ctrl->value = qcam->whitebal;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int qcam_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct qcam *qcam = video_drvdata(file);
- int ret = 0;
-
- mutex_lock(&qcam->lock);
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- qcam->brightness = ctrl->value;
- break;
- case V4L2_CID_CONTRAST:
- qcam->contrast = ctrl->value;
- break;
- case V4L2_CID_GAMMA:
- qcam->whitebal = ctrl->value;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret == 0) {
- parport_claim_or_block(qcam->pdev);
- qc_setup(qcam);
- parport_release(qcam->pdev);
- }
- mutex_unlock(&qcam->lock);
- return ret;
-}
-
static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct qcam *qcam = video_drvdata(file);
@@ -714,8 +651,41 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
return len;
}
+static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct qcam *qcam =
+ container_of(ctrl->handler, struct qcam, hdl);
+ int ret = 0;
+
+ mutex_lock(&qcam->lock);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ qcam->brightness = ctrl->val;
+ break;
+ case V4L2_CID_CONTRAST:
+ qcam->contrast = ctrl->val;
+ break;
+ case V4L2_CID_GAMMA:
+ qcam->whitebal = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret == 0) {
+ parport_claim_or_block(qcam->pdev);
+ qc_setup(qcam);
+ parport_release(qcam->pdev);
+ }
+ mutex_unlock(&qcam->lock);
+ return ret;
+}
+
static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
.unlocked_ioctl = video_ioctl2,
.read = qcam_read,
};
@@ -725,13 +695,17 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
.vidioc_g_input = qcam_g_input,
.vidioc_s_input = qcam_s_input,
.vidioc_enum_input = qcam_enum_input,
- .vidioc_queryctrl = qcam_queryctrl,
- .vidioc_g_ctrl = qcam_g_ctrl,
- .vidioc_s_ctrl = qcam_s_ctrl,
- .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops qcam_ctrl_ops = {
+ .s_ctrl = qcam_s_ctrl,
};
/* Initialize the QuickCam driver control structure. */
@@ -754,6 +728,20 @@ static struct qcam *qcam_init(struct parport *port)
return NULL;
}
+ v4l2_ctrl_handler_init(&qcam->hdl, 3);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 240);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 192);
+ v4l2_ctrl_new_std(&qcam->hdl, &qcam_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 255, 1, 128);
+ if (qcam->hdl.error) {
+ v4l2_err(v4l2_dev, "couldn't register controls\n");
+ v4l2_ctrl_handler_free(&qcam->hdl);
+ kfree(qcam);
+ return NULL;
+ }
+
qcam->pport = port;
qcam->pdev = parport_register_device(port, "c-qcam", NULL, NULL,
NULL, 0, NULL);
@@ -762,6 +750,7 @@ static struct qcam *qcam_init(struct parport *port)
if (qcam->pdev == NULL) {
v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
+ v4l2_ctrl_handler_free(&qcam->hdl);
kfree(qcam);
return NULL;
}
@@ -771,6 +760,8 @@ static struct qcam *qcam_init(struct parport *port)
qcam->vdev.fops = &qcam_fops;
qcam->vdev.ioctl_ops = &qcam_ioctl_ops;
qcam->vdev.release = video_device_release_empty;
+ qcam->vdev.ctrl_handler = &qcam->hdl;
+ set_bit(V4L2_FL_USE_FH_PRIO, &qcam->vdev.flags);
video_set_drvdata(&qcam->vdev, qcam);
mutex_init(&qcam->lock);
@@ -845,6 +836,7 @@ static int init_cqcam(struct parport *port)
static void close_cqcam(struct qcam *qcam)
{
video_unregister_device(&qcam->vdev);
+ v4l2_ctrl_handler_free(&qcam->hdl);
parport_unregister_device(qcam->pdev);
kfree(qcam);
}
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index ab252188981b..cdef677d57ec 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -32,11 +32,12 @@
#define __CPIA2_H__
#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
#include <linux/usb.h>
#include <linux/poll.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
-#include "cpia2dev.h"
#include "cpia2_registers.h"
/* define for verbose debug output */
@@ -65,7 +66,6 @@
/* Flicker Modes */
#define NEVER_FLICKER 0
-#define ANTI_FLICKER_ON 1
#define FLICKER_60 60
#define FLICKER_50 50
@@ -148,7 +148,6 @@ enum {
#define DEFAULT_BRIGHTNESS 0x46
#define DEFAULT_CONTRAST 0x93
#define DEFAULT_SATURATION 0x7f
-#define DEFAULT_TARGET_KB 0x30
/* Power state */
#define HI_POWER_MODE CPIA2_SYSTEM_CONTROL_HIGH_POWER
@@ -287,7 +286,6 @@ struct camera_params {
struct {
u8 cam_register;
u8 flicker_mode_req; /* 1 if flicker on, else never flicker */
- int mains_frequency;
} flicker_control;
struct {
@@ -337,7 +335,7 @@ struct camera_params {
u8 vc_control;
u8 vc_mp_direction;
u8 vc_mp_data;
- u8 target_kb;
+ u8 quality;
} vc_params;
struct {
@@ -366,23 +364,23 @@ struct framebuf {
struct framebuf *next;
};
-struct cpia2_fh {
- enum v4l2_priority prio;
- u8 mmapped;
-};
-
struct camera_data {
/* locks */
+ struct v4l2_device v4l2_dev;
struct mutex v4l2_lock; /* serialize file operations */
- struct v4l2_prio_state prio;
+ struct v4l2_ctrl_handler hdl;
+ struct {
+ /* Lights control cluster */
+ struct v4l2_ctrl *top_light;
+ struct v4l2_ctrl *bottom_light;
+ };
+ struct v4l2_ctrl *usb_alt;
/* camera status */
- volatile int present; /* Is the camera still present? */
- int open_count; /* # of process that have camera open */
int first_image_seen;
- u8 mains_freq; /* for flicker control */
enum sensors sensor_type;
u8 flush;
+ struct v4l2_fh *stream_fh;
u8 mmapped;
int streaming; /* 0 = no, 1 = yes */
int xfer_mode; /* XFER_BULK or XFER_ISOC */
@@ -390,7 +388,7 @@ struct camera_data {
/* v4l */
int video_size; /* VIDEO_SIZE_ */
- struct video_device *vdev; /* v4l videodev */
+ struct video_device vdev; /* v4l videodev */
u32 width;
u32 height; /* Its size */
__u32 pixelformat; /* Format fourcc */
@@ -425,6 +423,7 @@ struct camera_data {
/* v4l */
int cpia2_register_camera(struct camera_data *cam);
void cpia2_unregister_camera(struct camera_data *cam);
+void cpia2_camera_release(struct v4l2_device *v4l2_dev);
/* core */
int cpia2_reset_camera(struct camera_data *cam);
@@ -443,7 +442,7 @@ int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd);
int cpia2_do_command(struct camera_data *cam,
unsigned int command,
unsigned char direction, unsigned char param);
-struct camera_data *cpia2_init_camera_struct(void);
+struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf);
int cpia2_init_camera(struct camera_data *cam);
int cpia2_allocate_buffers(struct camera_data *cam);
void cpia2_free_buffers(struct camera_data *cam);
@@ -454,7 +453,6 @@ unsigned int cpia2_poll(struct camera_data *cam,
int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
void cpia2_set_property_flip(struct camera_data *cam, int prop_val);
void cpia2_set_property_mirror(struct camera_data *cam, int prop_val);
-int cpia2_set_target_kb(struct camera_data *cam, unsigned char value);
int cpia2_set_gpio(struct camera_data *cam, unsigned char setting);
int cpia2_set_fps(struct camera_data *cam, int framerate);
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index ee91e295c90a..17188e234770 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -66,7 +66,6 @@ static int config_sensor_410(struct camera_data *cam,
static int config_sensor_500(struct camera_data *cam,
int reqwidth, int reqheight);
static int set_all_properties(struct camera_data *cam);
-static void get_color_params(struct camera_data *cam);
static void wake_system(struct camera_data *cam);
static void set_lowlight_boost(struct camera_data *cam);
static void reset_camera_struct(struct camera_data *cam);
@@ -453,15 +452,6 @@ int cpia2_do_command(struct camera_data *cam,
cam->params.version.vp_device_hi = cmd.buffer.block_data[0];
cam->params.version.vp_device_lo = cmd.buffer.block_data[1];
break;
- case CPIA2_CMD_GET_VP_BRIGHTNESS:
- cam->params.color_params.brightness = cmd.buffer.block_data[0];
- break;
- case CPIA2_CMD_GET_CONTRAST:
- cam->params.color_params.contrast = cmd.buffer.block_data[0];
- break;
- case CPIA2_CMD_GET_VP_SATURATION:
- cam->params.color_params.saturation = cmd.buffer.block_data[0];
- break;
case CPIA2_CMD_GET_VP_GPIO_DATA:
cam->params.vp_params.gpio_data = cmd.buffer.block_data[0];
break;
@@ -617,6 +607,7 @@ int cpia2_reset_camera(struct camera_data *cam)
{
u8 tmp_reg;
int retval = 0;
+ int target_kb;
int i;
struct cpia2_command cmd;
@@ -800,9 +791,16 @@ int cpia2_reset_camera(struct camera_data *cam)
}
cpia2_do_command(cam, CPIA2_CMD_SET_VC_CONTROL, TRANSFER_WRITE,tmp_reg);
- /* Set target size (kb) on vc */
+ /* Set target size (kb) on vc
+ This is a heuristic based on the quality parameter and the raw
+ framesize in kB divided by 16 (the compression factor when the
+ quality is 100%) */
+ target_kb = (cam->width * cam->height * 2 / 16384) *
+ cam->params.vc_params.quality / 100;
+ if (target_kb < 1)
+ target_kb = 1;
cpia2_do_command(cam, CPIA2_CMD_SET_TARGET_KB,
- TRANSFER_WRITE, cam->params.vc_params.target_kb);
+ TRANSFER_WRITE, target_kb);
/* Wiggle VC Reset */
/***
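The quality-to-target_kb heuristic is easiest to check with numbers; a small sketch using an example 640x480 frame (the formula is the one added above, the frame size is only illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int width = 640, height = 480, quality = 100; /* example values */
	int target_kb = (width * height * 2 / 16384) * quality / 100; /* 614400 / 16384 = 37 */

	if (target_kb < 1)
		target_kb = 1;
	printf("quality %u%% -> target %d kB\n", quality, target_kb);
	/* the same frame at quality 50 gives 18 kB; very low settings clamp to 1 kB */
	return 0;
}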
@@ -1538,23 +1536,17 @@ static int set_all_properties(struct camera_data *cam)
* framerate and user_mode were already set (set_default_user_mode).
**/
- cpia2_set_color_params(cam);
-
cpia2_usb_change_streaming_alternate(cam,
cam->params.camera_state.stream_mode);
- cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
- cam->params.vp_params.user_effects);
-
- cpia2_set_flicker_mode(cam,
- cam->params.flicker_control.flicker_mode_req);
-
cpia2_do_command(cam,
CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION,
TRANSFER_WRITE, cam->params.vp_params.gpio_direction);
cpia2_do_command(cam, CPIA2_CMD_SET_VC_MP_GPIO_DATA, TRANSFER_WRITE,
cam->params.vp_params.gpio_data);
+ v4l2_ctrl_handler_setup(&cam->hdl);
+
wake_system(cam);
set_lowlight_boost(cam);
@@ -1569,7 +1561,6 @@ static int set_all_properties(struct camera_data *cam)
*****************************************************************************/
void cpia2_save_camera_state(struct camera_data *cam)
{
- get_color_params(cam);
cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS, TRANSFER_READ, 0);
cpia2_do_command(cam, CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION, TRANSFER_READ,
0);
@@ -1577,30 +1568,6 @@ void cpia2_save_camera_state(struct camera_data *cam)
/* Don't get framerate or target_kb. Trust the values we already have */
}
-/******************************************************************************
- *
- * get_color_params
- *
- *****************************************************************************/
-static void get_color_params(struct camera_data *cam)
-{
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS, TRANSFER_READ, 0);
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION, TRANSFER_READ, 0);
- cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST, TRANSFER_READ, 0);
-}
-
-/******************************************************************************
- *
- * cpia2_set_color_params
- *
- *****************************************************************************/
-void cpia2_set_color_params(struct camera_data *cam)
-{
- DBG("Setting color params\n");
- cpia2_set_brightness(cam, cam->params.color_params.brightness);
- cpia2_set_contrast(cam, cam->params.color_params.contrast);
- cpia2_set_saturation(cam, cam->params.color_params.saturation);
-}
/******************************************************************************
*
@@ -1664,15 +1631,9 @@ int cpia2_set_flicker_mode(struct camera_data *cam, int mode)
switch(mode) {
case NEVER_FLICKER:
- cam->params.flicker_control.flicker_mode_req = mode;
- break;
case FLICKER_60:
- cam->params.flicker_control.flicker_mode_req = mode;
- cam->params.flicker_control.mains_frequency = 60;
- break;
case FLICKER_50:
cam->params.flicker_control.flicker_mode_req = mode;
- cam->params.flicker_control.mains_frequency = 50;
break;
default:
err = -EINVAL;
@@ -1701,6 +1662,7 @@ void cpia2_set_property_flip(struct camera_data *cam, int prop_val)
{
cam_reg &= ~CPIA2_VP_USER_EFFECTS_FLIP;
}
+ cam->params.vp_params.user_effects = cam_reg;
cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
cam_reg);
}
@@ -1725,37 +1687,13 @@ void cpia2_set_property_mirror(struct camera_data *cam, int prop_val)
{
cam_reg &= ~CPIA2_VP_USER_EFFECTS_MIRROR;
}
+ cam->params.vp_params.user_effects = cam_reg;
cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
cam_reg);
}
/******************************************************************************
*
- * set_target_kb
- *
- * The new Target KB is set in cam->params.vc_params.target_kb and
- * activates on reset.
- *****************************************************************************/
-
-int cpia2_set_target_kb(struct camera_data *cam, unsigned char value)
-{
- DBG("Requested target_kb = %d\n", value);
- if (value != cam->params.vc_params.target_kb) {
-
- cpia2_usb_stream_pause(cam);
-
- /* reset camera for new target_kb */
- cam->params.vc_params.target_kb = value;
- cpia2_reset_camera(cam);
-
- cpia2_usb_stream_resume(cam);
- }
-
- return 0;
-}
-
-/******************************************************************************
- *
* cpia2_set_gpio
*
*****************************************************************************/
@@ -1843,7 +1781,7 @@ void cpia2_set_brightness(struct camera_data *cam, unsigned char value)
if (cam->params.pnp_id.device_type == DEVICE_STV_672 && value == 0)
value++;
DBG("Setting brightness to %d (0x%0x)\n", value, value);
- cpia2_do_command(cam,CPIA2_CMD_SET_VP_BRIGHTNESS, TRANSFER_WRITE,value);
+ cpia2_do_command(cam, CPIA2_CMD_SET_VP_BRIGHTNESS, TRANSFER_WRITE, value);
}
/******************************************************************************
@@ -1854,7 +1792,6 @@ void cpia2_set_brightness(struct camera_data *cam, unsigned char value)
void cpia2_set_contrast(struct camera_data *cam, unsigned char value)
{
DBG("Setting contrast to %d (0x%0x)\n", value, value);
- cam->params.color_params.contrast = value;
cpia2_do_command(cam, CPIA2_CMD_SET_CONTRAST, TRANSFER_WRITE, value);
}
@@ -1866,7 +1803,6 @@ void cpia2_set_contrast(struct camera_data *cam, unsigned char value)
void cpia2_set_saturation(struct camera_data *cam, unsigned char value)
{
DBG("Setting saturation to %d (0x%0x)\n", value, value);
- cam->params.color_params.saturation = value;
cpia2_do_command(cam,CPIA2_CMD_SET_VP_SATURATION, TRANSFER_WRITE,value);
}
@@ -2168,14 +2104,10 @@ static void reset_camera_struct(struct camera_data *cam)
/***
* The following parameter values are the defaults from the register map.
***/
- cam->params.color_params.brightness = DEFAULT_BRIGHTNESS;
- cam->params.color_params.contrast = DEFAULT_CONTRAST;
- cam->params.color_params.saturation = DEFAULT_SATURATION;
cam->params.vp_params.lowlight_boost = 0;
/* FlickerModes */
cam->params.flicker_control.flicker_mode_req = NEVER_FLICKER;
- cam->params.flicker_control.mains_frequency = 60;
/* jpeg params */
cam->params.compression.jpeg_options = CPIA2_VC_VC_JPEG_OPT_DEFAULT;
@@ -2188,7 +2120,7 @@ static void reset_camera_struct(struct camera_data *cam)
cam->params.vp_params.gpio_data = 0;
/* Target kb params */
- cam->params.vc_params.target_kb = DEFAULT_TARGET_KB;
+ cam->params.vc_params.quality = 100;
/***
* Set Sensor FPS as fast as possible.
@@ -2228,7 +2160,7 @@ static void reset_camera_struct(struct camera_data *cam)
*
* Initializes camera struct, does not call reset to fill in defaults.
*****************************************************************************/
-struct camera_data *cpia2_init_camera_struct(void)
+struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf)
{
struct camera_data *cam;
@@ -2239,8 +2171,13 @@ struct camera_data *cpia2_init_camera_struct(void)
return NULL;
}
+ cam->v4l2_dev.release = cpia2_camera_release;
+ if (v4l2_device_register(&intf->dev, &cam->v4l2_dev) < 0) {
+ v4l2_err(&cam->v4l2_dev, "couldn't register v4l2_device\n");
+ kfree(cam);
+ return NULL;
+ }
- cam->present = 1;
mutex_init(&cam->v4l2_lock);
init_waitqueue_head(&cam->wq_stream);
@@ -2373,11 +2310,6 @@ long cpia2_read(struct camera_data *cam,
return -EINVAL;
}
- if (!cam->present) {
- LOG("%s: camera removed\n",__func__);
- return 0; /* EOF */
- }
-
if (!cam->streaming) {
/* Start streaming */
cpia2_usb_stream_start(cam,
@@ -2393,12 +2325,12 @@ long cpia2_read(struct camera_data *cam,
if (frame->status != FRAME_READY) {
mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
- !cam->present ||
+ !video_is_registered(&cam->vdev) ||
(frame = cam->curbuff)->status == FRAME_READY);
mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
- if (!cam->present)
+ if (!video_is_registered(&cam->vdev))
return 0;
}
@@ -2423,17 +2355,10 @@ long cpia2_read(struct camera_data *cam,
unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
poll_table *wait)
{
- unsigned int status=0;
+ unsigned int status = v4l2_ctrl_poll(filp, wait);
- if (!cam) {
- ERR("%s: Internal error, camera_data not found!\n",__func__);
- return POLLERR;
- }
-
- if (!cam->present)
- return POLLHUP;
-
- if(!cam->streaming) {
+ if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) &&
+ !cam->streaming) {
/* Start streaming */
cpia2_usb_stream_start(cam,
cam->params.camera_state.stream_mode);
@@ -2441,10 +2366,8 @@ unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
poll_wait(filp, &cam->wq_stream, wait);
- if(!cam->present)
- status = POLLHUP;
- else if(cam->curbuff->status == FRAME_READY)
- status = POLLIN | POLLRDNORM;
+ if (cam->curbuff->status == FRAME_READY)
+ status |= POLLIN | POLLRDNORM;
return status;
}
@@ -2462,12 +2385,9 @@ int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma)
unsigned long start = (unsigned long) adr;
unsigned long page, pos;
- if (!cam)
- return -ENODEV;
-
DBG("mmap offset:%ld size:%ld\n", start_offset, size);
- if (!cam->present)
+ if (!video_is_registered(&cam->vdev))
return -ENODEV;
if (size > cam->frame_size*cam->num_frames ||
diff --git a/drivers/media/video/cpia2/cpia2_usb.c b/drivers/media/video/cpia2/cpia2_usb.c
index 59c797c15277..95b5d6e7cdc4 100644
--- a/drivers/media/video/cpia2/cpia2_usb.c
+++ b/drivers/media/video/cpia2/cpia2_usb.c
@@ -54,6 +54,8 @@ static void cpia2_usb_complete(struct urb *urb);
static int cpia2_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void cpia2_usb_disconnect(struct usb_interface *intf);
+static int cpia2_usb_suspend(struct usb_interface *intf, pm_message_t message);
+static int cpia2_usb_resume(struct usb_interface *intf);
static void free_sbufs(struct camera_data *cam);
static void add_APPn(struct camera_data *cam);
@@ -74,6 +76,9 @@ static struct usb_driver cpia2_driver = {
.name = "cpia2",
.probe = cpia2_usb_probe,
.disconnect = cpia2_usb_disconnect,
+ .suspend = cpia2_usb_suspend,
+ .resume = cpia2_usb_resume,
+ .reset_resume = cpia2_usb_resume,
.id_table = cpia2_id_table
};
@@ -218,10 +223,9 @@ static void cpia2_usb_complete(struct urb *urb)
return;
}
- if (!cam->streaming || !cam->present || cam->open_count == 0) {
- LOG("Will now stop the streaming: streaming = %d, "
- "present=%d, open_count=%d\n",
- cam->streaming, cam->present, cam->open_count);
+ if (!cam->streaming || !video_is_registered(&cam->vdev)) {
+ LOG("Will now stop the streaming: streaming = %d, present=%d\n",
+ cam->streaming, video_is_registered(&cam->vdev));
return;
}
@@ -392,7 +396,7 @@ static int configure_transfer_mode(struct camera_data *cam, unsigned int alt)
struct cpia2_command cmd;
unsigned char reg;
- if(!cam->present)
+ if (!video_is_registered(&cam->vdev))
return -ENODEV;
/***
@@ -752,8 +756,8 @@ int cpia2_usb_stream_pause(struct camera_data *cam)
{
int ret = 0;
if(cam->streaming) {
- ret = set_alternate(cam, USBIF_CMDONLY);
free_sbufs(cam);
+ ret = set_alternate(cam, USBIF_CMDONLY);
}
return ret;
}
@@ -770,6 +774,10 @@ int cpia2_usb_stream_resume(struct camera_data *cam)
cam->first_image_seen = 0;
ret = set_alternate(cam, cam->params.camera_state.stream_mode);
if(ret == 0) {
+ /* for some reason the user effects need to be set
+ again when starting streaming. */
+ cpia2_do_command(cam, CPIA2_CMD_SET_USER_EFFECTS, TRANSFER_WRITE,
+ cam->params.vp_params.user_effects);
ret = submit_urbs(cam);
}
}
@@ -784,6 +792,7 @@ int cpia2_usb_stream_resume(struct camera_data *cam)
int cpia2_usb_stream_stop(struct camera_data *cam)
{
int ret;
+
ret = cpia2_usb_stream_pause(cam);
cam->streaming = 0;
configure_transfer_mode(cam, 0);
@@ -812,7 +821,8 @@ static int cpia2_usb_probe(struct usb_interface *intf,
/* If we get to this point, we found a CPiA2 camera */
LOG("CPiA2 USB camera found\n");
- if((cam = cpia2_init_camera_struct()) == NULL)
+ cam = cpia2_init_camera_struct(intf);
+ if (cam == NULL)
return -ENOMEM;
cam->dev = udev;
@@ -825,16 +835,9 @@ static int cpia2_usb_probe(struct usb_interface *intf,
return ret;
}
- if ((ret = cpia2_register_camera(cam)) < 0) {
- ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
- kfree(cam);
- return ret;
- }
-
if((ret = cpia2_init_camera(cam)) < 0) {
ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret);
- cpia2_unregister_camera(cam);
kfree(cam);
return ret;
}
@@ -853,6 +856,13 @@ static int cpia2_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, cam);
+ ret = cpia2_register_camera(cam);
+ if (ret < 0) {
+ ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret);
+ kfree(cam);
+ return ret;
+ }
+
return 0;
}
@@ -865,13 +875,16 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
{
struct camera_data *cam = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- cam->present = 0;
DBG("Stopping stream\n");
cpia2_usb_stream_stop(cam);
+ mutex_lock(&cam->v4l2_lock);
DBG("Unregistering camera\n");
cpia2_unregister_camera(cam);
+ v4l2_device_disconnect(&cam->v4l2_dev);
+ mutex_unlock(&cam->v4l2_lock);
+ v4l2_device_put(&cam->v4l2_dev);
if(cam->buffers) {
DBG("Wakeup waiting processes\n");
@@ -884,14 +897,41 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
DBG("Releasing interface\n");
usb_driver_release_interface(&cpia2_driver, intf);
- if (cam->open_count == 0) {
- DBG("Freeing camera structure\n");
- kfree(cam);
+ LOG("CPiA2 camera disconnected.\n");
+}
+
+static int cpia2_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct camera_data *cam = usb_get_intfdata(intf);
+
+ mutex_lock(&cam->v4l2_lock);
+ if (cam->streaming) {
+ cpia2_usb_stream_stop(cam);
+ cam->streaming = 1;
}
+ mutex_unlock(&cam->v4l2_lock);
- LOG("CPiA2 camera disconnected.\n");
+ dev_info(&intf->dev, "going into suspend..\n");
+ return 0;
}
+/* Resume device - start device. */
+static int cpia2_usb_resume(struct usb_interface *intf)
+{
+ struct camera_data *cam = usb_get_intfdata(intf);
+
+ mutex_lock(&cam->v4l2_lock);
+ v4l2_ctrl_handler_setup(&cam->hdl);
+ if (cam->streaming) {
+ cam->streaming = 0;
+ cpia2_usb_stream_start(cam,
+ cam->params.camera_state.stream_mode);
+ }
+ mutex_unlock(&cam->v4l2_lock);
+
+ dev_info(&intf->dev, "coming out of suspend..\n");
+ return 0;
+}
/******************************************************************************
*
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 077eb1db80a1..55e92902a76c 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -39,15 +39,15 @@
#include <linux/videodev2.h>
#include <linux/stringify.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include "cpia2.h"
-#include "cpia2dev.h"
static int video_nr = -1;
module_param(video_nr, int, 0);
-MODULE_PARM_DESC(video_nr,"video device to register (0=/dev/video0, etc)");
+MODULE_PARM_DESC(video_nr, "video device to register (0=/dev/video0, etc)");
-static int buffer_size = 68*1024;
+static int buffer_size = 68 * 1024;
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "Size for each frame buffer in bytes (default 68k)");
@@ -62,18 +62,10 @@ MODULE_PARM_DESC(alternate, "USB Alternate (" __stringify(USBIF_ISO_1) "-"
__stringify(USBIF_ISO_6) ", default "
__stringify(DEFAULT_ALT) ")");
-static int flicker_freq = 60;
-module_param(flicker_freq, int, 0);
-MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" __stringify(50) "or"
- __stringify(60) ", default "
- __stringify(60) ")");
-
-static int flicker_mode = NEVER_FLICKER;
+static int flicker_mode;
module_param(flicker_mode, int, 0);
-MODULE_PARM_DESC(flicker_mode,
- "Flicker supression (" __stringify(NEVER_FLICKER) "or"
- __stringify(ANTI_FLICKER_ON) ", default "
- __stringify(NEVER_FLICKER) ")");
+MODULE_PARM_DESC(flicker_mode, "Flicker frequency (0 (disabled), " __stringify(50) " or "
+ __stringify(60) ", default 0)");
MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
@@ -82,153 +74,7 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(CPIA_VERSION);
#define ABOUT "V4L-Driver for Vision CPiA2 based cameras"
-
-struct control_menu_info {
- int value;
- char name[32];
-};
-
-static struct control_menu_info framerate_controls[] =
-{
- { CPIA2_VP_FRAMERATE_6_25, "6.25 fps" },
- { CPIA2_VP_FRAMERATE_7_5, "7.5 fps" },
- { CPIA2_VP_FRAMERATE_12_5, "12.5 fps" },
- { CPIA2_VP_FRAMERATE_15, "15 fps" },
- { CPIA2_VP_FRAMERATE_25, "25 fps" },
- { CPIA2_VP_FRAMERATE_30, "30 fps" },
-};
-#define NUM_FRAMERATE_CONTROLS (ARRAY_SIZE(framerate_controls))
-
-static struct control_menu_info flicker_controls[] =
-{
- { NEVER_FLICKER, "Off" },
- { FLICKER_50, "50 Hz" },
- { FLICKER_60, "60 Hz" },
-};
-#define NUM_FLICKER_CONTROLS (ARRAY_SIZE(flicker_controls))
-
-static struct control_menu_info lights_controls[] =
-{
- { 0, "Off" },
- { 64, "Top" },
- { 128, "Bottom" },
- { 192, "Both" },
-};
-#define NUM_LIGHTS_CONTROLS (ARRAY_SIZE(lights_controls))
-#define GPIO_LIGHTS_MASK 192
-
-static struct v4l2_queryctrl controls[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = DEFAULT_BRIGHTNESS,
- },
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = DEFAULT_CONTRAST,
- },
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = DEFAULT_SATURATION,
- },
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = CPIA2_CID_TARGET_KB,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Target KB",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = DEFAULT_TARGET_KB,
- },
- {
- .id = CPIA2_CID_GPIO,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "GPIO",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = CPIA2_CID_FLICKER_MODE,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Flicker Reduction",
- .minimum = 0,
- .maximum = NUM_FLICKER_CONTROLS-1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = CPIA2_CID_FRAMERATE,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Framerate",
- .minimum = 0,
- .maximum = NUM_FRAMERATE_CONTROLS-1,
- .step = 1,
- .default_value = NUM_FRAMERATE_CONTROLS-1,
- },
- {
- .id = CPIA2_CID_USB_ALT,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "USB Alternate",
- .minimum = USBIF_ISO_1,
- .maximum = USBIF_ISO_6,
- .step = 1,
- .default_value = DEFAULT_ALT,
- },
- {
- .id = CPIA2_CID_LIGHTS,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Lights",
- .minimum = 0,
- .maximum = NUM_LIGHTS_CONTROLS-1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = CPIA2_CID_RESET_CAMERA,
- .type = V4L2_CTRL_TYPE_BUTTON,
- .name = "Reset Camera",
- .minimum = 0,
- .maximum = 0,
- .step = 0,
- .default_value = 0,
- },
-};
-#define NUM_CONTROLS (ARRAY_SIZE(controls))
-
+#define CPIA2_CID_USB_ALT (V4L2_CID_USER_BASE | 0xf000)
/******************************************************************************
*
@@ -238,38 +84,27 @@ static struct v4l2_queryctrl controls[] = {
static int cpia2_open(struct file *file)
{
struct camera_data *cam = video_drvdata(file);
- struct cpia2_fh *fh;
-
- if (!cam) {
- ERR("Internal error, camera_data not found!\n");
- return -ENODEV;
- }
+ int retval = v4l2_fh_open(file);
- if (!cam->present)
- return -ENODEV;
+ if (retval)
+ return retval;
- if (cam->open_count == 0) {
- if (cpia2_allocate_buffers(cam))
+ if (v4l2_fh_is_singular_file(file)) {
+ if (cpia2_allocate_buffers(cam)) {
+ v4l2_fh_release(file);
return -ENOMEM;
+ }
/* reset the camera */
- if (cpia2_reset_camera(cam) < 0)
+ if (cpia2_reset_camera(cam) < 0) {
+ v4l2_fh_release(file);
return -EIO;
+ }
cam->APP_len = 0;
cam->COM_len = 0;
}
- fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
- file->private_data = fh;
- fh->prio = V4L2_PRIORITY_UNSET;
- v4l2_prio_open(&cam->prio, &fh->prio);
- fh->mmapped = 0;
-
- ++cam->open_count;
-
cpia2_dbg_dump_registers(cam);
return 0;
}
@@ -283,37 +118,22 @@ static int cpia2_close(struct file *file)
{
struct video_device *dev = video_devdata(file);
struct camera_data *cam = video_get_drvdata(dev);
- struct cpia2_fh *fh = file->private_data;
- if (cam->present &&
- (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD)) {
+ if (video_is_registered(&cam->vdev) && v4l2_fh_is_singular_file(file)) {
cpia2_usb_stream_stop(cam);
- if (cam->open_count == 1) {
- /* save camera state for later open */
- cpia2_save_camera_state(cam);
+ /* save camera state for later open */
+ cpia2_save_camera_state(cam);
- cpia2_set_low_power(cam);
- cpia2_free_buffers(cam);
- }
+ cpia2_set_low_power(cam);
+ cpia2_free_buffers(cam);
}
- if (fh->mmapped)
+ if (cam->stream_fh == file->private_data) {
+ cam->stream_fh = NULL;
cam->mmapped = 0;
- v4l2_prio_close(&cam->prio, fh->prio);
- file->private_data = NULL;
- kfree(fh);
-
- if (--cam->open_count == 0) {
- cpia2_free_buffers(cam);
- if (!cam->present) {
- video_unregister_device(dev);
- kfree(cam);
- return 0;
- }
}
-
- return 0;
+ return v4l2_fh_release(file);
}
/******************************************************************************
@@ -327,16 +147,9 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
struct camera_data *cam = video_drvdata(file);
int noblock = file->f_flags&O_NONBLOCK;
- struct cpia2_fh *fh = file->private_data;
-
if(!cam)
return -EINVAL;
- /* Priority check */
- if(fh->prio != V4L2_PRIORITY_RECORD) {
- return -EBUSY;
- }
-
return cpia2_read(cam, buf, count, noblock);
}
@@ -349,15 +162,6 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
{
struct camera_data *cam = video_drvdata(filp);
- struct cpia2_fh *fh = filp->private_data;
-
- if(!cam)
- return POLLERR;
-
- /* Priority check */
- if(fh->prio != V4L2_PRIORITY_RECORD) {
- return POLLERR;
- }
return cpia2_poll(cam, filp, wait);
}
@@ -384,36 +188,13 @@ static int sync(struct camera_data *cam, int frame_nr)
mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
- if(!cam->present)
+ if (!video_is_registered(&cam->vdev))
return -ENOTTY;
}
}
/******************************************************************************
*
- * ioctl_set_gpio
- *
- *****************************************************************************/
-
-static long cpia2_default(struct file *file, void *fh, bool valid_prio,
- int cmd, void *arg)
-{
- struct camera_data *cam = video_drvdata(file);
- __u32 gpio_val;
-
- if (cmd != CPIA2_CID_GPIO)
- return -EINVAL;
-
- gpio_val = *(__u32*) arg;
-
- if (gpio_val &~ 0xFFU)
- return -EINVAL;
-
- return cpia2_set_gpio(cam, (unsigned char)gpio_val);
-}
-
-/******************************************************************************
- *
* ioctl_querycap
*
* V4L2 device capabilities
@@ -465,9 +246,11 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0)
memset(vc->bus_info,0, sizeof(vc->bus_info));
- vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ vc->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
+ vc->capabilities = vc->device_caps |
+ V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -610,22 +393,12 @@ static int cpia2_s_fmt_vid_cap(struct file *file, void *_fh,
struct v4l2_format *f)
{
struct camera_data *cam = video_drvdata(file);
- struct cpia2_fh *fh = _fh;
int err, frame;
- err = v4l2_prio_check(&cam->prio, fh->prio);
- if (err)
- return err;
err = cpia2_try_fmt_vid_cap(file, _fh, f);
if(err != 0)
return err;
- /* Ensure that only this process can change the format. */
- err = v4l2_prio_change(&cam->prio, &fh->prio, V4L2_PRIORITY_RECORD);
- if(err != 0) {
- return err;
- }
-
cam->pixelformat = f->fmt.pix.pixelformat;
/* NOTE: This should be set to 1 for MJPEG, but some apps don't handle
@@ -713,240 +486,126 @@ static int cpia2_cropcap(struct file *file, void *fh, struct v4l2_cropcap *c)
return 0;
}
-/******************************************************************************
- *
- * ioctl_queryctrl
- *
- * V4L2 query possible control variables
- *
- *****************************************************************************/
+struct framerate_info {
+ int value;
+ struct v4l2_fract period;
+};
-static int cpia2_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c)
+static const struct framerate_info framerate_controls[] = {
+ { CPIA2_VP_FRAMERATE_6_25, { 4, 25 } },
+ { CPIA2_VP_FRAMERATE_7_5, { 2, 15 } },
+ { CPIA2_VP_FRAMERATE_12_5, { 2, 25 } },
+ { CPIA2_VP_FRAMERATE_15, { 1, 15 } },
+ { CPIA2_VP_FRAMERATE_25, { 1, 25 } },
+ { CPIA2_VP_FRAMERATE_30, { 1, 30 } },
+};
+
+static int cpia2_g_parm(struct file *file, void *fh, struct v4l2_streamparm *p)
{
struct camera_data *cam = video_drvdata(file);
+ struct v4l2_captureparm *cap = &p->parm.capture;
int i;
- for(i=0; i<NUM_CONTROLS; ++i) {
- if(c->id == controls[i].id) {
- memcpy(c, controls+i, sizeof(*c));
- break;
- }
- }
-
- if(i == NUM_CONTROLS)
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- /* Some devices have additional limitations */
- switch(c->id) {
- case V4L2_CID_BRIGHTNESS:
- /***
- * Don't let the register be set to zero - bug in VP4
- * flash of full brightness
- ***/
- if (cam->params.pnp_id.device_type == DEVICE_STV_672)
- c->minimum = 1;
- break;
- case V4L2_CID_VFLIP:
- // VP5 Only
- if(cam->params.pnp_id.device_type == DEVICE_STV_672)
- c->flags |= V4L2_CTRL_FLAG_DISABLED;
- break;
- case CPIA2_CID_FRAMERATE:
- if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
- cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){
- // Maximum 15fps
- for(i=0; i<c->maximum; ++i) {
- if(framerate_controls[i].value ==
- CPIA2_VP_FRAMERATE_15) {
- c->maximum = i;
- c->default_value = i;
- }
- }
+ cap->capability = V4L2_CAP_TIMEPERFRAME;
+ cap->readbuffers = cam->num_frames;
+ for (i = 0; i < ARRAY_SIZE(framerate_controls); i++)
+ if (cam->params.vp_params.frame_rate == framerate_controls[i].value) {
+ cap->timeperframe = framerate_controls[i].period;
+ break;
}
- break;
- case CPIA2_CID_FLICKER_MODE:
- // Flicker control only valid for 672.
- if(cam->params.pnp_id.device_type != DEVICE_STV_672)
- c->flags |= V4L2_CTRL_FLAG_DISABLED;
- break;
- case CPIA2_CID_LIGHTS:
- // Light control only valid for the QX5 Microscope.
- if(cam->params.pnp_id.product != 0x151)
- c->flags |= V4L2_CTRL_FLAG_DISABLED;
- break;
- default:
- break;
- }
-
return 0;
}
-/******************************************************************************
- *
- * ioctl_querymenu
- *
- * V4L2 query possible control variables
- *
- *****************************************************************************/
-
-static int cpia2_querymenu(struct file *file, void *fh, struct v4l2_querymenu *m)
+static int cpia2_s_parm(struct file *file, void *fh, struct v4l2_streamparm *p)
{
struct camera_data *cam = video_drvdata(file);
+ struct v4l2_captureparm *cap = &p->parm.capture;
+ struct v4l2_fract tpf = cap->timeperframe;
+ int max = ARRAY_SIZE(framerate_controls) - 1;
+ int ret;
+ int i;
- switch(m->id) {
- case CPIA2_CID_FLICKER_MODE:
- if (m->index >= NUM_FLICKER_CONTROLS)
- return -EINVAL;
+ ret = cpia2_g_parm(file, fh, p);
+ if (ret || !tpf.denominator || !tpf.numerator)
+ return ret;
+
+ /* Maximum 15 fps for this model */
+ if (cam->params.pnp_id.device_type == DEVICE_STV_672 &&
+ cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500)
+ max -= 2;
+ for (i = 0; i <= max; i++) {
+ struct v4l2_fract f1 = tpf;
+ struct v4l2_fract f2 = framerate_controls[i].period;
+
+ f1.numerator *= f2.denominator;
+ f2.numerator *= f1.denominator;
+ if (f1.numerator >= f2.numerator)
+ break;
+ }
+ if (i > max)
+ i = max;
+ cap->timeperframe = framerate_controls[i].period;
+ return cpia2_set_fps(cam, framerate_controls[i].value);
+}
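The cpia2_s_parm() hunk above selects the slowest table rate that is still at least as fast as the requested time-per-frame, comparing the fractions by cross-multiplication instead of floating point; a minimal sketch of that comparison with made-up fractions:

#include <stdio.h>

struct fract {
        unsigned int numerator;
        unsigned int denominator;
};

/* nonzero if a >= b, i.e. a.num/a.den >= b.num/b.den, without floats */
static int fract_ge(struct fract a, struct fract b)
{
        return (unsigned long long)a.numerator * b.denominator >=
               (unsigned long long)b.numerator * a.denominator;
}

int main(void)
{
        struct fract requested = { 1, 20 };     /* user asked for 20 fps */
        struct fract period15 = { 1, 15 };      /* table period for 15 fps */
        struct fract period25 = { 1, 25 };      /* table period for 25 fps */

        /* 1/20 < 1/15: 15 fps is too slow for the request, keep scanning */
        printf("%d\n", fract_ge(requested, period15)); /* 0 */
        /* 1/20 >= 1/25: 25 fps is the first rate fast enough, the loop breaks */
        printf("%d\n", fract_ge(requested, period25)); /* 1 */
        return 0;
}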
- strcpy(m->name, flicker_controls[m->index].name);
- break;
- case CPIA2_CID_FRAMERATE:
- {
- int maximum = NUM_FRAMERATE_CONTROLS - 1;
- if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
- cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){
- // Maximum 15fps
- int i;
- for(i=0; i<maximum; ++i) {
- if(framerate_controls[i].value ==
- CPIA2_VP_FRAMERATE_15)
- maximum = i;
- }
- }
- if (m->index > maximum)
- return -EINVAL;
+static const struct {
+ u32 width;
+ u32 height;
+} cpia2_framesizes[] = {
+ { 640, 480 },
+ { 352, 288 },
+ { 320, 240 },
+ { 288, 216 },
+ { 256, 192 },
+ { 224, 168 },
+ { 192, 144 },
+ { 176, 144 },
+};
- strcpy(m->name, framerate_controls[m->index].name);
- break;
- }
- case CPIA2_CID_LIGHTS:
- if (m->index >= NUM_LIGHTS_CONTROLS)
- return -EINVAL;
+static int cpia2_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
- strcpy(m->name, lights_controls[m->index].name);
- break;
- default:
+ if (fsize->pixel_format != V4L2_PIX_FMT_MJPEG &&
+ fsize->pixel_format != V4L2_PIX_FMT_JPEG)
return -EINVAL;
- }
+ if (fsize->index >= ARRAY_SIZE(cpia2_framesizes))
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = cpia2_framesizes[fsize->index].width;
+ fsize->discrete.height = cpia2_framesizes[fsize->index].height;
return 0;
}
-/******************************************************************************
- *
- * ioctl_g_ctrl
- *
- * V4L2 get the value of a control variable
- *
- *****************************************************************************/
-
-static int cpia2_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+static int cpia2_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
{
struct camera_data *cam = video_drvdata(file);
+ int max = ARRAY_SIZE(framerate_controls) - 1;
+ int i;
- switch(c->id) {
- case V4L2_CID_BRIGHTNESS:
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_BRIGHTNESS,
- TRANSFER_READ, 0);
- c->value = cam->params.color_params.brightness;
- break;
- case V4L2_CID_CONTRAST:
- cpia2_do_command(cam, CPIA2_CMD_GET_CONTRAST,
- TRANSFER_READ, 0);
- c->value = cam->params.color_params.contrast;
- break;
- case V4L2_CID_SATURATION:
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_SATURATION,
- TRANSFER_READ, 0);
- c->value = cam->params.color_params.saturation;
- break;
- case V4L2_CID_HFLIP:
- cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS,
- TRANSFER_READ, 0);
- c->value = (cam->params.vp_params.user_effects &
- CPIA2_VP_USER_EFFECTS_MIRROR) != 0;
- break;
- case V4L2_CID_VFLIP:
- cpia2_do_command(cam, CPIA2_CMD_GET_USER_EFFECTS,
- TRANSFER_READ, 0);
- c->value = (cam->params.vp_params.user_effects &
- CPIA2_VP_USER_EFFECTS_FLIP) != 0;
- break;
- case CPIA2_CID_TARGET_KB:
- c->value = cam->params.vc_params.target_kb;
- break;
- case CPIA2_CID_GPIO:
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA,
- TRANSFER_READ, 0);
- c->value = cam->params.vp_params.gpio_data;
- break;
- case CPIA2_CID_FLICKER_MODE:
- {
- int i, mode;
- cpia2_do_command(cam, CPIA2_CMD_GET_FLICKER_MODES,
- TRANSFER_READ, 0);
- if(cam->params.flicker_control.cam_register &
- CPIA2_VP_FLICKER_MODES_NEVER_FLICKER) {
- mode = NEVER_FLICKER;
- } else {
- if(cam->params.flicker_control.cam_register &
- CPIA2_VP_FLICKER_MODES_50HZ) {
- mode = FLICKER_50;
- } else {
- mode = FLICKER_60;
- }
- }
- for(i=0; i<NUM_FLICKER_CONTROLS; i++) {
- if(flicker_controls[i].value == mode) {
- c->value = i;
- break;
- }
- }
- if(i == NUM_FLICKER_CONTROLS)
- return -EINVAL;
- break;
- }
- case CPIA2_CID_FRAMERATE:
- {
- int maximum = NUM_FRAMERATE_CONTROLS - 1;
- int i;
- for(i=0; i<= maximum; i++) {
- if(cam->params.vp_params.frame_rate ==
- framerate_controls[i].value)
- break;
- }
- if(i > maximum)
- return -EINVAL;
- c->value = i;
- break;
- }
- case CPIA2_CID_USB_ALT:
- c->value = cam->params.camera_state.stream_mode;
- break;
- case CPIA2_CID_LIGHTS:
- {
- int i;
- cpia2_do_command(cam, CPIA2_CMD_GET_VP_GPIO_DATA,
- TRANSFER_READ, 0);
- for(i=0; i<NUM_LIGHTS_CONTROLS; i++) {
- if((cam->params.vp_params.gpio_data&GPIO_LIGHTS_MASK) ==
- lights_controls[i].value) {
- break;
- }
- }
- if(i == NUM_LIGHTS_CONTROLS)
- return -EINVAL;
- c->value = i;
- break;
- }
- case CPIA2_CID_RESET_CAMERA:
+ if (fival->pixel_format != V4L2_PIX_FMT_MJPEG &&
+ fival->pixel_format != V4L2_PIX_FMT_JPEG)
return -EINVAL;
- default:
- return -EINVAL;
- }
-
- DBG("Get control id:%d, value:%d\n", c->id, c->value);
+ /* Maximum 15 fps for this model */
+ if (cam->params.pnp_id.device_type == DEVICE_STV_672 &&
+ cam->params.version.sensor_flags == CPIA2_VP_SENSOR_FLAGS_500)
+ max -= 2;
+ if (fival->index > max)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(cpia2_framesizes); i++)
+ if (fival->width == cpia2_framesizes[i].width &&
+ fival->height == cpia2_framesizes[i].height)
+ break;
+ if (i == ARRAY_SIZE(cpia2_framesizes))
+ return -EINVAL;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = framerate_controls[fival->index].period;
return 0;
}
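With vidioc_enum_framesizes and vidioc_enum_frameintervals wired up above, generic V4L2 applications can discover the supported modes instead of relying on driver-private controls; a rough userspace sketch, where the /dev/video0 node and the MJPEG pixel format are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_frmsizeenum fsize;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0)
                return 1;
        memset(&fsize, 0, sizeof(fsize));
        fsize.pixel_format = V4L2_PIX_FMT_MJPEG;
        /* this driver only reports discrete sizes, so .discrete is valid */
        while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0) {
                printf("%ux%u\n", fsize.discrete.width, fsize.discrete.height);
                fsize.index++;
        }
        close(fd);
        return 0;
}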
@@ -958,72 +617,54 @@ static int cpia2_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
*
*****************************************************************************/
-static int cpia2_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+static int cpia2_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct camera_data *cam = video_drvdata(file);
- int i;
- int retval = 0;
+ struct camera_data *cam =
+ container_of(ctrl->handler, struct camera_data, hdl);
+ static const int flicker_table[] = {
+ NEVER_FLICKER,
+ FLICKER_50,
+ FLICKER_60,
+ };
- DBG("Set control id:%d, value:%d\n", c->id, c->value);
-
- /* Check that the value is in range */
- for(i=0; i<NUM_CONTROLS; i++) {
- if(c->id == controls[i].id) {
- if(c->value < controls[i].minimum ||
- c->value > controls[i].maximum) {
- return -EINVAL;
- }
- break;
- }
- }
- if(i == NUM_CONTROLS)
- return -EINVAL;
+ DBG("Set control id:%d, value:%d\n", ctrl->id, ctrl->val);
- switch(c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- cpia2_set_brightness(cam, c->value);
+ cpia2_set_brightness(cam, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- cpia2_set_contrast(cam, c->value);
+ cpia2_set_contrast(cam, ctrl->val);
break;
case V4L2_CID_SATURATION:
- cpia2_set_saturation(cam, c->value);
+ cpia2_set_saturation(cam, ctrl->val);
break;
case V4L2_CID_HFLIP:
- cpia2_set_property_mirror(cam, c->value);
+ cpia2_set_property_mirror(cam, ctrl->val);
break;
case V4L2_CID_VFLIP:
- cpia2_set_property_flip(cam, c->value);
+ cpia2_set_property_flip(cam, ctrl->val);
break;
- case CPIA2_CID_TARGET_KB:
- retval = cpia2_set_target_kb(cam, c->value);
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ return cpia2_set_flicker_mode(cam, flicker_table[ctrl->val]);
+ case V4L2_CID_ILLUMINATORS_1:
+ return cpia2_set_gpio(cam, (cam->top_light->val << 6) |
+ (cam->bottom_light->val << 7));
+ case V4L2_CID_JPEG_ACTIVE_MARKER:
+ cam->params.compression.inhibit_htables =
+ !(ctrl->val & V4L2_JPEG_ACTIVE_MARKER_DHT);
break;
- case CPIA2_CID_GPIO:
- retval = cpia2_set_gpio(cam, c->value);
- break;
- case CPIA2_CID_FLICKER_MODE:
- retval = cpia2_set_flicker_mode(cam,
- flicker_controls[c->value].value);
- break;
- case CPIA2_CID_FRAMERATE:
- retval = cpia2_set_fps(cam, framerate_controls[c->value].value);
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ cam->params.vc_params.quality = ctrl->val;
break;
case CPIA2_CID_USB_ALT:
- retval = cpia2_usb_change_streaming_alternate(cam, c->value);
- break;
- case CPIA2_CID_LIGHTS:
- retval = cpia2_set_gpio(cam, lights_controls[c->value].value);
- break;
- case CPIA2_CID_RESET_CAMERA:
- cpia2_usb_stream_pause(cam);
- cpia2_reset_camera(cam);
- cpia2_usb_stream_resume(cam);
+ cam->params.camera_state.stream_mode = ctrl->val;
break;
default:
- retval = -EINVAL;
+ return -EINVAL;
}
- return retval;
+ return 0;
}
/******************************************************************************
@@ -1084,6 +725,8 @@ static int cpia2_s_jpegcomp(struct file *file, void *fh, struct v4l2_jpegcompres
cam->params.compression.inhibit_htables =
!(parms->jpeg_markers & V4L2_JPEG_MARKER_DHT);
+ parms->jpeg_markers &= V4L2_JPEG_MARKER_DQT | V4L2_JPEG_MARKER_DRI |
+ V4L2_JPEG_MARKER_DHT;
if(parms->APP_len != 0) {
if(parms->APP_len > 0 &&
@@ -1270,12 +913,12 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
struct framebuf *cb=cam->curbuff;
mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
- !cam->present ||
+ !video_is_registered(&cam->vdev) ||
(cb=cam->curbuff)->status == FRAME_READY);
mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
return -ERESTARTSYS;
- if(!cam->present)
+ if (!video_is_registered(&cam->vdev))
return -ENOTTY;
frame = cb->num;
}
@@ -1299,56 +942,39 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
return 0;
}
-static int cpia2_g_priority(struct file *file, void *_fh, enum v4l2_priority *p)
-{
- struct cpia2_fh *fh = _fh;
-
- *p = fh->prio;
- return 0;
-}
-
-static int cpia2_s_priority(struct file *file, void *_fh, enum v4l2_priority prio)
-{
- struct camera_data *cam = video_drvdata(file);
- struct cpia2_fh *fh = _fh;
-
- if (cam->streaming && prio != fh->prio &&
- fh->prio == V4L2_PRIORITY_RECORD)
- /* Can't drop record priority while streaming */
- return -EBUSY;
-
- if (prio == V4L2_PRIORITY_RECORD && prio != fh->prio &&
- v4l2_prio_max(&cam->prio) == V4L2_PRIORITY_RECORD)
- /* Only one program can record at a time */
- return -EBUSY;
- return v4l2_prio_change(&cam->prio, &fh->prio, prio);
-}
-
static int cpia2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct camera_data *cam = video_drvdata(file);
+ int ret = -EINVAL;
DBG("VIDIOC_STREAMON, streaming=%d\n", cam->streaming);
if (!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (!cam->streaming)
- return cpia2_usb_stream_start(cam,
+ if (!cam->streaming) {
+ ret = cpia2_usb_stream_start(cam,
cam->params.camera_state.stream_mode);
- return -EINVAL;
+ if (!ret)
+ v4l2_ctrl_grab(cam->usb_alt, true);
+ }
+ return ret;
}
static int cpia2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct camera_data *cam = video_drvdata(file);
+ int ret = -EINVAL;
DBG("VIDIOC_STREAMOFF, streaming=%d\n", cam->streaming);
if (!cam->mmapped || type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (cam->streaming)
- return cpia2_usb_stream_stop(cam);
- return -EINVAL;
+ if (cam->streaming) {
+ ret = cpia2_usb_stream_stop(cam);
+ if (!ret)
+ v4l2_ctrl_grab(cam->usb_alt, false);
+ }
+ return ret;
}
/******************************************************************************
@@ -1361,16 +987,10 @@ static int cpia2_mmap(struct file *file, struct vm_area_struct *area)
struct camera_data *cam = video_drvdata(file);
int retval;
- /* Priority check */
- struct cpia2_fh *fh = file->private_data;
- if(fh->prio != V4L2_PRIORITY_RECORD) {
- return -EBUSY;
- }
-
retval = cpia2_remap_buffer(cam, area);
if(!retval)
- fh->mmapped = 1;
+ cam->stream_fh = file->private_data;
return retval;
}
@@ -1388,15 +1008,13 @@ static void reset_camera_struct_v4l(struct camera_data *cam)
cam->frame_size = buffer_size;
cam->num_frames = num_buffers;
- /* FlickerModes */
+ /* Flicker modes */
cam->params.flicker_control.flicker_mode_req = flicker_mode;
- cam->params.flicker_control.mains_frequency = flicker_freq;
- /* streamMode */
+ /* stream modes */
cam->params.camera_state.stream_mode = alternate;
cam->pixelformat = V4L2_PIX_FMT_JPEG;
- v4l2_prio_init(&cam->prio);
}
static const struct v4l2_ioctl_ops cpia2_ioctl_ops = {
@@ -1408,10 +1026,6 @@ static const struct v4l2_ioctl_ops cpia2_ioctl_ops = {
.vidioc_g_fmt_vid_cap = cpia2_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = cpia2_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = cpia2_try_fmt_vid_cap,
- .vidioc_queryctrl = cpia2_queryctrl,
- .vidioc_querymenu = cpia2_querymenu,
- .vidioc_g_ctrl = cpia2_g_ctrl,
- .vidioc_s_ctrl = cpia2_s_ctrl,
.vidioc_g_jpegcomp = cpia2_g_jpegcomp,
.vidioc_s_jpegcomp = cpia2_s_jpegcomp,
.vidioc_cropcap = cpia2_cropcap,
@@ -1421,9 +1035,12 @@ static const struct v4l2_ioctl_ops cpia2_ioctl_ops = {
.vidioc_dqbuf = cpia2_dqbuf,
.vidioc_streamon = cpia2_streamon,
.vidioc_streamoff = cpia2_streamoff,
- .vidioc_g_priority = cpia2_g_priority,
- .vidioc_s_priority = cpia2_s_priority,
- .vidioc_default = cpia2_default,
+ .vidioc_s_parm = cpia2_s_parm,
+ .vidioc_g_parm = cpia2_g_parm,
+ .vidioc_enum_framesizes = cpia2_enum_framesizes,
+ .vidioc_enum_frameintervals = cpia2_enum_frameintervals,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/***
@@ -1444,7 +1061,21 @@ static struct video_device cpia2_template = {
.name = "CPiA2 Camera",
.fops = &cpia2_fops,
.ioctl_ops = &cpia2_ioctl_ops,
- .release = video_device_release,
+ .release = video_device_release_empty,
+};
+
+void cpia2_camera_release(struct v4l2_device *v4l2_dev)
+{
+ struct camera_data *cam =
+ container_of(v4l2_dev, struct camera_data, v4l2_dev);
+
+ v4l2_ctrl_handler_free(&cam->hdl);
+ v4l2_device_unregister(&cam->v4l2_dev);
+ kfree(cam);
+}
+
+static const struct v4l2_ctrl_ops cpia2_ctrl_ops = {
+ .s_ctrl = cpia2_s_ctrl,
};
/******************************************************************************
@@ -1454,20 +1085,78 @@ static struct video_device cpia2_template = {
*****************************************************************************/
int cpia2_register_camera(struct camera_data *cam)
{
- cam->vdev = video_device_alloc();
- if(!cam->vdev)
- return -ENOMEM;
+ struct v4l2_ctrl_handler *hdl = &cam->hdl;
+ struct v4l2_ctrl_config cpia2_usb_alt = {
+ .ops = &cpia2_ctrl_ops,
+ .id = CPIA2_CID_USB_ALT,
+ .name = "USB Alternate",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = USBIF_ISO_1,
+ .max = USBIF_ISO_6,
+ .step = 1,
+ };
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 12);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_BRIGHTNESS,
+ cam->params.pnp_id.device_type == DEVICE_STV_672 ? 1 : 0,
+ 255, 1, DEFAULT_BRIGHTNESS);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, DEFAULT_CONTRAST);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, DEFAULT_SATURATION);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_JPEG_ACTIVE_MARKER, 0,
+ V4L2_JPEG_ACTIVE_MARKER_DHT, 0,
+ V4L2_JPEG_ACTIVE_MARKER_DHT);
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 1,
+ 100, 1, 100);
+ cpia2_usb_alt.def = alternate;
+ cam->usb_alt = v4l2_ctrl_new_custom(hdl, &cpia2_usb_alt, NULL);
+ /* VP5 Only */
+ if (cam->params.pnp_id.device_type != DEVICE_STV_672)
+ v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ /* Flicker control only valid for 672 */
+ if (cam->params.pnp_id.device_type == DEVICE_STV_672)
+ v4l2_ctrl_new_std_menu(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0);
+ /* Light control only valid for the QX5 Microscope */
+ if (cam->params.pnp_id.product == 0x151) {
+ cam->top_light = v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_1, 0, 1, 1, 0);
+ cam->bottom_light = v4l2_ctrl_new_std(hdl, &cpia2_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_2, 0, 1, 1, 0);
+ v4l2_ctrl_cluster(2, &cam->top_light);
+ }
- memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template));
- video_set_drvdata(cam->vdev, cam);
- cam->vdev->lock = &cam->v4l2_lock;
+ if (hdl->error) {
+ ret = hdl->error;
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+ }
+
+ cam->vdev = cpia2_template;
+ video_set_drvdata(&cam->vdev, cam);
+ cam->vdev.lock = &cam->v4l2_lock;
+ cam->vdev.ctrl_handler = hdl;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ set_bit(V4L2_FL_USE_FH_PRIO, &cam->vdev.flags);
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &cam->vdev.flags);
reset_camera_struct_v4l(cam);
/* register v4l device */
- if (video_register_device(cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
+ if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
ERR("video_register_device failed\n");
- video_device_release(cam->vdev);
return -ENODEV;
}
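Because the controls are now registered through the v4l2_ctrl handler built above, they appear through the standard control enumeration rather than the removed driver-private queryctrl/querymenu code; a rough sketch of walking them from userspace with the NEXT_CTRL flag (the device node is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_queryctrl qc;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0)
                return 1;
        memset(&qc, 0, sizeof(qc));
        qc.id = V4L2_CTRL_FLAG_NEXT_CTRL;
        while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
                printf("%s (id 0x%x)\n", qc.name, qc.id);
                qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
        }
        close(fd);
        return 0;
}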
@@ -1481,13 +1170,7 @@ int cpia2_register_camera(struct camera_data *cam)
*****************************************************************************/
void cpia2_unregister_camera(struct camera_data *cam)
{
- if (!cam->open_count) {
- video_unregister_device(cam->vdev);
- } else {
- LOG("%s removed while open, deferring "
- "video_unregister_device\n",
- video_device_node_name(cam->vdev));
- }
+ video_unregister_device(&cam->vdev);
}
/******************************************************************************
@@ -1524,23 +1207,12 @@ static void __init check_parameters(void)
LOG("alternate specified is invalid, using %d\n", alternate);
}
- if (flicker_mode != NEVER_FLICKER && flicker_mode != ANTI_FLICKER_ON) {
- flicker_mode = NEVER_FLICKER;
+ if (flicker_mode != 0 && flicker_mode != FLICKER_50 && flicker_mode != FLICKER_60) {
+ flicker_mode = 0;
LOG("Flicker mode specified is invalid, using %d\n",
flicker_mode);
}
- if (flicker_freq != FLICKER_50 && flicker_freq != FLICKER_60) {
- flicker_freq = FLICKER_60;
- LOG("Flicker mode specified is invalid, using %d\n",
- flicker_freq);
- }
-
- if(video_nr < -1 || video_nr > 64) {
- video_nr = -1;
- LOG("invalid video_nr specified, must be -1 to 64\n");
- }
-
DBG("Using %d buffers, each %d bytes, alternate=%d\n",
num_buffers, buffer_size, alternate);
}
diff --git a/drivers/media/video/cpia2/cpia2dev.h b/drivers/media/video/cpia2/cpia2dev.h
deleted file mode 100644
index f66691fe5a35..000000000000
--- a/drivers/media/video/cpia2/cpia2dev.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/****************************************************************************
- *
- * Filename: cpia2dev.h
- *
- * Copyright 2001, STMicrolectronics, Inc.
- *
- * Contact: steve.miller@st.com
- *
- * Description:
- * This file provides definitions for applications wanting to use the
- * cpia2 driver beyond the generic v4l capabilities.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ****************************************************************************/
-
-#ifndef CPIA2_DEV_HEADER
-#define CPIA2_DEV_HEADER
-
-#include <linux/videodev2.h>
-
-/***
- * The following defines are ioctl numbers based on video4linux private ioctls,
- * which can range from 192 (BASE_VIDIOCPRIVATE) to 255. All of these take int
- * args
- */
-#define CPIA2_IOC_SET_GPIO _IOW('v', BASE_VIDIOC_PRIVATE + 17, __u32)
-
-/* V4L2 driver specific controls */
-#define CPIA2_CID_TARGET_KB (V4L2_CID_PRIVATE_BASE+0)
-#define CPIA2_CID_GPIO (V4L2_CID_PRIVATE_BASE+1)
-#define CPIA2_CID_FLICKER_MODE (V4L2_CID_PRIVATE_BASE+2)
-#define CPIA2_CID_FRAMERATE (V4L2_CID_PRIVATE_BASE+3)
-#define CPIA2_CID_USB_ALT (V4L2_CID_PRIVATE_BASE+4)
-#define CPIA2_CID_LIGHTS (V4L2_CID_PRIVATE_BASE+5)
-#define CPIA2_CID_RESET_CAMERA (V4L2_CID_PRIVATE_BASE+6)
-
-#endif
diff --git a/drivers/media/video/cx18/cx18-alsa-main.c b/drivers/media/video/cx18/cx18-alsa-main.c
index e118361c2e7b..6d2a98246b6d 100644
--- a/drivers/media/video/cx18/cx18-alsa-main.c
+++ b/drivers/media/video/cx18/cx18-alsa-main.c
@@ -285,6 +285,7 @@ static void __exit cx18_alsa_exit(void)
drv = driver_find("cx18", &pci_bus_type);
ret = driver_for_each_device(drv, NULL, NULL, cx18_alsa_exit_callback);
+ (void)ret; /* suppress compiler warning */
cx18_ext_init = NULL;
printk(KERN_INFO "cx18-alsa: module unload complete\n");
diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c
index 82d195be9197..7a5b84a86bb3 100644
--- a/drivers/media/video/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/video/cx18/cx18-alsa-pcm.c
@@ -190,7 +190,7 @@ static int snd_cx18_pcm_capture_open(struct snd_pcm_substream *substream)
ret = cx18_start_v4l2_encode_stream(s);
snd_cx18_unlock(cxsc);
- return 0;
+ return ret;
}
static int snd_cx18_pcm_capture_close(struct snd_pcm_substream *substream)
@@ -199,12 +199,11 @@ static int snd_cx18_pcm_capture_close(struct snd_pcm_substream *substream)
struct v4l2_device *v4l2_dev = cxsc->v4l2_dev;
struct cx18 *cx = to_cx18(v4l2_dev);
struct cx18_stream *s;
- int ret;
/* Instruct the cx18 to stop sending packets */
snd_cx18_lock(cxsc);
s = &cx->streams[CX18_ENC_STREAM_TYPE_PCM];
- ret = cx18_stop_v4l2_encode_stream(s, 0);
+ cx18_stop_v4l2_encode_stream(s, 0);
clear_bit(CX18_F_S_STREAMING, &s->s_flags);
cx18_release_stream(s);
@@ -252,13 +251,10 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
static int snd_cx18_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- int ret;
-
dprintk("%s called\n", __func__);
- ret = snd_pcm_alloc_vmalloc_buffer(substream,
+ return snd_pcm_alloc_vmalloc_buffer(substream,
params_buffer_bytes(params));
- return 0;
}
static int snd_cx18_pcm_hw_free(struct snd_pcm_substream *substream)
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index b55d57cc1a1c..7e5ffd6f5178 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -838,10 +838,10 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
}
CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%lx\n",
+ "irq: %d, latency: %d, memory: 0x%llx\n",
cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
- cx->pci_dev->irq, pci_latency, (unsigned long)cx->base_addr);
+ cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
return 0;
}
@@ -938,7 +938,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
if (retval)
goto err;
- CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
+ CX18_DEBUG_INFO("base addr: 0x%llx\n", (u64)cx->base_addr);
/* PCI Device Setup */
retval = cx18_setup_pci(cx, pci_dev, pci_id);
@@ -946,8 +946,8 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
goto free_workqueues;
/* map io memory */
- CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
+ CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
CX18_MEM_SIZE);
if (!cx->enc_mem) {
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 7a37e0ee136f..2767c64df0c8 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -622,7 +622,7 @@ struct cx18 {
unique ID. Starts at 1, so 0 can be used as
uninitialized value in the stream->id. */
- u32 base_addr;
+ resource_size_t base_addr;
u8 card_rev;
void __iomem *enc_mem, *reg_mem;
diff --git a/drivers/media/video/cx18/cx18-firmware.c b/drivers/media/video/cx18/cx18-firmware.c
index 1b3fb502e6be..b85c292a849a 100644
--- a/drivers/media/video/cx18/cx18-firmware.c
+++ b/drivers/media/video/cx18/cx18-firmware.c
@@ -164,8 +164,13 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
while (offset + sizeof(seghdr) < fw->size) {
- /* TODO: byteswapping */
- memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
+ const u32 *shptr = src + offset / 4;
+
+ seghdr.sync1 = le32_to_cpu(shptr[0]);
+ seghdr.sync2 = le32_to_cpu(shptr[1]);
+ seghdr.addr = le32_to_cpu(shptr[2]);
+ seghdr.size = le32_to_cpu(shptr[3]);
+
offset += sizeof(seghdr);
if (seghdr.sync1 != APU_ROM_SYNC1 ||
seghdr.sync2 != APU_ROM_SYNC2) {
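The hunk above replaces a raw memcpy of the segment header with explicit le32_to_cpu() reads so the parse also works on big-endian hosts; a userspace analogue of the same idea, decoding a four-word little-endian header byte by byte (the header bytes are made up for illustration, not real firmware):

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

struct seg_hdr {
        uint32_t sync1, sync2, addr, size;
};

int main(void)
{
        /* illustrative header bytes, little-endian on the wire */
        const uint8_t buf[16] = {
                0xef, 0xbe, 0xad, 0xde, /* sync1 = 0xdeadbeef */
                0x0d, 0xf0, 0xfe, 0xca, /* sync2 = 0xcafef00d */
                0x00, 0x10, 0x00, 0x00, /* addr  = 0x1000 */
                0x00, 0x02, 0x00, 0x00, /* size  = 0x200 */
        };
        struct seg_hdr h = {
                get_le32(buf), get_le32(buf + 4),
                get_le32(buf + 8), get_le32(buf + 12),
        };

        printf("sync1=0x%x addr=0x%x size=%u\n",
               (unsigned)h.sync1, (unsigned)h.addr, (unsigned)h.size);
        return 0;
}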
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index be49f68ddf37..35fde4e931f5 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -1137,7 +1137,7 @@ static long cx18_default(struct file *file, void *fh, bool valid_prio,
}
default:
- return -EINVAL;
+ return -ENOTTY;
}
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index 0c7796e76ac0..eabf00c6351b 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -434,6 +434,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 handle, mdl_ack_offset, mdl_ack_count;
struct cx18_mailbox *mb;
+ int i;
mb = &order->mb;
handle = mb->args[0];
@@ -447,8 +448,9 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
return -1;
}
- cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
- sizeof(struct cx18_mdl_ack) * mdl_ack_count);
+ for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
+ ((u32 *)order->mdl_ack)[i / sizeof(u32)] =
+ cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);
if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
mb_ack_irq(cx, order);
@@ -538,6 +540,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
struct cx18_mailbox *order_mb;
struct cx18_in_work_order *order;
int submit;
+ int i;
switch (rpu) {
case CPU:
@@ -562,10 +565,12 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
order_mb = &order->mb;
/* mb->cmd and mb->args[0] through mb->args[2] */
- cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
+ for (i = 0; i < 4; i++)
+ (&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);
+
/* mb->request and mb->ack. N.B. we want to read mb->ack last */
- cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
- 2 * sizeof(u32));
+ for (i = 0; i < 2; i++)
+ (&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
if (order_mb->request == order_mb->ack) {
CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
@@ -595,9 +600,8 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
const struct cx18_api_info *info = find_api_info(cmd);
- u32 state, irq, req, ack, err;
+ u32 irq, req, ack, err;
struct cx18_mailbox __iomem *mb;
- u32 __iomem *xpu_state;
wait_queue_head_t *waitq;
struct mutex *mb_lock;
unsigned long int t0, timeout, ret;
@@ -628,14 +632,12 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
mb_lock = &cx->epu2apu_mb_lock;
irq = IRQ_EPU_TO_APU;
mb = &cx->scb->epu2apu_mb;
- xpu_state = &cx->scb->apu_state;
break;
case CPU:
waitq = &cx->mb_cpu_waitq;
mb_lock = &cx->epu2cpu_mb_lock;
irq = IRQ_EPU_TO_CPU;
mb = &cx->scb->epu2cpu_mb;
- xpu_state = &cx->scb->cpu_state;
break;
default:
CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
@@ -653,7 +655,6 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
* by a signal, we may get here and find a busy mailbox. After waiting,
* mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
*/
- state = cx18_readl(cx, xpu_state);
req = cx18_readl(cx, &mb->request);
timeout = msecs_to_jiffies(10);
ret = wait_event_timeout(*waitq,
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 638cca156b58..4185bcb80ca3 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -980,7 +980,6 @@ void cx18_stop_all_captures(struct cx18 *cx)
int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
{
struct cx18 *cx = s->cx;
- unsigned long then;
if (!cx18_stream_enabled(s))
return -EINVAL;
@@ -999,8 +998,6 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
else
cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);
- then = jiffies;
-
if (s->type == CX18_ENC_STREAM_TYPE_MPG && gop_end) {
CX18_INFO("ignoring gop_end: not (yet?) supported by the firmware\n");
}
diff --git a/drivers/media/video/cx231xx/cx231xx-417.c b/drivers/media/video/cx231xx/cx231xx-417.c
index d4327dab5a36..ce2f62238a19 100644
--- a/drivers/media/video/cx231xx/cx231xx-417.c
+++ b/drivers/media/video/cx231xx/cx231xx-417.c
@@ -1095,7 +1095,7 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
{
int version;
int retval;
- u32 i, data[7];
+ u32 i;
u32 val = 0;
dprintk(1, "%s()\n", __func__);
@@ -1154,6 +1154,11 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
CX231xx_CUSTOM_EXTENSION_USR_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0);
*/
+
+#if 0
+ /* TODO */
+ u32 data[7];
+
/* Setup to capture VBI */
data[0] = 0x0001BD00;
data[1] = 1; /* frames per interrupt */
@@ -1162,7 +1167,7 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
data[4] = 0x206080C0; /* stop codes */
data[5] = 6; /* lines */
data[6] = 64; /* BPL */
-/*
+
cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_CONFIG, 7, 0, data[0], data[1],
data[2], data[3], data[4], data[5], data[6]);
@@ -1175,7 +1180,7 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
cx231xx_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0,
i | 0x80000000, valid, 0, 0, 0);
}
-*/
+#endif
/* cx231xx_api_cmd(dev, CX2341X_ENC_MUTE_AUDIO, 1, 0, CX231xx_UNMUTE);
msleep(60);
*/
@@ -1792,17 +1797,16 @@ static int vidioc_streamon(struct file *file, void *priv,
struct cx231xx_fh *fh = file->private_data;
struct cx231xx *dev = fh->dev;
- int rc = 0;
dprintk(3, "enter vidioc_streamon()\n");
cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
if (dev->USE_ISO)
- rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
CX231XX_NUM_BUFS,
dev->video_mode.max_pkt_size,
cx231xx_isoc_copy);
else {
- rc = cx231xx_init_bulk(dev, 320,
+ cx231xx_init_bulk(dev, 320,
5,
dev->ts1_mode.max_pkt_size,
cx231xx_bulk_copy);
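
In cx231xx-417.c the unused data[7] setup is not deleted outright: it moves inside an #if 0 block together with its declaration, which keeps the VBI reference values in the tree while removing the set-but-unused variable and the half-commented-out calls. A small stand-alone sketch of that pattern, with made-up helper names and placeholder values:

#include <linux/types.h>

/* Sketch only: send_vbi_config() is a hypothetical helper, not a cx231xx call. */
static int configure_vbi_capture(void)
{
#if 0	/* TODO: enable once the firmware supports VBI capture */
	u32 data[7] = { 0 };

	data[1] = 1;	/* frames per interrupt */
	data[6] = 64;	/* bytes per line */

	return send_vbi_config(data);
#endif
	return 0;
}
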
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index a2c2b7d343ec..b4c99c7270cf 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -307,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvisocpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
urb->complete = cx231xx_audio_isocirq;
@@ -368,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvbulkpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = 0;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->complete = cx231xx_audio_bulkirq;
urb->transfer_buffer_length = sb_size;
@@ -523,21 +523,24 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
static int snd_cx231xx_hw_capture_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- unsigned int channels, rate, format;
int ret;
dprintk("Setting capture parameters\n");
ret = snd_pcm_alloc_vmalloc_buffer(substream,
params_buffer_bytes(hw_params));
+#if 0
+ /* TODO: set up cx231xx audio chip to deliver the correct audio format,
+ current default is 48000hz multiplexed => 96000hz mono
+ which shouldn't matter since analogue TV only supports mono */
+ unsigned int channels, rate, format;
+
format = params_format(hw_params);
rate = params_rate(hw_params);
channels = params_channels(hw_params);
+#endif
- /* TODO: set up cx231xx audio chip to deliver the correct audio format,
- current default is 48000hz multiplexed => 96000hz mono
- which shouldn't matter since analogue TV only supports mono */
- return 0;
+ return ret;
}
static int snd_cx231xx_hw_capture_free(struct snd_pcm_substream *substream)
@@ -586,7 +589,7 @@ static int snd_cx231xx_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct cx231xx *dev = snd_pcm_substream_chip(substream);
- int retval;
+ int retval = 0;
if (dev->state & DEV_DISCONNECTED)
return -ENODEV;
@@ -601,12 +604,13 @@ static int snd_cx231xx_capture_trigger(struct snd_pcm_substream *substream,
break;
default:
retval = -EINVAL;
+ break;
}
spin_unlock(&dev->adev.slock);
schedule_work(&dev->wq_trigger);
- return 0;
+ return retval;
}
static snd_pcm_uframes_t snd_cx231xx_capture_pointer(struct snd_pcm_substream
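
The cx231xx-audio.c hunks drop URB_NO_TRANSFER_DMA_MAP from the isoc and bulk audio URBs. That flag is a promise to usbcore that the driver has already placed a valid DMA address in urb->transfer_dma; clearing it lets the USB core map the kmalloc'ed transfer_buffer itself. A hedged sketch of that submission pattern (device, endpoint and buffer names are placeholders, not the driver's):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch: queue one bulk-in URB and let usbcore handle DMA mapping of
 * transfer_buffer, i.e. URB_NO_TRANSFER_DMA_MAP is deliberately not set. */
static int queue_audio_urb(struct usb_device *udev, unsigned int ep,
			   usb_complete_t complete_fn, void *context, int len)
{
	struct urb *urb;
	void *buf;
	int ret;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep),
			  buf, len, complete_fn, context);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		usb_free_urb(urb);
	}
	return ret;
}
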
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 53ff26e7abf7..b085a3c6dc04 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -934,33 +934,29 @@ int cx231xx_set_decoder_video_input(struct cx231xx *dev,
void cx231xx_enable656(struct cx231xx *dev)
{
u8 temp = 0;
- int status;
/*enable TS1 data[0:7] as output to export 656*/
- status = vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF);
+ vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF);
/*enable TS1 clock as output to export 656*/
- status = vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
+ vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
temp = temp|0x04;
- status = vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
-
+ vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
}
EXPORT_SYMBOL_GPL(cx231xx_enable656);
void cx231xx_disable656(struct cx231xx *dev)
{
u8 temp = 0;
- int status;
-
- status = vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00);
+ vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00);
- status = vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
+ vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp);
temp = temp&0xFB;
- status = vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
+ vid_blk_write_byte(dev, TS1_PIN_CTL1, temp);
}
EXPORT_SYMBOL_GPL(cx231xx_disable656);
@@ -1320,117 +1316,115 @@ void update_HH_register_after_set_DIF(struct cx231xx *dev)
void cx231xx_dump_HH_reg(struct cx231xx *dev)
{
- u8 status = 0;
u32 value = 0;
u16 i = 0;
value = 0x45005390;
- status = vid_blk_write_word(dev, 0x104, value);
+ vid_blk_write_word(dev, 0x104, value);
for (i = 0x100; i < 0x140; i++) {
- status = vid_blk_read_word(dev, i, &value);
+ vid_blk_read_word(dev, i, &value);
cx231xx_info("reg0x%x=0x%x\n", i, value);
i = i+3;
}
for (i = 0x300; i < 0x400; i++) {
- status = vid_blk_read_word(dev, i, &value);
+ vid_blk_read_word(dev, i, &value);
cx231xx_info("reg0x%x=0x%x\n", i, value);
i = i+3;
}
for (i = 0x400; i < 0x440; i++) {
- status = vid_blk_read_word(dev, i, &value);
+ vid_blk_read_word(dev, i, &value);
cx231xx_info("reg0x%x=0x%x\n", i, value);
i = i+3;
}
- status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390);
- status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
+ vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value);
cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value);
}
void cx231xx_dump_SC_reg(struct cx231xx *dev)
{
u8 value[4] = { 0, 0, 0, 0 };
- int status = 0;
cx231xx_info("cx231xx_dump_SC_reg!\n");
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0],
value[1], value[2], value[3]);
- status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
+ cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN,
value, 4);
cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0],
value[1], value[2], value[3]);
@@ -1441,18 +1435,15 @@ void cx231xx_dump_SC_reg(struct cx231xx *dev)
void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev)
{
- u8 status = 0;
u8 value = 0;
-
-
- status = afe_read_byte(dev, ADC_STATUS2_CH3, &value);
+ afe_read_byte(dev, ADC_STATUS2_CH3, &value);
value = (value & 0xFE)|0x01;
- status = afe_write_byte(dev, ADC_STATUS2_CH3, value);
+ afe_write_byte(dev, ADC_STATUS2_CH3, value);
- status = afe_read_byte(dev, ADC_STATUS2_CH3, &value);
+ afe_read_byte(dev, ADC_STATUS2_CH3, &value);
value = (value & 0xFE)|0x00;
- status = afe_write_byte(dev, ADC_STATUS2_CH3, value);
+ afe_write_byte(dev, ADC_STATUS2_CH3, value);
/*
@@ -1464,44 +1455,43 @@ void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev)
for low-if agc defect
*/
- status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value);
+ afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value);
value = (value & 0xFC)|0x00;
- status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value);
+ afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value);
- status = afe_read_byte(dev, ADC_INPUT_CH3, &value);
+ afe_read_byte(dev, ADC_INPUT_CH3, &value);
value = (value & 0xF9)|0x02;
- status = afe_write_byte(dev, ADC_INPUT_CH3, value);
+ afe_write_byte(dev, ADC_INPUT_CH3, value);
- status = afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value);
+ afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value);
value = (value & 0xFB)|0x04;
- status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, value);
+ afe_write_byte(dev, ADC_FB_FRCRST_CH3, value);
- status = afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value);
+ afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value);
value = (value & 0xFC)|0x03;
- status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value);
+ afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value);
- status = afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value);
+ afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value);
value = (value & 0xFB)|0x04;
- status = afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value);
+ afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value);
- status = afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
+ afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
value = (value & 0xF8)|0x06;
- status = afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
+ afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
- status = afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
+ afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value);
value = (value & 0x8F)|0x40;
- status = afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
+ afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value);
- status = afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value);
+ afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value);
value = (value & 0xDF)|0x20;
- status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value);
+ afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value);
}
void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
u8 spectral_invert, u32 mode)
{
u32 colibri_carrier_offset = 0;
- u8 status = 0;
u32 func_mode = 0x01; /* Device has a DIF if this function is called */
u32 standard = 0;
u8 value[4] = { 0, 0, 0, 0 };
@@ -1511,15 +1501,15 @@ void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq,
value[1] = (u8) 0x6F;
value[2] = (u8) 0x6F;
value[3] = (u8) 0x6F;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
PWR_CTL_EN, value, 4);
/*Set colibri for low IF*/
- status = cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF);
+ cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF);
/* Set C2HH for low IF operation.*/
standard = dev->norm;
- status = cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode,
+ cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode,
func_mode, standard);
/* Get colibri offsets.*/
@@ -1556,7 +1546,6 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
u8 spectral_invert, u32 mode)
{
unsigned long pll_freq_word;
- int status = 0;
u32 dif_misc_ctrl_value = 0;
u64 pll_freq_u64 = 0;
u32 i = 0;
@@ -1567,7 +1556,7 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
if (mode == TUNER_MODE_FM_RADIO) {
pll_freq_word = 0x905A1CAC;
- status = vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
+ vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
} else /*KSPROPERTY_TUNER_MODE_TV*/{
/* Calculate the PLL frequency word based on the adjusted if_freq*/
@@ -1576,23 +1565,23 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
do_div(pll_freq_u64, 50000000);
pll_freq_word = (u32)pll_freq_u64;
/*pll_freq_word = 0x3463497;*/
- status = vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
+ vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word);
if (spectral_invert) {
if_freq -= 400000;
/* Enable Spectral Invert*/
- status = vid_blk_read_word(dev, DIF_MISC_CTRL,
+ vid_blk_read_word(dev, DIF_MISC_CTRL,
&dif_misc_ctrl_value);
dif_misc_ctrl_value = dif_misc_ctrl_value | 0x00200000;
- status = vid_blk_write_word(dev, DIF_MISC_CTRL,
+ vid_blk_write_word(dev, DIF_MISC_CTRL,
dif_misc_ctrl_value);
} else {
if_freq += 400000;
/* Disable Spectral Invert*/
- status = vid_blk_read_word(dev, DIF_MISC_CTRL,
+ vid_blk_read_word(dev, DIF_MISC_CTRL,
&dif_misc_ctrl_value);
dif_misc_ctrl_value = dif_misc_ctrl_value & 0xFFDFFFFF;
- status = vid_blk_write_word(dev, DIF_MISC_CTRL,
+ vid_blk_write_word(dev, DIF_MISC_CTRL,
dif_misc_ctrl_value);
}
@@ -1606,10 +1595,10 @@ void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq,
}
cx231xx_info("Enter IF=%zd\n",
- sizeof(Dif_set_array)/sizeof(struct dif_settings));
- for (i = 0; i < sizeof(Dif_set_array)/sizeof(struct dif_settings); i++) {
+ ARRAY_SIZE(Dif_set_array));
+ for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) {
if (Dif_set_array[i].if_freq == if_freq) {
- status = vid_blk_write_word(dev,
+ vid_blk_write_word(dev,
Dif_set_array[i].register_address, Dif_set_array[i].value);
}
}
@@ -3090,31 +3079,30 @@ int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len)
*/
int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len)
{
- int status = 0;
int i = 0;
/* get the lock */
mutex_lock(&dev->gpio_i2c_lock);
/* start */
- status = cx231xx_gpio_i2c_start(dev);
+ cx231xx_gpio_i2c_start(dev);
/* write dev_addr */
- status = cx231xx_gpio_i2c_write_byte(dev, dev_addr << 1);
+ cx231xx_gpio_i2c_write_byte(dev, dev_addr << 1);
/* read Ack */
- status = cx231xx_gpio_i2c_read_ack(dev);
+ cx231xx_gpio_i2c_read_ack(dev);
for (i = 0; i < len; i++) {
/* Write data */
- status = cx231xx_gpio_i2c_write_byte(dev, buf[i]);
+ cx231xx_gpio_i2c_write_byte(dev, buf[i]);
/* read Ack */
- status = cx231xx_gpio_i2c_read_ack(dev);
+ cx231xx_gpio_i2c_read_ack(dev);
}
/* write End */
- status = cx231xx_gpio_i2c_end(dev);
+ cx231xx_gpio_i2c_end(dev);
/* release the lock */
mutex_unlock(&dev->gpio_i2c_lock);
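
Besides dropping the write-only status locals, the avcore hunks switch the DIF table walk from an open-coded sizeof(array)/sizeof(element) division to the ARRAY_SIZE() macro from <linux/kernel.h>, which is shorter and cannot go stale if the element type changes. A minimal illustration; the table contents below are made up, not the driver's Dif_set_array:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/printk.h>
#include <linux/types.h>

struct dif_setting {
	u32 if_freq;
	u32 register_address;
	u32 value;
};

static const struct dif_setting dif_table[] = {
	{ 3800000, 0x300, 0x12345678 },	/* illustrative entries */
	{ 4500000, 0x304, 0x9abcdef0 },
};

static void apply_dif_settings(u32 if_freq)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dif_table); i++)
		if (dif_table[i].if_freq == if_freq)
			pr_info("reg 0x%x <- 0x%x\n",
				dif_table[i].register_address,
				dif_table[i].value);
}
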
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index 08dd930f882a..05358d486135 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -754,7 +754,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
}
}
- return 0;
+ return errCode ? -EINVAL : 0;
}
EXPORT_SYMBOL_GPL(cx231xx_set_mode);
@@ -764,7 +764,7 @@ int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
int actlen, ret = -ENOMEM;
u32 *buffer;
-buffer = kzalloc(4096, GFP_KERNEL);
+ buffer = kzalloc(4096, GFP_KERNEL);
if (buffer == NULL) {
cx231xx_info("out of mem\n");
return -ENOMEM;
@@ -772,16 +772,16 @@ buffer = kzalloc(4096, GFP_KERNEL);
memcpy(&buffer[0], firmware, 4096);
ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5),
- buffer, 4096, &actlen, 2000);
+ buffer, 4096, &actlen, 2000);
if (ret)
cx231xx_info("bulk message failed: %d (%d/%d)", ret,
- size, actlen);
+ size, actlen);
else {
errCode = actlen != size ? -1 : 0;
}
-kfree(buffer);
- return 0;
+ kfree(buffer);
+ return errCode;
}
/*****************************************************************
@@ -797,7 +797,7 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
- int rc, i;
+ int i;
switch (urb->status) {
case 0: /* success */
@@ -814,7 +814,7 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->video_mode.slock);
- rc = dev->video_mode.isoc_ctl.isoc_copy(dev, urb);
+ dev->video_mode.isoc_ctl.isoc_copy(dev, urb);
spin_unlock(&dev->video_mode.slock);
/* Reset urb buffers */
@@ -822,7 +822,6 @@ static void cx231xx_isoc_irq_callback(struct urb *urb)
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length = 0;
}
- urb->status = 0;
urb->status = usb_submit_urb(urb, GFP_ATOMIC);
if (urb->status) {
@@ -843,7 +842,6 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode);
- int rc;
switch (urb->status) {
case 0: /* success */
@@ -860,12 +858,10 @@ static void cx231xx_bulk_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->video_mode.slock);
- rc = dev->video_mode.bulk_ctl.bulk_copy(dev, urb);
+ dev->video_mode.bulk_ctl.bulk_copy(dev, urb);
spin_unlock(&dev->video_mode.slock);
/* Reset urb buffers */
- urb->status = 0;
-
urb->status = usb_submit_urb(urb, GFP_ATOMIC);
if (urb->status) {
cx231xx_isocdbg("urb resubmit failed (error=%i)\n",
@@ -1231,42 +1227,40 @@ int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
EXPORT_SYMBOL_GPL(cx231xx_init_bulk);
void cx231xx_stop_TS1(struct cx231xx *dev)
{
- int status = 0;
u8 val[4] = { 0, 0, 0, 0 };
- val[0] = 0x00;
- val[1] = 0x03;
- val[2] = 0x00;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS_MODE_REG, val, 4);
-
- val[0] = 0x00;
- val[1] = 0x70;
- val[2] = 0x04;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS1_CFG_REG, val, 4);
+ val[0] = 0x00;
+ val[1] = 0x03;
+ val[2] = 0x00;
+ val[3] = 0x00;
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS_MODE_REG, val, 4);
+
+ val[0] = 0x00;
+ val[1] = 0x70;
+ val[2] = 0x04;
+ val[3] = 0x00;
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
}
/* EXPORT_SYMBOL_GPL(cx231xx_stop_TS1); */
void cx231xx_start_TS1(struct cx231xx *dev)
{
- int status = 0;
u8 val[4] = { 0, 0, 0, 0 };
- val[0] = 0x03;
- val[1] = 0x03;
- val[2] = 0x00;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS_MODE_REG, val, 4);
-
- val[0] = 0x04;
- val[1] = 0xA3;
- val[2] = 0x3B;
- val[3] = 0x00;
- status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
- TS1_CFG_REG, val, 4);
+ val[0] = 0x03;
+ val[1] = 0x03;
+ val[2] = 0x00;
+ val[3] = 0x00;
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS_MODE_REG, val, 4);
+
+ val[0] = 0x04;
+ val[1] = 0xA3;
+ val[2] = 0x3B;
+ val[3] = 0x00;
+ cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
+ TS1_CFG_REG, val, 4);
}
/* EXPORT_SYMBOL_GPL(cx231xx_start_TS1); */
/*****************************************************************
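
The cx231xx-core.c changes make cx231xx_ep5_bulkout() report the real outcome instead of a hard-coded 0: errCode now reflects a short transfer, and the kfree()/return path is re-indented so that result actually propagates to the caller. A rough sketch of that shape, assuming a generic firmware-chunk writer rather than the driver's exact helper:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

/* Sketch: push one 4 KiB firmware chunk out over bulk endpoint 5 and report
 * both usb_bulk_msg() failures and short writes to the caller. */
static int ep5_bulkout_chunk(struct usb_device *udev, const u8 *fw, u16 size)
{
	int actlen, ret;
	void *buf;

	buf = kzalloc(4096, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, fw, min_t(u16, size, 4096));

	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 5),
			   buf, 4096, &actlen, 2000 /* ms timeout */);
	if (!ret && actlen != size)
		ret = -EIO;	/* short transfer */

	kfree(buf);
	return ret;
}
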
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 8cdee5f78f13..ac7db52f404f 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -83,7 +83,6 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
*/
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
- struct cx231xx_buffer *buf;
struct cx231xx_dmaqueue *dma_q = urb->context;
int rc = 1;
unsigned char *p_buffer;
@@ -102,8 +101,6 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
- buf = dev->vbi_mode.bulk_ctl.buf;
-
/* get buffer pointer and length */
p_buffer = urb->transfer_buffer;
buffer_size = urb->actual_length;
@@ -310,7 +307,6 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
struct cx231xx_video_mode *vmode =
container_of(dma_q, struct cx231xx_video_mode, vidq);
struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
- int rc;
switch (urb->status) {
case 0: /* success */
@@ -328,7 +324,7 @@ static void cx231xx_irq_vbi_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->vbi_mode.slock);
- rc = dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
+ dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
spin_unlock(&dev->vbi_mode.slock);
/* Reset status */
@@ -452,7 +448,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
return -ENOMEM;
}
dev->vbi_mode.bulk_ctl.urb[i] = urb;
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = 0;
dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
kzalloc(sb_size, GFP_KERNEL);
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index 7f916f0685e9..523aa49d6b86 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -326,9 +326,7 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
*/
static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
{
- struct cx231xx_buffer *buf;
struct cx231xx_dmaqueue *dma_q = urb->context;
- unsigned char *outp = NULL;
int i, rc = 1;
unsigned char *p_buffer;
u32 bytes_parsed = 0, buffer_size = 0;
@@ -346,10 +344,6 @@ static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
- buf = dev->video_mode.isoc_ctl.buf;
- if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
-
for (i = 0; i < urb->number_of_packets; i++) {
int status = urb->iso_frame_desc[i].status;
@@ -429,9 +423,7 @@ static inline int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
{
- struct cx231xx_buffer *buf;
struct cx231xx_dmaqueue *dma_q = urb->context;
- unsigned char *outp = NULL;
int rc = 1;
unsigned char *p_buffer;
u32 bytes_parsed = 0, buffer_size = 0;
@@ -449,10 +441,6 @@ static inline int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
- buf = dev->video_mode.bulk_ctl.buf;
- if (buf != NULL)
- outp = videobuf_to_vmalloc(&buf->vb);
-
if (1) {
/* get buffer pointer and length */
@@ -701,13 +689,9 @@ void cx231xx_reset_video_buffer(struct cx231xx *dev,
buf = dev->video_mode.bulk_ctl.buf;
if (buf == NULL) {
- u8 *outp = NULL;
/* first try to get the buffer */
get_next_buf(dma_q, &buf);
- if (buf)
- outp = videobuf_to_vmalloc(&buf->vb);
-
dma_q->pos = 0;
dma_q->field1_done = 0;
dma_q->current_field = -1;
@@ -2561,6 +2545,10 @@ static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
vfd->release = video_device_release;
vfd->debug = video_debug;
vfd->lock = &dev->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
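
Several hunks in this series (cx231xx-video.c here, and the davinci vpbe/vpif probes further down) set V4L2_FL_LOCK_ALL_FOPS on the video_device before registration: with only vfd->lock assigned, the V4L2 core of this era serialises just the ioctl path, and these drivers still rely on core locking for every file operation until they are audited. Roughly, the registration sequence looks like the sketch below; the helper name and arguments are placeholders.

#include <linux/bitops.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>

/* Sketch: register a video node that still wants core locking for all file
 * operations, not only ioctl.  Error handling is trimmed down. */
static int register_video_node(struct video_device *vfd, struct mutex *lock)
{
	vfd->lock = lock;	/* the core takes this around ioctls */

	/* Locking in fops other than ioctl should really be done by the
	 * driver; until it is audited, ask the core to lock everything. */
	set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);

	return video_register_device(vfd, VFL_TYPE_GRABBER, -1);
}
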
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 19b5499d2624..080e11157e5f 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -127,22 +127,37 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR1250] = {
.name = "Hauppauge WinTV-HVR1250",
+ .porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .tuner_bus = 1,
+#endif
+ .force_bff = 1,
.input = {{
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
.type = CX23885_VMUX_TELEVISION,
- .vmux = 0,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1,
+ .amux = CX25840_AUDIO8,
.gpio0 = 0xff00,
}, {
- .type = CX23885_VMUX_DEBUG,
- .vmux = 0,
- .gpio0 = 0xff01,
- }, {
+#endif
.type = CX23885_VMUX_COMPOSITE1,
- .vmux = 1,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN6_CH1,
+ .amux = CX25840_AUDIO7,
.gpio0 = 0xff02,
}, {
.type = CX23885_VMUX_SVIDEO,
- .vmux = 2,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
.gpio0 = 0xff02,
} },
},
@@ -267,7 +282,55 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR1255] = {
.name = "Hauppauge WinTV-HVR1255",
+ .porta = CX23885_ANALOG_VIDEO,
+ .portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN6_CH1,
+ .amux = CX25840_AUDIO7,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ } },
+ },
+ [CX23885_BOARD_HAUPPAUGE_HVR1255_22111] = {
+ .name = "Hauppauge WinTV-HVR1255",
+ .porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ } },
},
[CX23885_BOARD_HAUPPAUGE_HVR1210] = {
.name = "Hauppauge WinTV-HVR1210",
@@ -497,6 +560,10 @@ struct cx23885_board cx23885_boards[] = {
.name = "TerraTec Cinergy T PCIe Dual",
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
+ },
+ [CX23885_BOARD_TEVII_S471] = {
+ .name = "TeVii S471",
+ .portb = CX23885_MPEG_DVB,
}
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -620,7 +687,7 @@ struct cx23885_subid cx23885_subids[] = {
}, {
.subvendor = 0x0070,
.subdevice = 0x2259,
- .card = CX23885_BOARD_HAUPPAUGE_HVR1255,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1255_22111,
}, {
.subvendor = 0x0070,
.subdevice = 0x2291,
@@ -705,6 +772,10 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x153b,
.subdevice = 0x117e,
.card = CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL,
+ }, {
+ .subvendor = 0xd471,
+ .subdevice = 0x9022,
+ .card = CX23885_BOARD_TEVII_S471,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -892,7 +963,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
struct cx23885_dev *dev = port->dev;
u32 bitmask = 0;
- if (command == XC2028_RESET_CLK)
+ if ((command == XC2028_RESET_CLK) || (command == XC2028_I2C_FLUSH))
return 0;
if (command != 0) {
@@ -1122,6 +1193,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
/* GPIO-6 I2C Gate which can isolate the demod from the bus */
@@ -1259,6 +1331,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* FIXME: Implement me */
break;
@@ -1416,6 +1489,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1460,6 +1534,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_TEVII_S471:
case CX23885_BOARD_DVBWORLD_2005:
ts1->gen_ctrl_val = 0x5; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
@@ -1502,6 +1577,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1517,10 +1593,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
*/
switch (dev->board) {
case CX23885_BOARD_TEVII_S470:
- case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Currently only enabled for the integrated IR controller */
if (!enable_885_ir)
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1530,6 +1606,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_MYGICA_X8506:
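
The cx23885-cards.c changes add analog-video inputs to the HVR1250/HVR1255 board descriptions, split off a separate HVR1255 entry for the 22111 subdevice, and register the TeVii S471 (PCI subsystem 0xd471:0x9022) as a new board. New boards become selectable through the subvendor/subdevice table; the lookup works along the lines of this simplified sketch, where the struct and function names are generic stand-ins rather than the driver's own:

#include <linux/kernel.h>
#include <linux/types.h>

struct subid {
	u16 subvendor;
	u16 subdevice;
	unsigned int card;
};

static const struct subid subids[] = {
	{ 0x0070, 0x2259, 36 },	/* CX23885_BOARD_HAUPPAUGE_HVR1255_22111 */
	{ 0xd471, 0x9022, 35 },	/* CX23885_BOARD_TEVII_S471 */
};

static int find_board(u16 subvendor, u16 subdevice)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(subids); i++)
		if (subids[i].subvendor == subvendor &&
		    subids[i].subdevice == subdevice)
			return subids[i].card;

	return -1;	/* unknown card */
}
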
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 6ad227029a0f..697728f09430 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -1046,6 +1046,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
if (cx23885_boards[dev->board].ci_type > 0)
cx_clear(RDR_RDRCTL1, 1 << 8);
+ switch (dev->board) {
+ case CX23885_BOARD_TEVII_S470:
+ case CX23885_BOARD_TEVII_S471:
+ cx_clear(RDR_RDRCTL1, 1 << 8);
+ break;
+ }
+
return 0;
}
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 6835eb1fc093..cd542684ba02 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -712,6 +712,7 @@ static int dvb_register(struct cx23885_tsport *port)
}
break;
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
i2c_bus = &dev->i2c_bus[0];
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&hcw_s5h1411_config,
@@ -721,6 +722,11 @@ static int dvb_register(struct cx23885_tsport *port)
0x60, &dev->i2c_bus[1].i2c_adap,
&hauppauge_tda18271_config);
}
+
+ tda18271_attach(&dev->ts1.analog_fe,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_tda18271_config);
+
break;
case CX23885_BOARD_HAUPPAUGE_HVR1800:
i2c_bus = &dev->i2c_bus[0];
@@ -1173,6 +1179,13 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_TEVII_S471:
+ i2c_bus = &dev->i2c_bus[1];
+
+ fe0->dvb.frontend = dvb_attach(ds3000_attach,
+ &tevii_ds3000_config,
+ &i2c_bus->i2c_adap);
+ break;
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
" isn't supported yet\n",
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index c654bdc7ccb2..22f8e7fbd665 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -505,6 +505,9 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
(dev->board == CX23885_BOARD_MPX885) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
@@ -1578,7 +1581,9 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
fe = vfe->dvb.frontend;
- if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)
+ if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111))
fe = &dev->ts1.analog_fe;
if (fe && fe->ops.tuner_ops.set_analog_params) {
@@ -1608,6 +1613,8 @@ int cx23885_set_frequency(struct file *file, void *priv,
int ret;
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
ret = cx23885_set_freq_via_ops(dev, f);
break;
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index f020f0568df4..13c37ec07ae7 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -89,6 +89,8 @@
#define CX23885_BOARD_MPX885 32
#define CX23885_BOARD_MYGICA_X8507 33
#define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
+#define CX23885_BOARD_TEVII_S471 35
+#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111 36
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index bb1ce346425d..c2bc39c58f82 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -331,9 +331,7 @@ static u64 ns_to_pulse_clocks(u32 ns)
static u16 pulse_clocks_to_clock_divider(u64 count)
{
- u32 rem;
-
- rem = do_div(count, (FIFO_RXTX << 2) | 0x3);
+ do_div(count, (FIFO_RXTX << 2) | 0x3);
/* net result needs to be rounded down and decremented by 1 */
if (count > RXCLK_RCD + 1)
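
Both IR hunks (cx23888-ir.c here and cx25840-ir.c later in the series) drop the unused remainder from do_div(): do_div(n, base) divides the u64 n in place, leaving the quotient in n and returning the remainder, so when only the quotient matters the return value can simply be ignored. A tiny worked example of the idiom, with an arbitrary divisor:

#include <asm/div64.h>		/* do_div() */
#include <linux/types.h>

/* Sketch of the do_div() idiom: divide a u64 in place, keep the quotient,
 * discard the remainder. */
static u64 clocks_to_divider(u64 count, u32 base_clocks)
{
	do_div(count, base_clocks);	/* count now holds the quotient */
	return count;
}

On 64-bit builds do_div() expands to a plain division; the in-place form exists because 64-by-32 division needs a helper routine on 32-bit architectures.
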
diff --git a/drivers/media/video/cx25821/cx25821-alsa.c b/drivers/media/video/cx25821/cx25821-alsa.c
index 03cfac476b03..1858a45dd081 100644
--- a/drivers/media/video/cx25821/cx25821-alsa.c
+++ b/drivers/media/video/cx25821/cx25821-alsa.c
@@ -290,11 +290,9 @@ static irqreturn_t cx25821_irq(int irq, void *dev_id)
u32 status, pci_status;
u32 audint_status, audint_mask;
int loop, handled = 0;
- int audint_count = 0;
audint_status = cx_read(AUD_A_INT_STAT);
audint_mask = cx_read(AUD_A_INT_MSK);
- audint_count = cx_read(AUD_A_GPCNT);
status = cx_read(PCI_INT_STAT);
for (loop = 0; loop < 1; loop++) {
diff --git a/drivers/media/video/cx25821/cx25821-audio-upstream.c b/drivers/media/video/cx25821/cx25821-audio-upstream.c
index 20c7ca3351a8..8b2a99975c23 100644
--- a/drivers/media/video/cx25821/cx25821-audio-upstream.c
+++ b/drivers/media/video/cx25821/cx25821-audio-upstream.c
@@ -585,7 +585,7 @@ int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num,
static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
- u32 msk_stat, audio_status;
+ u32 audio_status;
int handled = 0;
struct sram_channel *sram_ch;
@@ -594,7 +594,6 @@ static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id)
sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels;
- msk_stat = cx_read(sram_ch->int_mstat);
audio_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
index 7930ca58349f..f11f6f07e915 100644
--- a/drivers/media/video/cx25821/cx25821-core.c
+++ b/drivers/media/video/cx25821/cx25821-core.c
@@ -379,14 +379,6 @@ static inline int i2c_slave_did_ack(struct i2c_adapter *i2c_adap)
return cx_read(bus->reg_stat) & 0x01;
}
-void cx_i2c_read_print(struct cx25821_dev *dev, u32 reg, const char *reg_string)
-{
- int tmp = 0;
- u32 value = 0;
-
- value = cx25821_i2c_read(&dev->i2c_bus[0], reg, &tmp);
-}
-
static void cx25821_registers_init(struct cx25821_dev *dev)
{
u32 tmp;
@@ -895,7 +887,7 @@ static void cx25821_iounmap(struct cx25821_dev *dev)
static int cx25821_dev_setup(struct cx25821_dev *dev)
{
- int io_size = 0, i;
+ int i;
pr_info("\n***********************************\n");
pr_info("cx25821 set up\n");
@@ -912,9 +904,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
list_add_tail(&dev->devlist, &cx25821_devlist);
mutex_unlock(&cx25821_devlist_mutex);
- strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
- strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
-
if (dev->pci->device != 0x8210) {
pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
__func__, dev->pci->device);
@@ -960,7 +949,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
/* PCIe stuff */
dev->base_io_addr = pci_resource_start(dev->pci, 0);
- io_size = pci_resource_len(dev->pci, 0);
if (!dev->base_io_addr) {
CX25821_ERR("No PCI Memory resources, exiting!\n");
@@ -1317,13 +1305,12 @@ void cx25821_free_buffer(struct videobuf_queue *q, struct cx25821_buffer *buf)
static irqreturn_t cx25821_irq(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
- u32 pci_status, pci_mask;
+ u32 pci_status;
u32 vid_status;
int i, handled = 0;
u32 mask[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
pci_status = cx_read(PCI_INT_STAT);
- pci_mask = cx_read(PCI_INT_MSK);
if (pci_status == 0)
goto out;
diff --git a/drivers/media/video/cx25821/cx25821-i2c.c b/drivers/media/video/cx25821/cx25821-i2c.c
index 12d7300fa1e9..6311180f430c 100644
--- a/drivers/media/video/cx25821/cx25821-i2c.c
+++ b/drivers/media/video/cx25821/cx25821-i2c.c
@@ -361,7 +361,6 @@ void cx25821_av_clk(struct cx25821_dev *dev, int enable)
int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
{
struct i2c_client *client = &bus->i2c_client;
- int retval = 0;
int v = 0;
u8 addr[2] = { 0, 0 };
u8 buf[4] = { 0, 0, 0, 0 };
@@ -385,7 +384,7 @@ int cx25821_i2c_read(struct cx25821_i2c *bus, u16 reg_addr, int *value)
msgs[0].addr = 0x44;
msgs[1].addr = 0x44;
- retval = i2c_xfer(client->adapter, msgs, 2);
+ i2c_xfer(client->adapter, msgs, 2);
v = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
*value = v;
diff --git a/drivers/media/video/cx25821/cx25821-medusa-video.c b/drivers/media/video/cx25821/cx25821-medusa-video.c
index 298a68d98c2f..313fb20a0b47 100644
--- a/drivers/media/video/cx25821/cx25821-medusa-video.c
+++ b/drivers/media/video/cx25821/cx25821-medusa-video.c
@@ -35,7 +35,6 @@
static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
int enable)
{
- int ret_val = 1;
u32 value = 0;
u32 tmp = 0;
int out_ctrl = OUT_CTRL1;
@@ -79,13 +78,13 @@ static void medusa_enable_bluefield_output(struct cx25821_dev *dev, int channel,
value &= 0xFFFFFF7F; /* clear BLUE_FIELD_EN */
if (enable)
value |= 0x00000080; /* set BLUE_FIELD_EN */
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl, value);
+ cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl, value);
value = cx25821_i2c_read(&dev->i2c_bus[0], out_ctrl_ns, &tmp);
value &= 0xFFFFFF7F;
if (enable)
value |= 0x00000080; /* set BLUE_FIELD_EN */
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl_ns, value);
+ cx25821_i2c_write(&dev->i2c_bus[0], out_ctrl_ns, value);
}
static int medusa_initialize_ntsc(struct cx25821_dev *dev)
@@ -431,7 +430,6 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
{
int decoder = 0;
int decoder_count = 0;
- int ret_val = 0;
u32 hscale = 0x0;
u32 vscale = 0x0;
const int MAX_WIDTH = 720;
@@ -482,9 +480,9 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
for (; decoder < decoder_count; decoder++) {
/* write scaling values for each decoder */
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ cx25821_i2c_write(&dev->i2c_bus[0],
HSCALE_CTRL + (0x200 * decoder), hscale);
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0],
+ cx25821_i2c_write(&dev->i2c_bus[0],
VSCALE_CTRL + (0x200 * decoder), vscale);
}
@@ -494,7 +492,6 @@ void medusa_set_resolution(struct cx25821_dev *dev, int width,
static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
int duration)
{
- int ret_val = 0;
u32 fld_cnt = 0;
u32 tmp = 0;
u32 disp_cnt_reg = DISP_AB_CNT;
@@ -537,7 +534,7 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
fld_cnt |= ((u32) duration) << 16;
}
- ret_val = cx25821_i2c_write(&dev->i2c_bus[0], disp_cnt_reg, fld_cnt);
+ cx25821_i2c_write(&dev->i2c_bus[0], disp_cnt_reg, fld_cnt);
mutex_unlock(&dev->lock);
}
diff --git a/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
index 5a157cf4a95e..c8c94fbf5d8d 100644
--- a/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
+++ b/drivers/media/video/cx25821/cx25821-video-upstream-ch2.c
@@ -587,7 +587,7 @@ int cx25821_video_upstream_irq_ch2(struct cx25821_dev *dev, int chan_num,
static irqreturn_t cx25821_upstream_irq_ch2(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
- u32 msk_stat, vid_status;
+ u32 vid_status;
int handled = 0;
int channel_num = 0;
struct sram_channel *sram_ch;
@@ -598,7 +598,6 @@ static irqreturn_t cx25821_upstream_irq_ch2(int irq, void *dev_id)
channel_num = VID_UPSTREAM_SRAM_CHANNEL_J;
sram_ch = dev->channels[channel_num].sram_channels;
- msk_stat = cx_read(sram_ch->int_mstat);
vid_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
diff --git a/drivers/media/video/cx25821/cx25821-video-upstream.c b/drivers/media/video/cx25821/cx25821-video-upstream.c
index 21e7d657f049..52c13e0b6492 100644
--- a/drivers/media/video/cx25821/cx25821-video-upstream.c
+++ b/drivers/media/video/cx25821/cx25821-video-upstream.c
@@ -637,7 +637,7 @@ int cx25821_video_upstream_irq(struct cx25821_dev *dev, int chan_num,
static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
- u32 msk_stat, vid_status;
+ u32 vid_status;
int handled = 0;
int channel_num = 0;
struct sram_channel *sram_ch;
@@ -649,7 +649,6 @@ static irqreturn_t cx25821_upstream_irq(int irq, void *dev_id)
sram_ch = dev->channels[channel_num].sram_channels;
- msk_stat = cx_read(sram_ch->int_mstat);
vid_status = cx_read(sram_ch->int_stat);
/* Only deal with our interrupt */
diff --git a/drivers/media/video/cx25821/cx25821-video.c b/drivers/media/video/cx25821/cx25821-video.c
index ffd8bc79c02e..b38d4379cc36 100644
--- a/drivers/media/video/cx25821/cx25821-video.c
+++ b/drivers/media/video/cx25821/cx25821-video.c
@@ -109,25 +109,6 @@ struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc)
return NULL;
}
-void cx25821_dump_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q)
-{
- struct cx25821_buffer *buf;
- struct list_head *item;
- dprintk(1, "%s()\n", __func__);
-
- if (!list_empty(&q->active)) {
- list_for_each(item, &q->active)
- buf = list_entry(item, struct cx25821_buffer, vb.queue);
- }
-
- if (!list_empty(&q->queued)) {
- list_for_each(item, &q->queued)
- buf = list_entry(item, struct cx25821_buffer, vb.queue);
- }
-
-}
-
void cx25821_video_wakeup(struct cx25821_dev *dev, struct cx25821_dmaqueue *q,
u32 count)
{
@@ -557,7 +538,7 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct cx25821_buffer *buf =
container_of(vb, struct cx25821_buffer, vb);
int rc, init_buffer = 0;
- u32 line0_offset, line1_offset;
+ u32 line0_offset;
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int bpl_local = LINE_SIZE_D1;
int channel_opened = fh->channel_id;
@@ -639,7 +620,6 @@ int cx25821_buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
case V4L2_FIELD_INTERLACED:
/* All other formats are top field first */
line0_offset = 0;
- line1_offset = buf->bpl;
dprintk(1, "top field first\n");
cx25821_risc_buffer(dev->pci, &buf->risc,
@@ -1830,7 +1810,6 @@ static long video_ioctl_set(struct file *file, unsigned int cmd,
int i = 0;
int cif_enable = 0;
int cif_width = 0;
- u32 value = 0;
data_from_user = (struct downstream_user_struct *)arg;
@@ -1914,7 +1893,7 @@ static long video_ioctl_set(struct file *file, unsigned int cmd,
cx_write(data_from_user->reg_address, data_from_user->reg_data);
break;
case MEDUSA_READ:
- value = cx25821_i2c_read(&dev->i2c_bus[0],
+ cx25821_i2c_read(&dev->i2c_bus[0],
(u16) data_from_user->reg_address,
&data_from_user->reg_data);
break;
diff --git a/drivers/media/video/cx25821/cx25821-video.h b/drivers/media/video/cx25821/cx25821-video.h
index d0d9538ca5b3..9652a5e35ba2 100644
--- a/drivers/media/video/cx25821/cx25821-video.h
+++ b/drivers/media/video/cx25821/cx25821-video.h
@@ -86,8 +86,6 @@ extern struct cx25821_fmt formats[];
extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc);
extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM];
-extern void cx25821_dump_video_queue(struct cx25821_dev *dev,
- struct cx25821_dmaqueue *q);
extern void cx25821_video_wakeup(struct cx25821_dev *dev,
struct cx25821_dmaqueue *q, u32 count);
diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
index b9aa801b00a7..029f2934a6d8 100644
--- a/drivers/media/video/cx25821/cx25821.h
+++ b/drivers/media/video/cx25821/cx25821.h
@@ -187,7 +187,7 @@ enum port {
};
struct cx25821_board {
- char *name;
+ const char *name;
enum port porta;
enum port portb;
enum port portc;
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index fc1ff69cffd0..d8eac3e30a7e 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -84,7 +84,7 @@ MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
/* ----------------------------------------------------------------------- */
-static void cx23885_std_setup(struct i2c_client *client);
+static void cx23888_std_setup(struct i2c_client *client);
int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
{
@@ -638,10 +638,13 @@ static void cx23885_initialize(struct i2c_client *client)
finish_wait(&state->fw_wait, &wait);
destroy_workqueue(q);
- /* Call the cx23885 specific std setup func, we no longer rely on
+ /* Call the cx23888 specific std setup func, we no longer rely on
* the generic cx24840 func.
*/
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
+ else
+ cx25840_std_setup(client);
/* (re)set input */
set_input(client, state->vid_input, state->aud_input);
@@ -1103,9 +1106,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write4(client, 0x410, 0xffff0dbf);
cx25840_write4(client, 0x414, 0x00137d03);
- cx25840_write4(client, 0x418, 0x01008080);
+
+ /* on the 887, 0x418 is HSCALE_CTRL, on the 888 it is
+ CHROMA_CTRL */
+ if (is_cx23888(state))
+ cx25840_write4(client, 0x418, 0x01008080);
+ else
+ cx25840_write4(client, 0x418, 0x01000000);
+
cx25840_write4(client, 0x41c, 0x00000000);
- cx25840_write4(client, 0x420, 0x001c3e0f);
+
+ /* on the 887, 0x420 is CHROMA_CTRL, on the 888 it is
+ CRUSH_CTRL */
+ if (is_cx23888(state))
+ cx25840_write4(client, 0x420, 0x001c3e0f);
+ else
+ cx25840_write4(client, 0x420, 0x001c8282);
+
cx25840_write4(client, 0x42c, 0x42600000);
cx25840_write4(client, 0x430, 0x0000039b);
cx25840_write4(client, 0x438, 0x00000000);
@@ -1233,7 +1250,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write4(client, 0x8d0, 0x1f063870);
}
- if (is_cx2388x(state)) {
+ if (is_cx23888(state)) {
/* HVR1850 */
/* AUD_IO_CTRL - I2S Input, Parallel1*/
/* - Channel 1 src - Parallel1 (Merlin out) */
@@ -1298,8 +1315,8 @@ static int set_v4lstd(struct i2c_client *client)
}
cx25840_and_or(client, 0x400, ~0xf, fmt);
cx25840_and_or(client, 0x403, ~0x3, pal_m);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
else
cx25840_std_setup(client);
if (!is_cx2583x(state))
@@ -1312,6 +1329,7 @@ static int set_v4lstd(struct i2c_client *client)
static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
+ struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
switch (ctrl->id) {
@@ -1324,12 +1342,20 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_SATURATION:
- cx25840_write(client, 0x420, ctrl->val << 1);
- cx25840_write(client, 0x421, ctrl->val << 1);
+ if (is_cx23888(state)) {
+ cx25840_write(client, 0x418, ctrl->val << 1);
+ cx25840_write(client, 0x419, ctrl->val << 1);
+ } else {
+ cx25840_write(client, 0x420, ctrl->val << 1);
+ cx25840_write(client, 0x421, ctrl->val << 1);
+ }
break;
case V4L2_CID_HUE:
- cx25840_write(client, 0x422, ctrl->val);
+ if (is_cx23888(state))
+ cx25840_write(client, 0x41a, ctrl->val);
+ else
+ cx25840_write(client, 0x422, ctrl->val);
break;
default:
@@ -1354,11 +1380,21 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
fmt->field = V4L2_FIELD_INTERLACED;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
- Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
- Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+ if (is_cx23888(state)) {
+ Vsrc = (cx25840_read(client, 0x42a) & 0x3f) << 4;
+ Vsrc |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
+ } else {
+ Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
+ Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+ }
- Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
- Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+ if (is_cx23888(state)) {
+ Hsrc = (cx25840_read(client, 0x426) & 0x3f) << 4;
+ Hsrc |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
+ } else {
+ Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
+ Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+ }
Vlines = fmt->height + (is_50Hz ? 4 : 7);
@@ -1782,8 +1818,8 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
return set_input(client, input, state->aud_input);
}
@@ -1794,8 +1830,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
return set_input(client, state->vid_input, input);
}
@@ -4939,7 +4975,7 @@ void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
}
}
-static void cx23885_std_setup(struct i2c_client *client)
+static void cx23888_std_setup(struct i2c_client *client)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
v4l2_std_id std = state->std;
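
The cx25840-core.c hunks split what used to be a single is_cx2388x() path: the CX23885/887 and CX23888 variants place the saturation/hue and source-size registers at different offsets, so the code now branches on is_cx23888() and the std-setup helper is renamed cx23888_std_setup() to match. The saturation control write, reduced to its core, looks like the sketch below; the register numbers come from the hunks above, while the callback stands in for the driver's cx25840_write():

#include <linux/types.h>

typedef void (*reg_write_fn)(u16 addr, u8 val);

/* Sketch: pick the chip-specific register pair for the saturation control. */
static void set_saturation(bool is_cx23888, reg_write_fn write_reg, u8 val)
{
	if (is_cx23888) {
		/* CX23888 keeps saturation at 0x418/0x419 */
		write_reg(0x418, val << 1);
		write_reg(0x419, val << 1);
	} else {
		/* CX23885/887 and older parts use 0x420/0x421 */
		write_reg(0x420, val << 1);
		write_reg(0x421, val << 1);
	}
}
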
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index 13c380ebb562..38ce76ed1924 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -316,9 +316,7 @@ static u64 ns_to_pulse_clocks(u32 ns)
static u16 pulse_clocks_to_clock_divider(u64 count)
{
- u32 rem;
-
- rem = do_div(count, (FIFO_RXTX << 2) | 0x3);
+ do_div(count, (FIFO_RXTX << 2) | 0x3);
/* net result needs to be rounded down and decremented by 1 */
if (count > RXCLK_RCD + 1)
@@ -860,12 +858,10 @@ static int cx25840_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, size_t count,
ssize_t *num)
{
struct cx25840_ir_state *ir_state = to_ir_state(sd);
- struct i2c_client *c;
if (ir_state == NULL)
return -ENODEV;
- c = ir_state->c;
#if 0
/*
* FIXME - the code below is an incomplete and untested sketch of what
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e46446a449c0..ed7b2aa1ed83 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -471,7 +471,7 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
dprintk(1,"Loading firmware ...\n");
dataptr = (u32*)firmware->data;
for (i = 0; i < (firmware->size >> 2); i++) {
- value = *dataptr;
+ value = le32_to_cpu(*dataptr);
checksum += ~value;
memory_write(dev->core, i, value);
dataptr++;
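
The cx88-blackbird.c fix converts each 32-bit firmware word with le32_to_cpu() before checksumming and writing it, so the loader sees the same values on big-endian hosts as on little-endian ones; the firmware image stores its words little-endian. The pattern in isolation, with an illustrative checksum helper rather than the driver's loader:

#include <asm/byteorder.h>	/* le32_to_cpu() */
#include <linux/types.h>

/* Sketch: walk a little-endian firmware image 32 bits at a time and build
 * the same complemented-sum style checksum as the hunk above. */
static u32 firmware_checksum(const void *data, size_t size)
{
	const __le32 *word = data;
	u32 checksum = 0;
	size_t i;

	for (i = 0; i < (size >> 2); i++)
		checksum += ~le32_to_cpu(word[i]);

	return checksum;
}
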
diff --git a/drivers/media/video/davinci/Kconfig b/drivers/media/video/davinci/Kconfig
index 60a456ebdc7c..9337b5605c90 100644
--- a/drivers/media/video/davinci/Kconfig
+++ b/drivers/media/video/davinci/Kconfig
@@ -40,6 +40,7 @@ config VIDEO_VPSS_SYSTEM
config VIDEO_VPFE_CAPTURE
tristate "VPFE Video Capture Driver"
depends on VIDEO_V4L2 && (ARCH_DAVINCI || ARCH_OMAP3)
+ depends on I2C
select VIDEOBUF_DMA_CONTIG
help
Support for DMx/AMx VPFE based frame grabber. This is the
diff --git a/drivers/media/video/davinci/vpbe_display.c b/drivers/media/video/davinci/vpbe_display.c
index 1f3b1c729252..e106b72810a9 100644
--- a/drivers/media/video/davinci/vpbe_display.c
+++ b/drivers/media/video/davinci/vpbe_display.c
@@ -1618,6 +1618,10 @@ static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
vbd->ioctl_ops = &vpbe_ioctl_ops;
vbd->minor = -1;
vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vbd->flags);
vbd->lock = &vpbe_display_layer->opslock;
if (disp_dev->vpbe_dev->current_timings.timings_type &
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 20cf271a774b..49a845fb804a 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -1761,7 +1761,7 @@ static long vpfe_param_handler(struct file *file, void *priv,
}
break;
default:
- ret = -EINVAL;
+ ret = -ENOTTY;
}
unlock_out:
mutex_unlock(&vpfe_dev->lock);
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 6504e40a31dd..96046957bf21 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -2228,6 +2228,10 @@ static __init int vpif_probe(struct platform_device *pdev)
common = &(ch->common[VPIF_VIDEO_INDEX]);
spin_lock_init(&common->irqlock);
mutex_init(&common->lock);
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &ch->video_dev->flags);
ch->video_dev->lock = &common->lock;
/* Initialize prio member of channel object */
v4l2_prio_init(&ch->prio);
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 7fa34b4fae26..e6488ee7db18 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -1778,6 +1778,10 @@ static __init int vpif_probe(struct platform_device *pdev)
v4l2_prio_init(&ch->prio);
ch->common[VPIF_VIDEO_INDEX].fmt.type =
V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &ch->video_dev->flags);
ch->video_dev->lock = &common->lock;
/* register video device */
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index f6f622e123bd..928ef0d0429f 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -49,10 +49,10 @@ config VIDEO_EM28XX_DVB
Empiatech em28xx chips.
config VIDEO_EM28XX_RC
- bool "EM28XX Remote Controller support"
+ tristate "EM28XX Remote Controller support"
depends on RC_CORE
depends on VIDEO_EM28XX
depends on !(RC_CORE=m && VIDEO_EM28XX=y)
- default y
+ default VIDEO_EM28XX
---help---
Enables Remote Controller support on em28xx driver.
diff --git a/drivers/media/video/em28xx/Makefile b/drivers/media/video/em28xx/Makefile
index 2abdf76c5203..c8b338d4be05 100644
--- a/drivers/media/video/em28xx/Makefile
+++ b/drivers/media/video/em28xx/Makefile
@@ -1,16 +1,15 @@
em28xx-y := em28xx-video.o em28xx-i2c.o em28xx-cards.o
em28xx-y += em28xx-core.o em28xx-vbi.o
-em28xx-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-input.o
-
em28xx-alsa-objs := em28xx-audio.o
+em28xx-rc-objs := em28xx-input.o
obj-$(CONFIG_VIDEO_EM28XX) += em28xx.o
obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
+obj-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-rc.o
ccflags-y += -Idrivers/media/video
ccflags-y += -Idrivers/media/common/tuners
ccflags-y += -Idrivers/media/dvb/dvb-core
ccflags-y += -Idrivers/media/dvb/frontends
-
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index e2a7b77c39c7..d7e2a3dc5525 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -343,7 +343,6 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- unsigned int channels, rate, format;
int ret;
dprintk("Setting capture parameters\n");
@@ -352,13 +351,17 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
params_buffer_bytes(hw_params));
if (ret < 0)
return ret;
+#if 0
+ /* TODO: set up em28xx audio chip to deliver the correct audio format,
+ current default is 48000hz multiplexed => 96000hz mono
+ which shouldn't matter since analogue TV only supports mono */
+ unsigned int channels, rate, format;
+
format = params_format(hw_params);
rate = params_rate(hw_params);
channels = params_channels(hw_params);
+#endif
- /* TODO: set up em28xx audio chip to deliver the correct audio format,
- current default is 48000hz multiplexed => 96000hz mono
- which shouldn't matter since analogue TV only supports mono */
return 0;
}
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 9fd8cc7dbb23..862c6575c557 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -69,6 +69,8 @@ struct em28xx_hash_table {
unsigned int tuner;
};
+static void em28xx_pre_card_setup(struct em28xx *dev);
+
/*
* Reset sequences for analog/digital modes
*/
@@ -972,6 +974,7 @@ struct em28xx_board em28xx_boards[] = {
[EM2884_BOARD_CINERGY_HTC_STICK] = {
.name = "Terratec Cinergy HTC Stick",
.has_dvb = 1,
+ .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
#if 0
.tuner_type = TUNER_PHILIPS_TDA8290,
.tuner_addr = 0x41,
@@ -2361,7 +2364,7 @@ static int em28xx_hint_sensor(struct em28xx *dev)
/* Since em28xx_pre_card_setup() requires a proper dev->model,
* this won't work for boards with generic PCI IDs
*/
-void em28xx_pre_card_setup(struct em28xx *dev)
+static void em28xx_pre_card_setup(struct em28xx *dev)
{
/* Set the initial XCLK and I2C clock values based on the board
definition */
@@ -2661,55 +2664,7 @@ static int em28xx_hint_board(struct em28xx *dev)
return -1;
}
-/* ----------------------------------------------------------------------- */
-void em28xx_register_i2c_ir(struct em28xx *dev)
-{
- /* Leadtek winfast tv USBII deluxe can find a non working IR-device */
- /* at address 0x18, so if that address is needed for another board in */
- /* the future, please put it after 0x1f. */
- struct i2c_board_info info;
- const unsigned short addr_list[] = {
- 0x1f, 0x30, 0x47, I2C_CLIENT_END
- };
-
- if (disable_ir)
- return;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- memset(&dev->init_data, 0, sizeof(dev->init_data));
- strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
-
- /* detect & configure */
- switch (dev->model) {
- case EM2800_BOARD_TERRATEC_CINERGY_200:
- case EM2820_BOARD_TERRATEC_CINERGY_250:
- dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
- dev->init_data.get_key = em28xx_get_key_terratec;
- dev->init_data.name = "i2c IR (EM28XX Terratec)";
- break;
- case EM2820_BOARD_PINNACLE_USB_2:
- dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
- dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
- dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
- break;
- case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
- dev->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- dev->init_data.get_key = em28xx_get_key_em_haup;
- dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
- break;
- case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
- dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
- dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
- dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
- break;
- }
-
- if (dev->init_data.name)
- info.platform_data = &dev->init_data;
- i2c_new_probed_device(&dev->i2c_adap, &info, addr_list, NULL);
-}
-
-void em28xx_card_setup(struct em28xx *dev)
+static void em28xx_card_setup(struct em28xx *dev)
{
/*
* If the device can be a webcam, seek for a sensor.
@@ -2849,13 +2804,6 @@ void em28xx_card_setup(struct em28xx *dev)
break;
}
-#if defined(CONFIG_MODULES) && defined(MODULE)
- if (dev->board.has_ir_i2c && !disable_ir)
- request_module("ir-kbd-i2c");
-#endif
- if (dev->board.has_snapshot_button)
- em28xx_register_snapshot_button(dev);
-
if (dev->board.valid == EM28XX_BOARD_NOT_VALIDATED) {
em28xx_errdev("\n\n");
em28xx_errdev("The support for this board weren't "
@@ -2929,9 +2877,6 @@ void em28xx_card_setup(struct em28xx *dev)
}
em28xx_tuner_setup(dev);
-
- if(!disable_ir)
- em28xx_ir_init(dev);
}
@@ -2948,6 +2893,8 @@ static void request_module_async(struct work_struct *work)
if (dev->board.has_dvb)
request_module("em28xx-dvb");
+ if (dev->board.ir_codes && !disable_ir)
+ request_module("em28xx-rc");
}
static void request_modules(struct em28xx *dev)
@@ -2972,12 +2919,6 @@ static void flush_request_modules(struct em28xx *dev)
*/
void em28xx_release_resources(struct em28xx *dev)
{
- if (dev->sbutton_input_dev)
- em28xx_deregister_snapshot_button(dev);
-
- if (dev->ir)
- em28xx_ir_fini(dev);
-
/*FIXME: I2C IR should be disconnected */
em28xx_release_analog_resources(dev);
@@ -3005,9 +2946,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
dev->udev = udev;
mutex_init(&dev->ctrl_urb_lock);
spin_lock_init(&dev->slock);
- init_waitqueue_head(&dev->open);
- init_waitqueue_head(&dev->wait_frame);
- init_waitqueue_head(&dev->wait_stream);
dev->em28xx_write_regs = em28xx_write_regs;
dev->em28xx_read_reg = em28xx_read_reg;
@@ -3140,9 +3078,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
- INIT_LIST_HEAD(&dev->vidq.queued);
INIT_LIST_HEAD(&dev->vbiq.active);
- INIT_LIST_HEAD(&dev->vbiq.queued);
if (dev->board.has_msp34xx) {
/* Send a reset to other chips via gpio */
@@ -3447,8 +3383,6 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
resources */
mutex_lock(&dev->lock);
- wake_up_interruptible_all(&dev->open);
-
v4l2_device_disconnect(&dev->v4l2_dev);
if (dev->users) {
@@ -3460,8 +3394,6 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
dev->state |= DEV_MISCONFIGURED;
em28xx_uninit_isoc(dev, dev->mode);
dev->state |= DEV_DISCONNECTED;
- wake_up_interruptible(&dev->wait_frame);
- wake_up_interruptible(&dev->wait_stream);
} else {
dev->state |= DEV_DISCONNECTED;
em28xx_release_resources(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 53a9fb91e97e..5717bdee8f1b 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -139,6 +139,7 @@ int em28xx_read_reg(struct em28xx *dev, u16 reg)
{
return em28xx_read_reg_req(dev, USB_REQ_GET_STATUS, reg);
}
+EXPORT_SYMBOL_GPL(em28xx_read_reg);
/*
* em28xx_write_regs_req()
@@ -205,6 +206,7 @@ int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len)
return rc;
}
+EXPORT_SYMBOL_GPL(em28xx_write_regs);
/* Write a single register */
int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
@@ -239,6 +241,7 @@ int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
return em28xx_write_regs(dev, reg, &newval, 1);
}
+EXPORT_SYMBOL_GPL(em28xx_write_reg_bits);
/*
* em28xx_is_ac97_ready()
@@ -666,7 +669,6 @@ int em28xx_capture_start(struct em28xx *dev, int start)
return rc;
}
-EXPORT_SYMBOL_GPL(em28xx_capture_start);
int em28xx_vbi_supported(struct em28xx *dev)
{
@@ -975,7 +977,6 @@ void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode)
else
isoc_bufs = &dev->isoc_ctl.analog_bufs;
- dev->isoc_ctl.nfields = -1;
for (i = 0; i < isoc_bufs->num_bufs; i++) {
urb = isoc_bufs->urb[i];
if (urb) {
@@ -1008,6 +1009,31 @@ void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode)
EXPORT_SYMBOL_GPL(em28xx_uninit_isoc);
/*
+ * Stop URBs
+ */
+void em28xx_stop_urbs(struct em28xx *dev)
+{
+ int i;
+ struct urb *urb;
+ struct em28xx_usb_isoc_bufs *isoc_bufs = &dev->isoc_ctl.digital_bufs;
+
+ em28xx_isocdbg("em28xx: called em28xx_stop_urbs\n");
+
+ for (i = 0; i < isoc_bufs->num_bufs; i++) {
+ urb = isoc_bufs->urb[i];
+ if (urb) {
+ if (!irqs_disabled())
+ usb_kill_urb(urb);
+ else
+ usb_unlink_urb(urb);
+ }
+ }
+
+ em28xx_capture_start(dev, 0);
+}
+EXPORT_SYMBOL_GPL(em28xx_stop_urbs);
+
+/*
* Allocate URBs
*/
int em28xx_alloc_isoc(struct em28xx *dev, enum em28xx_mode mode,
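Note on the em28xx_stop_urbs() hunk above: usb_kill_urb() waits for the URB to
complete and may sleep, so it must not be called with interrupts disabled; the
helper therefore falls back to the asynchronous usb_unlink_urb() in that case.
A minimal illustrative sketch of the same guard follows; it is not part of the
patch and the helper name is invented for the example.

#include <linux/usb.h>
#include <linux/irqflags.h>

/* Illustrative only: mirrors the guard used by em28xx_stop_urbs() above. */
static void cancel_urb_in_any_context(struct urb *urb)
{
	if (!urb)
		return;
	if (irqs_disabled())
		usb_unlink_urb(urb);	/* asynchronous cancel, safe in atomic context */
	else
		usb_kill_urb(urb);	/* synchronous, may sleep until the URB is reaped */
}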
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 503a8d5b5382..16410ac20092 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -183,7 +183,7 @@ static int em28xx_stop_streaming(struct em28xx_dvb *dvb)
{
struct em28xx *dev = dvb->adapter.priv;
- em28xx_capture_start(dev, 0);
+ em28xx_stop_urbs(dev);
em28xx_set_mode(dev, EM28XX_SUSPEND);
@@ -336,6 +336,8 @@ struct drxk_config pctv_520e_drxk = {
.single_master = 1,
.microcode_name = "dvb-demod-drxk-pctv.fw",
.chunk_size = 58,
+ .antenna_dvbt = true, /* disable LNA */
+ .antenna_gpio = (1 << 2), /* disable LNA */
};
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
@@ -474,8 +476,8 @@ static void terratec_h5_init(struct em28xx *dev)
static void pctv_520e_init(struct em28xx *dev)
{
/*
- * Init TDA8295(?) analog demodulator. Looks like I2C traffic to
- * digital demodulator and tuner are routed via TDA8295.
+ * Init AVF4910B analog decoder. It looks like the I2C traffic to the
+ * digital demodulator and tuner is routed via the AVF4910B.
*/
int i;
struct {
@@ -542,7 +544,8 @@ static struct cxd2820r_config em28xx_cxd2820r_config = {
.i2c_address = (0xd8 >> 1),
.ts_mode = CXD2820R_TS_SERIAL,
- /* enable LNA for DVB-T2 and DVB-C */
+ /* enable LNA for DVB-T, DVB-T2 and DVB-C */
+ .gpio_dvbt[0] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | CXD2820R_GPIO_L,
.gpio_dvbt2[0] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | CXD2820R_GPIO_L,
.gpio_dvbc[0] = CXD2820R_GPIO_E | CXD2820R_GPIO_O | CXD2820R_GPIO_L,
};
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index a88e169dba23..185db65b766e 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -553,9 +553,6 @@ int em28xx_i2c_register(struct em28xx *dev)
if (i2c_scan)
em28xx_do_i2c_scan(dev);
- /* Instantiate the IR receiver device, if present */
- em28xx_register_i2c_ir(dev);
-
return 0;
}
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 2630b265b0e8..5e30c4f3f248 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -80,7 +80,7 @@ struct em28xx_IR {
I2C IR based get keycodes - should be used with ir-kbd-i2c
**********************************************************/
-int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
unsigned char b;
@@ -108,7 +108,7 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
}
-int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
unsigned char buf[2];
u16 code;
@@ -157,7 +157,7 @@ int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
}
-int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
+static int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
u32 *ir_raw)
{
unsigned char buf[3];
@@ -179,7 +179,8 @@ int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
return 1;
}
-int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key,
+ u32 *ir_raw)
{
unsigned char subaddr, keydetect, key;
@@ -387,7 +388,138 @@ int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
return rc;
}
-int em28xx_ir_init(struct em28xx *dev)
+static void em28xx_register_i2c_ir(struct em28xx *dev)
+{
+ /* Leadtek Winfast TV USBII Deluxe can find a non-working IR device */
+ /* at address 0x18, so if that address is needed for another board in */
+ /* the future, please put it after 0x1f. */
+ struct i2c_board_info info;
+ const unsigned short addr_list[] = {
+ 0x1f, 0x30, 0x47, I2C_CLIENT_END
+ };
+
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ memset(&dev->init_data, 0, sizeof(dev->init_data));
+ strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+
+ /* detect & configure */
+ switch (dev->model) {
+ case EM2800_BOARD_TERRATEC_CINERGY_200:
+ case EM2820_BOARD_TERRATEC_CINERGY_250:
+ dev->init_data.ir_codes = RC_MAP_EM_TERRATEC;
+ dev->init_data.get_key = em28xx_get_key_terratec;
+ dev->init_data.name = "i2c IR (EM28XX Terratec)";
+ break;
+ case EM2820_BOARD_PINNACLE_USB_2:
+ dev->init_data.ir_codes = RC_MAP_PINNACLE_GREY;
+ dev->init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
+ dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
+ break;
+ case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
+ dev->init_data.ir_codes = RC_MAP_HAUPPAUGE;
+ dev->init_data.get_key = em28xx_get_key_em_haup;
+ dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
+ break;
+ case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
+ dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
+ dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
+ dev->init_data.name = "i2c IR (EM2820 Winfast TV USBII Deluxe)";
+ break;
+ }
+
+ if (dev->init_data.name)
+ info.platform_data = &dev->init_data;
+ i2c_new_probed_device(&dev->i2c_adap, &info, addr_list, NULL);
+}
+
+/**********************************************************
+ Handle Webcam snapshot button
+ **********************************************************/
+
+static void em28xx_query_sbutton(struct work_struct *work)
+{
+ /* Poll the register and see if the button is depressed */
+ struct em28xx *dev =
+ container_of(work, struct em28xx, sbutton_query_work.work);
+ int ret;
+
+ ret = em28xx_read_reg(dev, EM28XX_R0C_USBSUSP);
+
+ if (ret & EM28XX_R0C_USBSUSP_SNAPSHOT) {
+ u8 cleared;
+ /* Button is depressed, clear the register */
+ cleared = ((u8) ret) & ~EM28XX_R0C_USBSUSP_SNAPSHOT;
+ em28xx_write_regs(dev, EM28XX_R0C_USBSUSP, &cleared, 1);
+
+ /* Now emulate the keypress */
+ input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
+ 1);
+ /* Now unpress the key */
+ input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
+ 0);
+ }
+
+ /* Schedule next poll */
+ schedule_delayed_work(&dev->sbutton_query_work,
+ msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+}
+
+static void em28xx_register_snapshot_button(struct em28xx *dev)
+{
+ struct input_dev *input_dev;
+ int err;
+
+ em28xx_info("Registering snapshot button...\n");
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ em28xx_errdev("input_allocate_device failed\n");
+ return;
+ }
+
+ usb_make_path(dev->udev, dev->snapshot_button_path,
+ sizeof(dev->snapshot_button_path));
+ strlcat(dev->snapshot_button_path, "/sbutton",
+ sizeof(dev->snapshot_button_path));
+ INIT_DELAYED_WORK(&dev->sbutton_query_work, em28xx_query_sbutton);
+
+ input_dev->name = "em28xx snapshot button";
+ input_dev->phys = dev->snapshot_button_path;
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+ set_bit(EM28XX_SNAPSHOT_KEY, input_dev->keybit);
+ input_dev->keycodesize = 0;
+ input_dev->keycodemax = 0;
+ input_dev->id.bustype = BUS_USB;
+ input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
+ input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
+ input_dev->id.version = 1;
+ input_dev->dev.parent = &dev->udev->dev;
+
+ err = input_register_device(input_dev);
+ if (err) {
+ em28xx_errdev("input_register_device failed\n");
+ input_free_device(input_dev);
+ return;
+ }
+
+ dev->sbutton_input_dev = input_dev;
+ schedule_delayed_work(&dev->sbutton_query_work,
+ msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+ return;
+
+}
+
+static void em28xx_deregister_snapshot_button(struct em28xx *dev)
+{
+ if (dev->sbutton_input_dev != NULL) {
+ em28xx_info("Deregistering snapshot button\n");
+ cancel_delayed_work_sync(&dev->sbutton_query_work);
+ input_unregister_device(dev->sbutton_input_dev);
+ dev->sbutton_input_dev = NULL;
+ }
+ return;
+}
+
+static int em28xx_ir_init(struct em28xx *dev)
{
struct em28xx_IR *ir;
struct rc_dev *rc;
@@ -395,6 +527,8 @@ int em28xx_ir_init(struct em28xx *dev)
if (dev->board.ir_codes == NULL) {
/* No remote control support */
+ em28xx_warn("Remote control support is not available for "
+ "this card.\n");
return 0;
}
@@ -448,6 +582,15 @@ int em28xx_ir_init(struct em28xx *dev)
if (err)
goto err_out_stop;
+ em28xx_register_i2c_ir(dev);
+
+#if defined(CONFIG_MODULES) && defined(MODULE)
+ if (dev->board.has_ir_i2c)
+ request_module("ir-kbd-i2c");
+#endif
+ if (dev->board.has_snapshot_button)
+ em28xx_register_snapshot_button(dev);
+
return 0;
err_out_stop:
@@ -458,10 +601,12 @@ int em28xx_ir_init(struct em28xx *dev)
return err;
}
-int em28xx_ir_fini(struct em28xx *dev)
+static int em28xx_ir_fini(struct em28xx *dev)
{
struct em28xx_IR *ir = dev->ir;
+ em28xx_deregister_snapshot_button(dev);
+
/* skip detach on non attached boards */
if (!ir)
return 0;
@@ -475,89 +620,26 @@ int em28xx_ir_fini(struct em28xx *dev)
return 0;
}
-/**********************************************************
- Handle Webcam snapshot button
- **********************************************************/
+static struct em28xx_ops rc_ops = {
+ .id = EM28XX_RC,
+ .name = "Em28xx Input Extension",
+ .init = em28xx_ir_init,
+ .fini = em28xx_ir_fini,
+};
-static void em28xx_query_sbutton(struct work_struct *work)
+static int __init em28xx_rc_register(void)
{
- /* Poll the register and see if the button is depressed */
- struct em28xx *dev =
- container_of(work, struct em28xx, sbutton_query_work.work);
- int ret;
-
- ret = em28xx_read_reg(dev, EM28XX_R0C_USBSUSP);
-
- if (ret & EM28XX_R0C_USBSUSP_SNAPSHOT) {
- u8 cleared;
- /* Button is depressed, clear the register */
- cleared = ((u8) ret) & ~EM28XX_R0C_USBSUSP_SNAPSHOT;
- em28xx_write_regs(dev, EM28XX_R0C_USBSUSP, &cleared, 1);
-
- /* Not emulate the keypress */
- input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
- 1);
- /* Now unpress the key */
- input_report_key(dev->sbutton_input_dev, EM28XX_SNAPSHOT_KEY,
- 0);
- }
-
- /* Schedule next poll */
- schedule_delayed_work(&dev->sbutton_query_work,
- msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
+ return em28xx_register_extension(&rc_ops);
}
-void em28xx_register_snapshot_button(struct em28xx *dev)
+static void __exit em28xx_rc_unregister(void)
{
- struct input_dev *input_dev;
- int err;
-
- em28xx_info("Registering snapshot button...\n");
- input_dev = input_allocate_device();
- if (!input_dev) {
- em28xx_errdev("input_allocate_device failed\n");
- return;
- }
-
- usb_make_path(dev->udev, dev->snapshot_button_path,
- sizeof(dev->snapshot_button_path));
- strlcat(dev->snapshot_button_path, "/sbutton",
- sizeof(dev->snapshot_button_path));
- INIT_DELAYED_WORK(&dev->sbutton_query_work, em28xx_query_sbutton);
-
- input_dev->name = "em28xx snapshot button";
- input_dev->phys = dev->snapshot_button_path;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
- set_bit(EM28XX_SNAPSHOT_KEY, input_dev->keybit);
- input_dev->keycodesize = 0;
- input_dev->keycodemax = 0;
- input_dev->id.bustype = BUS_USB;
- input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
- input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
- input_dev->id.version = 1;
- input_dev->dev.parent = &dev->udev->dev;
-
- err = input_register_device(input_dev);
- if (err) {
- em28xx_errdev("input_register_device failed\n");
- input_free_device(input_dev);
- return;
- }
-
- dev->sbutton_input_dev = input_dev;
- schedule_delayed_work(&dev->sbutton_query_work,
- msecs_to_jiffies(EM28XX_SBUTTON_QUERY_INTERVAL));
- return;
-
+ em28xx_unregister_extension(&rc_ops);
}
-void em28xx_deregister_snapshot_button(struct em28xx *dev)
-{
- if (dev->sbutton_input_dev != NULL) {
- em28xx_info("Deregistering snapshot button\n");
- cancel_delayed_work_sync(&dev->sbutton_query_work);
- input_unregister_device(dev->sbutton_input_dev);
- dev->sbutton_input_dev = NULL;
- }
- return;
-}
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_DESCRIPTION("Em28xx Input driver");
+
+module_init(em28xx_rc_register);
+module_exit(em28xx_rc_unregister);
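With this change the remote-control code becomes a regular em28xx extension
module, like em28xx-alsa and em28xx-dvb: it fills a struct em28xx_ops with
init/fini callbacks and registers it from module_init(), letting the core bring
IR support up and down per device while em28xx-cards.c merely does
request_module("em28xx-rc"). A condensed sketch of that registration pattern,
reusing the names added above (illustrative restatement only, not part of the
patch):

/* Illustrative restatement of the extension hook-up added above. */
static struct em28xx_ops rc_ops_example = {
	.id   = EM28XX_RC,		/* extension id defined in em28xx.h */
	.name = "Em28xx Input Extension",
	.init = em28xx_ir_init,		/* invoked for each em28xx device */
	.fini = em28xx_ir_fini,		/* invoked on disconnect/unload */
};

static int __init rc_example_register(void)
{
	return em28xx_register_extension(&rc_ops_example);
}

static void __exit rc_example_unregister(void)
{
	em28xx_unregister_extension(&rc_ops_example);
}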
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 324b695c0724..50f5f4fc2148 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1305,9 +1305,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
if (0 == INPUT(i)->type)
return -EINVAL;
- dev->ctl_input = i;
-
- video_mux(dev, dev->ctl_input);
+ video_mux(dev, i);
return 0;
}
@@ -2262,6 +2260,7 @@ static int em28xx_v4l2_close(struct file *filp)
em28xx_release_resources(dev);
kfree(dev->alt_max_pkt_size);
kfree(dev);
+ kfree(fh);
return 0;
}
@@ -2286,7 +2285,6 @@ static int em28xx_v4l2_close(struct file *filp)
videobuf_mmap_free(&fh->vb_vbiq);
kfree(fh);
dev->users--;
- wake_up_interruptible_nr(&dev->open, 1);
return 0;
}
@@ -2497,6 +2495,10 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
vfd->release = video_device_release;
vfd->debug = video_debug;
vfd->lock = &dev->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
dev->name, type_name);
@@ -2518,7 +2520,6 @@ int em28xx_register_analog_devices(struct em28xx *dev)
dev->norm = em28xx_video_template.current_norm;
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
dev->interlaced = EM28XX_INTERLACED_DEFAULT;
- dev->ctl_input = 0;
/* Analog specific initialization */
dev->format = &format[0];
@@ -2532,7 +2533,7 @@ int em28xx_register_analog_devices(struct em28xx *dev)
em28xx_set_video_format(dev, format[0].fourcc,
maxw, norm_maxh(dev));
- video_mux(dev, dev->ctl_input);
+ video_mux(dev, 0);
/* Audio defaults */
dev->mute = 1;
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 2868b19f8b54..8757523e6863 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -226,24 +226,10 @@ struct em28xx_usb_isoc_ctl {
/* isoc transfer buffers for digital mode */
struct em28xx_usb_isoc_bufs digital_bufs;
- /* Last buffer command and region */
- u8 cmd;
- int pos, size, pktsize;
-
- /* Last field: ODD or EVEN? */
- int field;
-
- /* Stores incomplete commands */
- u32 tmp_buf;
- int tmp_buf_len;
-
/* Stores already requested buffers */
struct em28xx_buffer *vid_buf;
struct em28xx_buffer *vbi_buf;
- /* Stores the number of received fields */
- int nfields;
-
/* isoc urb callback */
int (*isoc_copy) (struct em28xx *dev, struct urb *urb);
@@ -264,12 +250,10 @@ struct em28xx_buffer {
struct list_head frame;
int top_field;
- int receiving;
};
struct em28xx_dmaqueue {
struct list_head active;
- struct list_head queued;
wait_queue_head_t wq;
@@ -277,13 +261,6 @@ struct em28xx_dmaqueue {
int pos;
};
-/* io methods */
-enum em28xx_io_method {
- IO_NONE,
- IO_READ,
- IO_MMAP,
-};
-
/* inputs */
#define MAX_EM28XX_INPUT 4
@@ -467,6 +444,7 @@ enum em28xx_dev_state {
/* em28xx extensions */
#define EM28XX_AUDIO 0x10
#define EM28XX_DVB 0x20
+#define EM28XX_RC 0x30
/* em28xx resource types (used for res_get/res_lock etc */
#define EM28XX_RESOURCE_VIDEO 0x01
@@ -577,7 +555,6 @@ struct em28xx {
/* states */
enum em28xx_dev_state state;
- enum em28xx_io_method io;
/* vbi related state tracking */
int capture_type;
@@ -593,7 +570,6 @@ struct em28xx {
struct mutex ctrl_urb_lock; /* protects urb_buf */
/* spinlock_t queue_lock; */
struct list_head inqueue, outqueue;
- wait_queue_head_t open, wait_frame, wait_stream;
struct video_device *vbi_dev;
struct video_device *radio_dev;
@@ -695,6 +671,7 @@ int em28xx_init_isoc(struct em28xx *dev, enum em28xx_mode mode,
int max_packets, int num_bufs, int max_pkt_size,
int (*isoc_copy) (struct em28xx *dev, struct urb *urb));
void em28xx_uninit_isoc(struct em28xx *dev, enum em28xx_mode mode);
+void em28xx_stop_urbs(struct em28xx *dev);
int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
@@ -710,45 +687,12 @@ void em28xx_release_analog_resources(struct em28xx *dev);
/* Provided by em28xx-cards.c */
extern int em2800_variant_detect(struct usb_device *udev, int model);
-extern void em28xx_pre_card_setup(struct em28xx *dev);
-extern void em28xx_card_setup(struct em28xx *dev);
extern struct em28xx_board em28xx_boards[];
extern struct usb_device_id em28xx_id_table[];
extern const unsigned int em28xx_bcount;
-void em28xx_register_i2c_ir(struct em28xx *dev);
int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
void em28xx_release_resources(struct em28xx *dev);
-/* Provided by em28xx-input.c */
-
-#ifdef CONFIG_VIDEO_EM28XX_RC
-
-int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
-int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
-int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
- u32 *ir_raw);
-int em28xx_get_key_winfast_usbii_deluxe(struct IR_i2c *ir, u32 *ir_key,
- u32 *ir_raw);
-void em28xx_register_snapshot_button(struct em28xx *dev);
-void em28xx_deregister_snapshot_button(struct em28xx *dev);
-
-int em28xx_ir_init(struct em28xx *dev);
-int em28xx_ir_fini(struct em28xx *dev);
-
-#else
-
-#define em28xx_get_key_terratec NULL
-#define em28xx_get_key_em_haup NULL
-#define em28xx_get_key_pinnacle_usb_grey NULL
-#define em28xx_get_key_winfast_usbii_deluxe NULL
-
-static inline void em28xx_register_snapshot_button(struct em28xx *dev) {}
-static inline void em28xx_deregister_snapshot_button(struct em28xx *dev) {}
-static inline int em28xx_ir_init(struct em28xx *dev) { return 0; }
-static inline int em28xx_ir_fini(struct em28xx *dev) { return 0; }
-
-#endif
-
/* Provided by em28xx-vbi.c */
extern struct videobuf_queue_ops em28xx_vbi_qops;
diff --git a/drivers/media/video/et61x251/Kconfig b/drivers/media/video/et61x251/Kconfig
deleted file mode 100644
index 87981b078fe6..000000000000
--- a/drivers/media/video/et61x251/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-config USB_ET61X251
- tristate "USB ET61X[12]51 PC Camera Controller support (DEPRECATED)"
- depends on VIDEO_V4L2
- default n
- ---help---
- This driver is DEPRECATED please use the gspca zc3xx module
- instead.
-
- Say Y here if you want support for cameras based on Etoms ET61X151
- or ET61X251 PC Camera Controllers.
-
- See <file:Documentation/video4linux/et61x251.txt> for more info.
-
- This driver uses the Video For Linux API. You must say Y or M to
- "Video For Linux" to use this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called et61x251.
diff --git a/drivers/media/video/et61x251/Makefile b/drivers/media/video/et61x251/Makefile
deleted file mode 100644
index 2ff4db9ec882..000000000000
--- a/drivers/media/video/et61x251/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-et61x251-objs := et61x251_core.o et61x251_tas5130d1b.o
-
-obj-$(CONFIG_USB_ET61X251) += et61x251.o
-
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
deleted file mode 100644
index 337ded4a6388..000000000000
--- a/drivers/media/video/et61x251/et61x251.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/***************************************************************************
- * V4L2 driver for ET61X[12]51 PC Camera Controllers *
- * *
- * Copyright (C) 2006 by Luca Risolia <luca.risolia@studio.unibo.it> *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- ***************************************************************************/
-
-#ifndef _ET61X251_H_
-#define _ET61X251_H_
-
-#include <linux/usb.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-common.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/types.h>
-#include <linux/param.h>
-#include <linux/rwsem.h>
-#include <linux/mutex.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/kref.h>
-
-#include "et61x251_sensor.h"
-
-/*****************************************************************************/
-
-#define ET61X251_DEBUG
-#define ET61X251_DEBUG_LEVEL 2
-#define ET61X251_MAX_DEVICES 64
-#define ET61X251_PRESERVE_IMGSCALE 0
-#define ET61X251_FORCE_MUNMAP 0
-#define ET61X251_MAX_FRAMES 32
-#define ET61X251_COMPRESSION_QUALITY 0
-#define ET61X251_URBS 2
-#define ET61X251_ISO_PACKETS 7
-#define ET61X251_ALTERNATE_SETTING 13
-#define ET61X251_URB_TIMEOUT msecs_to_jiffies(2 * ET61X251_ISO_PACKETS)
-#define ET61X251_CTRL_TIMEOUT 100
-#define ET61X251_FRAME_TIMEOUT 2
-
-/*****************************************************************************/
-
-static const struct usb_device_id et61x251_id_table[] = {
- { USB_DEVICE(0x102c, 0x6251), },
- { }
-};
-
-ET61X251_SENSOR_TABLE
-
-/*****************************************************************************/
-
-enum et61x251_frame_state {
- F_UNUSED,
- F_QUEUED,
- F_GRABBING,
- F_DONE,
- F_ERROR,
-};
-
-struct et61x251_frame_t {
- void* bufmem;
- struct v4l2_buffer buf;
- enum et61x251_frame_state state;
- struct list_head frame;
- unsigned long vma_use_count;
-};
-
-enum et61x251_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04,
-};
-
-enum et61x251_io_method {
- IO_NONE,
- IO_READ,
- IO_MMAP,
-};
-
-enum et61x251_stream_state {
- STREAM_OFF,
- STREAM_INTERRUPT,
- STREAM_ON,
-};
-
-struct et61x251_sysfs_attr {
- u8 reg, i2c_reg;
-};
-
-struct et61x251_module_param {
- u8 force_munmap;
- u16 frame_timeout;
-};
-
-static DEFINE_MUTEX(et61x251_sysfs_lock);
-static DECLARE_RWSEM(et61x251_dev_lock);
-
-struct et61x251_device {
- struct video_device* v4ldev;
-
- struct et61x251_sensor sensor;
-
- struct usb_device* usbdev;
- struct urb* urb[ET61X251_URBS];
- void* transfer_buffer[ET61X251_URBS];
- u8* control_buffer;
-
- struct et61x251_frame_t *frame_current, frame[ET61X251_MAX_FRAMES];
- struct list_head inqueue, outqueue;
- u32 frame_count, nbuffers, nreadbuffers;
-
- enum et61x251_io_method io;
- enum et61x251_stream_state stream;
-
- struct v4l2_jpegcompression compression;
-
- struct et61x251_sysfs_attr sysfs;
- struct et61x251_module_param module_param;
-
- struct kref kref;
- enum et61x251_dev_state state;
- u8 users;
-
- struct completion probe;
- struct mutex open_mutex, fileop_mutex;
- spinlock_t queue_lock;
- wait_queue_head_t wait_open, wait_frame, wait_stream;
-};
-
-/*****************************************************************************/
-
-struct et61x251_device*
-et61x251_match_id(struct et61x251_device* cam, const struct usb_device_id *id)
-{
- return usb_match_id(usb_ifnum_to_if(cam->usbdev, 0), id) ? cam : NULL;
-}
-
-
-void
-et61x251_attach_sensor(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor)
-{
- memcpy(&cam->sensor, sensor, sizeof(struct et61x251_sensor));
-}
-
-/*****************************************************************************/
-
-#undef DBG
-#undef KDBG
-#ifdef ET61X251_DEBUG
-#define DBG(level, fmt, ...) \
-do { \
- if (debug >= (level)) { \
- if ((level) == 1) \
- dev_err(&cam->usbdev->dev, fmt "\n", \
- ##__VA_ARGS__); \
- else if ((level) == 2) \
- dev_info(&cam->usbdev->dev, fmt "\n", \
- ##__VA_ARGS__); \
- else if ((level) >= 3) \
- dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
- __FILE__, __func__, __LINE__, \
- ##__VA_ARGS__); \
- } \
-} while (0)
-#define KDBG(level, fmt, ...) \
-do { \
- if (debug >= (level)) { \
- if ((level) == 1 || (level) == 2) \
- pr_info(fmt "\n", ##__VA_ARGS__); \
- else if ((level) == 3) \
- pr_debug("[%s:%s:%d] " fmt "\n", \
- __FILE__, __func__, __LINE__, \
- ##__VA_ARGS__); \
- } \
-} while (0)
-#define V4LDBG(level, name, cmd) \
-do { \
- if (debug >= (level)) \
- v4l_print_ioctl(name, cmd); \
-} while (0)
-#else
-#define DBG(level, fmt, ...) do {;} while(0)
-#define KDBG(level, fmt, ...) do {;} while(0)
-#define V4LDBG(level, name, cmd) do {;} while(0)
-#endif
-
-#undef PDBG
-#define PDBG(fmt, ...) \
- dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
- __FILE__, __func__, __LINE__, ##__VA_ARGS__)
-
-#undef PDBGG
-#define PDBGG(fmt, args...) do {;} while (0) /* placeholder */
-
-#endif /* _ET61X251_H_ */
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
deleted file mode 100644
index 5539f09440ac..000000000000
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ /dev/null
@@ -1,2683 +0,0 @@
-/***************************************************************************
- * V4L2 driver for ET61X[12]51 PC Camera Controllers *
- * *
- * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- ***************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/compiler.h>
-#include <linux/ioctl.h>
-#include <linux/poll.h>
-#include <linux/stat.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/page-flags.h>
-#include <media/v4l2-ioctl.h>
-#include <asm/byteorder.h>
-#include <asm/page.h>
-#include <asm/uaccess.h>
-
-#include "et61x251.h"
-
-/*****************************************************************************/
-
-#define ET61X251_MODULE_NAME "V4L2 driver for ET61X[12]51 " \
- "PC Camera Controllers"
-#define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia"
-#define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
-#define ET61X251_MODULE_LICENSE "GPL"
-#define ET61X251_MODULE_VERSION "1.1.10"
-
-/*****************************************************************************/
-
-MODULE_DEVICE_TABLE(usb, et61x251_id_table);
-
-MODULE_AUTHOR(ET61X251_MODULE_AUTHOR " " ET61X251_AUTHOR_EMAIL);
-MODULE_DESCRIPTION(ET61X251_MODULE_NAME);
-MODULE_VERSION(ET61X251_MODULE_VERSION);
-MODULE_LICENSE(ET61X251_MODULE_LICENSE);
-
-static short video_nr[] = {[0 ... ET61X251_MAX_DEVICES-1] = -1};
-module_param_array(video_nr, short, NULL, 0444);
-MODULE_PARM_DESC(video_nr,
- "\n<-1|n[,...]> Specify V4L2 minor mode number."
- "\n -1 = use next available (default)"
- "\n n = use minor number n (integer >= 0)"
- "\nYou can specify up to "
- __MODULE_STRING(ET61X251_MAX_DEVICES) " cameras this way."
- "\nFor example:"
- "\nvideo_nr=-1,2,-1 would assign minor number 2 to"
- "\nthe second registered camera and use auto for the first"
- "\none and for every other camera."
- "\n");
-
-static bool force_munmap[] = {[0 ... ET61X251_MAX_DEVICES-1] =
- ET61X251_FORCE_MUNMAP};
-module_param_array(force_munmap, bool, NULL, 0444);
-MODULE_PARM_DESC(force_munmap,
- "\n<0|1[,...]> Force the application to unmap previously"
- "\nmapped buffer memory before calling any VIDIOC_S_CROP or"
- "\nVIDIOC_S_FMT ioctl's. Not all the applications support"
- "\nthis feature. This parameter is specific for each"
- "\ndetected camera."
- "\n 0 = do not force memory unmapping"
- "\n 1 = force memory unmapping (save memory)"
- "\nDefault value is "__MODULE_STRING(ET61X251_FORCE_MUNMAP)"."
- "\n");
-
-static unsigned int frame_timeout[] = {[0 ... ET61X251_MAX_DEVICES-1] =
- ET61X251_FRAME_TIMEOUT};
-module_param_array(frame_timeout, uint, NULL, 0644);
-MODULE_PARM_DESC(frame_timeout,
- "\n<n[,...]> Timeout for a video frame in seconds."
- "\nThis parameter is specific for each detected camera."
- "\nDefault value is "
- __MODULE_STRING(ET61X251_FRAME_TIMEOUT)"."
- "\n");
-
-#ifdef ET61X251_DEBUG
-static unsigned short debug = ET61X251_DEBUG_LEVEL;
-module_param(debug, ushort, 0644);
-MODULE_PARM_DESC(debug,
- "\n<n> Debugging information level, from 0 to 3:"
- "\n0 = none (use carefully)"
- "\n1 = critical errors"
- "\n2 = significant informations"
- "\n3 = more verbose messages"
- "\nLevel 3 is useful for testing only, when only "
- "one device is used."
- "\nDefault value is "__MODULE_STRING(ET61X251_DEBUG_LEVEL)"."
- "\n");
-#endif
-
-/*****************************************************************************/
-
-static u32
-et61x251_request_buffers(struct et61x251_device* cam, u32 count,
- enum et61x251_io_method io)
-{
- struct v4l2_pix_format* p = &(cam->sensor.pix_format);
- struct v4l2_rect* r = &(cam->sensor.cropcap.bounds);
- const size_t imagesize = cam->module_param.force_munmap ||
- io == IO_READ ?
- (p->width * p->height * p->priv) / 8 :
- (r->width * r->height * p->priv) / 8;
- void* buff = NULL;
- u32 i;
-
- if (count > ET61X251_MAX_FRAMES)
- count = ET61X251_MAX_FRAMES;
-
- cam->nbuffers = count;
- while (cam->nbuffers > 0) {
- if ((buff = vmalloc_32_user(cam->nbuffers *
- PAGE_ALIGN(imagesize))))
- break;
- cam->nbuffers--;
- }
-
- for (i = 0; i < cam->nbuffers; i++) {
- cam->frame[i].bufmem = buff + i*PAGE_ALIGN(imagesize);
- cam->frame[i].buf.index = i;
- cam->frame[i].buf.m.offset = i*PAGE_ALIGN(imagesize);
- cam->frame[i].buf.length = imagesize;
- cam->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- cam->frame[i].buf.sequence = 0;
- cam->frame[i].buf.field = V4L2_FIELD_NONE;
- cam->frame[i].buf.memory = V4L2_MEMORY_MMAP;
- cam->frame[i].buf.flags = 0;
- }
-
- return cam->nbuffers;
-}
-
-
-static void et61x251_release_buffers(struct et61x251_device* cam)
-{
- if (cam->nbuffers) {
- vfree(cam->frame[0].bufmem);
- cam->nbuffers = 0;
- }
- cam->frame_current = NULL;
-}
-
-
-static void et61x251_empty_framequeues(struct et61x251_device* cam)
-{
- u32 i;
-
- INIT_LIST_HEAD(&cam->inqueue);
- INIT_LIST_HEAD(&cam->outqueue);
-
- for (i = 0; i < ET61X251_MAX_FRAMES; i++) {
- cam->frame[i].state = F_UNUSED;
- cam->frame[i].buf.bytesused = 0;
- }
-}
-
-
-static void et61x251_requeue_outqueue(struct et61x251_device* cam)
-{
- struct et61x251_frame_t *i;
-
- list_for_each_entry(i, &cam->outqueue, frame) {
- i->state = F_QUEUED;
- list_add(&i->frame, &cam->inqueue);
- }
-
- INIT_LIST_HEAD(&cam->outqueue);
-}
-
-
-static void et61x251_queue_unusedframes(struct et61x251_device* cam)
-{
- unsigned long lock_flags;
- u32 i;
-
- for (i = 0; i < cam->nbuffers; i++)
- if (cam->frame[i].state == F_UNUSED) {
- cam->frame[i].state = F_QUEUED;
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- list_add_tail(&cam->frame[i].frame, &cam->inqueue);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
- }
-}
-
-/*****************************************************************************/
-
-int et61x251_write_reg(struct et61x251_device* cam, u8 value, u16 index)
-{
- struct usb_device* udev = cam->usbdev;
- u8* buff = cam->control_buffer;
- int res;
-
- *buff = value;
-
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, index, buff, 1, ET61X251_CTRL_TIMEOUT);
- if (res < 0) {
- DBG(3, "Failed to write a register (value 0x%02X, index "
- "0x%02X, error %d)", value, index, res);
- return -1;
- }
-
- return 0;
-}
-
-
-static int et61x251_read_reg(struct et61x251_device* cam, u16 index)
-{
- struct usb_device* udev = cam->usbdev;
- u8* buff = cam->control_buffer;
- int res;
-
- res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1,
- 0, index, buff, 1, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- DBG(3, "Failed to read a register (index 0x%02X, error %d)",
- index, res);
-
- return (res >= 0) ? (int)(*buff) : -1;
-}
-
-
-static int
-et61x251_i2c_wait(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor)
-{
- int i, r;
-
- for (i = 1; i <= 8; i++) {
- if (sensor->interface == ET61X251_I2C_3WIRES) {
- r = et61x251_read_reg(cam, 0x8e);
- if (!(r & 0x02) && (r >= 0))
- return 0;
- } else {
- r = et61x251_read_reg(cam, 0x8b);
- if (!(r & 0x01) && (r >= 0))
- return 0;
- }
- if (r < 0)
- return -EIO;
- udelay(8*8); /* minimum for sensors at 400kHz */
- }
-
- return -EBUSY;
-}
-
-
-int
-et61x251_i2c_raw_write(struct et61x251_device* cam, u8 n, u8 data1, u8 data2,
- u8 data3, u8 data4, u8 data5, u8 data6, u8 data7,
- u8 data8, u8 address)
-{
- struct usb_device* udev = cam->usbdev;
- u8* data = cam->control_buffer;
- int err = 0, res;
-
- data[0] = data2;
- data[1] = data3;
- data[2] = data4;
- data[3] = data5;
- data[4] = data6;
- data[5] = data7;
- data[6] = data8;
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x81, data, n-1, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- data[0] = address;
- data[1] = cam->sensor.i2c_slave_id;
- data[2] = cam->sensor.rsta | 0x02 | (n << 4);
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x88, data, 3, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- /* Start writing through the serial interface */
- data[0] = data1;
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x80, data, 1, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- err += et61x251_i2c_wait(cam, &cam->sensor);
-
- if (err)
- DBG(3, "I2C raw write failed for %s image sensor",
- cam->sensor.name);
-
- PDBGG("I2C raw write: %u bytes, address = 0x%02X, data1 = 0x%02X, "
- "data2 = 0x%02X, data3 = 0x%02X, data4 = 0x%02X, data5 = 0x%02X,"
- " data6 = 0x%02X, data7 = 0x%02X, data8 = 0x%02X", n, address,
- data1, data2, data3, data4, data5, data6, data7, data8);
-
- return err ? -1 : 0;
-
-}
-
-
-/*****************************************************************************/
-
-static void et61x251_urb_complete(struct urb *urb)
-{
- struct et61x251_device* cam = urb->context;
- struct et61x251_frame_t** f;
- size_t imagesize;
- u8 i;
- int err = 0;
-
- if (urb->status == -ENOENT)
- return;
-
- f = &cam->frame_current;
-
- if (cam->stream == STREAM_INTERRUPT) {
- cam->stream = STREAM_OFF;
- if ((*f))
- (*f)->state = F_QUEUED;
- DBG(3, "Stream interrupted");
- wake_up(&cam->wait_stream);
- }
-
- if (cam->state & DEV_DISCONNECTED)
- return;
-
- if (cam->state & DEV_MISCONFIGURED) {
- wake_up_interruptible(&cam->wait_frame);
- return;
- }
-
- if (cam->stream == STREAM_OFF || list_empty(&cam->inqueue))
- goto resubmit_urb;
-
- if (!(*f))
- (*f) = list_entry(cam->inqueue.next, struct et61x251_frame_t,
- frame);
-
- imagesize = (cam->sensor.pix_format.width *
- cam->sensor.pix_format.height *
- cam->sensor.pix_format.priv) / 8;
-
- for (i = 0; i < urb->number_of_packets; i++) {
- unsigned int len, status;
- void *pos;
- u8* b1, * b2, sof;
- const u8 VOID_BYTES = 6;
- size_t imglen;
-
- len = urb->iso_frame_desc[i].actual_length;
- status = urb->iso_frame_desc[i].status;
- pos = urb->iso_frame_desc[i].offset + urb->transfer_buffer;
-
- if (status) {
- DBG(3, "Error in isochronous frame");
- (*f)->state = F_ERROR;
- continue;
- }
-
- b1 = pos++;
- b2 = pos++;
- sof = ((*b1 & 0x3f) == 63);
- imglen = ((*b1 & 0xc0) << 2) | *b2;
-
- PDBGG("Isochrnous frame: length %u, #%u i, image length %zu",
- len, i, imglen);
-
- if ((*f)->state == F_QUEUED || (*f)->state == F_ERROR)
-start_of_frame:
- if (sof) {
- (*f)->state = F_GRABBING;
- (*f)->buf.bytesused = 0;
- do_gettimeofday(&(*f)->buf.timestamp);
- pos += 22;
- DBG(3, "SOF detected: new video frame");
- }
-
- if ((*f)->state == F_GRABBING) {
- if (sof && (*f)->buf.bytesused) {
- if (cam->sensor.pix_format.pixelformat ==
- V4L2_PIX_FMT_ET61X251)
- goto end_of_frame;
- else {
- DBG(3, "Not expected SOF detected "
- "after %lu bytes",
- (unsigned long)(*f)->buf.bytesused);
- (*f)->state = F_ERROR;
- continue;
- }
- }
-
- if ((*f)->buf.bytesused + imglen > imagesize) {
- DBG(3, "Video frame size exceeded");
- (*f)->state = F_ERROR;
- continue;
- }
-
- pos += VOID_BYTES;
-
- memcpy((*f)->bufmem+(*f)->buf.bytesused, pos, imglen);
- (*f)->buf.bytesused += imglen;
-
- if ((*f)->buf.bytesused == imagesize) {
- u32 b;
-end_of_frame:
- b = (*f)->buf.bytesused;
- (*f)->state = F_DONE;
- (*f)->buf.sequence= ++cam->frame_count;
- spin_lock(&cam->queue_lock);
- list_move_tail(&(*f)->frame, &cam->outqueue);
- if (!list_empty(&cam->inqueue))
- (*f) = list_entry(cam->inqueue.next,
- struct et61x251_frame_t,
- frame);
- else
- (*f) = NULL;
- spin_unlock(&cam->queue_lock);
- DBG(3, "Video frame captured: : %lu bytes",
- (unsigned long)(b));
-
- if (!(*f))
- goto resubmit_urb;
-
- if (sof &&
- cam->sensor.pix_format.pixelformat ==
- V4L2_PIX_FMT_ET61X251)
- goto start_of_frame;
- }
- }
- }
-
-resubmit_urb:
- urb->dev = cam->usbdev;
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0 && err != -EPERM) {
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "usb_submit_urb() failed");
- }
-
- wake_up_interruptible(&cam->wait_frame);
-}
-
-
-static int et61x251_start_transfer(struct et61x251_device* cam)
-{
- struct usb_device *udev = cam->usbdev;
- struct urb* urb;
- struct usb_host_interface* altsetting = usb_altnum_to_altsetting(
- usb_ifnum_to_if(udev, 0),
- ET61X251_ALTERNATE_SETTING);
- const unsigned int psz = le16_to_cpu(altsetting->
- endpoint[0].desc.wMaxPacketSize);
- s8 i, j;
- int err = 0;
-
- for (i = 0; i < ET61X251_URBS; i++) {
- cam->transfer_buffer[i] = kzalloc(ET61X251_ISO_PACKETS * psz,
- GFP_KERNEL);
- if (!cam->transfer_buffer[i]) {
- err = -ENOMEM;
- DBG(1, "Not enough memory");
- goto free_buffers;
- }
- }
-
- for (i = 0; i < ET61X251_URBS; i++) {
- urb = usb_alloc_urb(ET61X251_ISO_PACKETS, GFP_KERNEL);
- cam->urb[i] = urb;
- if (!urb) {
- err = -ENOMEM;
- DBG(1, "usb_alloc_urb() failed");
- goto free_urbs;
- }
- urb->dev = udev;
- urb->context = cam;
- urb->pipe = usb_rcvisocpipe(udev, 1);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->number_of_packets = ET61X251_ISO_PACKETS;
- urb->complete = et61x251_urb_complete;
- urb->transfer_buffer = cam->transfer_buffer[i];
- urb->transfer_buffer_length = psz * ET61X251_ISO_PACKETS;
- urb->interval = 1;
- for (j = 0; j < ET61X251_ISO_PACKETS; j++) {
- urb->iso_frame_desc[j].offset = psz * j;
- urb->iso_frame_desc[j].length = psz;
- }
- }
-
- err = et61x251_write_reg(cam, 0x01, 0x03);
- err = et61x251_write_reg(cam, 0x00, 0x03);
- err = et61x251_write_reg(cam, 0x08, 0x03);
- if (err) {
- err = -EIO;
- DBG(1, "I/O hardware error");
- goto free_urbs;
- }
-
- err = usb_set_interface(udev, 0, ET61X251_ALTERNATE_SETTING);
- if (err) {
- DBG(1, "usb_set_interface() failed");
- goto free_urbs;
- }
-
- cam->frame_current = NULL;
-
- for (i = 0; i < ET61X251_URBS; i++) {
- err = usb_submit_urb(cam->urb[i], GFP_KERNEL);
- if (err) {
- for (j = i-1; j >= 0; j--)
- usb_kill_urb(cam->urb[j]);
- DBG(1, "usb_submit_urb() failed, error %d", err);
- goto free_urbs;
- }
- }
-
- return 0;
-
-free_urbs:
- for (i = 0; (i < ET61X251_URBS) && cam->urb[i]; i++)
- usb_free_urb(cam->urb[i]);
-
-free_buffers:
- for (i = 0; (i < ET61X251_URBS) && cam->transfer_buffer[i]; i++)
- kfree(cam->transfer_buffer[i]);
-
- return err;
-}
-
-
-static int et61x251_stop_transfer(struct et61x251_device* cam)
-{
- struct usb_device *udev = cam->usbdev;
- s8 i;
- int err = 0;
-
- if (cam->state & DEV_DISCONNECTED)
- return 0;
-
- for (i = ET61X251_URBS-1; i >= 0; i--) {
- usb_kill_urb(cam->urb[i]);
- usb_free_urb(cam->urb[i]);
- kfree(cam->transfer_buffer[i]);
- }
-
- err = usb_set_interface(udev, 0, 0); /* 0 Mb/s */
- if (err)
- DBG(3, "usb_set_interface() failed");
-
- return err;
-}
-
-
-static int et61x251_stream_interrupt(struct et61x251_device* cam)
-{
- long timeout;
-
- cam->stream = STREAM_INTERRUPT;
- timeout = wait_event_timeout(cam->wait_stream,
- (cam->stream == STREAM_OFF) ||
- (cam->state & DEV_DISCONNECTED),
- ET61X251_URB_TIMEOUT);
- if (cam->state & DEV_DISCONNECTED)
- return -ENODEV;
- else if (cam->stream != STREAM_OFF) {
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "URB timeout reached. The camera is misconfigured. To "
- "use it, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -EIO;
- }
-
- return 0;
-}
-
-/*****************************************************************************/
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-
-static int et61x251_i2c_try_read(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor,
- u8 address)
-{
- struct usb_device* udev = cam->usbdev;
- u8* data = cam->control_buffer;
- int err = 0, res;
-
- data[0] = address;
- data[1] = cam->sensor.i2c_slave_id;
- data[2] = cam->sensor.rsta | 0x10;
- data[3] = !(et61x251_read_reg(cam, 0x8b) & 0x02);
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x88, data, 4, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- err += et61x251_i2c_wait(cam, sensor);
-
- res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1,
- 0, 0x80, data, 8, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- if (err)
- DBG(3, "I2C read failed for %s image sensor", sensor->name);
-
- PDBGG("I2C read: address 0x%02X, value: 0x%02X", address, data[0]);
-
- return err ? -1 : (int)data[0];
-}
-
-
-static int et61x251_i2c_try_write(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor,
- u8 address, u8 value)
-{
- struct usb_device* udev = cam->usbdev;
- u8* data = cam->control_buffer;
- int err = 0, res;
-
- data[0] = address;
- data[1] = cam->sensor.i2c_slave_id;
- data[2] = cam->sensor.rsta | 0x12;
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x88, data, 3, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- data[0] = value;
- res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, 0x41,
- 0, 0x80, data, 1, ET61X251_CTRL_TIMEOUT);
- if (res < 0)
- err += res;
-
- err += et61x251_i2c_wait(cam, sensor);
-
- if (err)
- DBG(3, "I2C write failed for %s image sensor", sensor->name);
-
- PDBGG("I2C write: address 0x%02X, value: 0x%02X", address, value);
-
- return err ? -1 : 0;
-}
-
-static int et61x251_i2c_read(struct et61x251_device* cam, u8 address)
-{
- return et61x251_i2c_try_read(cam, &cam->sensor, address);
-}
-
-static int et61x251_i2c_write(struct et61x251_device* cam,
- u8 address, u8 value)
-{
- return et61x251_i2c_try_write(cam, &cam->sensor, address, value);
-}
-
-static u8 et61x251_strtou8(const char* buff, size_t len, ssize_t* count)
-{
- char str[5];
- char* endp;
- unsigned long val;
-
- if (len < 4) {
- strncpy(str, buff, len);
- str[len] = '\0';
- } else {
- strncpy(str, buff, 4);
- str[4] = '\0';
- }
-
- val = simple_strtoul(str, &endp, 0);
-
- *count = 0;
- if (val <= 0xff)
- *count = (ssize_t)(endp - str);
- if ((*count) && (len == *count+1) && (buff[*count] == '\n'))
- *count += 1;
-
- return (u8)val;
-}
-
-/*
- NOTE 1: being inside one of the following methods implies that the v4l
- device exists for sure (see kobjects and reference counters)
- NOTE 2: buffers are PAGE_SIZE long
-*/
-
-static ssize_t et61x251_show_reg(struct device* cd,
- struct device_attribute *attr, char* buf)
-{
- struct et61x251_device* cam;
- ssize_t count;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- count = sprintf(buf, "%u\n", cam->sysfs.reg);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t
-et61x251_store_reg(struct device* cd,
- struct device_attribute *attr, const char* buf, size_t len)
-{
- struct et61x251_device* cam;
- u8 index;
- ssize_t count;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- index = et61x251_strtou8(buf, len, &count);
- if (index > 0x8e || !count) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EINVAL;
- }
-
- cam->sysfs.reg = index;
-
- DBG(2, "Moved ET61X[12]51 register index to 0x%02X", cam->sysfs.reg);
- DBG(3, "Written bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t et61x251_show_val(struct device* cd,
- struct device_attribute *attr, char* buf)
-{
- struct et61x251_device* cam;
- ssize_t count;
- int val;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- if ((val = et61x251_read_reg(cam, cam->sysfs.reg)) < 0) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EIO;
- }
-
- count = sprintf(buf, "%d\n", val);
-
- DBG(3, "Read bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t
-et61x251_store_val(struct device* cd, struct device_attribute *attr,
- const char* buf, size_t len)
-{
- struct et61x251_device* cam;
- u8 value;
- ssize_t count;
- int err;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- value = et61x251_strtou8(buf, len, &count);
- if (!count) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EINVAL;
- }
-
- err = et61x251_write_reg(cam, value, cam->sysfs.reg);
- if (err) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EIO;
- }
-
- DBG(2, "Written ET61X[12]51 reg. 0x%02X, val. 0x%02X",
- cam->sysfs.reg, value);
- DBG(3, "Written bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t et61x251_show_i2c_reg(struct device* cd,
- struct device_attribute *attr, char* buf)
-{
- struct et61x251_device* cam;
- ssize_t count;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- count = sprintf(buf, "%u\n", cam->sysfs.i2c_reg);
-
- DBG(3, "Read bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t
-et61x251_store_i2c_reg(struct device* cd, struct device_attribute *attr,
- const char* buf, size_t len)
-{
- struct et61x251_device* cam;
- u8 index;
- ssize_t count;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- index = et61x251_strtou8(buf, len, &count);
- if (!count) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EINVAL;
- }
-
- cam->sysfs.i2c_reg = index;
-
- DBG(2, "Moved sensor register index to 0x%02X", cam->sysfs.i2c_reg);
- DBG(3, "Written bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t et61x251_show_i2c_val(struct device* cd,
- struct device_attribute *attr, char* buf)
-{
- struct et61x251_device* cam;
- ssize_t count;
- int val;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- if (!(cam->sensor.sysfs_ops & ET61X251_I2C_READ)) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENOSYS;
- }
-
- if ((val = et61x251_i2c_read(cam, cam->sysfs.i2c_reg)) < 0) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EIO;
- }
-
- count = sprintf(buf, "%d\n", val);
-
- DBG(3, "Read bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static ssize_t
-et61x251_store_i2c_val(struct device* cd, struct device_attribute *attr,
- const char* buf, size_t len)
-{
- struct et61x251_device* cam;
- u8 value;
- ssize_t count;
- int err;
-
- if (mutex_lock_interruptible(&et61x251_sysfs_lock))
- return -ERESTARTSYS;
-
- cam = video_get_drvdata(to_video_device(cd));
- if (!cam) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENODEV;
- }
-
- if (!(cam->sensor.sysfs_ops & ET61X251_I2C_READ)) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -ENOSYS;
- }
-
- value = et61x251_strtou8(buf, len, &count);
- if (!count) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EINVAL;
- }
-
- err = et61x251_i2c_write(cam, cam->sysfs.i2c_reg, value);
- if (err) {
- mutex_unlock(&et61x251_sysfs_lock);
- return -EIO;
- }
-
- DBG(2, "Written sensor reg. 0x%02X, val. 0x%02X",
- cam->sysfs.i2c_reg, value);
- DBG(3, "Written bytes: %zd", count);
-
- mutex_unlock(&et61x251_sysfs_lock);
-
- return count;
-}
-
-
-static DEVICE_ATTR(reg, S_IRUGO | S_IWUSR,
- et61x251_show_reg, et61x251_store_reg);
-static DEVICE_ATTR(val, S_IRUGO | S_IWUSR,
- et61x251_show_val, et61x251_store_val);
-static DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR,
- et61x251_show_i2c_reg, et61x251_store_i2c_reg);
-static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR,
- et61x251_show_i2c_val, et61x251_store_i2c_val);
-
-
-static int et61x251_create_sysfs(struct et61x251_device* cam)
-{
- struct device *classdev = &(cam->v4ldev->dev);
- int err = 0;
-
- if ((err = device_create_file(classdev, &dev_attr_reg)))
- goto err_out;
- if ((err = device_create_file(classdev, &dev_attr_val)))
- goto err_reg;
-
- if (cam->sensor.sysfs_ops) {
- if ((err = device_create_file(classdev, &dev_attr_i2c_reg)))
- goto err_val;
- if ((err = device_create_file(classdev, &dev_attr_i2c_val)))
- goto err_i2c_reg;
- }
-
-err_i2c_reg:
- if (cam->sensor.sysfs_ops)
- device_remove_file(classdev, &dev_attr_i2c_reg);
-err_val:
- device_remove_file(classdev, &dev_attr_val);
-err_reg:
- device_remove_file(classdev, &dev_attr_reg);
-err_out:
- return err;
-}
-#endif /* CONFIG_VIDEO_ADV_DEBUG */
-
-/*****************************************************************************/
-
-static int
-et61x251_set_pix_format(struct et61x251_device* cam,
- struct v4l2_pix_format* pix)
-{
- int r, err = 0;
-
- if ((r = et61x251_read_reg(cam, 0x12)) < 0)
- err += r;
- if (pix->pixelformat == V4L2_PIX_FMT_ET61X251)
- err += et61x251_write_reg(cam, r & 0xfd, 0x12);
- else
- err += et61x251_write_reg(cam, r | 0x02, 0x12);
-
- return err ? -EIO : 0;
-}
-
-
-static int
-et61x251_set_compression(struct et61x251_device* cam,
- struct v4l2_jpegcompression* compression)
-{
- int r, err = 0;
-
- if ((r = et61x251_read_reg(cam, 0x12)) < 0)
- err += r;
- if (compression->quality == 0)
- err += et61x251_write_reg(cam, r & 0xfb, 0x12);
- else
- err += et61x251_write_reg(cam, r | 0x04, 0x12);
-
- return err ? -EIO : 0;
-}
-
-
-static int et61x251_set_scale(struct et61x251_device* cam, u8 scale)
-{
- int r = 0, err = 0;
-
- r = et61x251_read_reg(cam, 0x12);
- if (r < 0)
- err += r;
-
- if (scale == 1)
- err += et61x251_write_reg(cam, r & ~0x01, 0x12);
- else if (scale == 2)
- err += et61x251_write_reg(cam, r | 0x01, 0x12);
-
- if (err)
- return -EIO;
-
- PDBGG("Scaling factor: %u", scale);
-
- return 0;
-}
-
-
-static int
-et61x251_set_crop(struct et61x251_device* cam, struct v4l2_rect* rect)
-{
- struct et61x251_sensor* s = &cam->sensor;
- u16 fmw_sx = (u16)(rect->left - s->cropcap.bounds.left +
- s->active_pixel.left),
- fmw_sy = (u16)(rect->top - s->cropcap.bounds.top +
- s->active_pixel.top),
- fmw_length = (u16)(rect->width),
- fmw_height = (u16)(rect->height);
- int err = 0;
-
- err += et61x251_write_reg(cam, fmw_sx & 0xff, 0x69);
- err += et61x251_write_reg(cam, fmw_sy & 0xff, 0x6a);
- err += et61x251_write_reg(cam, fmw_length & 0xff, 0x6b);
- err += et61x251_write_reg(cam, fmw_height & 0xff, 0x6c);
- err += et61x251_write_reg(cam, (fmw_sx >> 8) | ((fmw_sy & 0x300) >> 6)
- | ((fmw_length & 0x300) >> 4)
- | ((fmw_height & 0x300) >> 2), 0x6d);
- if (err)
- return -EIO;
-
- PDBGG("fmw_sx, fmw_sy, fmw_length, fmw_height: %u %u %u %u",
- fmw_sx, fmw_sy, fmw_length, fmw_height);
-
- return 0;
-}
-
-
-static int et61x251_init(struct et61x251_device* cam)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_control ctrl;
- struct v4l2_queryctrl *qctrl;
- struct v4l2_rect* rect;
- u8 i = 0;
- int err = 0;
-
- if (!(cam->state & DEV_INITIALIZED)) {
- mutex_init(&cam->open_mutex);
- init_waitqueue_head(&cam->wait_open);
- qctrl = s->qctrl;
- rect = &(s->cropcap.defrect);
- cam->compression.quality = ET61X251_COMPRESSION_QUALITY;
- } else { /* use current values */
- qctrl = s->_qctrl;
- rect = &(s->_rect);
- }
-
- err += et61x251_set_scale(cam, rect->width / s->pix_format.width);
- err += et61x251_set_crop(cam, rect);
- if (err)
- return err;
-
- if (s->init) {
- err = s->init(cam);
- if (err) {
- DBG(3, "Sensor initialization failed");
- return err;
- }
- }
-
- err += et61x251_set_compression(cam, &cam->compression);
- err += et61x251_set_pix_format(cam, &s->pix_format);
- if (s->set_pix_format)
- err += s->set_pix_format(cam, &s->pix_format);
- if (err)
- return err;
-
- if (s->pix_format.pixelformat == V4L2_PIX_FMT_ET61X251)
- DBG(3, "Compressed video format is active, quality %d",
- cam->compression.quality);
- else
- DBG(3, "Uncompressed video format is active");
-
- if (s->set_crop)
- if ((err = s->set_crop(cam, rect))) {
- DBG(3, "set_crop() failed");
- return err;
- }
-
- if (s->set_ctrl) {
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
- if (s->qctrl[i].id != 0 &&
- !(s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)) {
- ctrl.id = s->qctrl[i].id;
- ctrl.value = qctrl[i].default_value;
- err = s->set_ctrl(cam, &ctrl);
- if (err) {
- DBG(3, "Set %s control failed",
- s->qctrl[i].name);
- return err;
- }
- DBG(3, "Image sensor supports '%s' control",
- s->qctrl[i].name);
- }
- }
-
- if (!(cam->state & DEV_INITIALIZED)) {
- mutex_init(&cam->fileop_mutex);
- spin_lock_init(&cam->queue_lock);
- init_waitqueue_head(&cam->wait_frame);
- init_waitqueue_head(&cam->wait_stream);
- cam->nreadbuffers = 2;
- memcpy(s->_qctrl, s->qctrl, sizeof(s->qctrl));
- memcpy(&(s->_rect), &(s->cropcap.defrect),
- sizeof(struct v4l2_rect));
- cam->state |= DEV_INITIALIZED;
- }
-
- DBG(2, "Initialization succeeded");
- return 0;
-}
-
-/*****************************************************************************/
-
-static void et61x251_release_resources(struct kref *kref)
-{
- struct et61x251_device *cam;
-
- mutex_lock(&et61x251_sysfs_lock);
-
- cam = container_of(kref, struct et61x251_device, kref);
-
- DBG(2, "V4L2 device %s deregistered",
- video_device_node_name(cam->v4ldev));
- video_set_drvdata(cam->v4ldev, NULL);
- video_unregister_device(cam->v4ldev);
- usb_put_dev(cam->usbdev);
- kfree(cam->control_buffer);
- kfree(cam);
-
- mutex_unlock(&et61x251_sysfs_lock);
-}
-
-
-static int et61x251_open(struct file *filp)
-{
- struct et61x251_device* cam;
- int err = 0;
-
- if (!down_read_trylock(&et61x251_dev_lock))
- return -ERESTARTSYS;
-
- cam = video_drvdata(filp);
-
- if (wait_for_completion_interruptible(&cam->probe)) {
- up_read(&et61x251_dev_lock);
- return -ERESTARTSYS;
- }
-
- kref_get(&cam->kref);
-
- if (mutex_lock_interruptible(&cam->open_mutex)) {
- kref_put(&cam->kref, et61x251_release_resources);
- up_read(&et61x251_dev_lock);
- return -ERESTARTSYS;
- }
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- err = -ENODEV;
- goto out;
- }
-
- if (cam->users) {
- DBG(2, "Device %s is already in use",
- video_device_node_name(cam->v4ldev));
- DBG(3, "Simultaneous opens are not supported");
- if ((filp->f_flags & O_NONBLOCK) ||
- (filp->f_flags & O_NDELAY)) {
- err = -EWOULDBLOCK;
- goto out;
- }
- DBG(2, "A blocking open() has been requested. Wait for the "
- "device to be released...");
- up_read(&et61x251_dev_lock);
- err = wait_event_interruptible_exclusive(cam->wait_open,
- (cam->state & DEV_DISCONNECTED)
- || !cam->users);
- down_read(&et61x251_dev_lock);
- if (err)
- goto out;
- if (cam->state & DEV_DISCONNECTED) {
- err = -ENODEV;
- goto out;
- }
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- err = et61x251_init(cam);
- if (err) {
- DBG(1, "Initialization failed again. "
- "I will retry on next open().");
- goto out;
- }
- cam->state &= ~DEV_MISCONFIGURED;
- }
-
- if ((err = et61x251_start_transfer(cam)))
- goto out;
-
- filp->private_data = cam;
- cam->users++;
- cam->io = IO_NONE;
- cam->stream = STREAM_OFF;
- cam->nbuffers = 0;
- cam->frame_count = 0;
- et61x251_empty_framequeues(cam);
-
- DBG(3, "Video device %s is open",
- video_device_node_name(cam->v4ldev));
-
-out:
- mutex_unlock(&cam->open_mutex);
- if (err)
- kref_put(&cam->kref, et61x251_release_resources);
- up_read(&et61x251_dev_lock);
- return err;
-}
-
-
-static int et61x251_release(struct file *filp)
-{
- struct et61x251_device* cam;
-
- down_write(&et61x251_dev_lock);
-
- cam = video_drvdata(filp);
-
- et61x251_stop_transfer(cam);
- et61x251_release_buffers(cam);
- cam->users--;
- wake_up_interruptible_nr(&cam->wait_open, 1);
-
- DBG(3, "Video device %s closed",
- video_device_node_name(cam->v4ldev));
-
- kref_put(&cam->kref, et61x251_release_resources);
-
- up_write(&et61x251_dev_lock);
-
- return 0;
-}
-
-
-static ssize_t
-et61x251_read(struct file* filp, char __user * buf,
- size_t count, loff_t* f_pos)
-{
- struct et61x251_device *cam = video_drvdata(filp);
- struct et61x251_frame_t* f, * i;
- unsigned long lock_flags;
- long timeout;
- int err = 0;
-
- if (mutex_lock_interruptible(&cam->fileop_mutex))
- return -ERESTARTSYS;
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- mutex_unlock(&cam->fileop_mutex);
- return -ENODEV;
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- DBG(1, "The camera is misconfigured. Close and open it "
- "again.");
- mutex_unlock(&cam->fileop_mutex);
- return -EIO;
- }
-
- if (cam->io == IO_MMAP) {
- DBG(3, "Close and open the device again to choose the read "
- "method");
- mutex_unlock(&cam->fileop_mutex);
- return -EBUSY;
- }
-
- if (cam->io == IO_NONE) {
- if (!et61x251_request_buffers(cam, cam->nreadbuffers,
- IO_READ)) {
- DBG(1, "read() failed, not enough memory");
- mutex_unlock(&cam->fileop_mutex);
- return -ENOMEM;
- }
- cam->io = IO_READ;
- cam->stream = STREAM_ON;
- }
-
- if (list_empty(&cam->inqueue)) {
- if (!list_empty(&cam->outqueue))
- et61x251_empty_framequeues(cam);
- et61x251_queue_unusedframes(cam);
- }
-
- if (!count) {
- mutex_unlock(&cam->fileop_mutex);
- return 0;
- }
-
- if (list_empty(&cam->outqueue)) {
- if (filp->f_flags & O_NONBLOCK) {
- mutex_unlock(&cam->fileop_mutex);
- return -EAGAIN;
- }
- timeout = wait_event_interruptible_timeout
- ( cam->wait_frame,
- (!list_empty(&cam->outqueue)) ||
- (cam->state & DEV_DISCONNECTED) ||
- (cam->state & DEV_MISCONFIGURED),
- msecs_to_jiffies(
- cam->module_param.frame_timeout * 1000
- )
- );
- if (timeout < 0) {
- mutex_unlock(&cam->fileop_mutex);
- return timeout;
- }
- if (cam->state & DEV_DISCONNECTED) {
- mutex_unlock(&cam->fileop_mutex);
- return -ENODEV;
- }
- if (!timeout || (cam->state & DEV_MISCONFIGURED)) {
- mutex_unlock(&cam->fileop_mutex);
- return -EIO;
- }
- }
-
- f = list_entry(cam->outqueue.prev, struct et61x251_frame_t, frame);
-
- if (count > f->buf.bytesused)
- count = f->buf.bytesused;
-
- if (copy_to_user(buf, f->bufmem, count)) {
- err = -EFAULT;
- goto exit;
- }
- *f_pos += count;
-
-exit:
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- list_for_each_entry(i, &cam->outqueue, frame)
- i->state = F_UNUSED;
- INIT_LIST_HEAD(&cam->outqueue);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
-
- et61x251_queue_unusedframes(cam);
-
- PDBGG("Frame #%lu, bytes read: %zu",
- (unsigned long)f->buf.index, count);
-
- mutex_unlock(&cam->fileop_mutex);
-
- return err ? err : count;
-}
-
-
-static unsigned int et61x251_poll(struct file *filp, poll_table *wait)
-{
- struct et61x251_device *cam = video_drvdata(filp);
- struct et61x251_frame_t* f;
- unsigned long lock_flags;
- unsigned int mask = 0;
-
- if (mutex_lock_interruptible(&cam->fileop_mutex))
- return POLLERR;
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- goto error;
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- DBG(1, "The camera is misconfigured. Close and open it "
- "again.");
- goto error;
- }
-
- if (cam->io == IO_NONE) {
- if (!et61x251_request_buffers(cam, cam->nreadbuffers,
- IO_READ)) {
- DBG(1, "poll() failed, not enough memory");
- goto error;
- }
- cam->io = IO_READ;
- cam->stream = STREAM_ON;
- }
-
- if (cam->io == IO_READ) {
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- list_for_each_entry(f, &cam->outqueue, frame)
- f->state = F_UNUSED;
- INIT_LIST_HEAD(&cam->outqueue);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
- et61x251_queue_unusedframes(cam);
- }
-
- poll_wait(filp, &cam->wait_frame, wait);
-
- if (!list_empty(&cam->outqueue))
- mask |= POLLIN | POLLRDNORM;
-
- mutex_unlock(&cam->fileop_mutex);
-
- return mask;
-
-error:
- mutex_unlock(&cam->fileop_mutex);
- return POLLERR;
-}
-
-
-static void et61x251_vm_open(struct vm_area_struct* vma)
-{
- struct et61x251_frame_t* f = vma->vm_private_data;
- f->vma_use_count++;
-}
-
-
-static void et61x251_vm_close(struct vm_area_struct* vma)
-{
- /* NOTE: buffers are not freed here */
- struct et61x251_frame_t* f = vma->vm_private_data;
- f->vma_use_count--;
-}
-
-
-static const struct vm_operations_struct et61x251_vm_ops = {
- .open = et61x251_vm_open,
- .close = et61x251_vm_close,
-};
-
-
-static int et61x251_mmap(struct file* filp, struct vm_area_struct *vma)
-{
- struct et61x251_device *cam = video_drvdata(filp);
- unsigned long size = vma->vm_end - vma->vm_start,
- start = vma->vm_start;
- void *pos;
- u32 i;
-
- if (mutex_lock_interruptible(&cam->fileop_mutex))
- return -ERESTARTSYS;
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- mutex_unlock(&cam->fileop_mutex);
- return -ENODEV;
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- DBG(1, "The camera is misconfigured. Close and open it "
- "again.");
- mutex_unlock(&cam->fileop_mutex);
- return -EIO;
- }
-
- if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
- mutex_unlock(&cam->fileop_mutex);
- return -EACCES;
- }
-
- if (cam->io != IO_MMAP ||
- size != PAGE_ALIGN(cam->frame[0].buf.length)) {
- mutex_unlock(&cam->fileop_mutex);
- return -EINVAL;
- }
-
- for (i = 0; i < cam->nbuffers; i++) {
- if ((cam->frame[i].buf.m.offset>>PAGE_SHIFT) == vma->vm_pgoff)
- break;
- }
- if (i == cam->nbuffers) {
- mutex_unlock(&cam->fileop_mutex);
- return -EINVAL;
- }
-
- vma->vm_flags |= VM_IO;
- vma->vm_flags |= VM_RESERVED;
-
- pos = cam->frame[i].bufmem;
- while (size > 0) { /* size is page-aligned */
- if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
- mutex_unlock(&cam->fileop_mutex);
- return -EAGAIN;
- }
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- vma->vm_ops = &et61x251_vm_ops;
- vma->vm_private_data = &cam->frame[i];
- et61x251_vm_open(vma);
-
- mutex_unlock(&cam->fileop_mutex);
-
- return 0;
-}
-
-/*****************************************************************************/
-
-static int
-et61x251_vidioc_querycap(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_capability cap = {
- .driver = "et61x251",
- .version = LINUX_VERSION_CODE,
- .capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING,
- };
-
- strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card));
- if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0)
- strlcpy(cap.bus_info, dev_name(&cam->usbdev->dev),
- sizeof(cap.bus_info));
-
- if (copy_to_user(arg, &cap, sizeof(cap)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_enuminput(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_input i;
-
- if (copy_from_user(&i, arg, sizeof(i)))
- return -EFAULT;
-
- if (i.index)
- return -EINVAL;
-
- memset(&i, 0, sizeof(i));
- strcpy(i.name, "Camera");
- i.type = V4L2_INPUT_TYPE_CAMERA;
- i.capabilities = V4L2_IN_CAP_STD;
-
- if (copy_to_user(arg, &i, sizeof(i)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_input(struct et61x251_device* cam, void __user * arg)
-{
- int index = 0;
-
- if (copy_to_user(arg, &index, sizeof(index)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_s_input(struct et61x251_device* cam, void __user * arg)
-{
- int index;
-
- if (copy_from_user(&index, arg, sizeof(index)))
- return -EFAULT;
-
- if (index != 0)
- return -EINVAL;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_query_ctrl(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_queryctrl qc;
- u8 i;
-
- if (copy_from_user(&qc, arg, sizeof(qc)))
- return -EFAULT;
-
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
- if (qc.id && qc.id == s->qctrl[i].id) {
- memcpy(&qc, &(s->qctrl[i]), sizeof(qc));
- if (copy_to_user(arg, &qc, sizeof(qc)))
- return -EFAULT;
- return 0;
- }
-
- return -EINVAL;
-}
-
-
-static int
-et61x251_vidioc_g_ctrl(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_control ctrl;
- int err = 0;
- u8 i;
-
- if (!s->get_ctrl && !s->set_ctrl)
- return -EINVAL;
-
- if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
- return -EFAULT;
-
- if (!s->get_ctrl) {
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
- if (ctrl.id == s->qctrl[i].id) {
- ctrl.value = s->_qctrl[i].default_value;
- goto exit;
- }
- return -EINVAL;
- } else
- err = s->get_ctrl(cam, &ctrl);
-
-exit:
- if (copy_to_user(arg, &ctrl, sizeof(ctrl)))
- return -EFAULT;
-
- return err;
-}
-
-
-static int
-et61x251_vidioc_s_ctrl(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_control ctrl;
- u8 i;
- int err = 0;
-
- if (!s->set_ctrl)
- return -EINVAL;
-
- if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
- return -EFAULT;
-
- for (i = 0; i < ARRAY_SIZE(s->qctrl); i++) {
- if (ctrl.id == s->qctrl[i].id) {
- if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
- return -EINVAL;
- if (ctrl.value < s->qctrl[i].minimum ||
- ctrl.value > s->qctrl[i].maximum)
- return -ERANGE;
- ctrl.value -= ctrl.value % s->qctrl[i].step;
- break;
- }
- }
- if (i == ARRAY_SIZE(s->qctrl))
- return -EINVAL;
- if ((err = s->set_ctrl(cam, &ctrl)))
- return err;
-
- s->_qctrl[i].default_value = ctrl.value;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_cropcap(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_cropcap* cc = &(cam->sensor.cropcap);
-
- cc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- cc->pixelaspect.numerator = 1;
- cc->pixelaspect.denominator = 1;
-
- if (copy_to_user(arg, cc, sizeof(*cc)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_crop(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_crop crop = {
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- };
-
- memcpy(&(crop.c), &(s->_rect), sizeof(struct v4l2_rect));
-
- if (copy_to_user(arg, &crop, sizeof(crop)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_crop crop;
- struct v4l2_rect* rect;
- struct v4l2_rect* bounds = &(s->cropcap.bounds);
- struct v4l2_pix_format* pix_format = &(s->pix_format);
- u8 scale;
- const enum et61x251_stream_state stream = cam->stream;
- const u32 nbuffers = cam->nbuffers;
- u32 i;
- int err = 0;
-
- if (copy_from_user(&crop, arg, sizeof(crop)))
- return -EFAULT;
-
- rect = &(crop.c);
-
- if (crop.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (cam->module_param.force_munmap)
- for (i = 0; i < cam->nbuffers; i++)
- if (cam->frame[i].vma_use_count) {
- DBG(3, "VIDIOC_S_CROP failed. "
- "Unmap the buffers first.");
- return -EBUSY;
- }
-
- /* Preserve R,G or B origin */
- rect->left = (s->_rect.left & 1L) ? rect->left | 1L : rect->left & ~1L;
- rect->top = (s->_rect.top & 1L) ? rect->top | 1L : rect->top & ~1L;
-
- if (rect->width < 16)
- rect->width = 16;
- if (rect->height < 16)
- rect->height = 16;
- if (rect->width > bounds->width)
- rect->width = bounds->width;
- if (rect->height > bounds->height)
- rect->height = bounds->height;
- if (rect->left < bounds->left)
- rect->left = bounds->left;
- if (rect->top < bounds->top)
- rect->top = bounds->top;
- if (rect->left + rect->width > bounds->left + bounds->width)
- rect->left = bounds->left+bounds->width - rect->width;
- if (rect->top + rect->height > bounds->top + bounds->height)
- rect->top = bounds->top+bounds->height - rect->height;
-
- rect->width &= ~15L;
- rect->height &= ~15L;
-
- if (ET61X251_PRESERVE_IMGSCALE) {
- /* Calculate the actual scaling factor */
- u32 a, b;
- a = rect->width * rect->height;
- b = pix_format->width * pix_format->height;
- scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1;
- } else
- scale = 1;
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- if (copy_to_user(arg, &crop, sizeof(crop))) {
- cam->stream = stream;
- return -EFAULT;
- }
-
- if (cam->module_param.force_munmap || cam->io == IO_READ)
- et61x251_release_buffers(cam);
-
- err = et61x251_set_crop(cam, rect);
- if (s->set_crop)
- err += s->set_crop(cam, rect);
- err += et61x251_set_scale(cam, scale);
-
- if (err) { /* atomic, no rollback in ioctl() */
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
- "use the camera, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -EIO;
- }
-
- s->pix_format.width = rect->width/scale;
- s->pix_format.height = rect->height/scale;
- memcpy(&(s->_rect), rect, sizeof(*rect));
-
- if ((cam->module_param.force_munmap || cam->io == IO_READ) &&
- nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
- "use the camera, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -ENOMEM;
- }
-
- if (cam->io == IO_READ)
- et61x251_empty_framequeues(cam);
- else if (cam->module_param.force_munmap)
- et61x251_requeue_outqueue(cam);
-
- cam->stream = stream;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_enum_framesizes(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_frmsizeenum frmsize;
-
- if (copy_from_user(&frmsize, arg, sizeof(frmsize)))
- return -EFAULT;
-
- if (frmsize.index != 0)
- return -EINVAL;
-
- if (frmsize.pixel_format != V4L2_PIX_FMT_ET61X251 &&
- frmsize.pixel_format != V4L2_PIX_FMT_SBGGR8)
- return -EINVAL;
-
- frmsize.type = V4L2_FRMSIZE_TYPE_STEPWISE;
- frmsize.stepwise.min_width = frmsize.stepwise.step_width = 16;
- frmsize.stepwise.min_height = frmsize.stepwise.step_height = 16;
- frmsize.stepwise.max_width = cam->sensor.cropcap.bounds.width;
- frmsize.stepwise.max_height = cam->sensor.cropcap.bounds.height;
- memset(&frmsize.reserved, 0, sizeof(frmsize.reserved));
-
- if (copy_to_user(arg, &frmsize, sizeof(frmsize)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_enum_fmt(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_fmtdesc fmtd;
-
- if (copy_from_user(&fmtd, arg, sizeof(fmtd)))
- return -EFAULT;
-
- if (fmtd.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (fmtd.index == 0) {
- strcpy(fmtd.description, "bayer rgb");
- fmtd.pixelformat = V4L2_PIX_FMT_SBGGR8;
- } else if (fmtd.index == 1) {
- strcpy(fmtd.description, "compressed");
- fmtd.pixelformat = V4L2_PIX_FMT_ET61X251;
- fmtd.flags = V4L2_FMT_FLAG_COMPRESSED;
- } else
- return -EINVAL;
-
- fmtd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- memset(&fmtd.reserved, 0, sizeof(fmtd.reserved));
-
- if (copy_to_user(arg, &fmtd, sizeof(fmtd)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_fmt(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_format format;
- struct v4l2_pix_format* pfmt = &(cam->sensor.pix_format);
-
- if (copy_from_user(&format, arg, sizeof(format)))
- return -EFAULT;
-
- if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- pfmt->colorspace = (pfmt->pixelformat == V4L2_PIX_FMT_ET61X251) ?
- 0 : V4L2_COLORSPACE_SRGB;
- pfmt->bytesperline = (pfmt->pixelformat==V4L2_PIX_FMT_ET61X251)
- ? 0 : (pfmt->width * pfmt->priv) / 8;
- pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8);
- pfmt->field = V4L2_FIELD_NONE;
- memcpy(&(format.fmt.pix), pfmt, sizeof(*pfmt));
-
- if (copy_to_user(arg, &format, sizeof(format)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
- void __user * arg)
-{
- struct et61x251_sensor* s = &cam->sensor;
- struct v4l2_format format;
- struct v4l2_pix_format* pix;
- struct v4l2_pix_format* pfmt = &(s->pix_format);
- struct v4l2_rect* bounds = &(s->cropcap.bounds);
- struct v4l2_rect rect;
- u8 scale;
- const enum et61x251_stream_state stream = cam->stream;
- const u32 nbuffers = cam->nbuffers;
- u32 i;
- int err = 0;
-
- if (copy_from_user(&format, arg, sizeof(format)))
- return -EFAULT;
-
- pix = &(format.fmt.pix);
-
- if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- memcpy(&rect, &(s->_rect), sizeof(rect));
-
- { /* calculate the actual scaling factor */
- u32 a, b;
- a = rect.width * rect.height;
- b = pix->width * pix->height;
- scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1;
- }
-
- rect.width = scale * pix->width;
- rect.height = scale * pix->height;
-
- if (rect.width < 16)
- rect.width = 16;
- if (rect.height < 16)
- rect.height = 16;
- if (rect.width > bounds->left + bounds->width - rect.left)
- rect.width = bounds->left + bounds->width - rect.left;
- if (rect.height > bounds->top + bounds->height - rect.top)
- rect.height = bounds->top + bounds->height - rect.top;
-
- rect.width &= ~15L;
- rect.height &= ~15L;
-
- { /* adjust the scaling factor */
- u32 a, b;
- a = rect.width * rect.height;
- b = pix->width * pix->height;
- scale = b ? (u8)((a / b) < 4 ? 1 : 2) : 1;
- }
-
- pix->width = rect.width / scale;
- pix->height = rect.height / scale;
-
- if (pix->pixelformat != V4L2_PIX_FMT_ET61X251 &&
- pix->pixelformat != V4L2_PIX_FMT_SBGGR8)
- pix->pixelformat = pfmt->pixelformat;
- pix->priv = pfmt->priv; /* bpp */
- pix->colorspace = (pix->pixelformat == V4L2_PIX_FMT_ET61X251) ?
- 0 : V4L2_COLORSPACE_SRGB;
- pix->colorspace = pfmt->colorspace;
- pix->bytesperline = (pix->pixelformat == V4L2_PIX_FMT_ET61X251)
- ? 0 : (pix->width * pix->priv) / 8;
- pix->sizeimage = pix->height * ((pix->width * pix->priv) / 8);
- pix->field = V4L2_FIELD_NONE;
-
- if (cmd == VIDIOC_TRY_FMT) {
- if (copy_to_user(arg, &format, sizeof(format)))
- return -EFAULT;
- return 0;
- }
-
- if (cam->module_param.force_munmap)
- for (i = 0; i < cam->nbuffers; i++)
- if (cam->frame[i].vma_use_count) {
- DBG(3, "VIDIOC_S_FMT failed. "
- "Unmap the buffers first.");
- return -EBUSY;
- }
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- if (copy_to_user(arg, &format, sizeof(format))) {
- cam->stream = stream;
- return -EFAULT;
- }
-
- if (cam->module_param.force_munmap || cam->io == IO_READ)
- et61x251_release_buffers(cam);
-
- err += et61x251_set_pix_format(cam, pix);
- err += et61x251_set_crop(cam, &rect);
- if (s->set_pix_format)
- err += s->set_pix_format(cam, pix);
- if (s->set_crop)
- err += s->set_crop(cam, &rect);
- err += et61x251_set_scale(cam, scale);
-
- if (err) { /* atomic, no rollback in ioctl() */
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
- "use the camera, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -EIO;
- }
-
- memcpy(pfmt, pix, sizeof(*pix));
- memcpy(&(s->_rect), &rect, sizeof(rect));
-
- if ((cam->module_param.force_munmap || cam->io == IO_READ) &&
- nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
- "use the camera, close and open %s again.",
- video_device_node_name(cam->v4ldev));
- return -ENOMEM;
- }
-
- if (cam->io == IO_READ)
- et61x251_empty_framequeues(cam);
- else if (cam->module_param.force_munmap)
- et61x251_requeue_outqueue(cam);
-
- cam->stream = stream;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_jpegcomp(struct et61x251_device* cam, void __user * arg)
-{
- if (copy_to_user(arg, &cam->compression,
- sizeof(cam->compression)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_jpegcompression jc;
- const enum et61x251_stream_state stream = cam->stream;
- int err = 0;
-
- if (copy_from_user(&jc, arg, sizeof(jc)))
- return -EFAULT;
-
- if (jc.quality != 0 && jc.quality != 1)
- return -EINVAL;
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- err += et61x251_set_compression(cam, &jc);
- if (err) { /* atomic, no rollback in ioctl() */
- cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
- "problems. To use the camera, close and open "
- "%s again.", video_device_node_name(cam->v4ldev));
- return -EIO;
- }
-
- cam->compression.quality = jc.quality;
-
- cam->stream = stream;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_reqbufs(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_requestbuffers rb;
- u32 i;
- int err;
-
- if (copy_from_user(&rb, arg, sizeof(rb)))
- return -EFAULT;
-
- if (rb.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- rb.memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
-
- if (cam->io == IO_READ) {
- DBG(3, "Close and open the device again to choose the mmap "
- "I/O method");
- return -EBUSY;
- }
-
- for (i = 0; i < cam->nbuffers; i++)
- if (cam->frame[i].vma_use_count) {
- DBG(3, "VIDIOC_REQBUFS failed. "
- "Previous buffers are still mapped.");
- return -EBUSY;
- }
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- et61x251_empty_framequeues(cam);
-
- et61x251_release_buffers(cam);
- if (rb.count)
- rb.count = et61x251_request_buffers(cam, rb.count, IO_MMAP);
-
- if (copy_to_user(arg, &rb, sizeof(rb))) {
- et61x251_release_buffers(cam);
- cam->io = IO_NONE;
- return -EFAULT;
- }
-
- cam->io = rb.count ? IO_MMAP : IO_NONE;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_querybuf(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_buffer b;
-
- if (copy_from_user(&b, arg, sizeof(b)))
- return -EFAULT;
-
- if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- b.index >= cam->nbuffers || cam->io != IO_MMAP)
- return -EINVAL;
-
- memcpy(&b, &cam->frame[b.index].buf, sizeof(b));
-
- if (cam->frame[b.index].vma_use_count)
- b.flags |= V4L2_BUF_FLAG_MAPPED;
-
- if (cam->frame[b.index].state == F_DONE)
- b.flags |= V4L2_BUF_FLAG_DONE;
- else if (cam->frame[b.index].state != F_UNUSED)
- b.flags |= V4L2_BUF_FLAG_QUEUED;
-
- if (copy_to_user(arg, &b, sizeof(b)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_qbuf(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_buffer b;
- unsigned long lock_flags;
-
- if (copy_from_user(&b, arg, sizeof(b)))
- return -EFAULT;
-
- if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- b.index >= cam->nbuffers || cam->io != IO_MMAP)
- return -EINVAL;
-
- if (cam->frame[b.index].state != F_UNUSED)
- return -EINVAL;
-
- cam->frame[b.index].state = F_QUEUED;
-
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- list_add_tail(&cam->frame[b.index].frame, &cam->inqueue);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
-
- PDBGG("Frame #%lu queued", (unsigned long)b.index);
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_dqbuf(struct et61x251_device* cam, struct file* filp,
- void __user * arg)
-{
- struct v4l2_buffer b;
- struct et61x251_frame_t *f;
- unsigned long lock_flags;
- long timeout;
-
- if (copy_from_user(&b, arg, sizeof(b)))
- return -EFAULT;
-
- if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io!= IO_MMAP)
- return -EINVAL;
-
- if (list_empty(&cam->outqueue)) {
- if (cam->stream == STREAM_OFF)
- return -EINVAL;
- if (filp->f_flags & O_NONBLOCK)
- return -EAGAIN;
- timeout = wait_event_interruptible_timeout
- ( cam->wait_frame,
- (!list_empty(&cam->outqueue)) ||
- (cam->state & DEV_DISCONNECTED) ||
- (cam->state & DEV_MISCONFIGURED),
- cam->module_param.frame_timeout *
- 1000 * msecs_to_jiffies(1) );
- if (timeout < 0)
- return timeout;
- if (cam->state & DEV_DISCONNECTED)
- return -ENODEV;
- if (!timeout || (cam->state & DEV_MISCONFIGURED))
- return -EIO;
- }
-
- spin_lock_irqsave(&cam->queue_lock, lock_flags);
- f = list_entry(cam->outqueue.next, struct et61x251_frame_t, frame);
- list_del(cam->outqueue.next);
- spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
-
- f->state = F_UNUSED;
-
- memcpy(&b, &f->buf, sizeof(b));
- if (f->vma_use_count)
- b.flags |= V4L2_BUF_FLAG_MAPPED;
-
- if (copy_to_user(arg, &b, sizeof(b)))
- return -EFAULT;
-
- PDBGG("Frame #%lu dequeued", (unsigned long)f->buf.index);
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_streamon(struct et61x251_device* cam, void __user * arg)
-{
- int type;
-
- if (copy_from_user(&type, arg, sizeof(type)))
- return -EFAULT;
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
- return -EINVAL;
-
- cam->stream = STREAM_ON;
-
- DBG(3, "Stream on");
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_streamoff(struct et61x251_device* cam, void __user * arg)
-{
- int type, err;
-
- if (copy_from_user(&type, arg, sizeof(type)))
- return -EFAULT;
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
- return -EINVAL;
-
- if (cam->stream == STREAM_ON)
- if ((err = et61x251_stream_interrupt(cam)))
- return err;
-
- et61x251_empty_framequeues(cam);
-
- DBG(3, "Stream off");
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_g_parm(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_streamparm sp;
-
- if (copy_from_user(&sp, arg, sizeof(sp)))
- return -EFAULT;
-
- if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- sp.parm.capture.extendedmode = 0;
- sp.parm.capture.readbuffers = cam->nreadbuffers;
-
- if (copy_to_user(arg, &sp, sizeof(sp)))
- return -EFAULT;
-
- return 0;
-}
-
-
-static int
-et61x251_vidioc_s_parm(struct et61x251_device* cam, void __user * arg)
-{
- struct v4l2_streamparm sp;
-
- if (copy_from_user(&sp, arg, sizeof(sp)))
- return -EFAULT;
-
- if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- sp.parm.capture.extendedmode = 0;
-
- if (sp.parm.capture.readbuffers == 0)
- sp.parm.capture.readbuffers = cam->nreadbuffers;
-
- if (sp.parm.capture.readbuffers > ET61X251_MAX_FRAMES)
- sp.parm.capture.readbuffers = ET61X251_MAX_FRAMES;
-
- if (copy_to_user(arg, &sp, sizeof(sp)))
- return -EFAULT;
-
- cam->nreadbuffers = sp.parm.capture.readbuffers;
-
- return 0;
-}
-
-
-static long et61x251_ioctl_v4l2(struct file *filp,
- unsigned int cmd, void __user *arg)
-{
- struct et61x251_device *cam = video_drvdata(filp);
-
- switch (cmd) {
-
- case VIDIOC_QUERYCAP:
- return et61x251_vidioc_querycap(cam, arg);
-
- case VIDIOC_ENUMINPUT:
- return et61x251_vidioc_enuminput(cam, arg);
-
- case VIDIOC_G_INPUT:
- return et61x251_vidioc_g_input(cam, arg);
-
- case VIDIOC_S_INPUT:
- return et61x251_vidioc_s_input(cam, arg);
-
- case VIDIOC_QUERYCTRL:
- return et61x251_vidioc_query_ctrl(cam, arg);
-
- case VIDIOC_G_CTRL:
- return et61x251_vidioc_g_ctrl(cam, arg);
-
- case VIDIOC_S_CTRL:
- return et61x251_vidioc_s_ctrl(cam, arg);
-
- case VIDIOC_CROPCAP:
- return et61x251_vidioc_cropcap(cam, arg);
-
- case VIDIOC_G_CROP:
- return et61x251_vidioc_g_crop(cam, arg);
-
- case VIDIOC_S_CROP:
- return et61x251_vidioc_s_crop(cam, arg);
-
- case VIDIOC_ENUM_FMT:
- return et61x251_vidioc_enum_fmt(cam, arg);
-
- case VIDIOC_G_FMT:
- return et61x251_vidioc_g_fmt(cam, arg);
-
- case VIDIOC_TRY_FMT:
- case VIDIOC_S_FMT:
- return et61x251_vidioc_try_s_fmt(cam, cmd, arg);
-
- case VIDIOC_ENUM_FRAMESIZES:
- return et61x251_vidioc_enum_framesizes(cam, arg);
-
- case VIDIOC_G_JPEGCOMP:
- return et61x251_vidioc_g_jpegcomp(cam, arg);
-
- case VIDIOC_S_JPEGCOMP:
- return et61x251_vidioc_s_jpegcomp(cam, arg);
-
- case VIDIOC_REQBUFS:
- return et61x251_vidioc_reqbufs(cam, arg);
-
- case VIDIOC_QUERYBUF:
- return et61x251_vidioc_querybuf(cam, arg);
-
- case VIDIOC_QBUF:
- return et61x251_vidioc_qbuf(cam, arg);
-
- case VIDIOC_DQBUF:
- return et61x251_vidioc_dqbuf(cam, filp, arg);
-
- case VIDIOC_STREAMON:
- return et61x251_vidioc_streamon(cam, arg);
-
- case VIDIOC_STREAMOFF:
- return et61x251_vidioc_streamoff(cam, arg);
-
- case VIDIOC_G_PARM:
- return et61x251_vidioc_g_parm(cam, arg);
-
- case VIDIOC_S_PARM:
- return et61x251_vidioc_s_parm(cam, arg);
-
- default:
- return -ENOTTY;
-
- }
-}
-
-
-static long et61x251_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct et61x251_device *cam = video_drvdata(filp);
- long err = 0;
-
- if (mutex_lock_interruptible(&cam->fileop_mutex))
- return -ERESTARTSYS;
-
- if (cam->state & DEV_DISCONNECTED) {
- DBG(1, "Device not present");
- mutex_unlock(&cam->fileop_mutex);
- return -ENODEV;
- }
-
- if (cam->state & DEV_MISCONFIGURED) {
- DBG(1, "The camera is misconfigured. Close and open it "
- "again.");
- mutex_unlock(&cam->fileop_mutex);
- return -EIO;
- }
-
- V4LDBG(3, "et61x251", cmd);
-
- err = et61x251_ioctl_v4l2(filp, cmd, (void __user *)arg);
-
- mutex_unlock(&cam->fileop_mutex);
-
- return err;
-}
-
-
-static const struct v4l2_file_operations et61x251_fops = {
- .owner = THIS_MODULE,
- .open = et61x251_open,
- .release = et61x251_release,
- .unlocked_ioctl = et61x251_ioctl,
- .read = et61x251_read,
- .poll = et61x251_poll,
- .mmap = et61x251_mmap,
-};
-
-/*****************************************************************************/
-
-/* It exists a single interface only. We do not need to validate anything. */
-static int
-et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
- struct et61x251_device* cam;
- static unsigned int dev_nr;
- unsigned int i;
- int err = 0;
-
- if (!(cam = kzalloc(sizeof(struct et61x251_device), GFP_KERNEL)))
- return -ENOMEM;
-
- cam->usbdev = udev;
-
- if (!(cam->control_buffer = kzalloc(8, GFP_KERNEL))) {
- DBG(1, "kmalloc() failed");
- err = -ENOMEM;
- goto fail;
- }
-
- if (!(cam->v4ldev = video_device_alloc())) {
- DBG(1, "video_device_alloc() failed");
- err = -ENOMEM;
- goto fail;
- }
-
- DBG(2, "ET61X[12]51 PC Camera Controller detected "
- "(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);
-
- for (i = 0; et61x251_sensor_table[i]; i++) {
- err = et61x251_sensor_table[i](cam);
- if (!err)
- break;
- }
-
- if (!err)
- DBG(2, "%s image sensor detected", cam->sensor.name);
- else {
- DBG(1, "No supported image sensor detected");
- err = -ENODEV;
- goto fail;
- }
-
- if (et61x251_init(cam)) {
- DBG(1, "Initialization failed. I will retry on open().");
- cam->state |= DEV_MISCONFIGURED;
- }
-
- strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera");
- cam->v4ldev->fops = &et61x251_fops;
- cam->v4ldev->release = video_device_release;
- cam->v4ldev->parent = &udev->dev;
- video_set_drvdata(cam->v4ldev, cam);
-
- init_completion(&cam->probe);
-
- err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
- video_nr[dev_nr]);
- if (err) {
- DBG(1, "V4L2 device registration failed");
- if (err == -ENFILE && video_nr[dev_nr] == -1)
- DBG(1, "Free /dev/videoX node not found");
- video_nr[dev_nr] = -1;
- dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
- complete_all(&cam->probe);
- goto fail;
- }
-
- DBG(2, "V4L2 device registered as %s",
- video_device_node_name(cam->v4ldev));
-
- cam->module_param.force_munmap = force_munmap[dev_nr];
- cam->module_param.frame_timeout = frame_timeout[dev_nr];
-
- dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- err = et61x251_create_sysfs(cam);
- if (!err)
- DBG(2, "Optional device control through 'sysfs' "
- "interface ready");
- else
- DBG(2, "Failed to create 'sysfs' interface for optional "
- "device controlling. Error #%d", err);
-#else
- DBG(2, "Optional device control through 'sysfs' interface disabled");
- DBG(3, "Compile the kernel with the 'CONFIG_VIDEO_ADV_DEBUG' "
- "configuration option to enable it.");
-#endif
-
- usb_set_intfdata(intf, cam);
- kref_init(&cam->kref);
- usb_get_dev(cam->usbdev);
-
- complete_all(&cam->probe);
-
- return 0;
-
-fail:
- if (cam) {
- kfree(cam->control_buffer);
- if (cam->v4ldev)
- video_device_release(cam->v4ldev);
- kfree(cam);
- }
- return err;
-}
-
-
-static void et61x251_usb_disconnect(struct usb_interface* intf)
-{
- struct et61x251_device* cam;
-
- down_write(&et61x251_dev_lock);
-
- cam = usb_get_intfdata(intf);
-
- DBG(2, "Disconnecting %s...", cam->v4ldev->name);
-
- if (cam->users) {
- DBG(2, "Device %s is open! Deregistration and memory "
- "deallocation are deferred.",
- video_device_node_name(cam->v4ldev));
- cam->state |= DEV_MISCONFIGURED;
- et61x251_stop_transfer(cam);
- cam->state |= DEV_DISCONNECTED;
- wake_up_interruptible(&cam->wait_frame);
- wake_up(&cam->wait_stream);
- } else
- cam->state |= DEV_DISCONNECTED;
-
- wake_up_interruptible_all(&cam->wait_open);
-
- kref_put(&cam->kref, et61x251_release_resources);
-
- up_write(&et61x251_dev_lock);
-}
-
-
-static struct usb_driver et61x251_usb_driver = {
- .name = "et61x251",
- .id_table = et61x251_id_table,
- .probe = et61x251_usb_probe,
- .disconnect = et61x251_usb_disconnect,
-};
-
-module_usb_driver(et61x251_usb_driver);
diff --git a/drivers/media/video/et61x251/et61x251_sensor.h b/drivers/media/video/et61x251/et61x251_sensor.h
deleted file mode 100644
index 71a03148cb09..000000000000
--- a/drivers/media/video/et61x251/et61x251_sensor.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/***************************************************************************
- * API for image sensors connected to ET61X[12]51 PC Camera Controllers *
- * *
- * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- ***************************************************************************/
-
-#ifndef _ET61X251_SENSOR_H_
-#define _ET61X251_SENSOR_H_
-
-#include <linux/usb.h>
-#include <linux/videodev2.h>
-#include <linux/device.h>
-#include <linux/stddef.h>
-#include <linux/errno.h>
-#include <asm/types.h>
-
-struct et61x251_device;
-struct et61x251_sensor;
-
-/*****************************************************************************/
-
-extern int et61x251_probe_tas5130d1b(struct et61x251_device* cam);
-
-#define ET61X251_SENSOR_TABLE \
-/* Weak detections must go at the end of the list */ \
-static int (*et61x251_sensor_table[])(struct et61x251_device*) = { \
- &et61x251_probe_tas5130d1b, \
- NULL, \
-};
-
-extern struct et61x251_device*
-et61x251_match_id(struct et61x251_device* cam, const struct usb_device_id *id);
-
-extern void
-et61x251_attach_sensor(struct et61x251_device* cam,
- const struct et61x251_sensor* sensor);
-
-/*****************************************************************************/
-
-extern int et61x251_write_reg(struct et61x251_device*, u8 value, u16 index);
-extern int et61x251_i2c_raw_write(struct et61x251_device*, u8 n, u8 data1,
- u8 data2, u8 data3, u8 data4, u8 data5,
- u8 data6, u8 data7, u8 data8, u8 address);
-
-/*****************************************************************************/
-
-enum et61x251_i2c_sysfs_ops {
- ET61X251_I2C_READ = 0x01,
- ET61X251_I2C_WRITE = 0x02,
-};
-
-enum et61x251_i2c_interface {
- ET61X251_I2C_2WIRES,
- ET61X251_I2C_3WIRES,
-};
-
-/* Repeat start condition when RSTA is high */
-enum et61x251_i2c_rsta {
- ET61X251_I2C_RSTA_STOP = 0x00, /* stop then start */
- ET61X251_I2C_RSTA_REPEAT = 0x01, /* repeat start */
-};
-
-#define ET61X251_MAX_CTRLS (V4L2_CID_LASTP1-V4L2_CID_BASE+10)
-
-struct et61x251_sensor {
- char name[32];
-
- enum et61x251_i2c_sysfs_ops sysfs_ops;
-
- enum et61x251_i2c_interface interface;
- u8 i2c_slave_id;
- enum et61x251_i2c_rsta rsta;
- struct v4l2_rect active_pixel; /* left and top define FVSX and FVSY */
-
- struct v4l2_queryctrl qctrl[ET61X251_MAX_CTRLS];
- struct v4l2_cropcap cropcap;
- struct v4l2_pix_format pix_format;
-
- int (*init)(struct et61x251_device* cam);
- int (*get_ctrl)(struct et61x251_device* cam,
- struct v4l2_control* ctrl);
- int (*set_ctrl)(struct et61x251_device* cam,
- const struct v4l2_control* ctrl);
- int (*set_crop)(struct et61x251_device* cam,
- const struct v4l2_rect* rect);
- int (*set_pix_format)(struct et61x251_device* cam,
- const struct v4l2_pix_format* pix);
-
- /* Private */
- struct v4l2_queryctrl _qctrl[ET61X251_MAX_CTRLS];
- struct v4l2_rect _rect;
-};
-
-#endif /* _ET61X251_SENSOR_H_ */
diff --git a/drivers/media/video/et61x251/et61x251_tas5130d1b.c b/drivers/media/video/et61x251/et61x251_tas5130d1b.c
deleted file mode 100644
index ced2e167935d..000000000000
--- a/drivers/media/video/et61x251/et61x251_tas5130d1b.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/***************************************************************************
- * Plug-in for TAS5130D1B image sensor connected to the ET61X[12]51 *
- * PC Camera Controllers *
- * *
- * Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
- * *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License as published by *
- * the Free Software Foundation; either version 2 of the License, or *
- * (at your option) any later version. *
- * *
- * This program is distributed in the hope that it will be useful, *
- * but WITHOUT ANY WARRANTY; without even the implied warranty of *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
- * GNU General Public License for more details. *
- * *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- ***************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "et61x251_sensor.h"
-
-
-static int tas5130d1b_init(struct et61x251_device* cam)
-{
- int err = 0;
-
- err += et61x251_write_reg(cam, 0x14, 0x01);
- err += et61x251_write_reg(cam, 0x1b, 0x02);
- err += et61x251_write_reg(cam, 0x02, 0x12);
- err += et61x251_write_reg(cam, 0x0e, 0x60);
- err += et61x251_write_reg(cam, 0x80, 0x61);
- err += et61x251_write_reg(cam, 0xf0, 0x62);
- err += et61x251_write_reg(cam, 0x03, 0x63);
- err += et61x251_write_reg(cam, 0x14, 0x64);
- err += et61x251_write_reg(cam, 0xf4, 0x65);
- err += et61x251_write_reg(cam, 0x01, 0x66);
- err += et61x251_write_reg(cam, 0x05, 0x67);
- err += et61x251_write_reg(cam, 0x8f, 0x68);
- err += et61x251_write_reg(cam, 0x0f, 0x8d);
- err += et61x251_write_reg(cam, 0x08, 0x8e);
-
- return err;
-}
-
-
-static int tas5130d1b_set_ctrl(struct et61x251_device* cam,
- const struct v4l2_control* ctrl)
-{
- int err = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- err += et61x251_i2c_raw_write(cam, 2, 0x20,
- 0xf6-ctrl->value, 0, 0, 0,
- 0, 0, 0, 0);
- break;
- case V4L2_CID_EXPOSURE:
- err += et61x251_i2c_raw_write(cam, 2, 0x40,
- 0x47-ctrl->value, 0, 0, 0,
- 0, 0, 0, 0);
- break;
- default:
- return -EINVAL;
- }
-
- return err ? -EIO : 0;
-}
-
-
-static const struct et61x251_sensor tas5130d1b = {
- .name = "TAS5130D1B",
- .interface = ET61X251_I2C_3WIRES,
- .rsta = ET61X251_I2C_RSTA_STOP,
- .active_pixel = {
- .left = 106,
- .top = 13,
- },
- .init = &tas5130d1b_init,
- .qctrl = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "global gain",
- .minimum = 0x00,
- .maximum = 0xf6,
- .step = 0x02,
- .default_value = 0x0d,
- .flags = 0,
- },
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0x00,
- .maximum = 0x47,
- .step = 0x01,
- .default_value = 0x23,
- .flags = 0,
- },
- },
- .set_ctrl = &tas5130d1b_set_ctrl,
- .cropcap = {
- .bounds = {
- .left = 0,
- .top = 0,
- .width = 640,
- .height = 480,
- },
- .defrect = {
- .left = 0,
- .top = 0,
- .width = 640,
- .height = 480,
- },
- },
- .pix_format = {
- .width = 640,
- .height = 480,
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- .priv = 8,
- },
-};
-
-
-int et61x251_probe_tas5130d1b(struct et61x251_device* cam)
-{
- const struct usb_device_id tas5130d1b_id_table[] = {
- { USB_DEVICE(0x102c, 0x6251), },
- { }
- };
-
- /* Sensor detection is based on USB pid/vid */
- if (!et61x251_match_id(cam, tas5130d1b_id_table))
- return -ENODEV;
-
- et61x251_attach_sensor(cam, &tas5130d1b);
-
- return 0;
-}
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 27e3e0c0b219..777486f7cadb 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -1544,6 +1544,10 @@ static int __devinit viu_of_probe(struct platform_device *op)
/* initialize locks */
mutex_init(&viu_dev->lock);
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &viu_dev->vdev->flags);
viu_dev->vdev->lock = &viu_dev->lock;
spin_lock_init(&viu_dev->slock);
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index 79ebe46e1ad7..c901da0bd657 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_USB_GSPCA_VICAM) += gspca_vicam.o
obj-$(CONFIG_USB_GSPCA_XIRLINK_CIT) += gspca_xirlink_cit.o
obj-$(CONFIG_USB_GSPCA_ZC3XX) += gspca_zc3xx.o
-gspca_main-objs := gspca.o
+gspca_main-objs := gspca.o autogain_functions.o
gspca_benq-objs := benq.o
gspca_conex-objs := conex.o
gspca_cpia1-objs := cpia1.o
diff --git a/drivers/media/video/gspca/autogain_functions.c b/drivers/media/video/gspca/autogain_functions.c
new file mode 100644
index 000000000000..67db674bb044
--- /dev/null
+++ b/drivers/media/video/gspca/autogain_functions.c
@@ -0,0 +1,178 @@
+/*
+ * Functions for auto gain.
+ *
+ * Copyright (C) 2010-2012 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "gspca.h"
+
+/* auto gain and exposure algorithm based on the knee algorithm described here:
+ http://ytse.tricolour.net/docs/LowLightOptimization.html
+
+   Returns 0 if no changes were made, 1 if the gain and/or exposure settings
+   were changed. */
+int gspca_expo_autogain(
+ struct gspca_dev *gspca_dev,
+ int avg_lum,
+ int desired_avg_lum,
+ int deadzone,
+ int gain_knee,
+ int exposure_knee)
+{
+ s32 gain, orig_gain, exposure, orig_exposure;
+ int i, steps, retval = 0;
+
+ if (v4l2_ctrl_g_ctrl(gspca_dev->autogain) == 0)
+ return 0;
+
+ orig_gain = gain = v4l2_ctrl_g_ctrl(gspca_dev->gain);
+ orig_exposure = exposure = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
+
+	/* If we are off by a multiple of deadzone, do multiple steps to reach the
+	   desired luminance fast (with the risk of a slight overshoot) */
+ steps = abs(desired_avg_lum - avg_lum) / deadzone;
+
+ PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
+ avg_lum, desired_avg_lum, steps);
+
+ for (i = 0; i < steps; i++) {
+ if (avg_lum > desired_avg_lum) {
+ if (gain > gain_knee)
+ gain--;
+ else if (exposure > exposure_knee)
+ exposure--;
+ else if (gain > gspca_dev->gain->default_value)
+ gain--;
+ else if (exposure > gspca_dev->exposure->minimum)
+ exposure--;
+ else if (gain > gspca_dev->gain->minimum)
+ gain--;
+ else
+ break;
+ } else {
+ if (gain < gspca_dev->gain->default_value)
+ gain++;
+ else if (exposure < exposure_knee)
+ exposure++;
+ else if (gain < gain_knee)
+ gain++;
+ else if (exposure < gspca_dev->exposure->maximum)
+ exposure++;
+ else if (gain < gspca_dev->gain->maximum)
+ gain++;
+ else
+ break;
+ }
+ }
+
+ if (gain != orig_gain) {
+ v4l2_ctrl_s_ctrl(gspca_dev->gain, gain);
+ retval = 1;
+ }
+ if (exposure != orig_exposure) {
+ v4l2_ctrl_s_ctrl(gspca_dev->exposure, exposure);
+ retval = 1;
+ }
+
+ if (retval)
+ PDEBUG(D_FRAM, "autogain: changed gain: %d, expo: %d",
+ gain, exposure);
+ return retval;
+}
+EXPORT_SYMBOL(gspca_expo_autogain);
+
+/* Autogain + exposure algorithm for cameras with a coarse exposure control
+ (usually this means we can only control the clockdiv to change exposure)
+ As changing the clockdiv so that the fps drops from 30 to 15 fps for
+ example, will lead to a huge exposure change (it effectively doubles),
+ this algorithm normally tries to only adjust the gain (between 40 and
+ 80 %) and if that does not help, only then changes exposure. This leads
+ to a much more stable image than using the knee algorithm, which at
+ certain points of the knee graph will only try to adjust exposure,
+ which leads to oscillation as one exposure step is huge.
+
+ Returns 0 if no changes were made, 1 if the gain and/or exposure settings
+ were changed. */
+int gspca_coarse_grained_expo_autogain(
+ struct gspca_dev *gspca_dev,
+ int avg_lum,
+ int desired_avg_lum,
+ int deadzone)
+{
+ s32 gain_low, gain_high, gain, orig_gain, exposure, orig_exposure;
+ int steps, retval = 0;
+
+ if (v4l2_ctrl_g_ctrl(gspca_dev->autogain) == 0)
+ return 0;
+
+ orig_gain = gain = v4l2_ctrl_g_ctrl(gspca_dev->gain);
+ orig_exposure = exposure = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
+
+ gain_low = (gspca_dev->gain->maximum - gspca_dev->gain->minimum) /
+ 5 * 2 + gspca_dev->gain->minimum;
+ gain_high = (gspca_dev->gain->maximum - gspca_dev->gain->minimum) /
+ 5 * 4 + gspca_dev->gain->minimum;
+
+ /* If we are off by a multiple of deadzone, do multiple steps to reach the
+ desired luminance fast (with the risk of a slight overshoot) */
+ steps = (desired_avg_lum - avg_lum) / deadzone;
+
+ PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
+ avg_lum, desired_avg_lum, steps);
+
+ if ((gain + steps) > gain_high &&
+ exposure < gspca_dev->exposure->maximum) {
+ gain = gain_high;
+ gspca_dev->exp_too_low_cnt++;
+ gspca_dev->exp_too_high_cnt = 0;
+ } else if ((gain + steps) < gain_low &&
+ exposure > gspca_dev->exposure->minimum) {
+ gain = gain_low;
+ gspca_dev->exp_too_high_cnt++;
+ gspca_dev->exp_too_low_cnt = 0;
+ } else {
+ gain += steps;
+ if (gain > gspca_dev->gain->maximum)
+ gain = gspca_dev->gain->maximum;
+ else if (gain < gspca_dev->gain->minimum)
+ gain = gspca_dev->gain->minimum;
+ gspca_dev->exp_too_high_cnt = 0;
+ gspca_dev->exp_too_low_cnt = 0;
+ }
+
+ if (gspca_dev->exp_too_high_cnt > 3) {
+ exposure--;
+ gspca_dev->exp_too_high_cnt = 0;
+ } else if (gspca_dev->exp_too_low_cnt > 3) {
+ exposure++;
+ gspca_dev->exp_too_low_cnt = 0;
+ }
+
+ if (gain != orig_gain) {
+ v4l2_ctrl_s_ctrl(gspca_dev->gain, gain);
+ retval = 1;
+ }
+ if (exposure != orig_exposure) {
+ v4l2_ctrl_s_ctrl(gspca_dev->exposure, exposure);
+ retval = 1;
+ }
+
+ if (retval)
+ PDEBUG(D_FRAM, "autogain: changed gain: %d, expo: %d",
+ gain, exposure);
+ return retval;
+}
+EXPORT_SYMBOL(gspca_coarse_grained_expo_autogain);
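A similar hedged sketch for the coarse-grained variant; it relies on gspca_dev->autogain/exposure/gain having been set up in the subdriver's init_controls op and on the exp_too_low_cnt/exp_too_high_cnt counters now kept in struct gspca_dev. The avg_lum source and the constants are assumptions:

/* Sketch only: sensors whose exposure control is a clock divider should
 * prefer this variant. Example values: desired_avg_lum = 80, deadzone = 20. */
static void sd_do_autogain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	gspca_coarse_grained_expo_autogain(gspca_dev, sd->avg_lum, 80, 20);
}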
diff --git a/drivers/media/video/gspca/autogain_functions.h b/drivers/media/video/gspca/autogain_functions.h
index 46777eee678b..d625eafe63eb 100644
--- a/drivers/media/video/gspca/autogain_functions.h
+++ b/drivers/media/video/gspca/autogain_functions.h
@@ -18,6 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#ifdef WANT_REGULAR_AUTOGAIN
/* auto gain and exposure algorithm based on the knee algorithm described here:
http://ytse.tricolour.net/docs/LowLightOptimization.html
@@ -91,7 +92,9 @@ static inline int auto_gain_n_exposure(
gain, exposure);
return retval;
}
+#endif
+#ifdef WANT_COARSE_EXPO_AUTOGAIN
/* Autogain + exposure algorithm for cameras with a coarse exposure control
(usually this means we can only control the clockdiv to change exposure)
As changing the clockdiv so that the fps drops from 30 to 15 fps for
@@ -103,7 +106,7 @@ static inline int auto_gain_n_exposure(
which leads to oscilating as one exposure step is huge.
Note this assumes that the sd struct for the cam in question has
- exp_too_high_cnt and exp_too_high_cnt int members for use by this function.
+ exp_too_low_cnt and exp_too_high_cnt int members for use by this function.
Returns 0 if no changes were made, 1 if the gain and or exposure settings
where changed. */
@@ -177,3 +180,4 @@ static inline int coarse_grained_expo_autogain(
gain, exposure);
return retval;
}
+#endif
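The header is now opt-in per algorithm: a subdriver selects the variant(s) it needs before including it, as the nw80x.c hunk later in this patch does:

#define WANT_REGULAR_AUTOGAIN
#define WANT_COARSE_EXPO_AUTOGAIN
#include "autogain_functions.h"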
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index ea17b5d94ea4..f39fee0fd10f 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -306,7 +306,7 @@ static void cx_sensor(struct gspca_dev*gspca_dev)
reg_w(gspca_dev, 0x0020, reg20, 8);
reg_w(gspca_dev, 0x0028, reg28, 8);
- reg_w(gspca_dev, 0x0010, reg10, 8);
+ reg_w(gspca_dev, 0x0010, reg10, 2);
reg_w_val(gspca_dev, 0x0092, 0x03);
switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
@@ -326,7 +326,7 @@ static void cx_sensor(struct gspca_dev*gspca_dev)
}
reg_w(gspca_dev, 0x007b, reg7b, 6);
reg_w_val(gspca_dev, 0x00f8, 0x00);
- reg_w(gspca_dev, 0x0010, reg10, 8);
+ reg_w(gspca_dev, 0x0010, reg10, 2);
reg_w_val(gspca_dev, 0x0098, 0x41);
for (i = 0; i < 11; i++) {
if (i == 3 || i == 5 || i == 8)
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 0107513cd728..6e26c93b4656 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -94,7 +94,11 @@ static void dostream(struct work_struct *work)
/* loop reading a frame */
again:
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
/* request a frame */
mutex_lock(&gspca_dev->usb_lock);
@@ -102,7 +106,11 @@ again:
mutex_unlock(&gspca_dev->usb_lock);
if (ret < 0)
break;
- if (!gspca_dev->present || !gspca_dev->streaming)
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
+ if (!gspca_dev->dev || !gspca_dev->streaming)
break;
/* the frame comes in parts */
@@ -117,7 +125,11 @@ again:
* error. Just restart. */
goto again;
}
- if (!gspca_dev->present || !gspca_dev->streaming)
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ goto out;
+#endif
+ if (!gspca_dev->dev || !gspca_dev->streaming)
goto out;
if (len < FPIX_MAX_TRANSFER ||
(data[len - 2] == 0xff &&
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index c84e26006fc3..c549574c1c7e 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -405,6 +405,9 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ if (!sd->gspca_dev.present)
+ return;
+
return sd->dev_post_unset_alt(gspca_dev);
}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index ca5a2b139d0b..31721eadc597 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -38,6 +38,9 @@
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include "gspca.h"
@@ -592,16 +595,13 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
static void gspca_stream_off(struct gspca_dev *gspca_dev)
{
gspca_dev->streaming = 0;
- if (gspca_dev->present) {
- if (gspca_dev->sd_desc->stopN)
- gspca_dev->sd_desc->stopN(gspca_dev);
- destroy_urbs(gspca_dev);
- gspca_input_destroy_urb(gspca_dev);
- gspca_set_alt0(gspca_dev);
- gspca_input_create_urb(gspca_dev);
- }
-
- /* always call stop0 to free the subdriver's resources */
+ gspca_dev->usb_err = 0;
+ if (gspca_dev->sd_desc->stopN)
+ gspca_dev->sd_desc->stopN(gspca_dev);
+ destroy_urbs(gspca_dev);
+ gspca_input_destroy_urb(gspca_dev);
+ gspca_set_alt0(gspca_dev);
+ gspca_input_create_urb(gspca_dev);
if (gspca_dev->sd_desc->stop0)
gspca_dev->sd_desc->stop0(gspca_dev);
PDEBUG(D_STREAM, "stream off OK");
@@ -847,14 +847,6 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
struct ep_tb_s ep_tb[MAX_ALT];
int n, ret, xfer, alt, alt_idx;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
-
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto unlock;
- }
-
/* reset the streaming variables */
gspca_dev->image = NULL;
gspca_dev->image_len = 0;
@@ -869,7 +861,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
if (gspca_dev->sd_desc->isoc_init) {
ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
if (ret < 0)
- goto unlock;
+ return ret;
}
xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
: USB_ENDPOINT_XFER_ISOC;
@@ -880,8 +872,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
ep = alt_xfer(&intf->altsetting[gspca_dev->alt], xfer);
if (ep == NULL) {
pr_err("bad altsetting %d\n", gspca_dev->alt);
- ret = -EIO;
- goto out;
+ return -EIO;
}
ep_tb[0].alt = gspca_dev->alt;
alt_idx = 1;
@@ -892,8 +883,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
alt_idx = build_isoc_ep_tb(gspca_dev, intf, ep_tb);
if (alt_idx <= 0) {
pr_err("no transfer endpoint found\n");
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
}
@@ -988,8 +978,6 @@ retry:
}
out:
gspca_input_create_urb(gspca_dev);
-unlock:
- mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
@@ -1006,6 +994,8 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
/* set the current control values to their default values
* which may have changed in sd_init() */
+ /* does nothing if ctrl_handler == NULL */
+ v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
ctrl = gspca_dev->cam.ctrls;
if (ctrl != NULL) {
for (i = 0;
@@ -1057,77 +1047,50 @@ static int gspca_get_mode(struct gspca_dev *gspca_dev,
static int vidioc_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- int ret;
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->get_chip_ident)
- return -EINVAL;
+ return -ENOTTY;
if (!gspca_dev->sd_desc->get_register)
- return -EINVAL;
+ return -ENOTTY;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->get_register(gspca_dev, reg);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
-
- return ret;
+ return gspca_dev->sd_desc->get_register(gspca_dev, reg);
}
static int vidioc_s_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- int ret;
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->get_chip_ident)
- return -EINVAL;
+ return -ENOTTY;
if (!gspca_dev->sd_desc->set_register)
- return -EINVAL;
+ return -ENOTTY;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->set_register(gspca_dev, reg);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
-
- return ret;
+ return gspca_dev->sd_desc->set_register(gspca_dev, reg);
}
#endif
static int vidioc_g_chip_ident(struct file *file, void *priv,
struct v4l2_dbg_chip_ident *chip)
{
- int ret;
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->get_chip_ident)
- return -EINVAL;
+ return -ENOTTY;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
-
- return ret;
+ return gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip);
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *fmtdesc)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int i, j, index;
__u32 fmt_tb[8];
@@ -1169,7 +1132,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int mode;
mode = gspca_dev->curr_mode;
@@ -1214,7 +1177,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file,
void *priv,
struct v4l2_format *fmt)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int ret;
ret = try_fmt_vid_cap(gspca_dev, fmt);
@@ -1226,7 +1189,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int ret;
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
@@ -1265,7 +1228,7 @@ out:
static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int i;
__u32 index = 0;
@@ -1291,7 +1254,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
static int vidioc_enum_frameintervals(struct file *filp, void *priv,
struct v4l2_frmivalenum *fival)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(filp);
int mode = wxh_to_mode(gspca_dev, fival->width, fival->height);
__u32 i;
@@ -1316,31 +1279,30 @@ static int vidioc_enum_frameintervals(struct file *filp, void *priv,
return -EINVAL;
}
-static void gspca_release(struct video_device *vfd)
+static void gspca_release(struct v4l2_device *v4l2_device)
{
- struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev);
+ struct gspca_dev *gspca_dev =
+ container_of(v4l2_device, struct gspca_dev, v4l2_dev);
PDEBUG(D_PROBE, "%s released",
video_device_node_name(&gspca_dev->vdev));
+ v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
+ v4l2_device_unregister(&gspca_dev->v4l2_dev);
kfree(gspca_dev->usb_buf);
kfree(gspca_dev);
}
static int dev_open(struct file *file)
{
- struct gspca_dev *gspca_dev;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
PDEBUG(D_STREAM, "[%s] open", current->comm);
- gspca_dev = (struct gspca_dev *) video_devdata(file);
- if (!gspca_dev->present)
- return -ENODEV;
/* protect the subdriver against rmmod */
if (!try_module_get(gspca_dev->module))
return -ENODEV;
- file->private_data = gspca_dev;
#ifdef GSPCA_DEBUG
/* activate the v4l2 debug */
if (gspca_debug & D_V4L2)
@@ -1350,49 +1312,44 @@ static int dev_open(struct file *file)
gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL
| V4L2_DEBUG_IOCTL_ARG);
#endif
- return 0;
+ return v4l2_fh_open(file);
}
static int dev_close(struct file *file)
{
- struct gspca_dev *gspca_dev = file->private_data;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
PDEBUG(D_STREAM, "[%s] close", current->comm);
- if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+
+ /* Needed for gspca_stream_off, always lock before queue_lock! */
+ if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock)) {
+ mutex_unlock(&gspca_dev->usb_lock);
+ return -ERESTARTSYS;
+ }
+
/* if the file did the capture, free the streaming resources */
if (gspca_dev->capt_file == file) {
- if (gspca_dev->streaming) {
- mutex_lock(&gspca_dev->usb_lock);
- gspca_dev->usb_err = 0;
+ if (gspca_dev->streaming)
gspca_stream_off(gspca_dev);
- mutex_unlock(&gspca_dev->usb_lock);
- }
frame_free(gspca_dev);
}
- file->private_data = NULL;
module_put(gspca_dev->module);
mutex_unlock(&gspca_dev->queue_lock);
+ mutex_unlock(&gspca_dev->usb_lock);
PDEBUG(D_STREAM, "close done");
- return 0;
+ return v4l2_fh_release(file);
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct gspca_dev *gspca_dev = priv;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
- /* protect the access to the usb device */
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
strlcpy((char *) cap->driver, gspca_dev->sd_desc->name,
sizeof cap->driver);
if (gspca_dev->dev->product != NULL) {
@@ -1406,13 +1363,11 @@ static int vidioc_querycap(struct file *file, void *priv,
}
usb_make_path(gspca_dev->dev, (char *) cap->bus_info,
sizeof(cap->bus_info));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE
| V4L2_CAP_STREAMING
| V4L2_CAP_READWRITE;
- ret = 0;
-out:
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
}
static int get_ctrl(struct gspca_dev *gspca_dev,
@@ -1435,7 +1390,7 @@ static int get_ctrl(struct gspca_dev *gspca_dev,
static int vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *q_ctrl)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
const struct ctrl *ctrls;
struct gspca_ctrl *gspca_ctrl;
int i, idx;
@@ -1478,10 +1433,10 @@ static int vidioc_queryctrl(struct file *file, void *priv,
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
const struct ctrl *ctrls;
struct gspca_ctrl *gspca_ctrl;
- int idx, ret;
+ int idx;
idx = get_ctrl(gspca_dev, ctrl->id);
if (idx < 0)
@@ -1501,74 +1456,52 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
return -ERANGE;
}
PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
gspca_dev->usb_err = 0;
- if (ctrls->set != NULL) {
- ret = ctrls->set(gspca_dev, ctrl->value);
- goto out;
- }
+ if (ctrls->set != NULL)
+ return ctrls->set(gspca_dev, ctrl->value);
if (gspca_ctrl != NULL) {
gspca_ctrl->val = ctrl->value;
if (ctrls->set_control != NULL
&& gspca_dev->streaming)
ctrls->set_control(gspca_dev);
}
- ret = gspca_dev->usb_err;
-out:
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ return gspca_dev->usb_err;
}
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
const struct ctrl *ctrls;
- int idx, ret;
+ int idx;
idx = get_ctrl(gspca_dev, ctrl->id);
if (idx < 0)
return -EINVAL;
ctrls = &gspca_dev->sd_desc->ctrls[idx];
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
gspca_dev->usb_err = 0;
- if (ctrls->get != NULL) {
- ret = ctrls->get(gspca_dev, &ctrl->value);
- goto out;
- }
+ if (ctrls->get != NULL)
+ return ctrls->get(gspca_dev, &ctrl->value);
if (gspca_dev->cam.ctrls != NULL)
ctrl->value = gspca_dev->cam.ctrls[idx].val;
- ret = 0;
-out:
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ return 0;
}
static int vidioc_querymenu(struct file *file, void *priv,
struct v4l2_querymenu *qmenu)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->querymenu)
- return -EINVAL;
+ return -ENOTTY;
return gspca_dev->sd_desc->querymenu(gspca_dev, qmenu);
}
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (input->index != 0)
return -EINVAL;
@@ -1595,7 +1528,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int i, ret = 0, streaming;
i = rb->memory; /* (avoid compilation warning) */
@@ -1635,10 +1568,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
/* stop streaming */
streaming = gspca_dev->streaming;
if (streaming) {
- mutex_lock(&gspca_dev->usb_lock);
- gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
- mutex_unlock(&gspca_dev->usb_lock);
/* Don't restart the stream when switching from read
* to mmap mode */
@@ -1666,7 +1596,7 @@ out:
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *v4l2_buf)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
if (v4l2_buf->index < 0
@@ -1681,7 +1611,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type buf_type)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
int ret;
if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1722,8 +1652,8 @@ out:
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type buf_type)
{
- struct gspca_dev *gspca_dev = priv;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
+ int i, ret;
if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1743,17 +1673,13 @@ static int vidioc_streamoff(struct file *file, void *priv,
}
/* stop streaming */
- if (mutex_lock_interruptible(&gspca_dev->usb_lock)) {
- ret = -ERESTARTSYS;
- goto out;
- }
- gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
- mutex_unlock(&gspca_dev->usb_lock);
/* In case another thread is waiting in dqbuf */
wake_up_interruptible(&gspca_dev->wq);
/* empty the transfer queues */
+ for (i = 0; i < gspca_dev->nframes; i++)
+ gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
atomic_set(&gspca_dev->fr_q, 0);
atomic_set(&gspca_dev->fr_i, 0);
gspca_dev->fr_o = 0;
@@ -1766,71 +1692,44 @@ out:
static int vidioc_g_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jpegcomp)
{
- struct gspca_dev *gspca_dev = priv;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->get_jcomp)
- return -EINVAL;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
+ return -ENOTTY;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ return gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp);
}
static int vidioc_s_jpegcomp(struct file *file, void *priv,
struct v4l2_jpegcompression *jpegcomp)
{
- struct gspca_dev *gspca_dev = priv;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
if (!gspca_dev->sd_desc->set_jcomp)
- return -EINVAL;
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
+ return -ENOTTY;
gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- ret = gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp);
- else
- ret = -ENODEV;
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ return gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp);
}
static int vidioc_g_parm(struct file *filp, void *priv,
struct v4l2_streamparm *parm)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(filp);
parm->parm.capture.readbuffers = gspca_dev->nbufread;
if (gspca_dev->sd_desc->get_streamparm) {
- int ret;
-
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (gspca_dev->present) {
- gspca_dev->usb_err = 0;
- gspca_dev->sd_desc->get_streamparm(gspca_dev, parm);
- ret = gspca_dev->usb_err;
- } else {
- ret = -ENODEV;
- }
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ gspca_dev->usb_err = 0;
+ gspca_dev->sd_desc->get_streamparm(gspca_dev, parm);
+ return gspca_dev->usb_err;
}
-
return 0;
}
static int vidioc_s_parm(struct file *filp, void *priv,
struct v4l2_streamparm *parm)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(filp);
int n;
n = parm->parm.capture.readbuffers;
@@ -1840,19 +1739,9 @@ static int vidioc_s_parm(struct file *filp, void *priv,
gspca_dev->nbufread = n;
if (gspca_dev->sd_desc->set_streamparm) {
- int ret;
-
- if (mutex_lock_interruptible(&gspca_dev->usb_lock))
- return -ERESTARTSYS;
- if (gspca_dev->present) {
- gspca_dev->usb_err = 0;
- gspca_dev->sd_desc->set_streamparm(gspca_dev, parm);
- ret = gspca_dev->usb_err;
- } else {
- ret = -ENODEV;
- }
- mutex_unlock(&gspca_dev->usb_lock);
- return ret;
+ gspca_dev->usb_err = 0;
+ gspca_dev->sd_desc->set_streamparm(gspca_dev, parm);
+ return gspca_dev->usb_err;
}
return 0;
@@ -1860,7 +1749,7 @@ static int vidioc_s_parm(struct file *filp, void *priv,
static int dev_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct gspca_dev *gspca_dev = file->private_data;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
struct page *page;
unsigned long addr, start, size;
@@ -1872,10 +1761,6 @@ static int dev_mmap(struct file *file, struct vm_area_struct *vma)
if (mutex_lock_interruptible(&gspca_dev->queue_lock))
return -ERESTARTSYS;
- if (!gspca_dev->present) {
- ret = -ENODEV;
- goto out;
- }
if (gspca_dev->capt_file != file) {
ret = -EINVAL;
goto out;
@@ -1963,7 +1848,7 @@ static int frame_ready(struct gspca_dev *gspca_dev, struct file *file,
static int vidioc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *v4l2_buf)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
int i, j, ret;
@@ -2003,14 +1888,6 @@ static int vidioc_dqbuf(struct file *file, void *priv,
gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
- if (gspca_dev->sd_desc->dq_callback) {
- mutex_lock(&gspca_dev->usb_lock);
- gspca_dev->usb_err = 0;
- if (gspca_dev->present)
- gspca_dev->sd_desc->dq_callback(gspca_dev);
- mutex_unlock(&gspca_dev->usb_lock);
- }
-
frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
PDEBUG(D_FRAM, "dqbuf %d", j);
@@ -2027,6 +1904,15 @@ static int vidioc_dqbuf(struct file *file, void *priv,
}
out:
mutex_unlock(&gspca_dev->queue_lock);
+
+ if (ret == 0 && gspca_dev->sd_desc->dq_callback) {
+ mutex_lock(&gspca_dev->usb_lock);
+ gspca_dev->usb_err = 0;
+ if (gspca_dev->present)
+ gspca_dev->sd_desc->dq_callback(gspca_dev);
+ mutex_unlock(&gspca_dev->usb_lock);
+ }
+
return ret;
}
@@ -2039,7 +1925,7 @@ out:
static int vidioc_qbuf(struct file *file, void *priv,
struct v4l2_buffer *v4l2_buf)
{
- struct gspca_dev *gspca_dev = priv;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
int i, index, ret;
@@ -2098,6 +1984,10 @@ static int read_alloc(struct gspca_dev *gspca_dev,
int i, ret;
PDEBUG(D_STREAM, "read alloc");
+
+ if (mutex_lock_interruptible(&gspca_dev->usb_lock))
+ return -ERESTARTSYS;
+
if (gspca_dev->nframes == 0) {
struct v4l2_requestbuffers rb;
@@ -2108,7 +1998,7 @@ static int read_alloc(struct gspca_dev *gspca_dev,
ret = vidioc_reqbufs(file, gspca_dev, &rb);
if (ret != 0) {
PDEBUG(D_STREAM, "read reqbuf err %d", ret);
- return ret;
+ goto out;
}
memset(&v4l2_buf, 0, sizeof v4l2_buf);
v4l2_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -2118,61 +2008,69 @@ static int read_alloc(struct gspca_dev *gspca_dev,
ret = vidioc_qbuf(file, gspca_dev, &v4l2_buf);
if (ret != 0) {
PDEBUG(D_STREAM, "read qbuf err: %d", ret);
- return ret;
+ goto out;
}
}
- gspca_dev->memory = GSPCA_MEMORY_READ;
}
/* start streaming */
ret = vidioc_streamon(file, gspca_dev, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (ret != 0)
PDEBUG(D_STREAM, "read streamon err %d", ret);
+out:
+ mutex_unlock(&gspca_dev->usb_lock);
return ret;
}
static unsigned int dev_poll(struct file *file, poll_table *wait)
{
- struct gspca_dev *gspca_dev = file->private_data;
- int ret;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
+ unsigned long req_events = poll_requested_events(wait);
+ int ret = 0;
PDEBUG(D_FRAM, "poll");
- poll_wait(file, &gspca_dev->wq, wait);
+ if (req_events & POLLPRI)
+ ret |= v4l2_ctrl_poll(file, wait);
- /* if reqbufs is not done, the user would use read() */
- if (gspca_dev->memory == GSPCA_MEMORY_NO) {
- ret = read_alloc(gspca_dev, file);
- if (ret != 0)
- return POLLERR;
- }
+ if (req_events & (POLLIN | POLLRDNORM)) {
+ /* if reqbufs is not done, the user would use read() */
+ if (gspca_dev->memory == GSPCA_MEMORY_NO) {
+ if (read_alloc(gspca_dev, file) != 0) {
+ ret |= POLLERR;
+ goto out;
+ }
+ }
- if (mutex_lock_interruptible(&gspca_dev->queue_lock) != 0)
- return POLLERR;
+ poll_wait(file, &gspca_dev->wq, wait);
- /* check if an image has been received */
- if (gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i))
- ret = POLLIN | POLLRDNORM; /* yes */
- else
- ret = 0;
- mutex_unlock(&gspca_dev->queue_lock);
+ /* check if an image has been received */
+ if (mutex_lock_interruptible(&gspca_dev->queue_lock) != 0) {
+ ret |= POLLERR;
+ goto out;
+ }
+ if (gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i))
+ ret |= POLLIN | POLLRDNORM;
+ mutex_unlock(&gspca_dev->queue_lock);
+ }
+
+out:
if (!gspca_dev->present)
- return POLLHUP;
+ ret |= POLLHUP;
+
return ret;
}
static ssize_t dev_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
- struct gspca_dev *gspca_dev = file->private_data;
+ struct gspca_dev *gspca_dev = video_drvdata(file);
struct gspca_frame *frame;
struct v4l2_buffer v4l2_buf;
struct timeval timestamp;
int n, ret, ret2;
PDEBUG(D_FRAM, "read (%zd)", count);
- if (!gspca_dev->present)
- return -ENODEV;
if (gspca_dev->memory == GSPCA_MEMORY_NO) { /* first time ? */
ret = read_alloc(gspca_dev, file);
if (ret != 0)
@@ -2266,13 +2164,15 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = {
.vidioc_s_register = vidioc_s_register,
#endif
.vidioc_g_chip_ident = vidioc_g_chip_ident,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct video_device gspca_template = {
.name = "gspca main driver",
.fops = &dev_fops,
.ioctl_ops = &dev_ioctl_ops,
- .release = gspca_release,
+ .release = video_device_release_empty, /* We use v4l2_dev.release */
};
/* initialize the controls */
@@ -2344,9 +2244,24 @@ int gspca_dev_probe2(struct usb_interface *intf,
}
}
+ gspca_dev->v4l2_dev.release = gspca_release;
+ ret = v4l2_device_register(&intf->dev, &gspca_dev->v4l2_dev);
+ if (ret)
+ goto out;
gspca_dev->sd_desc = sd_desc;
gspca_dev->nbufread = 2;
gspca_dev->empty_packet = -1; /* don't check the empty packets */
+ gspca_dev->vdev = gspca_template;
+ gspca_dev->vdev.v4l2_dev = &gspca_dev->v4l2_dev;
+ video_set_drvdata(&gspca_dev->vdev, gspca_dev);
+ set_bit(V4L2_FL_USE_FH_PRIO, &gspca_dev->vdev.flags);
+ gspca_dev->module = module;
+ gspca_dev->present = 1;
+
+ mutex_init(&gspca_dev->usb_lock);
+ gspca_dev->vdev.lock = &gspca_dev->usb_lock;
+ mutex_init(&gspca_dev->queue_lock);
+ init_waitqueue_head(&gspca_dev->wq);
/* configure the subdriver and initialize the USB device */
ret = sd_desc->config(gspca_dev, id);
@@ -2357,21 +2272,26 @@ int gspca_dev_probe2(struct usb_interface *intf,
ret = sd_desc->init(gspca_dev);
if (ret < 0)
goto out;
+ if (sd_desc->init_controls)
+ ret = sd_desc->init_controls(gspca_dev);
+ if (ret < 0)
+ goto out;
gspca_set_default_mode(gspca_dev);
ret = gspca_input_connect(gspca_dev);
if (ret)
goto out;
- mutex_init(&gspca_dev->usb_lock);
- mutex_init(&gspca_dev->queue_lock);
- init_waitqueue_head(&gspca_dev->wq);
+ /*
+ * Don't take usb_lock for these ioctls. This improves latency if
+ * usb_lock is taken for a long time, e.g. when changing a control
+ * value, and a new frame is ready to be dequeued.
+ */
+ v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_DQBUF);
+ v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_QBUF);
+ v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_QUERYBUF);
/* init video stuff */
- memcpy(&gspca_dev->vdev, &gspca_template, sizeof gspca_template);
- gspca_dev->vdev.parent = &intf->dev;
- gspca_dev->module = module;
- gspca_dev->present = 1;
ret = video_register_device(&gspca_dev->vdev,
VFL_TYPE_GRABBER,
-1);
@@ -2391,6 +2311,7 @@ out:
if (gspca_dev->input_dev)
input_unregister_device(gspca_dev->input_dev);
#endif
+ v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
kfree(gspca_dev->usb_buf);
kfree(gspca_dev);
return ret;
@@ -2437,11 +2358,12 @@ void gspca_disconnect(struct usb_interface *intf)
PDEBUG(D_PROBE, "%s disconnect",
video_device_node_name(&gspca_dev->vdev));
+
mutex_lock(&gspca_dev->usb_lock);
+ usb_set_intfdata(intf, NULL);
+ gspca_dev->dev = NULL;
gspca_dev->present = 0;
- wake_up_interruptible(&gspca_dev->wq);
-
destroy_urbs(gspca_dev);
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
@@ -2452,18 +2374,19 @@ void gspca_disconnect(struct usb_interface *intf)
input_unregister_device(input_dev);
}
#endif
+ /* Free subdriver's streaming resources / stop sd workqueue(s) */
+ if (gspca_dev->sd_desc->stop0 && gspca_dev->streaming)
+ gspca_dev->sd_desc->stop0(gspca_dev);
+ gspca_dev->streaming = 0;
+ wake_up_interruptible(&gspca_dev->wq);
- /* the device is freed at exit of this function */
- gspca_dev->dev = NULL;
- mutex_unlock(&gspca_dev->usb_lock);
+ v4l2_device_disconnect(&gspca_dev->v4l2_dev);
+ video_unregister_device(&gspca_dev->vdev);
- usb_set_intfdata(intf, NULL);
+ mutex_unlock(&gspca_dev->usb_lock);
- /* release the device */
/* (this will call gspca_release() immediately or on last close) */
- video_unregister_device(&gspca_dev->vdev);
-
-/* PDEBUG(D_PROBE, "disconnect complete"); */
+ v4l2_device_put(&gspca_dev->v4l2_dev);
}
EXPORT_SYMBOL(gspca_disconnect);
@@ -2474,7 +2397,9 @@ int gspca_suspend(struct usb_interface *intf, pm_message_t message)
if (!gspca_dev->streaming)
return 0;
+ mutex_lock(&gspca_dev->usb_lock);
gspca_dev->frozen = 1; /* avoid urb error messages */
+ gspca_dev->usb_err = 0;
if (gspca_dev->sd_desc->stopN)
gspca_dev->sd_desc->stopN(gspca_dev);
destroy_urbs(gspca_dev);
@@ -2482,6 +2407,7 @@ int gspca_suspend(struct usb_interface *intf, pm_message_t message)
gspca_set_alt0(gspca_dev);
if (gspca_dev->sd_desc->stop0)
gspca_dev->sd_desc->stop0(gspca_dev);
+ mutex_unlock(&gspca_dev->usb_lock);
return 0;
}
EXPORT_SYMBOL(gspca_suspend);
@@ -2489,105 +2415,28 @@ EXPORT_SYMBOL(gspca_suspend);
int gspca_resume(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
+ int streaming, ret = 0;
+ mutex_lock(&gspca_dev->usb_lock);
gspca_dev->frozen = 0;
+ gspca_dev->usb_err = 0;
gspca_dev->sd_desc->init(gspca_dev);
gspca_input_create_urb(gspca_dev);
- if (gspca_dev->streaming)
- return gspca_init_transfer(gspca_dev);
- return 0;
+ /*
+ * Most subdrivers send all ctrl values on sd_start and thus
+ * only write to the device registers on s_ctrl when streaming, so
+ * clear streaming here to avoid setting all ctrls twice.
+ */
+ streaming = gspca_dev->streaming;
+ gspca_dev->streaming = 0;
+ v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
+ if (streaming)
+ ret = gspca_init_transfer(gspca_dev);
+ mutex_unlock(&gspca_dev->usb_lock);
+ return ret;
}
EXPORT_SYMBOL(gspca_resume);
#endif
-/* -- cam driver utility functions -- */
-
-/* auto gain and exposure algorithm based on the knee algorithm described here:
- http://ytse.tricolour.net/docs/LowLightOptimization.html
-
- Returns 0 if no changes were made, 1 if the gain and or exposure settings
- where changed. */
-int gspca_auto_gain_n_exposure(struct gspca_dev *gspca_dev, int avg_lum,
- int desired_avg_lum, int deadzone, int gain_knee, int exposure_knee)
-{
- int i, steps, gain, orig_gain, exposure, orig_exposure, autogain;
- const struct ctrl *gain_ctrl = NULL;
- const struct ctrl *exposure_ctrl = NULL;
- const struct ctrl *autogain_ctrl = NULL;
- int retval = 0;
-
- for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
- if (gspca_dev->ctrl_dis & (1 << i))
- continue;
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN)
- gain_ctrl = &gspca_dev->sd_desc->ctrls[i];
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE)
- exposure_ctrl = &gspca_dev->sd_desc->ctrls[i];
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_AUTOGAIN)
- autogain_ctrl = &gspca_dev->sd_desc->ctrls[i];
- }
- if (!gain_ctrl || !exposure_ctrl || !autogain_ctrl) {
- PDEBUG(D_ERR, "Error: gspca_auto_gain_n_exposure called "
- "on cam without (auto)gain/exposure");
- return 0;
- }
-
- if (gain_ctrl->get(gspca_dev, &gain) ||
- exposure_ctrl->get(gspca_dev, &exposure) ||
- autogain_ctrl->get(gspca_dev, &autogain) || !autogain)
- return 0;
-
- orig_gain = gain;
- orig_exposure = exposure;
-
- /* If we are of a multiple of deadzone, do multiple steps to reach the
- desired lumination fast (with the risc of a slight overshoot) */
- steps = abs(desired_avg_lum - avg_lum) / deadzone;
-
- PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
- avg_lum, desired_avg_lum, steps);
-
- for (i = 0; i < steps; i++) {
- if (avg_lum > desired_avg_lum) {
- if (gain > gain_knee)
- gain--;
- else if (exposure > exposure_knee)
- exposure--;
- else if (gain > gain_ctrl->qctrl.default_value)
- gain--;
- else if (exposure > exposure_ctrl->qctrl.minimum)
- exposure--;
- else if (gain > gain_ctrl->qctrl.minimum)
- gain--;
- else
- break;
- } else {
- if (gain < gain_ctrl->qctrl.default_value)
- gain++;
- else if (exposure < exposure_knee)
- exposure++;
- else if (gain < gain_knee)
- gain++;
- else if (exposure < exposure_ctrl->qctrl.maximum)
- exposure++;
- else if (gain < gain_ctrl->qctrl.maximum)
- gain++;
- else
- break;
- }
- }
-
- if (gain != orig_gain) {
- gain_ctrl->set(gspca_dev, gain);
- retval = 1;
- }
- if (exposure != orig_exposure) {
- exposure_ctrl->set(gspca_dev, exposure);
- retval = 1;
- }
-
- return retval;
-}
-EXPORT_SYMBOL(gspca_auto_gain_n_exposure);
/* -- module insert / remove -- */
static int __init gspca_init(void)
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 589009f4496f..dc688c7f5e48 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -6,6 +6,8 @@
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
#include <linux/mutex.h>
/* compilation option */
@@ -115,6 +117,7 @@ struct sd_desc {
/* mandatory operations */
cam_cf_op config; /* called on probe */
cam_op init; /* called on probe and resume */
+ cam_op init_controls; /* called on probe */
cam_op start; /* called on stream on after URBs creation */
cam_pkt_op pkt_scan;
/* optional operations */
@@ -158,8 +161,10 @@ struct gspca_frame {
struct gspca_dev {
struct video_device vdev; /* !! must be the first item */
struct module *module; /* subdriver handling the device */
+ struct v4l2_device v4l2_dev;
struct usb_device *dev;
struct file *capt_file; /* file doing video capture */
+ /* protected by queue_lock */
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
struct input_dev *input_dev;
char phys[64]; /* physical device path */
@@ -169,6 +174,16 @@ struct gspca_dev {
const struct sd_desc *sd_desc; /* subdriver description */
unsigned ctrl_dis; /* disabled controls (bit map) */
unsigned ctrl_inac; /* inactive controls (bit map) */
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* autogain and exposure or gain control cluster; these are global
+ because the autogain/exposure functions in autogain_functions.c use them */
+ struct {
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ int exp_too_low_cnt, exp_too_high_cnt;
+ };
#define USB_BUF_SZ 64
__u8 *usb_buf; /* buffer for USB exchanges */
@@ -189,7 +204,7 @@ struct gspca_dev {
u8 fr_o; /* next frame to dequeue */
__u8 last_packet_type;
__s8 empty_packet; /* if (-1) don't check empty packets */
- __u8 streaming;
+ __u8 streaming; /* protected by both mutexes (*) */
__u8 curr_mode; /* current camera mode */
__u32 pixfmt; /* current mode parameters */
@@ -211,6 +226,10 @@ struct gspca_dev {
__u8 iface; /* USB interface number */
__u8 alt; /* USB alternate setting */
u8 audio; /* presence of audio device */
+
+ /* (*) These variables are protected by both usb_lock and queue_lock,
+ that is, any code setting them is holding *both*, which means that
+ any code getting them needs to hold at least one of them */
};
int gspca_dev_probe(struct usb_interface *intf,
@@ -232,6 +251,9 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
int gspca_suspend(struct usb_interface *intf, pm_message_t message);
int gspca_resume(struct usb_interface *intf);
#endif
-int gspca_auto_gain_n_exposure(struct gspca_dev *gspca_dev, int avg_lum,
+int gspca_expo_autogain(struct gspca_dev *gspca_dev, int avg_lum,
int desired_avg_lum, int deadzone, int gain_knee, int exposure_knee);
+int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev,
+ int avg_lum, int desired_avg_lum, int deadzone);
+
#endif /* GSPCAV2_H */
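To make the new autogain/exposure/gain cluster concrete, here is a hedged sketch of an init_controls op a subdriver could register; sd_ctrl_ops and the control ranges are placeholders, not values from this patch:

/* Sketch only: wires up the cluster consumed by the gspca_*_autogain
 * helpers. Ranges and sd_ctrl_ops are placeholders. */
static int sd_init_controls(struct gspca_dev *gspca_dev)
{
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 3);
	gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_EXPOSURE, 0, 255, 1, 127);
	gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_GAIN, 0, 63, 1, 20);
	if (hdl->error)
		return hdl->error;
	/* autogain is the cluster master; exposure and gain follow it */
	v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
	return 0;
}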
diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c
index 53f58ef367cf..9c591c7c6f54 100644
--- a/drivers/media/video/gspca/jl2005bcd.c
+++ b/drivers/media/video/gspca/jl2005bcd.c
@@ -335,7 +335,11 @@ static void jl2005c_dostream(struct work_struct *work)
goto quit_stream;
}
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
/* Check if this is a new frame. If so, start the frame first */
if (!header_read) {
mutex_lock(&gspca_dev->usb_lock);
@@ -367,7 +371,7 @@ static void jl2005c_dostream(struct work_struct *work)
buffer, act_len);
header_read = 1;
}
- while (bytes_left > 0 && gspca_dev->present) {
+ while (bytes_left > 0 && gspca_dev->dev) {
data_len = bytes_left > JL2005C_MAX_TRANSFER ?
JL2005C_MAX_TRANSFER : bytes_left;
ret = usb_bulk_msg(gspca_dev->dev,
@@ -390,7 +394,7 @@ static void jl2005c_dostream(struct work_struct *work)
}
}
quit_stream:
- if (gspca_dev->present) {
+ if (gspca_dev->dev) {
mutex_lock(&gspca_dev->usb_lock);
jl2005c_stop(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index b0231465afae..ec7b21ee79fb 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -30,22 +30,19 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/Mars USB Camera Driver");
MODULE_LICENSE("GPL");
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- COLORS,
- GAMMA,
- SHARPNESS,
- ILLUM_TOP,
- ILLUM_BOT,
- NCTRLS /* number of controls */
-};
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *gamma;
+ struct { /* illuminator control cluster */
+ struct v4l2_ctrl *illum_top;
+ struct v4l2_ctrl *illum_bottom;
+ };
+ struct v4l2_ctrl *jpegqual;
u8 quality;
#define QUALITY_MIN 40
@@ -56,89 +53,10 @@ struct sd {
};
/* V4L2 controls supported by the driver */
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setcolors(struct gspca_dev *gspca_dev);
-static void setgamma(struct gspca_dev *gspca_dev);
-static void setsharpness(struct gspca_dev *gspca_dev);
-static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 30,
- .step = 1,
- .default_value = 15,
- },
- .set_control = setbrightness
- },
-[COLORS] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 1,
- .maximum = 255,
- .step = 1,
- .default_value = 200,
- },
- .set_control = setcolors
- },
-[GAMMA] = {
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setgamma
- },
-[SHARPNESS] = {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 2,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setsharpness
- },
-[ILLUM_TOP] = {
- {
- .id = V4L2_CID_ILLUMINATORS_1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Top illuminator",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- .flags = V4L2_CTRL_FLAG_UPDATE,
- },
- .set = sd_setilluminator1
- },
-[ILLUM_BOT] = {
- {
- .id = V4L2_CID_ILLUMINATORS_2,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Bottom illuminator",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- .flags = V4L2_CTRL_FLAG_UPDATE,
- },
- .set = sd_setilluminator2
- },
-};
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val);
+static void setcolors(struct gspca_dev *gspca_dev, s32 val);
+static void setgamma(struct gspca_dev *gspca_dev, s32 val);
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val);
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
@@ -198,59 +116,130 @@ static void mi_w(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 4);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->usb_buf[0] = 0x61;
- gspca_dev->usb_buf[1] = sd->ctrls[BRIGHTNESS].val;
+ gspca_dev->usb_buf[1] = val;
reg_w(gspca_dev, 2);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- s16 val;
-
- val = sd->ctrls[COLORS].val;
gspca_dev->usb_buf[0] = 0x5f;
gspca_dev->usb_buf[1] = val << 3;
gspca_dev->usb_buf[2] = ((val >> 2) & 0xf8) | 0x04;
reg_w(gspca_dev, 3);
}
-static void setgamma(struct gspca_dev *gspca_dev)
+static void setgamma(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->usb_buf[0] = 0x06;
- gspca_dev->usb_buf[1] = sd->ctrls[GAMMA].val * 0x40;
+ gspca_dev->usb_buf[1] = val * 0x40;
reg_w(gspca_dev, 2);
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->usb_buf[0] = 0x67;
- gspca_dev->usb_buf[1] = sd->ctrls[SHARPNESS].val * 4 + 3;
+ gspca_dev->usb_buf[1] = val * 4 + 3;
reg_w(gspca_dev, 2);
}
-static void setilluminators(struct gspca_dev *gspca_dev)
+static void setilluminators(struct gspca_dev *gspca_dev, bool top, bool bottom)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
+ /* both are off if not streaming */
gspca_dev->usb_buf[0] = 0x22;
- if (sd->ctrls[ILLUM_TOP].val)
+ if (top)
gspca_dev->usb_buf[1] = 0x76;
- else if (sd->ctrls[ILLUM_BOT].val)
+ else if (bottom)
gspca_dev->usb_buf[1] = 0x7a;
else
gspca_dev->usb_buf[1] = 0x7e;
reg_w(gspca_dev, 2);
}
+static int mars_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_ILLUMINATORS_1) {
+ /* only one can be on at a time */
+ if (ctrl->is_new && ctrl->val)
+ sd->illum_bottom->val = 0;
+ if (sd->illum_bottom->is_new && sd->illum_bottom->val)
+ sd->illum_top->val = 0;
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAMMA:
+ setgamma(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_ILLUMINATORS_1:
+ setilluminators(gspca_dev, sd->illum_top->val,
+ sd->illum_bottom->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ jpeg_set_qual(sd->jpeg_hdr, ctrl->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops mars_ctrl_ops = {
+ .s_ctrl = mars_s_ctrl,
+};
+
+/* this function is called at probe time */
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 7);
+ sd->brightness = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 30, 1, 15);
+ sd->saturation = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 200);
+ sd->gamma = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 3, 1, 1);
+ sd->sharpness = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 2, 1, 1);
+ sd->illum_top = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_1, 0, 1, 1, 0);
+ sd->illum_top->flags |= V4L2_CTRL_FLAG_UPDATE;
+ sd->illum_bottom = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_2, 0, 1, 1, 0);
+ sd->illum_bottom->flags |= V4L2_CTRL_FLAG_UPDATE;
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_cluster(2, &sd->illum_top);
+ return 0;
+}
+
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
@@ -261,7 +250,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
- cam->ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
return 0;
}
@@ -269,7 +257,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT);
return 0;
}
@@ -282,7 +269,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* create the JPEG header */
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x21); /* JPEG 422 */
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
data = gspca_dev->usb_buf;
@@ -301,7 +288,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[5] = 0x30; /* reg 4, MI, PAS5101 :
* 0x30 for 24mhz , 0x28 for 12mhz */
data[6] = 0x02; /* reg 5, H start - was 0x04 */
- data[7] = sd->ctrls[GAMMA].val * 0x40; /* reg 0x06: gamma */
+ data[7] = v4l2_ctrl_g_ctrl(sd->gamma) * 0x40; /* reg 0x06: gamma */
data[8] = 0x01; /* reg 7, V start - was 0x03 */
/* if (h_size == 320 ) */
/* data[9]= 0x56; * reg 8, 24MHz, 2:1 scale down */
@@ -333,16 +320,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* reg 0x5f/0x60 (LE) = saturation */
/* h (60): xxxx x100
* l (5f): xxxx x000 */
- data[2] = sd->ctrls[COLORS].val << 3;
- data[3] = ((sd->ctrls[COLORS].val >> 2) & 0xf8) | 0x04;
- data[4] = sd->ctrls[BRIGHTNESS].val; /* reg 0x61 = brightness */
+ data[2] = v4l2_ctrl_g_ctrl(sd->saturation) << 3;
+ data[3] = ((v4l2_ctrl_g_ctrl(sd->saturation) >> 2) & 0xf8) | 0x04;
+ data[4] = v4l2_ctrl_g_ctrl(sd->brightness); /* reg 0x61 = brightness */
data[5] = 0x00;
reg_w(gspca_dev, 6);
data[0] = 0x67;
/*jfm: from win trace*/
- data[1] = sd->ctrls[SHARPNESS].val * 4 + 3;
+ data[1] = v4l2_ctrl_g_ctrl(sd->sharpness) * 4 + 3;
data[2] = 0x14;
reg_w(gspca_dev, 3);
@@ -365,7 +352,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
data[1] = 0x4d; /* ISOC transferring enable... */
reg_w(gspca_dev, 2);
- gspca_dev->ctrl_inac = 0; /* activate the illuminator controls */
+ setilluminators(gspca_dev, v4l2_ctrl_g_ctrl(sd->illum_top),
+ v4l2_ctrl_g_ctrl(sd->illum_bottom));
+
return gspca_dev->usb_err;
}
@@ -373,11 +362,9 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- gspca_dev->ctrl_inac = (1 << ILLUM_TOP) | (1 << ILLUM_BOT);
- if (sd->ctrls[ILLUM_TOP].val || sd->ctrls[ILLUM_BOT].val) {
- sd->ctrls[ILLUM_TOP].val = 0;
- sd->ctrls[ILLUM_BOT].val = 0;
- setilluminators(gspca_dev);
+ if (v4l2_ctrl_g_ctrl(sd->illum_top) ||
+ v4l2_ctrl_g_ctrl(sd->illum_bottom)) {
+ setilluminators(gspca_dev, false, false);
msleep(20);
}
@@ -424,43 +411,16 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- /* only one illuminator may be on */
- sd->ctrls[ILLUM_TOP].val = val;
- if (val)
- sd->ctrls[ILLUM_BOT].val = 0;
- setilluminators(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- /* only one illuminator may be on */
- sd->ctrls[ILLUM_BOT].val = val;
- if (val)
- sd->ctrls[ILLUM_TOP].val = 0;
- setilluminators(gspca_dev);
- return gspca_dev->usb_err;
-}
-
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
struct v4l2_jpegcompression *jcomp)
{
struct sd *sd = (struct sd *) gspca_dev;
+ int ret;
- if (jcomp->quality < QUALITY_MIN)
- sd->quality = QUALITY_MIN;
- else if (jcomp->quality > QUALITY_MAX)
- sd->quality = QUALITY_MAX;
- else
- sd->quality = jcomp->quality;
- if (gspca_dev->streaming)
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ ret = v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
+ if (ret)
+ return ret;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
return 0;
}
@@ -470,7 +430,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->quality;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
| V4L2_JPEG_MARKER_DQT;
return 0;
@@ -479,10 +439,9 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
@@ -513,6 +472,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/nw80x.c b/drivers/media/video/gspca/nw80x.c
index 7167cac7359c..42e021931e60 100644
--- a/drivers/media/video/gspca/nw80x.c
+++ b/drivers/media/video/gspca/nw80x.c
@@ -2001,6 +2001,8 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
return gspca_dev->usb_err;
}
+#define WANT_REGULAR_AUTOGAIN
+#define WANT_COARSE_EXPO_AUTOGAIN
#include "autogain_functions.h"
static void do_autogain(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 739e8a2a2d30..183457c5cfdb 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -2804,7 +2804,7 @@ static void ov7xx0_configure(struct sd *sd)
/* add OV7670 here
* it appears to be wrongly detected as a 7610 by default */
if (rc < 0) {
- PDEBUG(D_ERR, "Error detecting sensor type");
+ pr_err("Error detecting sensor type\n");
return;
}
if ((rc & 3) == 3) {
@@ -2832,12 +2832,12 @@ static void ov7xx0_configure(struct sd *sd)
/* try to read product id registers */
high = i2c_r(sd, 0x0a);
if (high < 0) {
- PDEBUG(D_ERR, "Error detecting camera chip PID");
+ pr_err("Error detecting camera chip PID\n");
return;
}
low = i2c_r(sd, 0x0b);
if (low < 0) {
- PDEBUG(D_ERR, "Error detecting camera chip VER");
+ pr_err("Error detecting camera chip VER\n");
return;
}
if (high == 0x76) {
@@ -2863,7 +2863,7 @@ static void ov7xx0_configure(struct sd *sd)
sd->sensor = SEN_OV7660;
break;
default:
- PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
+ pr_err("Unknown sensor: 0x76%02x\n", low);
return;
}
} else {
@@ -2884,7 +2884,7 @@ static void ov6xx0_configure(struct sd *sd)
/* Detect sensor (sub)type */
rc = i2c_r(sd, OV7610_REG_COM_I);
if (rc < 0) {
- PDEBUG(D_ERR, "Error detecting sensor type");
+ pr_err("Error detecting sensor type\n");
return;
}
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 04753391de3e..80c81dd6d68b 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -34,6 +34,8 @@
#include "gspca.h"
+#include <linux/fixp-arith.h>
+
#define OV534_REG_ADDRESS 0xf1 /* sensor address */
#define OV534_REG_SUBADDR 0xf2
#define OV534_REG_WRITE 0xf3
@@ -53,6 +55,8 @@ MODULE_LICENSE("GPL");
/* controls */
enum e_ctrl {
+ HUE,
+ SATURATION,
BRIGHTNESS,
CONTRAST,
GAIN,
@@ -63,7 +67,6 @@ enum e_ctrl {
SHARPNESS,
HFLIP,
VFLIP,
- COLORS,
LIGHTFREQ,
NCTRLS /* number of controls */
};
@@ -87,22 +90,47 @@ enum sensors {
};
/* V4L2 controls supported by the driver */
+static void sethue(struct gspca_dev *gspca_dev);
+static void setsaturation(struct gspca_dev *gspca_dev);
static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setgain(struct gspca_dev *gspca_dev);
static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static void setagc(struct gspca_dev *gspca_dev);
static void setawb(struct gspca_dev *gspca_dev);
static void setaec(struct gspca_dev *gspca_dev);
static void setsharpness(struct gspca_dev *gspca_dev);
static void sethvflip(struct gspca_dev *gspca_dev);
-static void setcolors(struct gspca_dev *gspca_dev);
static void setlightfreq(struct gspca_dev *gspca_dev);
static int sd_start(struct gspca_dev *gspca_dev);
static void sd_stopN(struct gspca_dev *gspca_dev);
static const struct ctrl sd_ctrls[] = {
+[HUE] = {
+ {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -90,
+ .maximum = 90,
+ .step = 1,
+ .default_value = 0,
+ },
+ .set_control = sethue
+ },
+[SATURATION] = {
+ {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64,
+ },
+ .set_control = setsaturation
+ },
[BRIGHTNESS] = {
{
.id = V4L2_CID_BRIGHTNESS,
@@ -161,7 +189,7 @@ static const struct ctrl sd_ctrls[] = {
.step = 1,
.default_value = 1,
},
- .set = sd_setagc
+ .set_control = setagc
},
[AWB] = {
{
@@ -223,18 +251,6 @@ static const struct ctrl sd_ctrls[] = {
},
.set_control = sethvflip
},
-[COLORS] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 6,
- .step = 1,
- .default_value = 3,
- },
- .set_control = setcolors
- },
[LIGHTFREQ] = {
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -684,7 +700,7 @@ static const u8 sensor_init_772x[][2] = {
{ 0x9c, 0x20 },
{ 0x9e, 0x81 },
- { 0xa6, 0x04 },
+ { 0xa6, 0x07 },
{ 0x7e, 0x0c },
{ 0x7f, 0x16 },
{ 0x80, 0x2a },
@@ -835,6 +851,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
+ msleep(10);
data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
switch (data) {
@@ -955,6 +972,74 @@ static void set_frame_rate(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "frame_rate: %d", r->fps);
}
+static void sethue(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int val;
+
+ val = sd->ctrls[HUE].val;
+ if (sd->sensor == SENSOR_OV767x) {
+ /* TBD */
+ } else {
+ s16 huesin;
+ s16 huecos;
+
+ /* fixp_sin and fixp_cos accept only positive values, while
+ * our val is between -90 and 90
+ */
+ val += 360;
+
+ /* According to the datasheet the registers expect HUESIN and
+ * HUECOS to be the result of the trigonometric functions,
+ * scaled by 0x80.
+ *
+	 * The 0x100 here represents the maximum absolute value
+	 * returned by fixp_sin and fixp_cos, so the scaling treats
+	 * the result as lying in the interval [-1.0, 1.0].
+ */
+ huesin = fixp_sin(val) * 0x80 / 0x100;
+ huecos = fixp_cos(val) * 0x80 / 0x100;
+
+ if (huesin < 0) {
+ sccb_reg_write(gspca_dev, 0xab,
+ sccb_reg_read(gspca_dev, 0xab) | 0x2);
+ huesin = -huesin;
+ } else {
+ sccb_reg_write(gspca_dev, 0xab,
+ sccb_reg_read(gspca_dev, 0xab) & ~0x2);
+
+ }
+ sccb_reg_write(gspca_dev, 0xa9, (u8)huecos);
+ sccb_reg_write(gspca_dev, 0xaa, (u8)huesin);
+ }
+}
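
sethue() above scales the fixed-point sine/cosine results into the 0x80-based HUESIN/HUECOS register format and moves the sine's sign into bit 1 of register 0xab. The following standalone sketch replays that arithmetic with floating-point sin()/cos() so the register values can be checked on a host; it is only an illustration of the scaling described in the comment, and the register layout and the [-1.0, 1.0] range assumption are taken from the patch itself rather than a datasheet.

/* hue_regs.c - host-side check of the HUESIN/HUECOS scaling (illustration only) */
#include <math.h>
#include <stdio.h>

#define PI 3.14159265358979323846

int main(void)
{
	int hue;

	for (hue = -90; hue <= 90; hue += 45) {
		double rad = hue * PI / 180.0;
		/* scale [-1.0, 1.0] into the 0x80-based register format */
		int huesin = (int)lround(sin(rad) * 0x80);
		int huecos = (int)lround(cos(rad) * 0x80);
		int sign = huesin < 0;	/* becomes bit 1 of register 0xab */

		if (huesin < 0)
			huesin = -huesin;
		printf("hue %4d: reg 0xa9=0x%02x reg 0xaa=0x%02x sign=%d\n",
		       hue, huecos & 0xff, huesin & 0xff, sign);
	}
	return 0;
}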
+
+static void setsaturation(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int val;
+
+ val = sd->ctrls[SATURATION].val;
+ if (sd->sensor == SENSOR_OV767x) {
+ int i;
+ static u8 color_tb[][6] = {
+ {0x42, 0x42, 0x00, 0x11, 0x30, 0x41},
+ {0x52, 0x52, 0x00, 0x16, 0x3c, 0x52},
+ {0x66, 0x66, 0x00, 0x1b, 0x4b, 0x66},
+ {0x80, 0x80, 0x00, 0x22, 0x5e, 0x80},
+ {0x9a, 0x9a, 0x00, 0x29, 0x71, 0x9a},
+ {0xb8, 0xb8, 0x00, 0x31, 0x87, 0xb8},
+ {0xdd, 0xdd, 0x00, 0x3b, 0xa2, 0xdd},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(color_tb[0]); i++)
+ sccb_reg_write(gspca_dev, 0x4f + i, color_tb[val][i]);
+ } else {
+ sccb_reg_write(gspca_dev, 0xa7, val); /* U saturation */
+ sccb_reg_write(gspca_dev, 0xa8, val); /* V saturation */
+ }
+}
+
static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1132,26 +1217,6 @@ static void sethvflip(struct gspca_dev *gspca_dev)
}
}
-static void setcolors(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
- int i;
- static u8 color_tb[][6] = {
- {0x42, 0x42, 0x00, 0x11, 0x30, 0x41},
- {0x52, 0x52, 0x00, 0x16, 0x3c, 0x52},
- {0x66, 0x66, 0x00, 0x1b, 0x4b, 0x66},
- {0x80, 0x80, 0x00, 0x22, 0x5e, 0x80},
- {0x9a, 0x9a, 0x00, 0x29, 0x71, 0x9a},
- {0xb8, 0xb8, 0x00, 0x31, 0x87, 0xb8},
- {0xdd, 0xdd, 0x00, 0x3b, 0xa2, 0xdd},
- };
-
- val = sd->ctrls[COLORS].val;
- for (i = 0; i < ARRAY_SIZE(color_tb[0]); i++)
- sccb_reg_write(gspca_dev, 0x4f + i, color_tb[val][i]);
-}
-
static void setlightfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1178,10 +1243,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->ctrls = sd->ctrls;
- /* the auto white balance control works only when auto gain is set */
- if (sd_ctrls[AGC].qctrl.default_value == 0)
- gspca_dev->ctrl_inac |= (1 << AWB);
-
cam->cam_mode = ov772x_mode;
cam->nmodes = ARRAY_SIZE(ov772x_mode);
@@ -1225,9 +1286,13 @@ static int sd_init(struct gspca_dev *gspca_dev)
if ((sensor_id & 0xfff0) == 0x7670) {
sd->sensor = SENSOR_OV767x;
- gspca_dev->ctrl_dis = (1 << GAIN) |
+ gspca_dev->ctrl_dis = (1 << HUE) |
+ (1 << GAIN) |
(1 << AGC) |
(1 << SHARPNESS); /* auto */
+ sd->ctrls[SATURATION].min = 0;
+ sd->ctrls[SATURATION].max = 6;
+ sd->ctrls[SATURATION].def = 3;
sd->ctrls[BRIGHTNESS].min = -127;
sd->ctrls[BRIGHTNESS].max = 127;
sd->ctrls[BRIGHTNESS].def = 0;
@@ -1243,7 +1308,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
gspca_dev->cam.nmodes = ARRAY_SIZE(ov767x_mode);
} else {
sd->sensor = SENSOR_OV772x;
- gspca_dev->ctrl_dis = (1 << COLORS);
gspca_dev->cam.bulk = 1;
gspca_dev->cam.bulk_size = 16384;
gspca_dev->cam.bulk_nurbs = 2;
@@ -1302,6 +1366,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_frame_rate(gspca_dev);
+ if (!(gspca_dev->ctrl_dis & (1 << HUE)))
+ sethue(gspca_dev);
+ setsaturation(gspca_dev);
if (!(gspca_dev->ctrl_dis & (1 << AGC)))
setagc(gspca_dev);
setawb(gspca_dev);
@@ -1314,8 +1381,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (!(gspca_dev->ctrl_dis & (1 << SHARPNESS)))
setsharpness(gspca_dev);
sethvflip(gspca_dev);
- if (!(gspca_dev->ctrl_dis & (1 << COLORS)))
- setcolors(gspca_dev);
setlightfreq(gspca_dev);
ov534_set_led(gspca_dev, 1);
@@ -1418,29 +1483,6 @@ scan_next:
} while (remaining_len > 0);
}
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->ctrls[AGC].val = val;
-
- /* the auto white balance control works only
- * when auto gain is set */
- if (val) {
- gspca_dev->ctrl_inac &= ~(1 << AWB);
- } else {
- gspca_dev->ctrl_inac |= (1 << AWB);
- if (sd->ctrls[AWB].val) {
- sd->ctrls[AWB].val = 0;
- if (gspca_dev->streaming)
- setawb(gspca_dev);
- }
- }
- if (gspca_dev->streaming)
- setagc(gspca_dev);
- return gspca_dev->usb_err;
-}
-
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index b5797308a39b..1fd41f0d2e95 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1008,6 +1008,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
+ msleep(10);
data = reg_r(gspca_dev, OV534_REG_STATUS);
switch (data) {
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 3844c49f269c..fa661c6d6d55 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -29,6 +29,8 @@
#include <linux/input.h>
#include "gspca.h"
+/* Include pac common sof detection functions */
+#include "pac_common.h"
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Pixart PAC207");
@@ -39,16 +41,17 @@ MODULE_LICENSE("GPL");
#define PAC207_BRIGHTNESS_MIN 0
#define PAC207_BRIGHTNESS_MAX 255
#define PAC207_BRIGHTNESS_DEFAULT 46
+#define PAC207_BRIGHTNESS_REG 0x08
#define PAC207_EXPOSURE_MIN 3
#define PAC207_EXPOSURE_MAX 90 /* 1 sec expo time / 1 fps */
#define PAC207_EXPOSURE_DEFAULT 5 /* power on default: 3 */
-#define PAC207_EXPOSURE_KNEE 9 /* fps: 90 / exposure -> 9: 10 fps */
+#define PAC207_EXPOSURE_REG 0x02
#define PAC207_GAIN_MIN 0
#define PAC207_GAIN_MAX 31
#define PAC207_GAIN_DEFAULT 7 /* power on default: 9 */
-#define PAC207_GAIN_KNEE 15
+#define PAC207_GAIN_REG 0x0e
#define PAC207_AUTOGAIN_DEADZONE 30
@@ -56,13 +59,9 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 mode;
-
- u8 brightness;
- u8 exposure;
- u8 autogain;
- u8 gain;
+ struct v4l2_ctrl *brightness;
+ u8 mode;
u8 sof_read;
u8 header_read;
u8 autogain_ignore_frames;
@@ -70,80 +69,6 @@ struct sd {
atomic_t avg_lum;
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-#define SD_BRIGHTNESS 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = PAC207_BRIGHTNESS_MIN,
- .maximum = PAC207_BRIGHTNESS_MAX,
- .step = 1,
- .default_value = PAC207_BRIGHTNESS_DEFAULT,
- .flags = 0,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-#define SD_EXPOSURE 1
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = PAC207_EXPOSURE_MIN,
- .maximum = PAC207_EXPOSURE_MAX,
- .step = 1,
- .default_value = PAC207_EXPOSURE_DEFAULT,
- .flags = 0,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
-#define SD_AUTOGAIN 2
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- .flags = 0,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-#define SD_GAIN 3
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = PAC207_GAIN_MIN,
- .maximum = PAC207_GAIN_MAX,
- .step = 1,
- .default_value = PAC207_GAIN_DEFAULT,
- .flags = 0,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
-};
-
static const struct v4l2_pix_format sif_mode[] = {
{176, 144, V4L2_PIX_FMT_PAC207, V4L2_FIELD_NONE,
.bytesperline = 176,
@@ -167,39 +92,44 @@ static const __u8 pac207_sensor_init[][8] = {
{0x32, 0x00, 0x96, 0x00, 0xa2, 0x02, 0xaf, 0x00},
};
-static int pac207_write_regs(struct gspca_dev *gspca_dev, u16 index,
+static void pac207_write_regs(struct gspca_dev *gspca_dev, u16 index,
const u8 *buffer, u16 length)
{
struct usb_device *udev = gspca_dev->dev;
int err;
+ if (gspca_dev->usb_err < 0)
+ return;
+
memcpy(gspca_dev->usb_buf, buffer, length);
err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
0x00, index,
gspca_dev->usb_buf, length, PAC207_CTRL_TIMEOUT);
- if (err < 0)
+ if (err < 0) {
pr_err("Failed to write registers to index 0x%04X, error %d\n",
index, err);
-
- return err;
+ gspca_dev->usb_err = err;
+ }
}
-
-static int pac207_write_reg(struct gspca_dev *gspca_dev, u16 index, u16 value)
+static void pac207_write_reg(struct gspca_dev *gspca_dev, u16 index, u16 value)
{
struct usb_device *udev = gspca_dev->dev;
int err;
+ if (gspca_dev->usb_err < 0)
+ return;
+
err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
value, index, NULL, 0, PAC207_CTRL_TIMEOUT);
- if (err)
+ if (err) {
pr_err("Failed to write a register (index 0x%04X, value 0x%02X, error %d)\n",
index, value, err);
-
- return err;
+ gspca_dev->usb_err = err;
+ }
}
static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
@@ -207,6 +137,9 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
struct usb_device *udev = gspca_dev->dev;
int res;
+ if (gspca_dev->usb_err < 0)
+ return 0;
+
res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
0x00, index,
@@ -214,7 +147,8 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
if (res < 0) {
pr_err("Failed to read a register (index 0x%04X, error %d)\n",
index, res);
- return res;
+ gspca_dev->usb_err = res;
+ return 0;
}
return gspca_dev->usb_buf[0];
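
The pac207 register helpers above are converted from returning an error code to latching the first failure in gspca_dev->usb_err and turning every later call into a no-op, so whole init/start sequences can be written without per-call checks. A minimal, self-contained sketch of that sticky-error pattern, with hypothetical ctx/hw_write names standing in for the gspca structures and the USB transfer:

#include <errno.h>
#include <stdio.h>

struct ctx { int err; };

/* stand-in for the real USB control transfer */
static int hw_write(unsigned reg, unsigned val)
{
	return reg == 0x13 ? -EIO : 0;	/* pretend one write fails */
}

static void write_reg(struct ctx *c, unsigned reg, unsigned val)
{
	if (c->err < 0)
		return;			/* an earlier call failed: skip */
	if (hw_write(reg, val) < 0)
		c->err = -EIO;		/* latch the first error */
}

int main(void)
{
	struct ctx c = { 0 };

	write_reg(&c, 0x0f, 0x00);
	write_reg(&c, 0x13, 0x01);	/* fails, latches the error */
	write_reg(&c, 0x1c, 0x01);	/* silently skipped */
	printf("final status: %d\n", c.err);	/* one check at the end */
	return 0;
}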
@@ -224,7 +158,6 @@ static int pac207_read_reg(struct gspca_dev *gspca_dev, u16 index)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
u8 idreg[2];
@@ -247,10 +180,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
cam->cam_mode = sif_mode;
cam->nmodes = ARRAY_SIZE(sif_mode);
- sd->brightness = PAC207_BRIGHTNESS_DEFAULT;
- sd->exposure = PAC207_EXPOSURE_DEFAULT;
- sd->gain = PAC207_GAIN_DEFAULT;
- sd->autogain = AUTOGAIN_DEF;
return 0;
}
@@ -264,6 +193,87 @@ static int sd_init(struct gspca_dev *gspca_dev)
* Bit_2=Compression test mode enable */
pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
+ return gspca_dev->usb_err;
+}
+
+static void setcontrol(struct gspca_dev *gspca_dev, u16 reg, u16 val)
+{
+ pac207_write_reg(gspca_dev, reg, val);
+ pac207_write_reg(gspca_dev, 0x13, 0x01); /* Bit 0, auto clear */
+ pac207_write_reg(gspca_dev, 0x1c, 0x01); /* not documented */
+}
+
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) {
+ /* when switching to autogain set defaults to make sure
+ we are on a valid point of the autogain gain /
+ exposure knee graph, and give this change time to
+ take effect before doing autogain. */
+ gspca_dev->exposure->val = PAC207_EXPOSURE_DEFAULT;
+ gspca_dev->gain->val = PAC207_GAIN_DEFAULT;
+ sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setcontrol(gspca_dev, PAC207_BRIGHTNESS_REG, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val))
+ setcontrol(gspca_dev, PAC207_EXPOSURE_REG,
+ gspca_dev->exposure->val);
+ if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val))
+ setcontrol(gspca_dev, PAC207_GAIN_REG,
+ gspca_dev->gain->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+/* this function is called at probe time */
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+
+ sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS,
+ PAC207_BRIGHTNESS_MIN, PAC207_BRIGHTNESS_MAX,
+ 1, PAC207_BRIGHTNESS_DEFAULT);
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ PAC207_EXPOSURE_MIN, PAC207_EXPOSURE_MAX,
+ 1, PAC207_EXPOSURE_DEFAULT);
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN,
+ PAC207_GAIN_MIN, PAC207_GAIN_MAX,
+ 1, PAC207_GAIN_DEFAULT);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
return 0;
}
@@ -285,11 +295,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
else
pac207_write_reg(gspca_dev, 0x4a, 0x30);
pac207_write_reg(gspca_dev, 0x4b, 0x00); /* Sram test value */
- pac207_write_reg(gspca_dev, 0x08, sd->brightness);
+ pac207_write_reg(gspca_dev, 0x08, v4l2_ctrl_g_ctrl(sd->brightness));
/* PGA global gain (Bit 4-0) */
- pac207_write_reg(gspca_dev, 0x0e, sd->gain);
- pac207_write_reg(gspca_dev, 0x02, sd->exposure); /* PXCK = 12MHz /n */
+ pac207_write_reg(gspca_dev, 0x0e,
+ v4l2_ctrl_g_ctrl(gspca_dev->gain));
+ pac207_write_reg(gspca_dev, 0x02,
+ v4l2_ctrl_g_ctrl(gspca_dev->exposure)); /* PXCK = 12MHz /n */
mode = 0x02; /* Image Format (Bit 0), LED (1), Compr. test mode (2) */
if (gspca_dev->width == 176) { /* 176x144 */
@@ -308,7 +320,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->sof_read = 0;
sd->autogain_ignore_frames = 0;
atomic_set(&sd->avg_lum, -1);
- return 0;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -318,8 +330,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
}
-/* Include pac common sof detection functions */
-#include "pac_common.h"
static void pac207_do_auto_gain(struct gspca_dev *gspca_dev)
{
@@ -331,9 +341,8 @@ static void pac207_do_auto_gain(struct gspca_dev *gspca_dev)
if (sd->autogain_ignore_frames > 0)
sd->autogain_ignore_frames--;
- else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum,
- 90, PAC207_AUTOGAIN_DEADZONE,
- PAC207_GAIN_KNEE, PAC207_EXPOSURE_KNEE))
+ else if (gspca_coarse_grained_expo_autogain(gspca_dev, avg_lum,
+ 90, PAC207_AUTOGAIN_DEADZONE))
sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
}
@@ -384,118 +393,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- pac207_write_reg(gspca_dev, 0x08, sd->brightness);
- pac207_write_reg(gspca_dev, 0x13, 0x01); /* Bit 0, auto clear */
- pac207_write_reg(gspca_dev, 0x1c, 0x01); /* not documented */
-}
-
-static void setexposure(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- pac207_write_reg(gspca_dev, 0x02, sd->exposure);
- pac207_write_reg(gspca_dev, 0x13, 0x01); /* Bit 0, auto clear */
- pac207_write_reg(gspca_dev, 0x1c, 0x01); /* not documented */
-}
-
-static void setgain(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- pac207_write_reg(gspca_dev, 0x0e, sd->gain);
- pac207_write_reg(gspca_dev, 0x13, 0x01); /* Bit 0, auto clear */
- pac207_write_reg(gspca_dev, 0x1c, 0x01); /* not documented */
-}
-
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return 0;
-}
-
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->exposure;
- return 0;
-}
-
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gain = val;
- if (gspca_dev->streaming)
- setgain(gspca_dev);
- return 0;
-}
-
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gain;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- /* when switching to autogain set defaults to make sure
- we are on a valid point of the autogain gain /
- exposure knee graph, and give this change time to
- take effect before doing autogain. */
- if (sd->autogain) {
- sd->exposure = PAC207_EXPOSURE_DEFAULT;
- sd->gain = PAC207_GAIN_DEFAULT;
- if (gspca_dev->streaming) {
- sd->autogain_ignore_frames =
- PAC_AUTOGAIN_IGNORE_FRAMES;
- setexposure(gspca_dev);
- setgain(gspca_dev);
- }
- }
-
- return 0;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
@@ -518,10 +415,9 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.dq_callback = pac207_do_auto_gain,
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 30662fccb0cf..a0369a58c4bb 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -23,43 +23,58 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/* Some documentation about various registers as determined by trial and error.
-
- Register page 1:
-
- Address Description
- 0x78 Global control, bit 6 controls the LED (inverted)
-
- Register page 3:
-
- Address Description
- 0x02 Clock divider 3-63, fps = 90 / val. Must be a multiple of 3 on
- the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
- 0x03 Variable framerate ctrl reg2==3: 0 -> ~30 fps, 255 -> ~22fps
- 0x04 Another var framerate ctrl reg2==3, reg3==0: 0 -> ~30 fps,
- 63 -> ~27 fps, the 2 msb's must always be 1 !!
- 0x05 Another var framerate ctrl reg2==3, reg3==0, reg4==0xc0:
- 1 -> ~30 fps, 2 -> ~20 fps
- 0x0e Exposure bits 0-7, 0-448, 0 = use full frame time
- 0x0f Exposure bit 8, 0-448, 448 = no exposure at all
- 0x10 Master gain 0-31
- 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
-
- The registers are accessed in the following functions:
-
- Page | Register | Function
- -----+------------+---------------------------------------------------
- 0 | 0x0f..0x20 | setcolors()
- 0 | 0xa2..0xab | setbrightcont()
- 0 | 0xc5 | setredbalance()
- 0 | 0xc6 | setwhitebalance()
- 0 | 0xc7 | setbluebalance()
- 0 | 0xdc | setbrightcont(), setcolors()
- 3 | 0x02 | setexposure()
- 3 | 0x10 | setgain()
- 3 | 0x11 | setcolors(), setgain(), setexposure(), sethvflip()
- 3 | 0x21 | sethvflip()
-*/
+/*
+ * Some documentation about various registers as determined by trial and error.
+ *
+ * Register page 1:
+ *
+ * Address Description
+ * 0x78 Global control, bit 6 controls the LED (inverted)
+ * 0x80 Compression balance, 2 interesting settings:
+ * 0x0f Default
+ * 0x50 Values >= this switch the camera to a lower compression,
+ * using the same table for both luminance and chrominance.
+ * This gives a sharper picture. Only usable when running
+ * at < 15 fps! Note currently the driver does not use this
+ * as the quality gain is small and the generated JPEGs are
+ * only understood by v4l-utils >= 0.8.9
+ *
+ * Register page 3:
+ *
+ * Address Description
+ * 0x02 Clock divider 3-63, fps = 90 / val. Must be a multiple of 3 on
+ * the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
+ * 0x03 Variable framerate ctrl reg2==3: 0 -> ~30 fps, 255 -> ~22fps
+ * 0x04 Another var framerate ctrl reg2==3, reg3==0: 0 -> ~30 fps,
+ * 63 -> ~27 fps, the 2 msb's must always be 1 !!
+ * 0x05 Another var framerate ctrl reg2==3, reg3==0, reg4==0xc0:
+ * 1 -> ~30 fps, 2 -> ~20 fps
+ * 0x0e Exposure bits 0-7, 0-448, 0 = use full frame time
+ * 0x0f Exposure bit 8, 0-448, 448 = no exposure at all
+ * 0x10 Gain 0-31
+ * 0x12 Another gain 0-31, unlike 0x10 this one seems to start with an
+ * amplification value of 1 rather than 0 at its lowest setting
+ * 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
+ * 0x80 Another framerate control, best left at 1, moving it from 1 to
+ * 2 causes the framerate to become 3/4th of what it was, and
+ * also seems to cause pixel averaging, resulting in an effective
+ * resolution of 320x240 and thus a much blockier image
+ *
+ * The registers are accessed in the following functions:
+ *
+ * Page | Register | Function
+ * -----+------------+---------------------------------------------------
+ * 0 | 0x0f..0x20 | setcolors()
+ * 0 | 0xa2..0xab | setbrightcont()
+ * 0 | 0xc5 | setredbalance()
+ * 0 | 0xc6 | setwhitebalance()
+ * 0 | 0xc7 | setbluebalance()
+ * 0 | 0xdc | setbrightcont(), setcolors()
+ * 3 | 0x02 | setexposure()
+ * 3 | 0x10, 0x12 | setgain()
+ * 3 | 0x11 | setcolors(), setgain(), setexposure(), sethvflip()
+ * 3 | 0x21 | sethvflip()
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -89,7 +104,6 @@ enum e_ctrl {
NCTRLS /* number of controls */
};
-/* specific webcam descriptor for pac7302 */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
@@ -198,10 +212,10 @@ static const struct ctrl sd_ctrls[] = {
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
.minimum = 0,
- .maximum = 255,
+ .maximum = 62,
.step = 1,
-#define GAIN_DEF 127
-#define GAIN_KNEE 255 /* Gain seems to cause little noise on the pac73xx */
+#define GAIN_DEF 15
+#define GAIN_KNEE 46
.default_value = GAIN_DEF,
},
.set_control = setgain
@@ -270,7 +284,6 @@ static const struct v4l2_pix_format vga_mode[] = {
#define LOAD_PAGE3 255
#define END_OF_SEQUENCE 0
-/* pac 7302 */
static const u8 init_7302[] = {
/* index,value */
0xff, 0x01, /* page 1 */
@@ -509,7 +522,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
return 0;
}
-/* This function is used by pac7302 only */
static void setbrightcont(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -536,7 +548,6 @@ static void setbrightcont(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0xdc, 0x01);
}
-/* This function is used by pac7302 only */
static void setcolors(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -590,9 +601,19 @@ static void setbluebalance(struct gspca_dev *gspca_dev)
static void setgain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ u8 reg10, reg12;
+
+ if (sd->ctrls[GAIN].val < 32) {
+ reg10 = sd->ctrls[GAIN].val;
+ reg12 = 0;
+ } else {
+ reg10 = 31;
+ reg12 = sd->ctrls[GAIN].val - 31;
+ }
reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
- reg_w(gspca_dev, 0x10, sd->ctrls[GAIN].val >> 3);
+ reg_w(gspca_dev, 0x10, reg10);
+ reg_w(gspca_dev, 0x12, reg12);
/* load registers to sensor (Bit 0, auto clear) */
reg_w(gspca_dev, 0x11, 0x01);
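
With the gain control widened to 0-62, the value is now split between register 0x10 (0-31) and the newly documented register 0x12 (the remainder), as in the hunk above. The short host-side sketch below only mirrors that mapping so a few control values can be sanity-checked:

#include <stdio.h>

/* Split a 0-62 pac7302 gain value across regs 0x10 and 0x12 (illustration) */
static void split_gain(int val, int *reg10, int *reg12)
{
	if (val < 32) {
		*reg10 = val;
		*reg12 = 0;
	} else {
		*reg10 = 31;
		*reg12 = val - 31;
	}
}

int main(void)
{
	int v, r10, r12;

	for (v = 0; v <= 62; v += 15) {
		split_gain(v, &r10, &r12);
		printf("gain %2d -> reg 0x10=%2d reg 0x12=%2d\n", v, r10, r12);
	}
	return 0;
}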
@@ -604,28 +625,36 @@ static void setexposure(struct gspca_dev *gspca_dev)
u8 clockdiv;
u16 exposure;
- /* register 2 of frame 3 contains the clock divider configuring the
- no fps according to the formula: 90 / reg. sd->exposure is the
- desired exposure time in 0.5 ms. */
+ /*
+ * Register 2 of frame 3 contains the clock divider configuring the
+ * number of fps according to the formula: 90 / reg. sd->exposure is the
+ * desired exposure time in 0.5 ms.
+ */
clockdiv = (90 * sd->ctrls[EXPOSURE].val + 1999) / 2000;
- /* Note clockdiv = 3 also works, but when running at 30 fps, depending
- on the scene being recorded, the camera switches to another
- quantization table for certain JPEG blocks, and we don't know how
- to decompress these blocks. So we cap the framerate at 15 fps */
+ /*
+ * Note clockdiv = 3 also works, but when running at 30 fps, depending
+ * on the scene being recorded, the camera switches to another
+ * quantization table for certain JPEG blocks, and we don't know how
+ * to decompress these blocks. So we cap the framerate at 15 fps.
+ */
if (clockdiv < 6)
clockdiv = 6;
else if (clockdiv > 63)
clockdiv = 63;
- /* reg2 MUST be a multiple of 3, except when between 6 and 12?
- Always round up, otherwise we cannot get the desired frametime
- using the partial frame time exposure control */
+ /*
+ * Register 2 MUST be a multiple of 3, except when between 6 and 12?
+ * Always round up, otherwise we cannot get the desired frametime
+ * using the partial frame time exposure control.
+ */
if (clockdiv < 6 || clockdiv > 12)
clockdiv = ((clockdiv + 2) / 3) * 3;
- /* frame exposure time in ms = 1000 * clockdiv / 90 ->
- exposure = (sd->exposure / 2) * 448 / (1000 * clockdiv / 90) */
+ /*
+ * frame exposure time in ms = 1000 * clockdiv / 90 ->
+ * exposure = (sd->exposure / 2) * 448 / (1000 * clockdiv / 90)
+ */
exposure = (sd->ctrls[EXPOSURE].val * 45 * 448) / (1000 * clockdiv);
/* 0 = use full frametime, 448 = no exposure, reverse it */
exposure = 448 - exposure;
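
The reflowed comments above describe the full exposure pipeline: derive the clock divider from the requested exposure time (in 0.5 ms units), clamp it and round it up to a multiple of 3, then convert what is left into the 0-448 partial-frame exposure register, which is reversed. The sketch below simply replays that integer arithmetic outside the driver so the clamping and rounding can be inspected for a few sample control values:

#include <stdio.h>

/* Replay the pac7302 exposure math for a control value in 0.5 ms units */
static void pac7302_expo(int val_half_ms)
{
	int clockdiv, exposure;

	clockdiv = (90 * val_half_ms + 1999) / 2000;	/* fps = 90 / clockdiv */
	if (clockdiv < 6)		/* cap the framerate at 15 fps */
		clockdiv = 6;
	else if (clockdiv > 63)
		clockdiv = 63;
	if (clockdiv < 6 || clockdiv > 12)
		clockdiv = ((clockdiv + 2) / 3) * 3;	/* round up to a multiple of 3 */

	exposure = (val_half_ms * 45 * 448) / (1000 * clockdiv);
	exposure = 448 - exposure;	/* 0 = full frame time, 448 = none */

	printf("val=%3d (%.1f ms): clockdiv=%2d (%2d fps) exposure reg=%3d\n",
	       val_half_ms, val_half_ms / 2.0, clockdiv, 90 / clockdiv, exposure);
}

int main(void)
{
	int samples[] = { 1, 16, 50, 133, 255 };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		pac7302_expo(samples[i]);
	return 0;
}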
@@ -643,10 +672,12 @@ static void setautogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- /* when switching to autogain set defaults to make sure
- we are on a valid point of the autogain gain /
- exposure knee graph, and give this change time to
- take effect before doing autogain. */
+ /*
+ * When switching to autogain set defaults to make sure
+ * we are on a valid point of the autogain gain /
+ * exposure knee graph, and give this change time to
+ * take effect before doing autogain.
+ */
if (sd->ctrls[AUTOGAIN].val) {
sd->ctrls[EXPOSURE].val = EXPOSURE_DEF;
sd->ctrls[GAIN].val = GAIN_DEF;
@@ -700,8 +731,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
setautogain(gspca_dev);
sethvflip(gspca_dev);
- /* only resolution 640x480 is supported for pac7302 */
-
sd->sof_read = 0;
atomic_set(&sd->avg_lum, 270 + sd->ctrls[BRIGHTNESS].val);
@@ -729,9 +758,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x78, 0x40);
}
-/* !! coarse_grained_expo_autogain is not used !! */
-#define exp_too_low_cnt flags
-#define exp_too_high_cnt sof_read
+#define WANT_REGULAR_AUTOGAIN
#include "autogain_functions.h"
static void do_autogain(struct gspca_dev *gspca_dev)
@@ -792,10 +819,12 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
if (sof) {
int n, lum_offset, footer_length;
- /* 6 bytes after the FF D9 EOF marker a number of lumination
- bytes are send corresponding to different parts of the
- image, the 14th and 15th byte after the EOF seem to
- correspond to the center of the image */
+ /*
+ * 6 bytes after the FF D9 EOF marker a number of luminance
+ * bytes are sent corresponding to different parts of the
+ * image, the 14th and 15th byte after the EOF seem to
+ * correspond to the center of the image.
+ */
lum_offset = 61 + sizeof pac_sof_marker;
footer_length = 74;
@@ -839,9 +868,10 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
u8 index;
u8 value;
- /* reg->reg: bit0..15: reserved for register index (wIndex is 16bit
- long on the USB bus)
- */
+ /*
+ * reg->reg: bit0..15: reserved for register index (wIndex is 16bit
+ * long on the USB bus)
+ */
if (reg->match.type == V4L2_CHIP_MATCH_HOST &&
reg->match.addr == 0 &&
(reg->reg < 0x000000ff) &&
@@ -852,9 +882,11 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
index = reg->reg;
value = reg->val;
- /* Note that there shall be no access to other page
- by any other function between the page swith and
- the actual register write */
+ /*
+ * Note that there shall be no access to other page
+ * by any other function between the page switch and
+ * the actual register write.
+ */
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
reg_w(gspca_dev, index, value);
@@ -940,6 +972,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2625)},
{USB_DEVICE(0x093a, 0x2626)},
+ {USB_DEVICE(0x093a, 0x2627), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2628)},
{USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x262a)},
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 1ac111176ffa..115da169f32a 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -20,34 +20,42 @@
*/
/* Some documentation about various registers as determined by trial and error.
- When the register addresses differ between the 7202 and the 7311 the 2
- different addresses are written as 7302addr/7311addr, when one of the 2
- addresses is a - sign that register description is not valid for the
- matching IC.
-
- Register page 1:
-
- Address Description
- -/0x08 Unknown compressor related, must always be 8 except when not
- in 640x480 resolution and page 4 reg 2 <= 3 then set it to 9 !
- -/0x1b Auto white balance related, bit 0 is AWB enable (inverted)
- bits 345 seem to toggle per color gains on/off (inverted)
- 0x78 Global control, bit 6 controls the LED (inverted)
- -/0x80 JPEG compression ratio ? Best not touched
-
- Register page 3/4:
-
- Address Description
- 0x02 Clock divider 2-63, fps =~ 60 / val. Must be a multiple of 3 on
- the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
- -/0x0f Master gain 1-245, low value = high gain
- 0x10/- Master gain 0-31
- -/0x10 Another gain 0-15, limited influence (1-2x gain I guess)
- 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
- -/0x27 Seems to toggle various gains on / off, Setting bit 7 seems to
- completely disable the analog amplification block. Set to 0x68
- for max gain, 0x14 for minimal gain.
-*/
+ *
+ * Register page 1:
+ *
+ * Address Description
+ * 0x08 Unknown compressor related, must always be 8 except when not
+ * in 640x480 resolution and page 4 reg 2 <= 3 then set it to 9 !
+ * 0x1b Auto white balance related, bit 0 is AWB enable (inverted)
+ * bits 345 seem to toggle per color gains on/off (inverted)
+ * 0x78 Global control, bit 6 controls the LED (inverted)
+ * 0x80 Compression balance, interesting settings:
+ * 0x01 Use this to allow the camera to switch to higher compr.
+ * on the fly. Needed to stay within bandwidth @ 640x480@30
+ * 0x1c From usb captures under Windows for 640x480
+ * 0x2a Values >= this switch the camera to a lower compression,
+ * using the same table for both luminance and chrominance.
+ * This gives a sharper picture. Usable only at 640x480@ <
+ * 15 fps or 320x240 / 160x120. Note currently the driver
+ * does not use this as the quality gain is small and the
+ * generated JPEGs are only understood by v4l-utils >= 0.8.9
+ * 0x3f From usb captures under Windows for 320x240
+ * 0x69 From usb captures under Windows for 160x120
+ *
+ * Register page 4:
+ *
+ * Address Description
+ * 0x02 Clock divider 2-63, fps =~ 60 / val. Must be a multiple of 3 on
+ * the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
+ * 0x0f Master gain 1-245, low value = high gain
+ * 0x10 Another gain 0-15, limited influence (1-2x gain I guess)
+ * 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
+ * Note setting vflip disabled leads to a much lower image quality,
+ * so we always vflip, and tell userspace to flip it back
+ * 0x27 Seems to toggle various gains on / off, Setting bit 7 seems to
+ * completely disable the analog amplification block. Set to 0x68
+ * for max gain, 0x14 for minimal gain.
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -55,21 +63,21 @@
#include <linux/input.h>
#include "gspca.h"
+/* Include pac common sof detection functions */
+#include "pac_common.h"
+
+#define PAC7311_GAIN_DEFAULT 122
+#define PAC7311_EXPOSURE_DEFAULT 3 /* 20 fps, avoid using high compr. */
MODULE_AUTHOR("Thomas Kaiser thomas@kaiser-linux.li");
MODULE_DESCRIPTION("Pixart PAC7311");
MODULE_LICENSE("GPL");
-/* specific webcam descriptor for pac7311 */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- unsigned char contrast;
- unsigned char gain;
- unsigned char exposure;
- unsigned char autogain;
- __u8 hflip;
- __u8 vflip;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *hflip;
u8 sof_read;
u8 autogain_ignore_frames;
@@ -77,114 +85,6 @@ struct sd {
atomic_t avg_lum;
};
-/* V4L2 controls supported by the driver */
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-/* This control is for both the 7302 and the 7311 */
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
-#define CONTRAST_MAX 255
- .maximum = CONTRAST_MAX,
- .step = 1,
-#define CONTRAST_DEF 127
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-/* All controls below are for both the 7302 and the 7311 */
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
-#define GAIN_MAX 255
- .maximum = GAIN_MAX,
- .step = 1,
-#define GAIN_DEF 127
-#define GAIN_KNEE 255 /* Gain seems to cause little noise on the pac73xx */
- .default_value = GAIN_DEF,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
-#define EXPOSURE_MAX 255
- .maximum = EXPOSURE_MAX,
- .step = 1,
-#define EXPOSURE_DEF 16 /* 32 ms / 30 fps */
-#define EXPOSURE_KNEE 50 /* 100 ms / 10 fps */
- .default_value = EXPOSURE_DEF,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
- {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define HFLIP_DEF 0
- .default_value = HFLIP_DEF,
- },
- .set = sd_sethflip,
- .get = sd_gethflip,
- },
- {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vflip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
- },
- .set = sd_setvflip,
- .get = sd_getvflip,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -206,8 +106,8 @@ static const struct v4l2_pix_format vga_mode[] = {
#define LOAD_PAGE4 254
#define END_OF_SEQUENCE 0
-/* pac 7311 */
static const __u8 init_7311[] = {
+ 0xff, 0x01,
0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */
0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */
0x78, 0x44, /* Bit_0=start stream, Bit_6=LED */
@@ -387,90 +287,73 @@ static void reg_w_var(struct gspca_dev *gspca_dev,
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
- struct cam *cam;
+ struct cam *cam = &gspca_dev->cam;
- cam = &gspca_dev->cam;
-
- PDEBUG(D_CONF, "Find Sensor PAC7311");
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
+ cam->input_flags = V4L2_IN_ST_VFLIP;
- sd->contrast = CONTRAST_DEF;
- sd->gain = GAIN_DEF;
- sd->exposure = EXPOSURE_DEF;
- sd->autogain = AUTOGAIN_DEF;
- sd->hflip = HFLIP_DEF;
- sd->vflip = VFLIP_DEF;
return 0;
}
-/* This function is used by pac7311 only */
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
reg_w(gspca_dev, 0xff, 0x04);
- reg_w(gspca_dev, 0x10, sd->contrast >> 4);
+ reg_w(gspca_dev, 0x10, val);
/* load registers to sensor (Bit 0, auto clear) */
reg_w(gspca_dev, 0x11, 0x01);
}
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int gain = GAIN_MAX - sd->gain;
-
- if (gain < 1)
- gain = 1;
- else if (gain > 245)
- gain = 245;
reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
reg_w(gspca_dev, 0x0e, 0x00);
- reg_w(gspca_dev, 0x0f, gain);
+ reg_w(gspca_dev, 0x0f, gspca_dev->gain->maximum - val + 1);
/* load registers to sensor (Bit 0, auto clear) */
reg_w(gspca_dev, 0x11, 0x01);
}
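
Page 4 register 0x0f is documented above as "Master gain 1-245, low value = high gain", so the new setgain() inverts the 0-244 V4L2 control before writing it. A tiny sketch of that inversion, with the control maximum taken from sd_init_controls() in this patch:

#include <stdio.h>

/* pac7311: V4L2 gain 0-244 -> page 4 reg 0x0f (1-245, low value = high gain) */
static int pac7311_gain_to_reg(int val)
{
	const int gain_max = 244;	/* control maximum from sd_init_controls() */

	return gain_max - val + 1;
}

int main(void)
{
	printf("gain   0 -> reg 0x0f = %d (weakest amplification)\n",
	       pac7311_gain_to_reg(0));
	printf("gain 122 -> reg 0x0f = %d (default)\n",
	       pac7311_gain_to_reg(122));
	printf("gain 244 -> reg 0x0f = %d (strongest amplification)\n",
	       pac7311_gain_to_reg(244));
	return 0;
}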
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- __u8 reg;
-
- /* register 2 of frame 3/4 contains the clock divider configuring the
- no fps according to the formula: 60 / reg. sd->exposure is the
- desired exposure time in ms. */
- reg = 120 * sd->exposure / 1000;
- if (reg < 2)
- reg = 2;
- else if (reg > 63)
- reg = 63;
-
reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
- reg_w(gspca_dev, 0x02, reg);
+ reg_w(gspca_dev, 0x02, val);
- /* Page 1 register 8 must always be 0x08 except when not in
- 640x480 mode and Page3/4 reg 2 <= 3 then it must be 9 */
+ /* load registers to sensor (Bit 0, auto clear) */
+ reg_w(gspca_dev, 0x11, 0x01);
+
+ /*
+ * Page 1 register 8 must always be 0x08 except when not in
+ * 640x480 mode and page 4 reg 2 <= 3 then it must be 9
+ */
reg_w(gspca_dev, 0xff, 0x01);
- if (gspca_dev->cam.cam_mode[(int)gspca_dev->curr_mode].priv &&
- reg <= 3) {
+ if (gspca_dev->width != 640 && val <= 3)
reg_w(gspca_dev, 0x08, 0x09);
- } else {
+ else
reg_w(gspca_dev, 0x08, 0x08);
- }
+
+ /*
+ * Page 1 register 0x80 sets the compression balance, normally we
+ * want / use 0x1c, but for 640x480@30fps we must allow the
+ * camera to use higher compression or we may run out of
+ * bandwidth.
+ */
+ if (gspca_dev->width == 640 && val == 2)
+ reg_w(gspca_dev, 0x80, 0x01);
+ else
+ reg_w(gspca_dev, 0x80, 0x1c);
/* load registers to sensor (Bit 0, auto clear) */
reg_w(gspca_dev, 0x11, 0x01);
}
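
After the conversion the pac7311 exposure control holds the page 4 register 0x02 clock divider directly (2-63, fps ≈ 60 / val per the documentation block above), and the two follow-up writes pick the page 1 compressor settings from the resolution and the divider. A host-side sketch of those decisions, with the register values copied from the hunk above:

#include <stdio.h>

/* pac7311: settings derived when the exposure (clock divider) changes */
static void pac7311_expo_setup(int width, int clockdiv)
{
	int reg08, reg80;

	/* page 1 reg 0x08: always 8, except 9 when not 640x480 and divider <= 3 */
	reg08 = (width != 640 && clockdiv <= 3) ? 0x09 : 0x08;

	/* page 1 reg 0x80: allow higher compression only for 640x480 at ~30 fps */
	reg80 = (width == 640 && clockdiv == 2) ? 0x01 : 0x1c;

	printf("width %3d, divider %2d (~%2d fps): reg 0x08=0x%02x reg 0x80=0x%02x\n",
	       width, clockdiv, 60 / clockdiv, reg08, reg80);
}

int main(void)
{
	pac7311_expo_setup(640, 2);	/* 640x480 at ~30 fps */
	pac7311_expo_setup(640, 3);	/* 640x480 at ~20 fps */
	pac7311_expo_setup(320, 3);	/* 320x240 at ~20 fps */
	return 0;
}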
-static void sethvflip(struct gspca_dev *gspca_dev)
+static void sethvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip)
{
- struct sd *sd = (struct sd *) gspca_dev;
__u8 data;
reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
- data = (sd->hflip ? 0x04 : 0x00) | (sd->vflip ? 0x08 : 0x00);
+ data = (hflip ? 0x04 : 0x00) |
+ (vflip ? 0x08 : 0x00);
reg_w(gspca_dev, 0x21, data);
/* load registers to sensor (Bit 0, auto clear) */
@@ -484,6 +367,82 @@ static int sd_init(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
}
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) {
+ /* when switching to autogain set defaults to make sure
+ we are on a valid point of the autogain gain /
+ exposure knee graph, and give this change time to
+ take effect before doing autogain. */
+ gspca_dev->exposure->val = PAC7311_EXPOSURE_DEFAULT;
+ gspca_dev->gain->val = PAC7311_GAIN_DEFAULT;
+ sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val))
+ setexposure(gspca_dev, gspca_dev->exposure->val);
+ if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val))
+ setgain(gspca_dev, gspca_dev->gain->val);
+ break;
+ case V4L2_CID_HFLIP:
+ sethvflip(gspca_dev, sd->hflip->val, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+/* this function is called at probe time */
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 5);
+
+ sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 15, 1, 7);
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 2, 63, 1,
+ PAC7311_EXPOSURE_DEFAULT);
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 244, 1,
+ PAC7311_GAIN_DEFAULT);
+ sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
+ return 0;
+}
+
+/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -492,19 +451,19 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w_var(gspca_dev, start_7311,
page4_7311, sizeof(page4_7311));
- setcontrast(gspca_dev);
- setgain(gspca_dev);
- setexposure(gspca_dev);
- sethvflip(gspca_dev);
+ setcontrast(gspca_dev, v4l2_ctrl_g_ctrl(sd->contrast));
+ setgain(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->gain));
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
+ sethvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip), 1);
/* set correct resolution */
switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
- case 2: /* 160x120 pac7311 */
+ case 2: /* 160x120 */
reg_w(gspca_dev, 0xff, 0x01);
reg_w(gspca_dev, 0x17, 0x20);
reg_w(gspca_dev, 0x87, 0x10);
break;
- case 1: /* 320x240 pac7311 */
+ case 1: /* 320x240 */
reg_w(gspca_dev, 0xff, 0x01);
reg_w(gspca_dev, 0x17, 0x30);
reg_w(gspca_dev, 0x87, 0x11);
@@ -541,14 +500,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */
}
-/* called on streamoff with alt 0 and on disconnect for 7311 */
-static void sd_stop0(struct gspca_dev *gspca_dev)
-{
-}
-
-/* Include pac common sof detection functions */
-#include "pac_common.h"
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -558,13 +509,13 @@ static void do_autogain(struct gspca_dev *gspca_dev)
if (avg_lum == -1)
return;
- desired_lum = 200;
+ desired_lum = 170;
deadzone = 20;
if (sd->autogain_ignore_frames > 0)
sd->autogain_ignore_frames--;
- else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum, desired_lum,
- deadzone, GAIN_KNEE, EXPOSURE_KNEE))
+ else if (gspca_coarse_grained_expo_autogain(gspca_dev, avg_lum,
+ desired_lum, deadzone))
sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
}
@@ -628,10 +579,12 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
if (sof) {
int n, lum_offset, footer_length;
- /* 6 bytes after the FF D9 EOF marker a number of lumination
- bytes are send corresponding to different parts of the
- image, the 14th and 15th byte after the EOF seem to
- correspond to the center of the image */
+ /*
+ * 6 bytes after the FF D9 EOF marker a number of luminance
+ * bytes are sent corresponding to different parts of the
+ * image, the 14th and 15th byte after the EOF seem to
+ * correspond to the center of the image.
+ */
lum_offset = 24 + sizeof pac_sof_marker;
footer_length = 26;
@@ -668,127 +621,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gain = val;
- if (gspca_dev->streaming)
- setgain(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gain;
- return 0;
-}
-
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->exposure;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- /* when switching to autogain set defaults to make sure
- we are on a valid point of the autogain gain /
- exposure knee graph, and give this change time to
- take effect before doing autogain. */
- if (sd->autogain) {
- sd->exposure = EXPOSURE_DEF;
- sd->gain = GAIN_DEF;
- if (gspca_dev->streaming) {
- sd->autogain_ignore_frames =
- PAC_AUTOGAIN_IGNORE_FRAMES;
- setexposure(gspca_dev);
- setgain(gspca_dev);
- }
- }
-
- return gspca_dev->usb_err;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hflip = val;
- if (gspca_dev->streaming)
- sethvflip(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hflip;
- return 0;
-}
-
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->vflip = val;
- if (gspca_dev->streaming)
- sethvflip(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->vflip;
- return 0;
-}
-
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
@@ -820,16 +652,13 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
}
#endif
-/* sub-driver description for pac7311 */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
- .stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = do_autogain,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 7e71aa2d2522..b9c6f17eabb2 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -59,35 +59,38 @@ MODULE_LICENSE("GPL");
#define SENSOR_MT9M111 9
#define SENSOR_MT9M112 10
#define SENSOR_HV7131R 11
-#define SENSOR_MT9VPRB 20
+#define SENSOR_MT9VPRB 12
/* camera flags */
#define HAS_NO_BUTTON 0x1
#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */
#define FLIP_DETECT 0x4
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- SATURATION,
- HUE,
- GAMMA,
- BLUE,
- RED,
- VFLIP,
- HFLIP,
- EXPOSURE,
- GAIN,
- AUTOGAIN,
- QUALITY,
- NCTRLS /* number of controls */
-};
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev;
- struct gspca_ctrl ctrls[NCTRLS];
+ struct { /* color control cluster */
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *hue;
+ };
+ struct { /* blue/red balance control cluster */
+ struct v4l2_ctrl *blue;
+ struct v4l2_ctrl *red;
+ };
+ struct { /* h/vflip control cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+ struct v4l2_ctrl *gamma;
+ struct { /* autogain and exposure or gain control cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ };
+ struct v4l2_ctrl *jpegqual;
struct work_struct work;
struct workqueue_struct *work_thread;
@@ -105,6 +108,7 @@ struct sd {
u8 exposure_step;
u8 i2c_addr;
+ u8 i2c_intf;
u8 sensor;
u8 hstart;
u8 vstart;
@@ -166,175 +170,6 @@ static const struct dmi_system_id flip_dmi_table[] = {
{}
};
-static void set_cmatrix(struct gspca_dev *gspca_dev);
-static void set_gamma(struct gspca_dev *gspca_dev);
-static void set_redblue(struct gspca_dev *gspca_dev);
-static void set_hvflip(struct gspca_dev *gspca_dev);
-static void set_exposure(struct gspca_dev *gspca_dev);
-static void set_gain(struct gspca_dev *gspca_dev);
-static void set_quality(struct gspca_dev *gspca_dev);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f
- },
- .set_control = set_cmatrix
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f
- },
- .set_control = set_cmatrix
- },
-[SATURATION] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f
- },
- .set_control = set_cmatrix
- },
-[HUE] = {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -180,
- .maximum = 180,
- .step = 1,
- .default_value = 0
- },
- .set_control = set_cmatrix
- },
-[GAMMA] = {
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x10
- },
- .set_control = set_gamma
- },
-[BLUE] = {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = 0,
- .maximum = 0x7f,
- .step = 1,
- .default_value = 0x28
- },
- .set_control = set_redblue
- },
-[RED] = {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = 0,
- .maximum = 0x7f,
- .step = 1,
- .default_value = 0x28
- },
- .set_control = set_redblue
- },
-[HFLIP] = {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Horizontal Flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = set_hvflip
- },
-[VFLIP] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vertical Flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = set_hvflip
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 0x1780,
- .step = 1,
- .default_value = 0x33,
- },
- .set_control = set_exposure
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 28,
- .step = 1,
- .default_value = 0,
- },
- .set_control = set_gain
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- },
-[QUALITY] = {
- {
- .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Compression Quality",
-#define QUALITY_MIN 50
-#define QUALITY_MAX 90
-#define QUALITY_DEF 80
- .minimum = QUALITY_MIN,
- .maximum = QUALITY_MAX,
- .step = 1,
- .default_value = QUALITY_DEF,
- },
- .set_control = set_quality
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -747,7 +582,7 @@ static const s16 hsv_blue_y[] = {
4, 2, 0, -1, -3, -5, -7, -9, -11
};
-static u16 i2c_ident[] = {
+static const u16 i2c_ident[] = {
V4L2_IDENT_OV9650,
V4L2_IDENT_OV9655,
V4L2_IDENT_SOI968,
@@ -760,9 +595,10 @@ static u16 i2c_ident[] = {
V4L2_IDENT_MT9M111,
V4L2_IDENT_MT9M112,
V4L2_IDENT_HV7131R,
+[SENSOR_MT9VPRB] = V4L2_IDENT_UNKNOWN,
};
-static u16 bridge_init[][2] = {
+static const u16 bridge_init[][2] = {
{0x1000, 0x78}, {0x1001, 0x40}, {0x1002, 0x1c},
{0x1020, 0x80}, {0x1061, 0x01}, {0x1067, 0x40},
{0x1068, 0x30}, {0x1069, 0x20}, {0x106a, 0x10},
@@ -786,7 +622,7 @@ static u16 bridge_init[][2] = {
};
/* Gain = (bit[3:0] / 16 + 1) * (bit[4] + 1) * (bit[5] + 1) * (bit[6] + 1) */
-static u8 ov_gain[] = {
+static const u8 ov_gain[] = {
0x00 /* 1x */, 0x04 /* 1.25x */, 0x08 /* 1.5x */, 0x0c /* 1.75x */,
0x10 /* 2x */, 0x12 /* 2.25x */, 0x14 /* 2.5x */, 0x16 /* 2.75x */,
0x18 /* 3x */, 0x1a /* 3.25x */, 0x1c /* 3.5x */, 0x1e /* 3.75x */,
@@ -798,7 +634,7 @@ static u8 ov_gain[] = {
};
/* Gain = (bit[8] + 1) * (bit[7] + 1) * (bit[6:0] * 0.03125) */
-static u16 micron1_gain[] = {
+static const u16 micron1_gain[] = {
/* 1x 1.25x 1.5x 1.75x */
0x0020, 0x0028, 0x0030, 0x0038,
/* 2x 2.25x 2.5x 2.75x */
@@ -819,7 +655,7 @@ static u16 micron1_gain[] = {
/* mt9m001 sensor uses a different gain formula then other micron sensors */
/* Gain = (bit[6] + 1) * (bit[5-0] * 0.125) */
-static u16 micron2_gain[] = {
+static const u16 micron2_gain[] = {
/* 1x 1.25x 1.5x 1.75x */
0x0008, 0x000a, 0x000c, 0x000e,
/* 2x 2.25x 2.5x 2.75x */
@@ -839,7 +675,7 @@ static u16 micron2_gain[] = {
};
/* Gain = .5 + bit[7:0] / 16 */
-static u8 hv7131r_gain[] = {
+static const u8 hv7131r_gain[] = {
0x08 /* 1x */, 0x0c /* 1.25x */, 0x10 /* 1.5x */, 0x14 /* 1.75x */,
0x18 /* 2x */, 0x1c /* 2.25x */, 0x20 /* 2.5x */, 0x24 /* 2.75x */,
0x28 /* 3x */, 0x2c /* 3.25x */, 0x30 /* 3.5x */, 0x34 /* 3.75x */,
@@ -850,7 +686,7 @@ static u8 hv7131r_gain[] = {
0x78 /* 8x */
};
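
The gain tables above (now const) encode the same 1x-8x multiplier ladder in each sensor's own register format, and the comments give the per-sensor formulas. The sketch below evaluates the OV formula, gain = (bit[3:0]/16 + 1) * (bit[4]+1) * (bit[5]+1) * (bit[6]+1), for a few ov_gain[] entries so the table can be cross-checked against that comment; it is only a host-side illustration of the documented encoding.

#include <stdio.h>

/* Decode an ov_gain[] byte using the formula from the comment above */
static double ov_gain_decode(unsigned char reg)
{
	double g = (reg & 0x0f) / 16.0 + 1.0;

	g *= ((reg >> 4) & 1) + 1;
	g *= ((reg >> 5) & 1) + 1;
	g *= ((reg >> 6) & 1) + 1;
	return g;
}

int main(void)
{
	const unsigned char samples[] = { 0x00, 0x04, 0x10, 0x12, 0x1e, 0x70 };
	unsigned i;

	for (i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %.2fx\n", samples[i], ov_gain_decode(samples[i]));
	return 0;
}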
-static struct i2c_reg_u8 soi968_init[] = {
+static const struct i2c_reg_u8 soi968_init[] = {
{0x0c, 0x00}, {0x0f, 0x1f},
{0x11, 0x80}, {0x38, 0x52}, {0x1e, 0x00},
{0x33, 0x08}, {0x35, 0x8c}, {0x36, 0x0c},
@@ -864,7 +700,7 @@ static struct i2c_reg_u8 soi968_init[] = {
{0x00, 0x00}, {0x01, 0x80}, {0x02, 0x80},
};
-static struct i2c_reg_u8 ov7660_init[] = {
+static const struct i2c_reg_u8 ov7660_init[] = {
{0x0e, 0x80}, {0x0d, 0x08}, {0x0f, 0xc3},
{0x04, 0xc3}, {0x10, 0x40}, {0x11, 0x40},
{0x12, 0x05}, {0x13, 0xba}, {0x14, 0x2a},
@@ -872,11 +708,11 @@ static struct i2c_reg_u8 ov7660_init[] = {
0x10, 0x61 and sd->hstart, vstart = 3, fixes ugly colored borders */
{0x17, 0x10}, {0x18, 0x61},
{0x37, 0x0f}, {0x38, 0x02}, {0x39, 0x43},
- {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0xf6},
- {0x2e, 0x0b}, {0x01, 0x78}, {0x02, 0x50},
+ {0x3a, 0x00}, {0x69, 0x90}, {0x2d, 0x00},
+ {0x2e, 0x00}, {0x01, 0x78}, {0x02, 0x50},
};
-static struct i2c_reg_u8 ov7670_init[] = {
+static const struct i2c_reg_u8 ov7670_init[] = {
{0x11, 0x80}, {0x3a, 0x04}, {0x12, 0x01},
{0x32, 0xb6}, {0x03, 0x0a}, {0x0c, 0x00}, {0x3e, 0x00},
{0x70, 0x3a}, {0x71, 0x35}, {0x72, 0x11}, {0x73, 0xf0},
@@ -933,7 +769,7 @@ static struct i2c_reg_u8 ov7670_init[] = {
{0x93, 0x00},
};
-static struct i2c_reg_u8 ov9650_init[] = {
+static const struct i2c_reg_u8 ov9650_init[] = {
{0x00, 0x00}, {0x01, 0x78},
{0x02, 0x78}, {0x03, 0x36}, {0x04, 0x03},
{0x05, 0x00}, {0x06, 0x00}, {0x08, 0x00},
@@ -963,7 +799,7 @@ static struct i2c_reg_u8 ov9650_init[] = {
{0xaa, 0x92}, {0xab, 0x0a},
};
-static struct i2c_reg_u8 ov9655_init[] = {
+static const struct i2c_reg_u8 ov9655_init[] = {
{0x0e, 0x61}, {0x11, 0x80}, {0x13, 0xba},
{0x14, 0x2e}, {0x16, 0x24}, {0x1e, 0x04}, {0x27, 0x08},
{0x28, 0x08}, {0x29, 0x15}, {0x2c, 0x08}, {0x34, 0x3d},
@@ -990,7 +826,7 @@ static struct i2c_reg_u8 ov9655_init[] = {
{0x04, 0x03}, {0x00, 0x13},
};
-static struct i2c_reg_u16 mt9v112_init[] = {
+static const struct i2c_reg_u16 mt9v112_init[] = {
{0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0020},
{0x34, 0xc019}, {0x0a, 0x0011}, {0x0b, 0x000b},
{0x20, 0x0703}, {0x35, 0x2022}, {0xf0, 0x0001},
@@ -1009,7 +845,7 @@ static struct i2c_reg_u16 mt9v112_init[] = {
{0x2c, 0x00ae}, {0x2d, 0x00ae}, {0x2e, 0x00ae},
};
-static struct i2c_reg_u16 mt9v111_init[] = {
+static const struct i2c_reg_u16 mt9v111_init[] = {
{0x01, 0x0004}, {0x0d, 0x0001}, {0x0d, 0x0000},
{0x01, 0x0001}, {0x05, 0x0004}, {0x2d, 0xe0a0},
{0x2e, 0x0c64}, {0x2f, 0x0064}, {0x06, 0x600e},
@@ -1019,7 +855,7 @@ static struct i2c_reg_u16 mt9v111_init[] = {
{0x0e, 0x0008}, {0x20, 0x0000}
};
-static struct i2c_reg_u16 mt9v011_init[] = {
+static const struct i2c_reg_u16 mt9v011_init[] = {
{0x07, 0x0002}, {0x0d, 0x0001}, {0x0d, 0x0000},
{0x01, 0x0008}, {0x02, 0x0016}, {0x03, 0x01e1},
{0x04, 0x0281}, {0x05, 0x0083}, {0x06, 0x0006},
@@ -1046,7 +882,7 @@ static struct i2c_reg_u16 mt9v011_init[] = {
{0x06, 0x0029}, {0x05, 0x0009},
};
-static struct i2c_reg_u16 mt9m001_init[] = {
+static const struct i2c_reg_u16 mt9m001_init[] = {
{0x0d, 0x0001},
{0x0d, 0x0000},
{0x04, 0x0500}, /* hres = 1280 */
@@ -1062,21 +898,21 @@ static struct i2c_reg_u16 mt9m001_init[] = {
{0x35, 0x0057},
};
-static struct i2c_reg_u16 mt9m111_init[] = {
+static const struct i2c_reg_u16 mt9m111_init[] = {
{0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008},
{0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300},
{0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e},
{0xf0, 0x0000},
};
-static struct i2c_reg_u16 mt9m112_init[] = {
+static const struct i2c_reg_u16 mt9m112_init[] = {
{0xf0, 0x0000}, {0x0d, 0x0021}, {0x0d, 0x0008},
{0xf0, 0x0001}, {0x3a, 0x4300}, {0x9b, 0x4300},
{0x06, 0x708e}, {0xf0, 0x0002}, {0x2e, 0x0a1e},
{0xf0, 0x0000},
};
-static struct i2c_reg_u8 hv7131r_init[] = {
+static const struct i2c_reg_u8 hv7131r_init[] = {
{0x02, 0x08}, {0x02, 0x00}, {0x01, 0x08},
{0x02, 0x00}, {0x20, 0x00}, {0x21, 0xd0},
{0x22, 0x00}, {0x23, 0x09}, {0x01, 0x08},
@@ -1167,7 +1003,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
* from the point of view of the bridge, the length
* includes the address
*/
- row[0] = 0x81 | (2 << 4);
+ row[0] = sd->i2c_intf | (2 << 4);
row[1] = sd->i2c_addr;
row[2] = reg;
row[3] = val;
@@ -1180,7 +1016,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
}
static void i2c_w1_buf(struct gspca_dev *gspca_dev,
- struct i2c_reg_u8 *buf, int sz)
+ const struct i2c_reg_u8 *buf, int sz)
{
while (--sz >= 0) {
i2c_w1(gspca_dev, buf->reg, buf->val);
@@ -1197,7 +1033,7 @@ static void i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val)
* from the point of view of the bridge, the length
* includes the address
*/
- row[0] = 0x81 | (3 << 4);
+ row[0] = sd->i2c_intf | (3 << 4);
row[1] = sd->i2c_addr;
row[2] = reg;
row[3] = val >> 8;
@@ -1210,7 +1046,7 @@ static void i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val)
}
static void i2c_w2_buf(struct gspca_dev *gspca_dev,
- struct i2c_reg_u16 *buf, int sz)
+ const struct i2c_reg_u16 *buf, int sz)
{
while (--sz >= 0) {
i2c_w2(gspca_dev, buf->reg, buf->val);
@@ -1223,7 +1059,7 @@ static void i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
struct sd *sd = (struct sd *) gspca_dev;
u8 row[8];
- row[0] = 0x81 | (1 << 4);
+ row[0] = sd->i2c_intf | (1 << 4);
row[1] = sd->i2c_addr;
row[2] = reg;
row[3] = 0;
@@ -1232,7 +1068,7 @@ static void i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
row[6] = 0;
row[7] = 0x10;
i2c_w(gspca_dev, row);
- row[0] = 0x81 | (1 << 4) | 0x02;
+ row[0] = sd->i2c_intf | (1 << 4) | 0x02;
row[2] = 0;
i2c_w(gspca_dev, row);
reg_r(gspca_dev, 0x10c2, 5);
@@ -1244,7 +1080,7 @@ static void i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val)
struct sd *sd = (struct sd *) gspca_dev;
u8 row[8];
- row[0] = 0x81 | (1 << 4);
+ row[0] = sd->i2c_intf | (1 << 4);
row[1] = sd->i2c_addr;
row[2] = reg;
row[3] = 0;
@@ -1253,7 +1089,7 @@ static void i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val)
row[6] = 0;
row[7] = 0x10;
i2c_w(gspca_dev, row);
- row[0] = 0x81 | (2 << 4) | 0x02;
+ row[0] = sd->i2c_intf | (2 << 4) | 0x02;
row[2] = 0;
i2c_w(gspca_dev, row);
reg_r(gspca_dev, 0x10c2, 5);
@@ -1294,8 +1130,6 @@ static void ov9655_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("OV9655 sensor initialization failed\n");
- /* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);
sd->hstart = 1;
sd->vstart = 2;
}
@@ -1310,9 +1144,6 @@ static void soi968_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("SOI968 sensor initialization failed\n");
- /* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP)
- | (1 << EXPOSURE);
sd->hstart = 60;
sd->vstart = 11;
}
@@ -1340,8 +1171,6 @@ static void ov7670_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("OV7670 sensor initialization failed\n");
- /* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);
sd->hstart = 0;
sd->vstart = 1;
}
@@ -1378,9 +1207,6 @@ static void mt9v_init_sensor(struct gspca_dev *gspca_dev)
pr_err("MT9V111 sensor initialization failed\n");
return;
}
- gspca_dev->ctrl_dis = (1 << EXPOSURE)
- | (1 << AUTOGAIN)
- | (1 << GAIN);
sd->hstart = 2;
sd->vstart = 2;
sd->sensor = SENSOR_MT9V111;
@@ -1422,8 +1248,6 @@ static void mt9m112_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("MT9M112 sensor initialization failed\n");
- gspca_dev->ctrl_dis = (1 << EXPOSURE) | (1 << AUTOGAIN)
- | (1 << GAIN);
sd->hstart = 0;
sd->vstart = 2;
}
@@ -1436,8 +1260,6 @@ static void mt9m111_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("MT9M111 sensor initialization failed\n");
- gspca_dev->ctrl_dis = (1 << EXPOSURE) | (1 << AUTOGAIN)
- | (1 << GAIN);
sd->hstart = 0;
sd->vstart = 2;
}
@@ -1470,8 +1292,6 @@ static void mt9m001_init_sensor(struct gspca_dev *gspca_dev)
if (gspca_dev->usb_err < 0)
pr_err("MT9M001 sensor initialization failed\n");
- /* disable hflip and vflip */
- gspca_dev->ctrl_dis = (1 << HFLIP) | (1 << VFLIP);
sd->hstart = 1;
sd->vstart = 1;
}
@@ -1488,20 +1308,18 @@ static void hv7131r_init_sensor(struct gspca_dev *gspca_dev)
sd->vstart = 1;
}
-static void set_cmatrix(struct gspca_dev *gspca_dev)
+static void set_cmatrix(struct gspca_dev *gspca_dev,
+ s32 brightness, s32 contrast, s32 satur, s32 hue)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int satur;
- s32 hue_coord, hue_index = 180 + sd->ctrls[HUE].val;
+ s32 hue_coord, hue_index = 180 + hue;
u8 cmatrix[21];
memset(cmatrix, 0, sizeof cmatrix);
- cmatrix[2] = (sd->ctrls[CONTRAST].val * 0x25 / 0x100) + 0x26;
+ cmatrix[2] = (contrast * 0x25 / 0x100) + 0x26;
cmatrix[0] = 0x13 + (cmatrix[2] - 0x26) * 0x13 / 0x25;
cmatrix[4] = 0x07 + (cmatrix[2] - 0x26) * 0x07 / 0x25;
- cmatrix[18] = sd->ctrls[BRIGHTNESS].val - 0x80;
+ cmatrix[18] = brightness - 0x80;
- satur = sd->ctrls[SATURATION].val;
hue_coord = (hsv_red_x[hue_index] * satur) >> 8;
cmatrix[6] = hue_coord;
cmatrix[7] = (hue_coord >> 8) & 0x0f;
@@ -1529,11 +1347,10 @@ static void set_cmatrix(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x10e1, cmatrix, 21);
}
-static void set_gamma(struct gspca_dev *gspca_dev)
+static void set_gamma(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 gamma[17];
- u8 gval = sd->ctrls[GAMMA].val * 0xb8 / 0x100;
+ u8 gval = val * 0xb8 / 0x100;
gamma[0] = 0x0a;
gamma[1] = 0x13 + (gval * (0xcb - 0x13) / 0xb8);
@@ -1556,26 +1373,21 @@ static void set_gamma(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x1190, gamma, 17);
}
-static void set_redblue(struct gspca_dev *gspca_dev)
+static void set_redblue(struct gspca_dev *gspca_dev, s32 blue, s32 red)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_w1(gspca_dev, 0x118c, sd->ctrls[RED].val);
- reg_w1(gspca_dev, 0x118f, sd->ctrls[BLUE].val);
+ reg_w1(gspca_dev, 0x118c, red);
+ reg_w1(gspca_dev, 0x118f, blue);
}
-static void set_hvflip(struct gspca_dev *gspca_dev)
+static void set_hvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip)
{
- u8 value, tslb, hflip, vflip;
+ u8 value, tslb;
u16 value2;
struct sd *sd = (struct sd *) gspca_dev;
if ((sd->flags & FLIP_DETECT) && dmi_check_system(flip_dmi_table)) {
- hflip = !sd->ctrls[HFLIP].val;
- vflip = !sd->ctrls[VFLIP].val;
- } else {
- hflip = sd->ctrls[HFLIP].val;
- vflip = sd->ctrls[VFLIP].val;
+ hflip = !hflip;
+ vflip = !vflip;
}
switch (sd->sensor) {
@@ -1638,20 +1450,38 @@ static void set_hvflip(struct gspca_dev *gspca_dev)
}
}
-static void set_exposure(struct gspca_dev *gspca_dev)
+static void set_exposure(struct gspca_dev *gspca_dev, s32 expo)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 exp[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e};
- int expo;
+ u8 exp[8] = {sd->i2c_intf, sd->i2c_addr,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};
+ int expo2;
+
+ if (gspca_dev->streaming)
+ exp[7] = 0x1e;
- expo = sd->ctrls[EXPOSURE].val;
switch (sd->sensor) {
case SENSOR_OV7660:
case SENSOR_OV7670:
case SENSOR_OV9655:
case SENSOR_OV9650:
+ if (expo > 547)
+ expo2 = 547;
+ else
+ expo2 = expo;
+ exp[0] |= (2 << 4);
+ exp[2] = 0x10; /* AECH */
+ exp[3] = expo2 >> 2;
+ exp[7] = 0x10;
+ i2c_w(gspca_dev, exp);
+ exp[2] = 0x04; /* COM1 */
+ exp[3] = expo2 & 0x0003;
+ exp[7] = 0x10;
+ i2c_w(gspca_dev, exp);
+ expo -= expo2;
+ exp[7] = 0x1e;
exp[0] |= (3 << 4);
- exp[2] = 0x2d;
+ exp[2] = 0x2d; /* ADVFL & ADVFH */
exp[3] = expo;
exp[4] = expo >> 8;
break;
@@ -1676,13 +1506,15 @@ static void set_exposure(struct gspca_dev *gspca_dev)
i2c_w(gspca_dev, exp);
}
-static void set_gain(struct gspca_dev *gspca_dev)
+static void set_gain(struct gspca_dev *gspca_dev, s32 g)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 gain[8] = {0x81, sd->i2c_addr, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d};
- int g;
+ u8 gain[8] = {sd->i2c_intf, sd->i2c_addr,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};
+
+ if (gspca_dev->streaming)
+ gain[7] = 0x15; /* or 1d ? */
- g = sd->ctrls[GAIN].val;
switch (sd->sensor) {
case SENSOR_OV7660:
case SENSOR_OV7670:
@@ -1721,11 +1553,11 @@ static void set_gain(struct gspca_dev *gspca_dev)
i2c_w(gspca_dev, gain);
}
-static void set_quality(struct gspca_dev *gspca_dev)
+static void set_quality(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- jpeg_set_qual(sd->jpeg_hdr, sd->ctrls[QUALITY].val);
+ jpeg_set_qual(sd->jpeg_hdr, val);
reg_w1(gspca_dev, 0x1061, 0x01); /* stop transfer */
reg_w1(gspca_dev, 0x10e0, sd->fmt | 0x20); /* write QTAB */
reg_w(gspca_dev, 0x1100, &sd->jpeg_hdr[JPEG_QT0_OFFSET], 64);
@@ -1827,6 +1659,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor = id->driver_info >> 8;
sd->i2c_addr = id->driver_info;
sd->flags = id->driver_info >> 16;
+ sd->i2c_intf = 0x80; /* i2c 100 Kb/s */
switch (sd->sensor) {
case SENSOR_MT9M112:
@@ -1840,6 +1673,9 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = mono_mode;
cam->nmodes = ARRAY_SIZE(mono_mode);
break;
+ case SENSOR_HV7131R:
+ sd->i2c_intf = 0x81; /* i2c 400 Kb/s */
+ /* fall thru */
default:
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
@@ -1850,13 +1686,137 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->older_step = 0;
sd->exposure_step = 16;
- gspca_dev->cam.ctrls = sd->ctrls;
-
INIT_WORK(&sd->work, qual_upd);
return 0;
}
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ /* color control cluster */
+ case V4L2_CID_BRIGHTNESS:
+ set_cmatrix(gspca_dev, sd->brightness->val,
+ sd->contrast->val, sd->saturation->val, sd->hue->val);
+ break;
+ case V4L2_CID_GAMMA:
+ set_gamma(gspca_dev, ctrl->val);
+ break;
+ /* blue/red balance cluster */
+ case V4L2_CID_BLUE_BALANCE:
+ set_redblue(gspca_dev, sd->blue->val, sd->red->val);
+ break;
+ /* h/vflip cluster */
+ case V4L2_CID_HFLIP:
+ set_hvflip(gspca_dev, sd->hflip->val, sd->vflip->val);
+ break;
+ /* standalone exposure control */
+ case V4L2_CID_EXPOSURE:
+ set_exposure(gspca_dev, ctrl->val);
+ break;
+ /* standalone gain control */
+ case V4L2_CID_GAIN:
+ set_gain(gspca_dev, ctrl->val);
+ break;
+ /* autogain + exposure or gain control cluster */
+ case V4L2_CID_AUTOGAIN:
+ if (sd->sensor == SENSOR_SOI968)
+ set_gain(gspca_dev, sd->gain->val);
+ else
+ set_exposure(gspca_dev, sd->exposure->val);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ set_quality(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 13);
+
+ sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+ sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 127);
+ sd->saturation = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 127);
+ sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HUE, -180, 180, 1, 0);
+
+ sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 255, 1, 0x10);
+
+ sd->blue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28);
+ sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28);
+
+ if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 &&
+ sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 &&
+ sd->sensor != SENSOR_MT9VPRB) {
+ sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ }
+
+ if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB &&
+ sd->sensor != SENSOR_MT9M112 && sd->sensor != SENSOR_MT9M111 &&
+ sd->sensor != SENSOR_MT9V111)
+ sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 0x1780, 1, 0x33);
+
+ if (sd->sensor != SENSOR_MT9VPRB && sd->sensor != SENSOR_MT9M112 &&
+ sd->sensor != SENSOR_MT9M111 && sd->sensor != SENSOR_MT9V111) {
+ sd->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 28, 1, 0);
+ sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ }
+
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+
+ v4l2_ctrl_cluster(4, &sd->brightness);
+ v4l2_ctrl_cluster(2, &sd->blue);
+ if (sd->hflip)
+ v4l2_ctrl_cluster(2, &sd->hflip);
+ if (sd->autogain) {
+ if (sd->sensor == SENSOR_SOI968)
+ /* this sensor doesn't have the exposure control and
+ autogain is clustered with gain instead. This works
+ because sd->exposure == NULL. */
+ v4l2_ctrl_auto_cluster(3, &sd->autogain, 0, false);
+ else
+ /* Otherwise autogain is clustered with exposure. */
+ v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
+ }
+ return 0;
+}
+
static int sd_init(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1949,7 +1909,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
pr_err("Unsupported sensor\n");
gspca_dev->usb_err = -ENODEV;
}
-
return gspca_dev->usb_err;
}
@@ -2025,8 +1984,8 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
if (intf->num_altsetting != 9) {
pr_warn("sn9c20x camera with unknown number of alt "
- "settings (%d), please report!\n",
- intf->num_altsetting);
+ "settings (%d), please report!\n",
+ intf->num_altsetting);
gspca_dev->alt = intf->num_altsetting;
return 0;
}
@@ -2067,7 +2026,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
jpeg_define(sd->jpeg_hdr, height, width,
0x21);
- jpeg_set_qual(sd->jpeg_hdr, sd->ctrls[QUALITY].val);
+ jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
if (mode & MODE_RAW)
fmt = 0x2d;
@@ -2104,12 +2063,20 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x1189, scale);
reg_w1(gspca_dev, 0x10e0, fmt);
- set_cmatrix(gspca_dev);
- set_gamma(gspca_dev);
- set_redblue(gspca_dev);
- set_gain(gspca_dev);
- set_exposure(gspca_dev);
- set_hvflip(gspca_dev);
+ set_cmatrix(gspca_dev, v4l2_ctrl_g_ctrl(sd->brightness),
+ v4l2_ctrl_g_ctrl(sd->contrast),
+ v4l2_ctrl_g_ctrl(sd->saturation),
+ v4l2_ctrl_g_ctrl(sd->hue));
+ set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
+ set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue),
+ v4l2_ctrl_g_ctrl(sd->red));
+ if (sd->gain)
+ set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+ if (sd->exposure)
+ set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+ if (sd->hflip)
+ set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+ v4l2_ctrl_g_ctrl(sd->vflip));
reg_w1(gspca_dev, 0x1007, 0x20);
reg_w1(gspca_dev, 0x1061, 0x03);
@@ -2148,6 +2115,9 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 cur_exp = v4l2_ctrl_g_ctrl(sd->exposure);
+ s32 max = sd->exposure->maximum - sd->exposure_step;
+ s32 min = sd->exposure->minimum + sd->exposure_step;
s16 new_exp;
/*
@@ -2156,16 +2126,15 @@ static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
* and exposure steps
*/
if (avg_lum < MIN_AVG_LUM) {
- if (sd->ctrls[EXPOSURE].val > 0x1770)
+ if (cur_exp > max)
return;
- new_exp = sd->ctrls[EXPOSURE].val + sd->exposure_step;
- if (new_exp > 0x1770)
- new_exp = 0x1770;
- if (new_exp < 0x10)
- new_exp = 0x10;
- sd->ctrls[EXPOSURE].val = new_exp;
- set_exposure(gspca_dev);
+ new_exp = cur_exp + sd->exposure_step;
+ if (new_exp > max)
+ new_exp = max;
+ if (new_exp < min)
+ new_exp = min;
+ v4l2_ctrl_s_ctrl(sd->exposure, new_exp);
sd->older_step = sd->old_step;
sd->old_step = 1;
@@ -2176,15 +2145,14 @@ static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
sd->exposure_step += 2;
}
if (avg_lum > MAX_AVG_LUM) {
- if (sd->ctrls[EXPOSURE].val < 0x10)
+ if (cur_exp < min)
return;
- new_exp = sd->ctrls[EXPOSURE].val - sd->exposure_step;
- if (new_exp > 0x1700)
- new_exp = 0x1770;
- if (new_exp < 0x10)
- new_exp = 0x10;
- sd->ctrls[EXPOSURE].val = new_exp;
- set_exposure(gspca_dev);
+ new_exp = cur_exp - sd->exposure_step;
+ if (new_exp > max)
+ new_exp = max;
+ if (new_exp < min)
+ new_exp = min;
+ v4l2_ctrl_s_ctrl(sd->exposure, new_exp);
sd->older_step = sd->old_step;
sd->old_step = 0;
@@ -2198,19 +2166,12 @@ static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 cur_gain = v4l2_ctrl_g_ctrl(sd->gain);
- if (avg_lum < MIN_AVG_LUM) {
- if (sd->ctrls[GAIN].val + 1 <= 28) {
- sd->ctrls[GAIN].val++;
- set_gain(gspca_dev);
- }
- }
- if (avg_lum > MAX_AVG_LUM) {
- if (sd->ctrls[GAIN].val > 0) {
- sd->ctrls[GAIN].val--;
- set_gain(gspca_dev);
- }
- }
+ if (avg_lum < MIN_AVG_LUM && cur_gain < sd->gain->maximum)
+ v4l2_ctrl_s_ctrl(sd->gain, cur_gain + 1);
+ if (avg_lum > MAX_AVG_LUM && cur_gain > sd->gain->minimum)
+ v4l2_ctrl_s_ctrl(sd->gain, cur_gain - 1);
}
static void sd_dqcallback(struct gspca_dev *gspca_dev)
@@ -2218,7 +2179,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int avg_lum;
- if (!sd->ctrls[AUTOGAIN].val)
+ if (sd->autogain == NULL || !v4l2_ctrl_g_ctrl(sd->autogain))
return;
avg_lum = atomic_read(&sd->avg_lum);
@@ -2234,10 +2195,11 @@ static void qual_upd(struct work_struct *work)
{
struct sd *sd = container_of(work, struct sd, work);
struct gspca_dev *gspca_dev = &sd->gspca_dev;
+ s32 qual = v4l2_ctrl_g_ctrl(sd->jpegqual);
mutex_lock(&gspca_dev->usb_lock);
- PDEBUG(D_STREAM, "qual_upd %d%%", sd->ctrls[QUALITY].val);
- set_quality(gspca_dev);
+ PDEBUG(D_STREAM, "qual_upd %d%%", qual);
+ set_quality(gspca_dev, qual);
mutex_unlock(&gspca_dev->usb_lock);
}
@@ -2286,14 +2248,18 @@ static void transfer_check(struct gspca_dev *gspca_dev,
if (new_qual != 0) {
sd->nchg += new_qual;
if (sd->nchg < -6 || sd->nchg >= 12) {
+ /* Note: we are in interrupt context, so we can't
+ use v4l2_ctrl_g/s_ctrl here. Access the value
+ directly instead. */
+ s32 curqual = sd->jpegqual->cur.val;
sd->nchg = 0;
- new_qual += sd->ctrls[QUALITY].val;
- if (new_qual < QUALITY_MIN)
- new_qual = QUALITY_MIN;
- else if (new_qual > QUALITY_MAX)
- new_qual = QUALITY_MAX;
- if (new_qual != sd->ctrls[QUALITY].val) {
- sd->ctrls[QUALITY].val = new_qual;
+ new_qual += curqual;
+ if (new_qual < sd->jpegqual->minimum)
+ new_qual = sd->jpegqual->minimum;
+ else if (new_qual > sd->jpegqual->maximum)
+ new_qual = sd->jpegqual->maximum;
+ if (new_qual != curqual) {
+ sd->jpegqual->cur.val = new_qual;
queue_work(sd->work_thread, &sd->work);
}
}
@@ -2309,7 +2275,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
int avg_lum, is_jpeg;
- static u8 frame_header[] =
+ static const u8 frame_header[] =
{0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96};
is_jpeg = (sd->fmt & 0x03) == 0;
@@ -2373,10 +2339,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = KBUILD_MODNAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 6a1148d7fe92..e2bdf8f632f4 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -1000,6 +1000,8 @@ static void setfreq(struct gspca_dev *gspca_dev)
}
}
+#define WANT_REGULAR_AUTOGAIN
+#define WANT_COARSE_EXPO_AUTOGAIN
#include "autogain_functions.h"
static void do_autogain(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 863c755dd2b7..f38faa9b37c3 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -2800,10 +2800,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
}
}
-/* !! coarse_grained_expo_autogain is not used !! */
-#define exp_too_low_cnt bridge
-#define exp_too_high_cnt sensor
-
+#define WANT_REGULAR_AUTOGAIN
#include "autogain_functions.h"
static void do_autogain(struct gspca_dev *gspca_dev)
@@ -3123,7 +3120,7 @@ static const struct sd_desc sd_desc = {
| (SENSOR_ ## sensor << 8) \
| (flags)
static const struct usb_device_id device_table[] = {
- {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
+ {USB_DEVICE(0x0458, 0x7025), BSF(SN9C120, MI0360B, F_PDN_INV)},
{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 2fe3c29bd6b7..04f54654a026 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -232,7 +232,11 @@ static void sq905_dostream(struct work_struct *work)
frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage
+ FRAME_HEADER_LEN;
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
/* request some data and then read it until we have
* a complete frame. */
bytes_left = frame_sz;
@@ -242,7 +246,7 @@ static void sq905_dostream(struct work_struct *work)
we must finish reading an entire frame, otherwise the
next time we stream we start reading in the middle of a
frame. */
- while (bytes_left > 0 && gspca_dev->present) {
+ while (bytes_left > 0 && gspca_dev->dev) {
data_len = bytes_left > SQ905_MAX_TRANSFER ?
SQ905_MAX_TRANSFER : bytes_left;
ret = sq905_read_data(gspca_dev, buffer, data_len, 1);
@@ -274,7 +278,7 @@ static void sq905_dostream(struct work_struct *work)
gspca_frame_add(gspca_dev, LAST_PACKET,
NULL, 0);
}
- if (gspca_dev->present) {
+ if (gspca_dev->dev) {
/* acknowledge the frame */
mutex_lock(&gspca_dev->usb_lock);
ret = sq905_ack_frame(gspca_dev);
@@ -284,7 +288,7 @@ static void sq905_dostream(struct work_struct *work)
}
}
quit_stream:
- if (gspca_dev->present) {
+ if (gspca_dev->dev) {
mutex_lock(&gspca_dev->usb_lock);
sq905_command(gspca_dev, SQ905_CLEAR);
mutex_unlock(&gspca_dev->usb_lock);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index ae783634712f..f34ddb0570c8 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -150,7 +150,11 @@ static void sq905c_dostream(struct work_struct *work)
goto quit_stream;
}
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
/* Request the header, which tells the size to download */
ret = usb_bulk_msg(gspca_dev->dev,
usb_rcvbulkpipe(gspca_dev->dev, 0x81),
@@ -169,7 +173,7 @@ static void sq905c_dostream(struct work_struct *work)
packet_type = FIRST_PACKET;
gspca_frame_add(gspca_dev, packet_type,
buffer, FRAME_HEADER_LEN);
- while (bytes_left > 0 && gspca_dev->present) {
+ while (bytes_left > 0 && gspca_dev->dev) {
data_len = bytes_left > SQ905C_MAX_TRANSFER ?
SQ905C_MAX_TRANSFER : bytes_left;
ret = usb_bulk_msg(gspca_dev->dev,
@@ -191,7 +195,7 @@ static void sq905c_dostream(struct work_struct *work)
}
}
quit_stream:
- if (gspca_dev->present) {
+ if (gspca_dev->dev) {
mutex_lock(&gspca_dev->usb_lock);
sq905c_command(gspca_dev, SQ905C_CLEAR, 0);
mutex_unlock(&gspca_dev->usb_lock);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 91d99b4cc57b..999ec7764449 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -261,6 +261,17 @@ static int stv06xx_init(struct gspca_dev *gspca_dev)
return (err < 0) ? err : 0;
}
+/* this function is called at probe time */
+static int stv06xx_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ PDEBUG(D_PROBE, "Initializing controls");
+
+ gspca_dev->vdev.ctrl_handler = &gspca_dev->ctrl_handler;
+ return sd->sensor->init_controls(sd);
+}
+
/* Start the camera */
static int stv06xx_start(struct gspca_dev *gspca_dev)
{
@@ -512,6 +523,7 @@ static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.config = stv06xx_config,
.init = stv06xx_init,
+ .init_controls = stv06xx_init_controls,
.start = stv06xx_start,
.stopN = stv06xx_stopN,
.pkt_scan = stv06xx_pkt_scan,
@@ -530,9 +542,8 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
PDEBUG(D_PROBE, "Configuring camera");
- sd->desc = sd_desc;
sd->bridge = id->driver_info;
- gspca_dev->sd_desc = &sd->desc;
+ gspca_dev->sd_desc = &sd_desc;
if (dump_bridge)
stv06xx_dump_bridge(sd);
@@ -594,11 +605,12 @@ static void sd_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
struct sd *sd = (struct sd *) gspca_dev;
+ void *priv = sd->sensor_priv;
PDEBUG(D_PROBE, "Disconnecting the stv06xx device");
- if (sd->sensor->disconnect)
- sd->sensor->disconnect(sd);
+ sd->sensor = NULL;
gspca_disconnect(intf);
+ kfree(priv);
}
static struct usb_driver sd_driver = {
@@ -609,6 +621,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index d270a5981afe..34957a4ec150 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -89,9 +89,6 @@ struct sd {
/* A pointer to the currently connected sensor */
const struct stv06xx_sensor *sensor;
- /* A pointer to the sd_desc struct */
- struct sd_desc desc;
-
/* Sensor private data */
void *sensor_priv;
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index a8698b7a7566..06fa54c5efb2 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -32,36 +32,6 @@
#include "stv06xx_hdcs.h"
-static const struct ctrl hdcs1x00_ctrl[] = {
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = HDCS_DEFAULT_EXPOSURE,
- .flags = V4L2_CTRL_FLAG_SLIDER
- },
- .set = hdcs_set_exposure,
- .get = hdcs_get_exposure
- }, {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = HDCS_DEFAULT_GAIN,
- .flags = V4L2_CTRL_FLAG_SLIDER
- },
- .set = hdcs_set_gain,
- .get = hdcs_get_gain
- }
-};
-
static struct v4l2_pix_format hdcs1x00_mode[] = {
{
HDCS_1X00_DEF_WIDTH,
@@ -76,36 +46,6 @@ static struct v4l2_pix_format hdcs1x00_mode[] = {
}
};
-static const struct ctrl hdcs1020_ctrl[] = {
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0x00,
- .maximum = 0xffff,
- .step = 0x1,
- .default_value = HDCS_DEFAULT_EXPOSURE,
- .flags = V4L2_CTRL_FLAG_SLIDER
- },
- .set = hdcs_set_exposure,
- .get = hdcs_get_exposure
- }, {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "gain",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 0x1,
- .default_value = HDCS_DEFAULT_GAIN,
- .flags = V4L2_CTRL_FLAG_SLIDER
- },
- .set = hdcs_set_gain,
- .get = hdcs_get_gain
- }
-};
-
static struct v4l2_pix_format hdcs1020_mode[] = {
{
HDCS_1020_DEF_WIDTH,
@@ -150,7 +90,6 @@ struct hdcs {
} exp;
int psmp;
- u8 exp_cache, gain_cache;
};
static int hdcs_reg_write_seq(struct sd *sd, u8 reg, u8 *vals, u8 len)
@@ -232,16 +171,6 @@ static int hdcs_reset(struct sd *sd)
return err;
}
-static int hdcs_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- struct hdcs *hdcs = sd->sensor_priv;
-
- *val = hdcs->exp_cache;
-
- return 0;
-}
-
static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -260,9 +189,6 @@ static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
int cycles, err;
u8 exp[14];
- val &= 0xff;
- hdcs->exp_cache = val;
-
cycles = val * HDCS_CLK_FREQ_MHZ * 257;
ct = hdcs->exp.cto + hdcs->psmp + (HDCS_ADC_START_SIG_DUR + 2);
@@ -336,12 +262,9 @@ static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
static int hdcs_set_gains(struct sd *sd, u8 g)
{
- struct hdcs *hdcs = sd->sensor_priv;
int err;
u8 gains[4];
- hdcs->gain_cache = g;
-
/* the voltage gain Av = (1 + 19 * val / 127) * (1 + bit7) */
if (g > 127)
g = 0x80 | (g / 2);
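	/* e.g. with bit7 = 0 and val = 127 the formula above gives
	 * Av = 1 + 19 = 20; setting bit7 doubles that, which is why gains
	 * above 127 are halved and flagged with bit7 here (illustrative
	 * arithmetic only) */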
@@ -352,17 +275,7 @@ static int hdcs_set_gains(struct sd *sd, u8 g)
gains[3] = g;
err = hdcs_reg_write_seq(sd, HDCS_ERECPGA, gains, 4);
- return err;
-}
-
-static int hdcs_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- struct hdcs *hdcs = sd->sensor_priv;
-
- *val = hdcs->gain_cache;
-
- return 0;
+ return err;
}
static int hdcs_set_gain(struct gspca_dev *gspca_dev, __s32 val)
@@ -420,6 +333,39 @@ static int hdcs_set_size(struct sd *sd,
return err;
}
+static int hdcs_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ int err = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ err = hdcs_set_gain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ err = hdcs_set_exposure(gspca_dev, ctrl->val);
+ break;
+ }
+ return err;
+}
+
+static const struct v4l2_ctrl_ops hdcs_ctrl_ops = {
+ .s_ctrl = hdcs_s_ctrl,
+};
+
+static int hdcs_init_controls(struct sd *sd)
+{
+ struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
+
+ v4l2_ctrl_handler_init(hdl, 2);
+ v4l2_ctrl_new_std(hdl, &hdcs_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 0xff, 1, HDCS_DEFAULT_EXPOSURE);
+ v4l2_ctrl_new_std(hdl, &hdcs_ctrl_ops,
+ V4L2_CID_GAIN, 0, 0xff, 1, HDCS_DEFAULT_GAIN);
+ return hdl->error;
+}
+
static int hdcs_probe_1x00(struct sd *sd)
{
struct hdcs *hdcs;
@@ -434,8 +380,6 @@ static int hdcs_probe_1x00(struct sd *sd)
sd->gspca_dev.cam.cam_mode = hdcs1x00_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(hdcs1x00_mode);
- sd->desc.ctrls = hdcs1x00_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(hdcs1x00_ctrl);
hdcs = kmalloc(sizeof(struct hdcs), GFP_KERNEL);
if (!hdcs)
@@ -493,8 +437,6 @@ static int hdcs_probe_1020(struct sd *sd)
sd->gspca_dev.cam.cam_mode = hdcs1020_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(hdcs1020_mode);
- sd->desc.ctrls = hdcs1020_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(hdcs1020_ctrl);
hdcs = kmalloc(sizeof(struct hdcs), GFP_KERNEL);
if (!hdcs)
@@ -537,12 +479,6 @@ static int hdcs_stop(struct sd *sd)
return hdcs_set_state(sd, HDCS_STATE_SLEEP);
}
-static void hdcs_disconnect(struct sd *sd)
-{
- PDEBUG(D_PROBE, "Disconnecting the sensor");
- kfree(sd->sensor_priv);
-}
-
static int hdcs_init(struct sd *sd)
{
struct hdcs *hdcs = sd->sensor_priv;
@@ -587,16 +523,7 @@ static int hdcs_init(struct sd *sd)
if (err < 0)
return err;
- err = hdcs_set_gains(sd, HDCS_DEFAULT_GAIN);
- if (err < 0)
- return err;
-
- err = hdcs_set_size(sd, hdcs->array.width, hdcs->array.height);
- if (err < 0)
- return err;
-
- err = hdcs_set_exposure(&sd->gspca_dev, HDCS_DEFAULT_EXPOSURE);
- return err;
+ return hdcs_set_size(sd, hdcs->array.width, hdcs->array.height);
}
static int hdcs_dump(struct sd *sd)
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
index a14a84a5079b..1ba9158d0102 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
@@ -131,14 +131,12 @@ static int hdcs_probe_1x00(struct sd *sd);
static int hdcs_probe_1020(struct sd *sd);
static int hdcs_start(struct sd *sd);
static int hdcs_init(struct sd *sd);
+static int hdcs_init_controls(struct sd *sd);
static int hdcs_stop(struct sd *sd);
static int hdcs_dump(struct sd *sd);
-static void hdcs_disconnect(struct sd *sd);
-static int hdcs_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
static int hdcs_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
static int hdcs_set_gain(struct gspca_dev *gspca_dev, __s32 val);
-static int hdcs_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
const struct stv06xx_sensor stv06xx_sensor_hdcs1x00 = {
.name = "HP HDCS-1000/1100",
@@ -152,10 +150,10 @@ const struct stv06xx_sensor stv06xx_sensor_hdcs1x00 = {
.max_packet_size = { 847 },
.init = hdcs_init,
+ .init_controls = hdcs_init_controls,
.probe = hdcs_probe_1x00,
.start = hdcs_start,
.stop = hdcs_stop,
- .disconnect = hdcs_disconnect,
.dump = hdcs_dump,
};
@@ -171,6 +169,7 @@ const struct stv06xx_sensor stv06xx_sensor_hdcs1020 = {
.max_packet_size = { 847 },
.init = hdcs_init,
+ .init_controls = hdcs_init_controls,
.probe = hdcs_probe_1020,
.start = hdcs_start,
.stop = hdcs_stop,
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
index 26f14fc4a135..cdfc3d05ab6b 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.c
@@ -48,105 +48,16 @@
#include "stv06xx_pb0100.h"
-static const struct ctrl pb0100_ctrl[] = {
-#define GAIN_IDX 0
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128
- },
- .set = pb0100_set_gain,
- .get = pb0100_get_gain
- },
-#define RED_BALANCE_IDX 1
- {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = -255,
- .maximum = 255,
- .step = 1,
- .default_value = 0
- },
- .set = pb0100_set_red_balance,
- .get = pb0100_get_red_balance
- },
-#define BLUE_BALANCE_IDX 2
- {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = -255,
- .maximum = 255,
- .step = 1,
- .default_value = 0
- },
- .set = pb0100_set_blue_balance,
- .get = pb0100_get_blue_balance
- },
-#define EXPOSURE_IDX 3
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 511,
- .step = 1,
- .default_value = 12
- },
- .set = pb0100_set_exposure,
- .get = pb0100_get_exposure
- },
-#define AUTOGAIN_IDX 4
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Gain and Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
- },
- .set = pb0100_set_autogain,
- .get = pb0100_get_autogain
- },
-#define AUTOGAIN_TARGET_IDX 5
- {
- {
- .id = V4L2_CTRL_CLASS_USER + 0x1000,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Automatic Gain Target",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128
- },
- .set = pb0100_set_autogain_target,
- .get = pb0100_get_autogain_target
- },
-#define NATURAL_IDX 6
- {
- {
- .id = V4L2_CTRL_CLASS_USER + 0x1001,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Natural Light Source",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1
- },
- .set = pb0100_set_natural,
- .get = pb0100_get_natural
- }
+struct pb0100_ctrls {
+ struct { /* one big happy control cluster... */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *red;
+ struct v4l2_ctrl *blue;
+ struct v4l2_ctrl *natural;
+ };
+ struct v4l2_ctrl *target;
};
static struct v4l2_pix_format pb0100_mode[] = {
@@ -174,38 +85,104 @@ static struct v4l2_pix_format pb0100_mode[] = {
}
};
+static int pb0100_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
+ int err = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ err = pb0100_set_autogain(gspca_dev, ctrl->val);
+ if (err)
+ break;
+ if (ctrl->val)
+ break;
+ err = pb0100_set_gain(gspca_dev, ctrls->gain->val);
+ if (err)
+ break;
+ err = pb0100_set_exposure(gspca_dev, ctrls->exposure->val);
+ break;
+ case V4L2_CTRL_CLASS_USER + 0x1001:
+ err = pb0100_set_autogain_target(gspca_dev, ctrl->val);
+ break;
+ }
+ return err;
+}
+
+static const struct v4l2_ctrl_ops pb0100_ctrl_ops = {
+ .s_ctrl = pb0100_s_ctrl,
+};
+
+static int pb0100_init_controls(struct sd *sd)
+{
+ struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
+ struct pb0100_ctrls *ctrls;
+ static const struct v4l2_ctrl_config autogain_target = {
+ .ops = &pb0100_ctrl_ops,
+ .id = V4L2_CTRL_CLASS_USER + 0x1000,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Automatic Gain Target",
+ .max = 255,
+ .step = 1,
+ .def = 128,
+ };
+ static const struct v4l2_ctrl_config natural_light = {
+ .ops = &pb0100_ctrl_ops,
+ .id = V4L2_CTRL_CLASS_USER + 0x1001,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Natural Light Source",
+ .max = 1,
+ .step = 1,
+ .def = 1,
+ };
+
+ ctrls = kzalloc(sizeof(*ctrls), GFP_KERNEL);
+ if (!ctrls)
+ return -ENOMEM;
+
+ v4l2_ctrl_handler_init(hdl, 6);
+ ctrls->autogain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 511, 1, 12);
+ ctrls->gain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 128);
+ ctrls->red = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_RED_BALANCE, -255, 255, 1, 0);
+ ctrls->blue = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, -255, 255, 1, 0);
+ ctrls->natural = v4l2_ctrl_new_custom(hdl, &natural_light, NULL);
+ ctrls->target = v4l2_ctrl_new_custom(hdl, &autogain_target, NULL);
+ if (hdl->error) {
+ kfree(ctrls);
+ return hdl->error;
+ }
+ sd->sensor_priv = ctrls;
+ v4l2_ctrl_auto_cluster(5, &ctrls->autogain, 0, false);
+ return 0;
+}
+
static int pb0100_probe(struct sd *sd)
{
u16 sensor;
- int i, err;
- s32 *sensor_settings;
+ int err;
err = stv06xx_read_sensor(sd, PB_IDENT, &sensor);
if (err < 0)
return -ENODEV;
+ if ((sensor >> 8) != 0x64)
+ return -ENODEV;
- if ((sensor >> 8) == 0x64) {
- sensor_settings = kmalloc(
- ARRAY_SIZE(pb0100_ctrl) * sizeof(s32),
- GFP_KERNEL);
- if (!sensor_settings)
- return -ENOMEM;
-
- pr_info("Photobit pb0100 sensor detected\n");
-
- sd->gspca_dev.cam.cam_mode = pb0100_mode;
- sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);
- sd->desc.ctrls = pb0100_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(pb0100_ctrl);
- for (i = 0; i < sd->desc.nctrls; i++)
- sensor_settings[i] = pb0100_ctrl[i].qctrl.default_value;
- sd->sensor_priv = sensor_settings;
+ pr_info("Photobit pb0100 sensor detected\n");
- return 0;
- }
+ sd->gspca_dev.cam.cam_mode = pb0100_mode;
+ sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);
- return -ENODEV;
+ return 0;
}
static int pb0100_start(struct sd *sd)
@@ -214,7 +191,6 @@ static int pb0100_start(struct sd *sd)
struct usb_host_interface *alt;
struct usb_interface *intf;
struct cam *cam = &sd->gspca_dev.cam;
- s32 *sensor_settings = sd->sensor_priv;
u32 mode = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
@@ -255,13 +231,6 @@ static int pb0100_start(struct sd *sd)
stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
}
- /* set_gain also sets red and blue balance */
- pb0100_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
- pb0100_set_exposure(&sd->gspca_dev, sensor_settings[EXPOSURE_IDX]);
- pb0100_set_autogain_target(&sd->gspca_dev,
- sensor_settings[AUTOGAIN_TARGET_IDX]);
- pb0100_set_autogain(&sd->gspca_dev, sensor_settings[AUTOGAIN_IDX]);
-
err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)|BIT(1));
PDEBUG(D_STREAM, "Started stream, status: %d", err);
@@ -285,12 +254,6 @@ out:
return (err < 0) ? err : 0;
}
-static void pb0100_disconnect(struct sd *sd)
-{
- sd->sensor = NULL;
- kfree(sd->sensor_priv);
-}
-
/* FIXME: Sort the init commands out and put them into tables,
this is only for getting the camera to work */
/* FIXME: No error handling for now,
@@ -362,62 +325,32 @@ static int pb0100_dump(struct sd *sd)
return 0;
}
-static int pb0100_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[GAIN_IDX];
-
- return 0;
-}
-
static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
- if (sensor_settings[AUTOGAIN_IDX])
- return -EBUSY;
-
- sensor_settings[GAIN_IDX] = val;
err = stv06xx_write_sensor(sd, PB_G1GAIN, val);
if (!err)
err = stv06xx_write_sensor(sd, PB_G2GAIN, val);
PDEBUG(D_V4L2, "Set green gain to %d, status: %d", val, err);
if (!err)
- err = pb0100_set_red_balance(gspca_dev,
- sensor_settings[RED_BALANCE_IDX]);
+ err = pb0100_set_red_balance(gspca_dev, ctrls->red->val);
if (!err)
- err = pb0100_set_blue_balance(gspca_dev,
- sensor_settings[BLUE_BALANCE_IDX]);
+ err = pb0100_set_blue_balance(gspca_dev, ctrls->blue->val);
return err;
}
-static int pb0100_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[RED_BALANCE_IDX];
-
- return 0;
-}
-
static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
- if (sensor_settings[AUTOGAIN_IDX])
- return -EBUSY;
-
- sensor_settings[RED_BALANCE_IDX] = val;
- val += sensor_settings[GAIN_IDX];
+ val += ctrls->gain->val;
if (val < 0)
val = 0;
else if (val > 255)
@@ -429,27 +362,13 @@ static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-static int pb0100_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[BLUE_BALANCE_IDX];
-
- return 0;
-}
-
static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
- if (sensor_settings[AUTOGAIN_IDX])
- return -EBUSY;
-
- sensor_settings[BLUE_BALANCE_IDX] = val;
- val += sensor_settings[GAIN_IDX];
+ val += ctrls->gain->val;
if (val < 0)
val = 0;
else if (val > 255)
@@ -461,51 +380,25 @@ static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-static int pb0100_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[EXPOSURE_IDX];
-
- return 0;
-}
-
static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
- int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- if (sensor_settings[AUTOGAIN_IDX])
- return -EBUSY;
+ int err;
- sensor_settings[EXPOSURE_IDX] = val;
err = stv06xx_write_sensor(sd, PB_RINTTIME, val);
PDEBUG(D_V4L2, "Set exposure to %d, status: %d", val, err);
return err;
}
-static int pb0100_get_autogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[AUTOGAIN_IDX];
-
- return 0;
-}
-
static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
+ struct pb0100_ctrls *ctrls = sd->sensor_priv;
- sensor_settings[AUTOGAIN_IDX] = val;
- if (sensor_settings[AUTOGAIN_IDX]) {
- if (sensor_settings[NATURAL_IDX])
+ if (val) {
+ if (ctrls->natural->val)
val = BIT(6)|BIT(4)|BIT(0);
else
val = BIT(4)|BIT(0);
@@ -514,29 +407,15 @@ static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val)
err = stv06xx_write_sensor(sd, PB_EXPGAIN, val);
PDEBUG(D_V4L2, "Set autogain to %d (natural: %d), status: %d",
- sensor_settings[AUTOGAIN_IDX], sensor_settings[NATURAL_IDX],
- err);
+ val, ctrls->natural->val, err);
return err;
}
-static int pb0100_get_autogain_target(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[AUTOGAIN_TARGET_IDX];
-
- return 0;
-}
-
static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
{
int err, totalpixels, brightpixels, darkpixels;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- sensor_settings[AUTOGAIN_TARGET_IDX] = val;
/* Number of pixels counted by the sensor when subsampling the pixels.
* Slightly larger than the real value to avoid oscillation */
@@ -553,23 +432,3 @@ static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
return err;
}
-
-static int pb0100_get_natural(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[NATURAL_IDX];
-
- return 0;
-}
-
-static int pb0100_set_natural(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- sensor_settings[NATURAL_IDX] = val;
-
- return pb0100_set_autogain(gspca_dev, sensor_settings[AUTOGAIN_IDX]);
-}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h
index 757de246dc75..5071e5353fd3 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_pb0100.h
@@ -112,25 +112,17 @@
static int pb0100_probe(struct sd *sd);
static int pb0100_start(struct sd *sd);
static int pb0100_init(struct sd *sd);
+static int pb0100_init_controls(struct sd *sd);
static int pb0100_stop(struct sd *sd);
static int pb0100_dump(struct sd *sd);
-static void pb0100_disconnect(struct sd *sd);
/* V4L2 controls supported by the driver */
-static int pb0100_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_autogain(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_autogain_target(struct gspca_dev *gspca_dev, __s32 *val);
static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val);
-static int pb0100_get_natural(struct gspca_dev *gspca_dev, __s32 *val);
-static int pb0100_set_natural(struct gspca_dev *gspca_dev, __s32 val);
const struct stv06xx_sensor stv06xx_sensor_pb0100 = {
.name = "PB-0100",
@@ -142,11 +134,11 @@ const struct stv06xx_sensor stv06xx_sensor_pb0100 = {
.max_packet_size = { 847, 923 },
.init = pb0100_init,
+ .init_controls = pb0100_init_controls,
.probe = pb0100_probe,
.start = pb0100_start,
.stop = pb0100_stop,
.dump = pb0100_dump,
- .disconnect = pb0100_disconnect,
};
#endif
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
index fb229d8ded58..3a498c2495c6 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
@@ -63,8 +63,8 @@ struct stv06xx_sensor {
/* Performs a initialization sequence */
int (*init)(struct sd *sd);
- /* Executed at device disconnect */
- void (*disconnect)(struct sd *sd);
+ /* Initializes the controls */
+ int (*init_controls)(struct sd *sd);
/* Reads a sensor register */
int (*read_sensor)(struct sd *sd, const u8 address,
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
index 9940e035b3ab..8a57990dfe0f 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -30,20 +30,6 @@
#include "stv06xx_st6422.h"
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- GAIN,
- EXPOSURE,
- NCTRLS /* number of controls */
-};
-
-/* sensor settings */
-struct st6422_settings {
- struct gspca_ctrl ctrls[NCTRLS];
-};
-
static struct v4l2_pix_format st6422_mode[] = {
/* Note we actually get 124 lines of data, of which we skip the first
 4 as they are garbage */
@@ -74,83 +60,70 @@ static struct v4l2_pix_format st6422_mode[] = {
};
/* V4L2 controls supported by the driver */
-static void st6422_set_brightness(struct gspca_dev *gspca_dev);
-static void st6422_set_contrast(struct gspca_dev *gspca_dev);
-static void st6422_set_gain(struct gspca_dev *gspca_dev);
-static void st6422_set_exposure(struct gspca_dev *gspca_dev);
-
-static const struct ctrl st6422_ctrl[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 31,
- .step = 1,
- .default_value = 3
- },
- .set_control = st6422_set_brightness
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = 11
- },
- .set_control = st6422_set_contrast
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 64
- },
- .set_control = st6422_set_gain
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
-#define EXPOSURE_MAX 1023
- .maximum = EXPOSURE_MAX,
- .step = 1,
- .default_value = 256
- },
- .set_control = st6422_set_exposure
- },
+static int setbrightness(struct sd *sd, s32 val);
+static int setcontrast(struct sd *sd, s32 val);
+static int setgain(struct sd *sd, u8 gain);
+static int setexposure(struct sd *sd, s16 expo);
+
+static int st6422_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+ int err = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ err = setbrightness(sd, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ err = setcontrast(sd, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ err = setgain(sd, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ err = setexposure(sd, ctrl->val);
+ break;
+ }
+
+ /* commit settings */
+ if (err >= 0)
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ sd->gspca_dev.usb_err = err;
+ return err;
+}
+
+static const struct v4l2_ctrl_ops st6422_ctrl_ops = {
+ .s_ctrl = st6422_s_ctrl,
};
-static int st6422_probe(struct sd *sd)
+static int st6422_init_controls(struct sd *sd)
{
- struct st6422_settings *sensor_settings;
+ struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
+
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 31, 1, 3);
+ v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 15, 1, 11);
+ v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 1023, 1, 256);
+ v4l2_ctrl_new_std(hdl, &st6422_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 64);
+
+ return hdl->error;
+}
+static int st6422_probe(struct sd *sd)
+{
if (sd->bridge != BRIDGE_ST6422)
return -ENODEV;
pr_info("st6422 sensor detected\n");
- sensor_settings = kmalloc(sizeof *sensor_settings, GFP_KERNEL);
- if (!sensor_settings)
- return -ENOMEM;
-
sd->gspca_dev.cam.cam_mode = st6422_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(st6422_mode);
- sd->gspca_dev.cam.ctrls = sensor_settings->ctrls;
- sd->desc.ctrls = st6422_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(st6422_ctrl);
- sd->sensor_priv = sensor_settings;
-
return 0;
}
@@ -239,38 +212,22 @@ static int st6422_init(struct sd *sd)
return err;
}
-static void st6422_disconnect(struct sd *sd)
-{
- sd->sensor = NULL;
- kfree(sd->sensor_priv);
-}
-
-static int setbrightness(struct sd *sd)
+static int setbrightness(struct sd *sd, s32 val)
{
- struct st6422_settings *sensor_settings = sd->sensor_priv;
-
/* val goes from 0 -> 31 */
- return stv06xx_write_bridge(sd, 0x1432,
- sensor_settings->ctrls[BRIGHTNESS].val);
+ return stv06xx_write_bridge(sd, 0x1432, val);
}
-static int setcontrast(struct sd *sd)
+static int setcontrast(struct sd *sd, s32 val)
{
- struct st6422_settings *sensor_settings = sd->sensor_priv;
-
/* Val goes from 0 -> 15 */
- return stv06xx_write_bridge(sd, 0x143a,
- sensor_settings->ctrls[CONTRAST].val | 0xf0);
+ return stv06xx_write_bridge(sd, 0x143a, val | 0xf0);
}
-static int setgain(struct sd *sd)
+static int setgain(struct sd *sd, u8 gain)
{
- struct st6422_settings *sensor_settings = sd->sensor_priv;
- u8 gain;
int err;
- gain = sensor_settings->ctrls[GAIN].val;
-
/* Set red, green, blue, gain */
err = stv06xx_write_bridge(sd, 0x0509, gain);
if (err < 0)
@@ -292,13 +249,10 @@ static int setgain(struct sd *sd)
return stv06xx_write_bridge(sd, 0x050d, 0x01);
}
-static int setexposure(struct sd *sd)
+static int setexposure(struct sd *sd, s16 expo)
{
- struct st6422_settings *sensor_settings = sd->sensor_priv;
- u16 expo;
int err;
- expo = sensor_settings->ctrls[EXPOSURE].val;
err = stv06xx_write_bridge(sd, 0x143d, expo & 0xff);
if (err < 0)
return err;
@@ -318,22 +272,6 @@ static int st6422_start(struct sd *sd)
if (err < 0)
return err;
- err = setbrightness(sd);
- if (err < 0)
- return err;
-
- err = setcontrast(sd);
- if (err < 0)
- return err;
-
- err = setexposure(sd);
- if (err < 0)
- return err;
-
- err = setgain(sd);
- if (err < 0)
- return err;
-
/* commit settings */
err = stv06xx_write_bridge(sd, 0x143f, 0x01);
return (err < 0) ? err : 0;
@@ -345,59 +283,3 @@ static int st6422_stop(struct sd *sd)
return 0;
}
-
-static void st6422_set_brightness(struct gspca_dev *gspca_dev)
-{
- int err;
- struct sd *sd = (struct sd *) gspca_dev;
-
- err = setbrightness(sd);
-
- /* commit settings */
- if (err >= 0)
- err = stv06xx_write_bridge(sd, 0x143f, 0x01);
-
- gspca_dev->usb_err = err;
-}
-
-static void st6422_set_contrast(struct gspca_dev *gspca_dev)
-{
- int err;
- struct sd *sd = (struct sd *) gspca_dev;
-
- err = setcontrast(sd);
-
- /* commit settings */
- if (err >= 0)
- err = stv06xx_write_bridge(sd, 0x143f, 0x01);
-
- gspca_dev->usb_err = err;
-}
-
-static void st6422_set_gain(struct gspca_dev *gspca_dev)
-{
- int err;
- struct sd *sd = (struct sd *) gspca_dev;
-
- err = setgain(sd);
-
- /* commit settings */
- if (err >= 0)
- err = stv06xx_write_bridge(sd, 0x143f, 0x01);
-
- gspca_dev->usb_err = err;
-}
-
-static void st6422_set_exposure(struct gspca_dev *gspca_dev)
-{
- int err;
- struct sd *sd = (struct sd *) gspca_dev;
-
- err = setexposure(sd);
-
- /* commit settings */
- if (err >= 0)
- err = stv06xx_write_bridge(sd, 0x143f, 0x01);
-
- gspca_dev->usb_err = err;
-}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
index d7498e06432b..8f20fbf30f33 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
@@ -34,8 +34,8 @@
static int st6422_probe(struct sd *sd);
static int st6422_start(struct sd *sd);
static int st6422_init(struct sd *sd);
+static int st6422_init_controls(struct sd *sd);
static int st6422_stop(struct sd *sd);
-static void st6422_disconnect(struct sd *sd);
const struct stv06xx_sensor stv06xx_sensor_st6422 = {
.name = "ST6422",
@@ -43,10 +43,10 @@ const struct stv06xx_sensor stv06xx_sensor_st6422 = {
.min_packet_size = { 300, 847 },
.max_packet_size = { 300, 847 },
.init = st6422_init,
+ .init_controls = st6422_init_controls,
.probe = st6422_probe,
.start = st6422_start,
.stop = st6422_stop,
- .disconnect = st6422_disconnect,
};
#endif
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
index a5c69d9ebdd4..748e1421d6d8 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
@@ -44,130 +44,83 @@ static struct v4l2_pix_format vv6410_mode[] = {
}
};
-static const struct ctrl vv6410_ctrl[] = {
-#define HFLIP_IDX 0
- {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "horizontal flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
- },
- .set = vv6410_set_hflip,
- .get = vv6410_get_hflip
- },
-#define VFLIP_IDX 1
- {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "vertical flip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0
- },
- .set = vv6410_set_vflip,
- .get = vv6410_get_vflip
- },
-#define GAIN_IDX 2
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "analog gain",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = 10
- },
- .set = vv6410_set_analog_gain,
- .get = vv6410_get_analog_gain
- },
-#define EXPOSURE_IDX 3
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .minimum = 0,
- .maximum = 32768,
- .step = 1,
- .default_value = 20000
- },
- .set = vv6410_set_exposure,
- .get = vv6410_get_exposure
+static int vv6410_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ int err = -EINVAL;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ err = vv6410_set_hflip(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ err = vv6410_set_vflip(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ err = vv6410_set_analog_gain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ err = vv6410_set_exposure(gspca_dev, ctrl->val);
+ break;
}
- };
+ return err;
+}
+
+static const struct v4l2_ctrl_ops vv6410_ctrl_ops = {
+ .s_ctrl = vv6410_s_ctrl,
+};
static int vv6410_probe(struct sd *sd)
{
u16 data;
- int err, i;
- s32 *sensor_settings;
+ int err;
err = stv06xx_read_sensor(sd, VV6410_DEVICEH, &data);
if (err < 0)
return -ENODEV;
- if (data == 0x19) {
- pr_info("vv6410 sensor detected\n");
+ if (data != 0x19)
+ return -ENODEV;
- sensor_settings = kmalloc(ARRAY_SIZE(vv6410_ctrl) * sizeof(s32),
- GFP_KERNEL);
- if (!sensor_settings)
- return -ENOMEM;
+ pr_info("vv6410 sensor detected\n");
- sd->gspca_dev.cam.cam_mode = vv6410_mode;
- sd->gspca_dev.cam.nmodes = ARRAY_SIZE(vv6410_mode);
- sd->desc.ctrls = vv6410_ctrl;
- sd->desc.nctrls = ARRAY_SIZE(vv6410_ctrl);
+ sd->gspca_dev.cam.cam_mode = vv6410_mode;
+ sd->gspca_dev.cam.nmodes = ARRAY_SIZE(vv6410_mode);
+ return 0;
+}
- for (i = 0; i < sd->desc.nctrls; i++)
- sensor_settings[i] = vv6410_ctrl[i].qctrl.default_value;
- sd->sensor_priv = sensor_settings;
- return 0;
- }
- return -ENODEV;
+static int vv6410_init_controls(struct sd *sd)
+{
+ struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
+
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 32768, 1, 20000);
+ v4l2_ctrl_new_std(hdl, &vv6410_ctrl_ops,
+ V4L2_CID_GAIN, 0, 15, 1, 10);
+ return hdl->error;
}
static int vv6410_init(struct sd *sd)
{
int err = 0, i;
- s32 *sensor_settings = sd->sensor_priv;
- for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++) {
+ for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++)
stv06xx_write_bridge(sd, stv_bridge_init[i].addr, stv_bridge_init[i].data);
- }
if (err < 0)
return err;
err = stv06xx_write_sensor_bytes(sd, (u8 *) vv6410_sensor_init,
ARRAY_SIZE(vv6410_sensor_init));
- if (err < 0)
- return err;
-
- err = vv6410_set_exposure(&sd->gspca_dev,
- sensor_settings[EXPOSURE_IDX]);
- if (err < 0)
- return err;
-
- err = vv6410_set_analog_gain(&sd->gspca_dev,
- sensor_settings[GAIN_IDX]);
-
return (err < 0) ? err : 0;
}
-static void vv6410_disconnect(struct sd *sd)
-{
- sd->sensor = NULL;
- kfree(sd->sensor_priv);
-}
-
static int vv6410_start(struct sd *sd)
{
int err;
@@ -233,25 +186,12 @@ static int vv6410_dump(struct sd *sd)
return (err < 0) ? err : 0;
}
-static int vv6410_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[HFLIP_IDX];
- PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
-
- return 0;
-}
-
static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u16 i2c_data;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
- sensor_settings[HFLIP_IDX] = val;
err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &i2c_data);
if (err < 0)
return err;
@@ -267,25 +207,12 @@ static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
return (err < 0) ? err : 0;
}
-static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[VFLIP_IDX];
- PDEBUG(D_V4L2, "Read vertical flip %d", *val);
-
- return 0;
-}
-
static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
u16 i2c_data;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
- sensor_settings[VFLIP_IDX] = val;
err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &i2c_data);
if (err < 0)
return err;
@@ -301,52 +228,23 @@ static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
return (err < 0) ? err : 0;
}
-static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[GAIN_IDX];
-
- PDEBUG(D_V4L2, "Read analog gain %d", *val);
-
- return 0;
-}
-
static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
- sensor_settings[GAIN_IDX] = val;
PDEBUG(D_V4L2, "Set analog gain to %d", val);
err = stv06xx_write_sensor(sd, VV6410_ANALOGGAIN, 0xf0 | (val & 0xf));
return (err < 0) ? err : 0;
}
-static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
-
- *val = sensor_settings[EXPOSURE_IDX];
-
- PDEBUG(D_V4L2, "Read exposure %d", *val);
-
- return 0;
-}
-
static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
int err;
struct sd *sd = (struct sd *) gspca_dev;
- s32 *sensor_settings = sd->sensor_priv;
unsigned int fine, coarse;
- sensor_settings[EXPOSURE_IDX] = val;
-
val = (val * val >> 14) + val / 4;
fine = val % VV6410_CIF_LINELENGTH;
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index a25b8873f2e6..53e67b40ca05 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -178,18 +178,14 @@
static int vv6410_probe(struct sd *sd);
static int vv6410_start(struct sd *sd);
static int vv6410_init(struct sd *sd);
+static int vv6410_init_controls(struct sd *sd);
static int vv6410_stop(struct sd *sd);
static int vv6410_dump(struct sd *sd);
-static void vv6410_disconnect(struct sd *sd);
/* V4L2 controls supported by the driver */
-static int vv6410_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
-static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
-static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val);
-static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
const struct stv06xx_sensor stv06xx_sensor_vv6410 = {
@@ -202,11 +198,11 @@ const struct stv06xx_sensor stv06xx_sensor_vv6410 = {
.min_packet_size = { 1023 },
.max_packet_size = { 1023 },
.init = vv6410_init,
+ .init_controls = vv6410_init_controls,
.probe = vv6410_probe,
.start = vv6410_start,
.stop = vv6410_stop,
.dump = vv6410_dump,
- .disconnect = vv6410_disconnect,
};
/* If NULL, only single value to write, stored in len */
diff --git a/drivers/media/video/gspca/topro.c b/drivers/media/video/gspca/topro.c
index 444d3c5b9079..c6326d177a3d 100644
--- a/drivers/media/video/gspca/topro.c
+++ b/drivers/media/video/gspca/topro.c
@@ -4675,11 +4675,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* -- do autogain -- */
/* gain setting is done in setexposure() for tp6810 */
static void setgain(struct gspca_dev *gspca_dev) {}
-/* !! coarse_grained_expo_autogain is not used !! */
-#define exp_too_low_cnt bridge
-#define exp_too_high_cnt sensor
-
+#define WANT_REGULAR_AUTOGAIN
#include "autogain_functions.h"
+
static void sd_dq_callback(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/gspca/vicam.c b/drivers/media/video/gspca/vicam.c
index 911152e169d6..15a30f7a4b2a 100644
--- a/drivers/media/video/gspca/vicam.c
+++ b/drivers/media/video/gspca/vicam.c
@@ -37,9 +37,12 @@
#include <linux/ihex.h>
#include "gspca.h"
+#define VICAM_FIRMWARE "vicam/firmware.fw"
+
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("GSPCA ViCam USB Camera Driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(VICAM_FIRMWARE);
enum e_ctrl {
GAIN,
@@ -222,7 +225,11 @@ static void vicam_dostream(struct work_struct *work)
goto exit;
}
- while (gspca_dev->present && gspca_dev->streaming) {
+ while (gspca_dev->dev && gspca_dev->streaming) {
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
+ break;
+#endif
ret = vicam_read_frame(gspca_dev, buffer, frame_sz);
if (ret < 0)
break;
@@ -268,7 +275,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
const struct firmware *uninitialized_var(fw);
u8 *firmware_buf;
- ret = request_ihex_firmware(&fw, "vicam/firmware.fw",
+ ret = request_ihex_firmware(&fw, VICAM_FIRMWARE,
&gspca_dev->dev->dev);
if (ret) {
pr_err("Failed to load \"vicam/firmware.fw\": %d\n", ret);
@@ -324,7 +331,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
dev->work_thread = NULL;
mutex_lock(&gspca_dev->usb_lock);
- if (gspca_dev->present)
+ if (gspca_dev->dev)
vicam_set_camera_power(gspca_dev, 0);
}
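
The vicam hunks give the firmware path a single VICAM_FIRMWARE definition and advertise it with MODULE_FIRMWARE() so tools such as modinfo can list it. A hedged sketch of the general pattern using the plain request_firmware() API (the file name and helper are placeholders; vicam itself uses request_ihex_firmware()):

/* Sketch: declaring and loading external firmware; names are placeholders. */
#include <linux/firmware.h>
#include <linux/module.h>

#define MYCAM_FIRMWARE "mycam/firmware.fw"
MODULE_FIRMWARE(MYCAM_FIRMWARE);

static int mycam_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, MYCAM_FIRMWARE, dev);
	if (ret)
		return ret;
	/* ... push fw->data (fw->size bytes) to the device here ... */
	release_firmware(fw);
	return 0;
}
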
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 7d9a4f1be9dc..f0bacee33ef9 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -32,29 +32,25 @@ MODULE_LICENSE("GPL");
static int force_sensor = -1;
-#define REG08_DEF 3 /* default JPEG compression (70%) */
+#define REG08_DEF 3 /* default JPEG compression (75%) */
#include "zc3xx-reg.h"
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- EXPOSURE,
- GAMMA,
- AUTOGAIN,
- LIGHTFREQ,
- SHARPNESS,
- QUALITY,
- NCTRLS /* number of controls */
-};
-
-#define AUTOGAIN_DEF 1
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
+ struct { /* gamma/brightness/contrast control cluster */
+ struct v4l2_ctrl *gamma;
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ };
+ struct { /* autogain/exposure control cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *exposure;
+ };
+ struct v4l2_ctrl *plfreq;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *jpegqual;
struct work_struct work;
struct workqueue_struct *work_thread;
@@ -94,114 +90,6 @@ enum sensors {
SENSOR_MAX
};
-/* V4L2 controls supported by the driver */
-static void setcontrast(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static void setlightfreq(struct gspca_dev *gspca_dev);
-static void setsharpness(struct gspca_dev *gspca_dev);
-static int sd_setquality(struct gspca_dev *gspca_dev, __s32 val);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- },
- .set_control = setcontrast
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 128,
- },
- .set_control = setcontrast
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0x30d,
- .maximum = 0x493e,
- .step = 1,
- .default_value = 0x927
- },
- .set_control = setexposure
- },
-[GAMMA] = {
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 1,
- .maximum = 6,
- .step = 1,
- .default_value = 4,
- },
- .set_control = setcontrast
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = AUTOGAIN_DEF,
- .flags = V4L2_CTRL_FLAG_UPDATE
- },
- .set = sd_setautogain
- },
-[LIGHTFREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
- .step = 1,
- .default_value = 0,
- },
- .set_control = setlightfreq
- },
-[SHARPNESS] = {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 2,
- },
- .set_control = setsharpness
- },
-[QUALITY] = {
- {
- .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Compression Quality",
- .minimum = 40,
- .maximum = 70,
- .step = 1,
- .default_value = 70 /* updated in sd_init() */
- },
- .set = sd_setquality
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
@@ -241,8 +129,11 @@ static const struct v4l2_pix_format sif_mode[] = {
.priv = 0},
};
-/* bridge reg08 -> JPEG quality conversion table */
-static u8 jpeg_qual[] = {40, 50, 60, 70, /*80*/};
+/*
+ * Bridge reg08 bits 1-2 -> JPEG quality conversion table. Note the highest
+ * quality setting is not usable as USB 1 does not have enough bandwidth.
+ */
+static u8 jpeg_qual[] = {50, 75, 87, /* 94 */};
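
The new table is indexed by bits 1-2 of bridge register 08; the rest of this patch keeps bit 0 set and recovers the table index with reg08 >> 1, so REG08_DEF == 3 selects jpeg_qual[1] == 75, matching the updated comment. A small sketch of the mapping as setquality() and zcxx_s_ctrl() below use it (a restatement, not extra driver code):

static u8 quality_to_reg08(int quality)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(jpeg_qual) - 1; i++)
		if (quality <= jpeg_qual[i])
			break;
	return (i << 1) | 1;	/* bit 0 set, bits 1-2 index jpeg_qual[] */
}

static int reg08_to_quality(u8 reg08)
{
	return jpeg_qual[reg08 >> 1];
}
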
/* usb exchanges */
struct usb_action {
@@ -5818,10 +5709,8 @@ static void setmatrix(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, matrix[i], 0x010a + i);
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int sharpness;
static const u8 sharpness_tb[][2] = {
{0x02, 0x03},
{0x04, 0x07},
@@ -5829,19 +5718,18 @@ static void setsharpness(struct gspca_dev *gspca_dev)
{0x10, 0x1e}
};
- sharpness = sd->ctrls[SHARPNESS].val;
- reg_w(gspca_dev, sharpness_tb[sharpness][0], 0x01c6);
+ reg_w(gspca_dev, sharpness_tb[val][0], 0x01c6);
reg_r(gspca_dev, 0x01c8);
reg_r(gspca_dev, 0x01c9);
reg_r(gspca_dev, 0x01ca);
- reg_w(gspca_dev, sharpness_tb[sharpness][1], 0x01cb);
+ reg_w(gspca_dev, sharpness_tb[val][1], 0x01cb);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev,
+ s32 gamma, s32 brightness, s32 contrast)
{
- struct sd *sd = (struct sd *) gspca_dev;
const u8 *Tgamma;
- int g, i, brightness, contrast, adj, gp1, gp2;
+ int g, i, adj, gp1, gp2;
u8 gr[16];
static const u8 delta_b[16] = /* delta for brightness */
{0x50, 0x38, 0x2d, 0x28, 0x24, 0x21, 0x1e, 0x1d,
@@ -5864,10 +5752,10 @@ static void setcontrast(struct gspca_dev *gspca_dev)
0xe0, 0xeb, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff},
};
- Tgamma = gamma_tb[sd->ctrls[GAMMA].val - 1];
+ Tgamma = gamma_tb[gamma - 1];
- contrast = ((int) sd->ctrls[CONTRAST].val - 128); /* -128 / 127 */
- brightness = ((int) sd->ctrls[BRIGHTNESS].val - 128); /* -128 / 92 */
+ contrast -= 128; /* -128 / 127 */
+ brightness -= 128; /* -128 / 92 */
adj = 0;
gp1 = gp2 = 0;
for (i = 0; i < 16; i++) {
@@ -5894,25 +5782,15 @@ static void setcontrast(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, gr[i], 0x0130 + i); /* gradient */
}
-static void getexposure(struct gspca_dev *gspca_dev)
+static s32 getexposure(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (sd->sensor != SENSOR_HV7131R)
- return;
- sd->ctrls[EXPOSURE].val = (i2c_read(gspca_dev, 0x25) << 9)
+ return (i2c_read(gspca_dev, 0x25) << 9)
| (i2c_read(gspca_dev, 0x26) << 1)
| (i2c_read(gspca_dev, 0x27) >> 7);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int val;
-
- if (sd->sensor != SENSOR_HV7131R)
- return;
- val = sd->ctrls[EXPOSURE].val;
i2c_write(gspca_dev, 0x25, val >> 9, 0x00);
i2c_write(gspca_dev, 0x26, val >> 1, 0x00);
i2c_write(gspca_dev, 0x27, val << 7, 0x00);
@@ -5921,20 +5799,8 @@ static void setexposure(struct gspca_dev *gspca_dev)
static void setquality(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- s8 reg07;
-
- reg07 = 0;
- switch (sd->sensor) {
- case SENSOR_OV7620:
- reg07 = 0x30;
- break;
- case SENSOR_HV7131R:
- case SENSOR_PAS202B:
- return; /* done by work queue */
- }
+ jpeg_set_qual(sd->jpeg_hdr, jpeg_qual[sd->reg08 >> 1]);
reg_w(gspca_dev, sd->reg08, ZC3XX_R008_CLOCKSETTING);
- if (reg07 != 0)
- reg_w(gspca_dev, reg07, 0x0007);
}
/* Matches the sensor's internal frame rate to the lighting frequency.
@@ -5943,7 +5809,7 @@ static void setquality(struct gspca_dev *gspca_dev)
* 60Hz, for American lighting
 * 0 = No flicker (for outdoor usage)
*/
-static void setlightfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
int i, mode;
@@ -6027,7 +5893,7 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
tas5130c_60HZ, tas5130c_60HZScale},
};
- i = sd->ctrls[LIGHTFREQ].val * 2;
+ i = val * 2;
mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
if (mode)
i++; /* 320x240 */
@@ -6037,14 +5903,14 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
usb_exchange(gspca_dev, zc3_freq);
switch (sd->sensor) {
case SENSOR_GC0305:
- if (mode /* if 320x240 */
- && sd->ctrls[LIGHTFREQ].val == 1) /* and 50Hz */
+ if (mode /* if 320x240 */
+ && val == 1) /* and 50Hz */
reg_w(gspca_dev, 0x85, 0x018d);
/* win: 0x80, 0x018d */
break;
case SENSOR_OV7620:
- if (!mode) { /* if 640x480 */
- if (sd->ctrls[LIGHTFREQ].val != 0) /* and filter */
+ if (!mode) { /* if 640x480 */
+ if (val != 0) /* and filter */
reg_w(gspca_dev, 0x40, 0x0002);
else
reg_w(gspca_dev, 0x44, 0x0002);
@@ -6056,22 +5922,15 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
}
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 autoval;
-
- if (sd->ctrls[AUTOGAIN].val)
- autoval = 0x42;
- else
- autoval = 0x02;
- reg_w(gspca_dev, autoval, 0x0180);
+ reg_w(gspca_dev, val ? 0x42 : 0x02, 0x0180);
}
-/* update the transfer parameters */
-/* This function is executed from a work queue. */
-/* The exact use of the bridge registers 07 and 08 is not known.
- * The following algorithm has been adapted from ms-win traces */
+/*
+ * Update the transfer parameters.
+ * This function is executed from a work queue.
+ */
static void transfer_update(struct work_struct *work)
{
struct sd *sd = container_of(work, struct sd, work);
@@ -6079,96 +5938,55 @@ static void transfer_update(struct work_struct *work)
int change, good;
u8 reg07, reg11;
- /* synchronize with the main driver and initialize the registers */
- mutex_lock(&gspca_dev->usb_lock);
- reg07 = 0; /* max */
- reg_w(gspca_dev, reg07, 0x0007);
- reg_w(gspca_dev, sd->reg08, ZC3XX_R008_CLOCKSETTING);
- mutex_unlock(&gspca_dev->usb_lock);
+ /* reg07 gets set to 0 by sd_start before starting us */
+ reg07 = 0;
good = 0;
for (;;) {
msleep(100);
- /* get the transfer status */
- /* the bit 0 of the bridge register 11 indicates overflow */
mutex_lock(&gspca_dev->usb_lock);
- if (!gspca_dev->present || !gspca_dev->streaming)
+#ifdef CONFIG_PM
+ if (gspca_dev->frozen)
goto err;
+#endif
+ if (!gspca_dev->dev || !gspca_dev->streaming)
+ goto err;
+
+ /* Bit 0 of register 11 indicates FIFO overflow */
+ gspca_dev->usb_err = 0;
reg11 = reg_r(gspca_dev, 0x0011);
- if (gspca_dev->usb_err < 0
- || !gspca_dev->present || !gspca_dev->streaming)
+ if (gspca_dev->usb_err)
goto err;
change = reg11 & 0x01;
if (change) { /* overflow */
- switch (reg07) {
- case 0: /* max */
- reg07 = sd->sensor == SENSOR_HV7131R
- ? 0x30 : 0x32;
- if (sd->reg08 != 0) {
- change = 3;
- sd->reg08--;
- }
- break;
- case 0x32:
- reg07 -= 4;
- break;
- default:
- reg07 -= 2;
- break;
- case 2:
- change = 0; /* already min */
- break;
- }
good = 0;
+
+ if (reg07 == 0) /* Bit Rate Control not enabled? */
+ reg07 = 0x32; /* Allow 98 bytes / unit */
+ else if (reg07 > 2)
+ reg07 -= 2; /* Decrease allowed bytes / unit */
+ else
+ change = 0;
} else { /* no overflow */
- if (reg07 != 0) { /* if not max */
- good++;
- if (good >= 10) {
- good = 0;
+ good++;
+ if (good >= 10) {
+ good = 0;
+ if (reg07) { /* BRC enabled? */
change = 1;
- reg07 += 2;
- switch (reg07) {
- case 0x30:
- if (sd->sensor == SENSOR_PAS202B)
- reg07 += 2;
- break;
- case 0x32:
- case 0x34:
+ if (reg07 < 0x32)
+ reg07 += 2;
+ else
reg07 = 0;
- break;
- }
- }
- } else { /* reg07 max */
- if (sd->reg08 < sizeof jpeg_qual - 1) {
- good++;
- if (good > 10) {
- sd->reg08++;
- change = 2;
- }
}
}
}
if (change) {
- if (change & 1) {
- reg_w(gspca_dev, reg07, 0x0007);
- if (gspca_dev->usb_err < 0
- || !gspca_dev->present
- || !gspca_dev->streaming)
- goto err;
- }
- if (change & 2) {
- reg_w(gspca_dev, sd->reg08,
- ZC3XX_R008_CLOCKSETTING);
- if (gspca_dev->usb_err < 0
- || !gspca_dev->present
- || !gspca_dev->streaming)
- goto err;
- sd->ctrls[QUALITY].val = jpeg_qual[sd->reg08];
- jpeg_set_qual(sd->jpeg_hdr,
- jpeg_qual[sd->reg08]);
- }
+ gspca_dev->usb_err = 0;
+ reg_w(gspca_dev, reg07, 0x0007);
+ if (gspca_dev->usb_err)
+ goto err;
}
mutex_unlock(&gspca_dev->usb_lock);
}
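
The rewritten worker only adjusts bridge register 07 (the bytes-per-unit bit rate control): a FIFO overflow enables BRC at 0x32 and then steps it down in twos, while ten consecutive clean polls step it back up until BRC is disabled again; reg08 (compression) is no longer touched from the work queue since the control framework owns it now. The hysteresis, paraphrased as a pure helper (no USB I/O, kernel types assumed):

/* Sketch: returns the next reg07 value, or -1 when nothing should change. */
static int next_reg07(u8 reg07, bool overflow, int *good)
{
	if (overflow) {
		*good = 0;
		if (reg07 == 0)
			return 0x32;		/* enable BRC */
		if (reg07 > 2)
			return reg07 - 2;	/* throttle further */
		return -1;			/* already at the minimum */
	}
	if (++(*good) < 10)
		return -1;
	*good = 0;
	if (reg07 == 0)
		return -1;			/* BRC already off */
	return reg07 < 0x32 ? reg07 + 2 : 0;	/* step up, then disable BRC */
}
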
@@ -6503,7 +6321,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* define some sensors from the vendor/product */
sd->sensor = id->driver_info;
- gspca_dev->cam.ctrls = sd->ctrls;
sd->reg08 = REG08_DEF;
INIT_WORK(&sd->work, transfer_update);
@@ -6511,12 +6328,87 @@ static int sd_config(struct gspca_dev *gspca_dev,
return 0;
}
-/* this function is called at probe and resume time */
-static int sd_init(struct gspca_dev *gspca_dev)
+static int zcxx_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
- struct cam *cam;
- int sensor;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ gspca_dev->usb_err = 0;
+ if (ctrl->val && sd->exposure && gspca_dev->streaming)
+ sd->exposure->val = getexposure(gspca_dev);
+ return gspca_dev->usb_err;
+ }
+ return -EINVAL;
+}
+
+static int zcxx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+ int i, qual;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_JPEG_COMPRESSION_QUALITY) {
+ qual = sd->reg08 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(jpeg_qual); i++) {
+ if (ctrl->val <= jpeg_qual[i])
+ break;
+ }
+ if (i > 0 && i == qual && ctrl->val < jpeg_qual[i])
+ i--;
+
+ /* With high quality settings we need max bandwidth */
+ if (i >= 2 && gspca_dev->streaming &&
+ !gspca_dev->cam.needs_full_bandwidth)
+ return -EBUSY;
+
+ sd->reg08 = (i << 1) | 1;
+ ctrl->val = jpeg_qual[i];
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ /* gamma/brightness/contrast cluster */
+ case V4L2_CID_GAMMA:
+ setcontrast(gspca_dev, sd->gamma->val,
+ sd->brightness->val, sd->contrast->val);
+ break;
+ /* autogain/exposure cluster */
+ case V4L2_CID_AUTOGAIN:
+ setautogain(gspca_dev, ctrl->val);
+ if (!gspca_dev->usb_err && !ctrl->val && sd->exposure)
+ setexposure(gspca_dev, sd->exposure->val);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ setlightfreq(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ setquality(gspca_dev);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops zcxx_ctrl_ops = {
+ .g_volatile_ctrl = zcxx_g_volatile_ctrl,
+ .s_ctrl = zcxx_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
static const u8 gamma[SENSOR_MAX] = {
[SENSOR_ADCM2700] = 4,
[SENSOR_CS2102] = 4,
@@ -6538,6 +6430,48 @@ static int sd_init(struct gspca_dev *gspca_dev)
[SENSOR_PO2030] = 4,
[SENSOR_TAS5130C] = 3,
};
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 8);
+ sd->brightness = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ sd->contrast = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 128);
+ sd->gamma = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_GAMMA, 1, 6, 1, gamma[sd->sensor]);
+ if (sd->sensor == SENSOR_HV7131R)
+ sd->exposure = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0x30d, 0x493e, 1, 0x927);
+ sd->autogain = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ if (sd->sensor != SENSOR_OV7630C)
+ sd->plfreq = v4l2_ctrl_new_std_menu(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);
+ sd->sharpness = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 3, 1,
+ sd->sensor == SENSOR_PO2030 ? 0 : 2);
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &zcxx_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ jpeg_qual[0], jpeg_qual[ARRAY_SIZE(jpeg_qual) - 1], 1,
+ jpeg_qual[REG08_DEF >> 1]);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_cluster(3, &sd->gamma);
+ if (sd->sensor == SENSOR_HV7131R)
+ v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, true);
+ return 0;
+}
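
sd_init_controls() clusters gamma/brightness/contrast so a change to any of the three produces one s_ctrl call with all current values at hand, and for the HV7131R it puts autogain/exposure into an auto cluster, which marks exposure volatile and read-only while autogain is on; zcxx_g_volatile_ctrl() then reads the live value from the sensor. A minimal sketch of the auto-cluster idiom outside this driver (names are illustrative):

/* Sketch: an autogain/gain auto cluster; "my_state" is a placeholder. */
#include <media/v4l2-ctrls.h>

struct my_state {
	struct v4l2_ctrl_handler hdl;
	struct {	/* autogain/gain cluster */
		struct v4l2_ctrl *autogain;
		struct v4l2_ctrl *gain;
	};
};

static int my_init_controls(struct my_state *s, const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(&s->hdl, 2);
	s->autogain = v4l2_ctrl_new_std(&s->hdl, ops,
					V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	s->gain = v4l2_ctrl_new_std(&s->hdl, ops,
				    V4L2_CID_GAIN, 0, 255, 1, 64);
	if (s->hdl.error)
		return s->hdl.error;
	/* gain is volatile (read from hardware) while autogain is on */
	v4l2_ctrl_auto_cluster(2, &s->autogain, 0, true);
	return 0;
}
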
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct cam *cam;
+ int sensor;
static const u8 mode_tb[SENSOR_MAX] = {
[SENSOR_ADCM2700] = 2,
[SENSOR_CS2102] = 1,
@@ -6559,27 +6493,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
[SENSOR_PO2030] = 1,
[SENSOR_TAS5130C] = 1,
};
- static const u8 reg08_tb[SENSOR_MAX] = {
- [SENSOR_ADCM2700] = 1,
- [SENSOR_CS2102] = 3,
- [SENSOR_CS2102K] = 3,
- [SENSOR_GC0303] = 2,
- [SENSOR_GC0305] = 3,
- [SENSOR_HDCS2020] = 1,
- [SENSOR_HV7131B] = 3,
- [SENSOR_HV7131R] = 3,
- [SENSOR_ICM105A] = 3,
- [SENSOR_MC501CB] = 3,
- [SENSOR_MT9V111_1] = 3,
- [SENSOR_MT9V111_3] = 3,
- [SENSOR_OV7620] = 1,
- [SENSOR_OV7630C] = 3,
- [SENSOR_PAS106] = 3,
- [SENSOR_PAS202B] = 3,
- [SENSOR_PB0330] = 3,
- [SENSOR_PO2030] = 2,
- [SENSOR_TAS5130C] = 3,
- };
sensor = zcxx_probeSensor(gspca_dev);
if (sensor >= 0)
@@ -6688,7 +6601,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
case 0x2030:
PDEBUG(D_PROBE, "Find Sensor PO2030");
sd->sensor = SENSOR_PO2030;
- sd->ctrls[SHARPNESS].def = 0; /* from win traces */
break;
case 0x7620:
PDEBUG(D_PROBE, "Find Sensor OV7620");
@@ -6730,36 +6642,18 @@ static int sd_init(struct gspca_dev *gspca_dev)
break;
}
- sd->ctrls[GAMMA].def = gamma[sd->sensor];
- sd->reg08 = reg08_tb[sd->sensor];
- sd->ctrls[QUALITY].def = jpeg_qual[sd->reg08];
- sd->ctrls[QUALITY].min = jpeg_qual[0];
- sd->ctrls[QUALITY].max = jpeg_qual[ARRAY_SIZE(jpeg_qual) - 1];
-
- switch (sd->sensor) {
- case SENSOR_HV7131R:
- gspca_dev->ctrl_dis = (1 << QUALITY);
- break;
- case SENSOR_OV7630C:
- gspca_dev->ctrl_dis = (1 << LIGHTFREQ) | (1 << EXPOSURE);
- break;
- case SENSOR_PAS202B:
- gspca_dev->ctrl_dis = (1 << QUALITY) | (1 << EXPOSURE);
- break;
- default:
- gspca_dev->ctrl_dis = (1 << EXPOSURE);
- break;
- }
-#if AUTOGAIN_DEF
- if (sd->ctrls[AUTOGAIN].val)
- gspca_dev->ctrl_inac = (1 << EXPOSURE);
-#endif
-
/* switch off the led */
reg_w(gspca_dev, 0x01, 0x0000);
return gspca_dev->usb_err;
}
+static int sd_pre_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ gspca_dev->cam.needs_full_bandwidth = (sd->reg08 >= 4) ? 1 : 0;
+ return 0;
+}
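
sd_pre_start() is wired up further down as the .isoc_init hook: before the alternate setting is picked it tells the gspca core whether the current compression level needs the full USB bandwidth, and zcxx_s_ctrl() above refuses (-EBUSY) to raise the quality past that point while streaming. The threshold, spelled out (a restatement of the values above, not new logic):

/* reg08 >= 4 means table index >= 2, i.e. jpeg_qual[2] (87) or better. */
static bool zcxx_wants_full_bandwidth(u8 reg08)
{
	return reg08 >= 4;
}
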
+
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -6864,7 +6758,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x03, 0x0008);
break;
}
- setsharpness(gspca_dev);
+ setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness));
/* set the gamma tables when not set */
switch (sd->sensor) {
@@ -6873,7 +6767,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_OV7630C:
break;
default:
- setcontrast(gspca_dev);
+ setcontrast(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma),
+ v4l2_ctrl_g_ctrl(sd->brightness),
+ v4l2_ctrl_g_ctrl(sd->contrast));
break;
}
setmatrix(gspca_dev); /* one more time? */
@@ -6885,8 +6781,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
setquality(gspca_dev);
- jpeg_set_qual(sd->jpeg_hdr, jpeg_qual[sd->reg08]);
- setlightfreq(gspca_dev);
+ /* Start with BRC disabled, transfer_update will enable it if needed */
+ reg_w(gspca_dev, 0x00, 0x0007);
+ if (sd->plfreq)
+ setlightfreq(gspca_dev, v4l2_ctrl_g_ctrl(sd->plfreq));
switch (sd->sensor) {
case SENSOR_ADCM2700:
@@ -6897,7 +6795,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x40, 0x0117);
break;
case SENSOR_HV7131R:
- setexposure(gspca_dev);
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
break;
case SENSOR_GC0305:
@@ -6921,21 +6819,16 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
- setautogain(gspca_dev);
+ setautogain(gspca_dev, v4l2_ctrl_g_ctrl(sd->autogain));
- /* start the transfer update thread if needed */
- if (gspca_dev->usb_err >= 0) {
- switch (sd->sensor) {
- case SENSOR_HV7131R:
- case SENSOR_PAS202B:
- sd->work_thread =
- create_singlethread_workqueue(KBUILD_MODNAME);
- queue_work(sd->work_thread, &sd->work);
- break;
- }
- }
+ if (gspca_dev->usb_err < 0)
+ return gspca_dev->usb_err;
- return gspca_dev->usb_err;
+ /* Start the transfer parameters update thread */
+ sd->work_thread = create_singlethread_workqueue(KBUILD_MODNAME);
+ queue_work(sd->work_thread, &sd->work);
+
+ return 0;
}
/* called on streamoff with alt 0 and on disconnect */
@@ -6949,7 +6842,7 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
mutex_lock(&gspca_dev->usb_lock);
sd->work_thread = NULL;
}
- if (!gspca_dev->present)
+ if (!gspca_dev->dev)
return;
send_unknown(gspca_dev, sd->sensor);
}
@@ -6987,72 +6880,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->ctrls[AUTOGAIN].val = val;
- if (val) {
- gspca_dev->ctrl_inac |= (1 << EXPOSURE);
- } else {
- gspca_dev->ctrl_inac &= ~(1 << EXPOSURE);
- if (gspca_dev->streaming)
- getexposure(gspca_dev);
- }
- if (gspca_dev->streaming)
- setautogain(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
-{
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- switch (menu->index) {
- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
- strcpy((char *) menu->name, "NoFliker");
- return 0;
- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
- strcpy((char *) menu->name, "50 Hz");
- return 0;
- case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
- strcpy((char *) menu->name, "60 Hz");
- return 0;
- }
- break;
- }
- return -EINVAL;
-}
-
-static int sd_setquality(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(jpeg_qual) - 1; i++) {
- if (val <= jpeg_qual[i])
- break;
- }
- if (i > 0
- && i == sd->reg08
- && val < jpeg_qual[sd->reg08])
- i--;
- sd->reg08 = i;
- sd->ctrls[QUALITY].val = jpeg_qual[i];
- if (gspca_dev->streaming)
- jpeg_set_qual(sd->jpeg_hdr, sd->ctrls[QUALITY].val);
- return gspca_dev->usb_err;
-}
-
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
struct v4l2_jpegcompression *jcomp)
{
struct sd *sd = (struct sd *) gspca_dev;
+ int ret;
- sd_setquality(gspca_dev, jcomp->quality);
- jcomp->quality = sd->ctrls[QUALITY].val;
- return gspca_dev->usb_err;
+ ret = v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
+ if (ret)
+ return ret;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
+ return 0;
}
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
@@ -7061,7 +6899,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->ctrls[QUALITY].val;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
| V4L2_JPEG_MARKER_DQT;
return 0;
@@ -7085,14 +6923,13 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
static const struct sd_desc sd_desc = {
.name = KBUILD_MODNAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
+ .isoc_init = sd_pre_start,
.start = sd_start,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
@@ -7176,6 +7013,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/hdpvr/hdpvr-control.c b/drivers/media/video/hdpvr/hdpvr-control.c
index 068df4ba3f51..ae8f229d1141 100644
--- a/drivers/media/video/hdpvr/hdpvr-control.c
+++ b/drivers/media/video/hdpvr/hdpvr-control.c
@@ -113,6 +113,8 @@ int get_input_lines_info(struct hdpvr_device *dev)
"get input lines info returned: %d, %s\n", ret,
print_buf);
}
+#else
+ (void)ret; /* suppress compiler warning */
#endif
lines = dev->usbc_buf[1] << 8 | dev->usbc_buf[0];
mutex_unlock(&dev->usbc_mutex);
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 11ffe9cc1780..0e9e156bb2aa 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -994,7 +994,7 @@ static int hdpvr_try_ctrl(struct v4l2_ext_control *ctrl, int ac3)
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
static int vidioc_try_ext_ctrls(struct file *file, void *priv,
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index a62322d5c0d8..366434f5647e 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -40,15 +40,15 @@ static int hexium_num;
#define HEXIUM_INPUTS 9
static struct v4l2_input hexium_inputs[HEXIUM_INPUTS] = {
- { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
+ { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
};
#define HEXIUM_AUDIOS 0
@@ -59,11 +59,6 @@ struct hexium_data
u8 byte;
};
-#define HEXIUM_CONTROLS 1
-static struct v4l2_queryctrl hexium_controls[] = {
- { V4L2_CID_PRIVATE_BASE, V4L2_CTRL_TYPE_BOOLEAN, "B/W", 0, 1, 1, 0, 0 },
-};
-
#define HEXIUM_GEMINI_V_1_0 1
#define HEXIUM_GEMINI_DUAL_V_1_0 2
@@ -76,7 +71,6 @@ struct hexium
int cur_input; /* current input */
v4l2_std_id cur_std; /* current standard */
- int cur_bw; /* current black/white status */
};
/* Samsung KS0127B decoder default registers */
@@ -119,18 +113,10 @@ static struct hexium_data hexium_pal[] = {
{ 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
};
-static struct hexium_data hexium_pal_bw[] = {
- { 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
-};
-
static struct hexium_data hexium_ntsc[] = {
{ 0x01, 0x53 }, { 0x12, 0x04 }, { 0x2D, 0x23 }, { 0x2E, 0x81 }, { -1 , 0xFF }
};
-static struct hexium_data hexium_ntsc_bw[] = {
- { 0x01, 0x53 }, { 0x12, 0x04 }, { 0x2D, 0x23 }, { 0x2E, 0x81 }, { -1 , 0xFF }
-};
-
static struct hexium_data hexium_secam[] = {
{ 0x01, 0x52 }, { 0x12, 0x64 }, { 0x2D, 0x2C }, { 0x2E, 0x9B }, { -1 , 0xFF }
};
@@ -264,93 +250,6 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
return 0;
}
-/* the saa7146 provides some controls (brightness, contrast, saturation)
- which gets registered *after* this function. because of this we have
- to return with a value != 0 even if the function succeeded.. */
-static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- int i;
-
- for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
- if (hexium_controls[i].id == qc->id) {
- *qc = hexium_controls[i];
- DEB_D("VIDIOC_QUERYCTRL %d\n", qc->id);
- return 0;
- }
- }
- return dev->ext_vv_data->core_ops->vidioc_queryctrl(file, fh, qc);
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct hexium *hexium = (struct hexium *) dev->ext_priv;
- int i;
-
- for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
- if (hexium_controls[i].id == vc->id)
- break;
- }
-
- if (i < 0)
- return dev->ext_vv_data->core_ops->vidioc_g_ctrl(file, fh, vc);
-
- if (vc->id == V4L2_CID_PRIVATE_BASE) {
- vc->value = hexium->cur_bw;
- DEB_D("VIDIOC_G_CTRL BW:%d\n", vc->value);
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct hexium *hexium = (struct hexium *) dev->ext_priv;
- int i = 0;
-
- for (i = HEXIUM_CONTROLS - 1; i >= 0; i--) {
- if (hexium_controls[i].id == vc->id)
- break;
- }
-
- if (i < 0)
- return dev->ext_vv_data->core_ops->vidioc_s_ctrl(file, fh, vc);
-
- if (vc->id == V4L2_CID_PRIVATE_BASE)
- hexium->cur_bw = vc->value;
-
- DEB_D("VIDIOC_S_CTRL BW:%d\n", hexium->cur_bw);
-
- if (0 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_pal);
- return 0;
- }
- if (0 == hexium->cur_bw && V4L2_STD_NTSC == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_ntsc);
- return 0;
- }
- if (0 == hexium->cur_bw && V4L2_STD_SECAM == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_secam);
- return 0;
- }
- if (1 == hexium->cur_bw && V4L2_STD_PAL == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_pal_bw);
- return 0;
- }
- if (1 == hexium->cur_bw && V4L2_STD_NTSC == hexium->cur_std) {
- hexium_set_standard(hexium, hexium_ntsc_bw);
- return 0;
- }
- if (1 == hexium->cur_bw && V4L2_STD_SECAM == hexium->cur_std)
- /* fixme: is there no bw secam mode? */
- return -EINVAL;
-
- return -EINVAL;
-}
-
-
static struct saa7146_ext_vv vv_data;
/* this function only gets called when the probing was successful */
@@ -399,12 +298,10 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
hexium->cur_input = 0;
saa7146_vv_init(dev, &vv_data);
- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
- vv_data.ops.vidioc_g_input = vidioc_g_input;
- vv_data.ops.vidioc_s_input = vidioc_s_input;
+
+ vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
if (ret < 0) {
pr_err("cannot register capture v4l2 device. skipping.\n");
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index 23debc967d94..a1eb26d11070 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -41,15 +41,15 @@ static int hexium_num;
#define HEXIUM_INPUTS 9
static struct v4l2_input hexium_inputs[HEXIUM_INPUTS] = {
- { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
+ { 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 0, 0, V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
};
#define HEXIUM_AUDIOS 0
@@ -371,9 +371,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
DEB_EE("\n");
saa7146_vv_init(dev, &vv_data);
- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
- vv_data.ops.vidioc_g_input = vidioc_g_input;
- vv_data.ops.vidioc_s_input = vidioc_s_input;
+ vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
pr_err("cannot register capture v4l2 device. skipping.\n");
return -1;
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 679262ed13bc..5462ce2f60ea 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -866,10 +866,10 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
pci_write_config_dword(pdev, 0x40, 0xffff);
IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%lx\n",
+ "irq: %d, latency: %d, memory: 0x%llx\n",
pdev->device, pdev->revision, pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev->irq, pci_latency, (unsigned long)itv->base_addr);
+ pdev->irq, pci_latency, (u64)itv->base_addr);
return 0;
}
@@ -1007,7 +1007,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
itv->cxhdl.priv = itv;
itv->cxhdl.func = ivtv_api_func;
- IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
+ IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr);
/* PCI Device Setup */
retval = ivtv_setup_pci(itv, pdev, pci_id);
@@ -1017,8 +1017,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
goto free_mem;
/* map io memory */
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
@@ -1034,8 +1034,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
}
if (itv->has_cx23415) {
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
@@ -1056,8 +1056,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
}
/* map registers memory */
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
itv->reg_mem =
ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
@@ -1201,9 +1201,9 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
struct v4l2_ctrl_handler *hdl = itv->v4l2_dev.ctrl_handler;
itv->ctrl_pts = v4l2_ctrl_new_std(hdl, &ivtv_hdl_out_ops,
- V4L2_CID_MPEG_VIDEO_DEC_PTS, 0, 0, 1, 0);
+ V4L2_CID_MPEG_VIDEO_DEC_PTS, 0, 0, 0, 0);
itv->ctrl_frame = v4l2_ctrl_new_std(hdl, &ivtv_hdl_out_ops,
- V4L2_CID_MPEG_VIDEO_DEC_FRAME, 0, 0x7fffffff, 1, 0);
+ V4L2_CID_MPEG_VIDEO_DEC_FRAME, 0, 0, 0, 0);
/* Note: V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO is not supported,
mask that menu item. */
itv->ctrl_audio_playback =
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 2e220028aad2..a7e00f8938f8 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -622,7 +622,7 @@ struct ivtv {
struct v4l2_subdev *sd_video; /* controlling video decoder subdev */
struct v4l2_subdev *sd_audio; /* controlling audio subdev */
struct v4l2_subdev *sd_muxer; /* controlling audio muxer subdev */
- u32 base_addr; /* PCI resource base address */
+ resource_size_t base_addr; /* PCI resource base address */
volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */
volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */
volatile void __iomem *reg_mem; /* pointer to mapped registers */
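
base_addr becomes resource_size_t, whose width depends on CONFIG_PHYS_ADDR_T_64BIT, so the printks above cast it to u64 and print with %llx to keep a single format string for both 32-bit and 64-bit resources. A small sketch of that pattern (the %pa specifier, which takes a pointer to the value, only appears in kernels later than this tree):

/* Sketch: printing a resource_size_t portably. */
static void show_base(struct pci_dev *pdev)
{
	resource_size_t base = pci_resource_start(pdev, 0);

	dev_info(&pdev->dev, "base addr: 0x%llx\n", (u64)base);
}
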
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index c9663e885b9f..9ff69b5a87e2 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -746,8 +746,9 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
return res;
}
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
+unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
@@ -755,7 +756,8 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
unsigned res = 0;
/* Start a capture if there is none */
- if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
+ if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) &&
+ (req_events & (POLLIN | POLLRDNORM))) {
int rc;
rc = ivtv_start_capture(id);
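
ivtv_v4l2_enc_poll() now consults poll_requested_events(), so a poll() that only waits for write or exception readiness no longer implicitly starts an encoder capture; the capture is kicked off only when the caller actually asked for readable data. A sketch of the idiom with placeholder helpers:

/* Sketch: start streaming only when the caller polls for reads. */
static unsigned int my_poll(struct file *filp, poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);

	if (req_events & (POLLIN | POLLRDNORM))
		my_start_capture(filp);		/* hypothetical helper */

	poll_wait(filp, &my_waitq, wait);	/* hypothetical wait queue */
	return my_data_ready(filp) ? POLLIN | POLLRDNORM : 0;
}
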
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 989e556913ed..f7d57b3f2842 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -135,7 +135,6 @@ void ivtv_set_osd_alpha(struct ivtv *itv)
int ivtv_set_speed(struct ivtv *itv, int speed)
{
u32 data[CX2341X_MBOX_MAX_DATA];
- struct ivtv_stream *s;
int single_step = (speed == 1 || speed == -1);
DEFINE_WAIT(wait);
@@ -145,8 +144,6 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
if (speed == itv->speed && !single_step)
return 0;
- s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
-
if (single_step && (speed < 0) == (itv->speed < 0)) {
/* Single step video and no need to change direction */
ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0);
@@ -1468,8 +1465,9 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscripti
switch (sub->type) {
case V4L2_EVENT_VSYNC:
case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
case V4L2_EVENT_CTRL:
- return v4l2_event_subscribe(fh, sub, 0);
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
default:
return -EINVAL;
}
@@ -1827,7 +1825,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
- return -EINVAL;
+ return -ENOTTY;
}
return 0;
}
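
V4L2_EVENT_CTRL subscriptions are now routed through v4l2_ctrl_sub_ev_ops, which lets the control framework replay the current control state to a new subscriber, while VSYNC/EOS keep a plain subscription; unknown ioctls also now return -ENOTTY rather than -EINVAL. For reference, a hedged userspace sketch of consuming such control events (the watched control is illustrative):

/* Sketch (userspace): watching a control for changes; error checks omitted. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void watch_brightness(int fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_CTRL;
	sub.id = V4L2_CID_BRIGHTNESS;
	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	/* usually waited for with poll(POLLPRI) before dequeuing */
	ioctl(fd, VIDIOC_DQEVENT, &ev);
}
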
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 7ea5ca7f012b..6738592aa35d 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -228,6 +228,10 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
s->vdev->release = video_device_release;
s->vdev->tvnorms = V4L2_STD_ALL;
s->vdev->lock = &itv->serialize_lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &s->vdev->flags);
set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
ivtv_set_funcs(s->vdev);
return 0;
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index e5e7fa9e737b..05b94aa8ba32 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -1293,6 +1293,7 @@ static int __init ivtvfb_init(void)
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, &registered, ivtvfb_callback_init);
+ (void)err; /* suppress compiler warning */
if (!registered) {
printk(KERN_ERR "ivtvfb: no cards found\n");
return -ENODEV;
@@ -1309,6 +1310,7 @@ static void ivtvfb_cleanup(void)
drv = driver_find("ivtv", &pci_bus_type);
err = driver_for_each_device(drv, NULL, NULL, ivtvfb_callback_cleanup);
+ (void)err; /* suppress compiler warning */
}
module_init(ivtvfb_init);
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 4b021e1ee5f2..bb589917b65b 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -21,11 +21,6 @@
extern int m5mols_debug;
-#define to_m5mols(__sd) container_of(__sd, struct m5mols_info, sd)
-
-#define to_sd(__ctrl) \
- (&container_of(__ctrl->handler, struct m5mols_info, handle)->sd)
-
enum m5mols_restype {
M5MOLS_RESTYPE_MONITOR,
M5MOLS_RESTYPE_CAPTURE,
@@ -163,21 +158,27 @@ struct m5mols_version {
* @ffmt: current fmt according to resolution type
* @res_type: current resolution type
* @irq_waitq: waitqueue for the capture
- * @flags: state variable for the interrupt handler
+ * @irq_done: set to 1 in the interrupt handler
* @handle: control handler
- * @autoexposure: Auto Exposure control
- * @exposure: Exposure control
- * @autowb: Auto White Balance control
- * @colorfx: Color effect control
- * @saturation: Saturation control
- * @zoom: Zoom control
+ * @auto_exposure: auto/manual exposure control
+ * @exposure_bias: exposure compensation control
+ * @exposure: manual exposure control
+ * @metering: exposure metering control
+ * @auto_iso: auto/manual ISO sensitivity control
+ * @iso: manual ISO sensitivity control
+ * @auto_wb: auto white balance control
+ * @lock_3a: 3A lock control
+ * @colorfx: color effect control
+ * @saturation: saturation control
+ * @zoom: zoom control
+ * @wdr: wide dynamic range control
+ * @stabilization: image stabilization control
+ * @jpeg_quality: JPEG compression quality control
* @ver: information of the version
* @cap: the capture mode attributes
- * @power: current sensor's power status
* @isp_ready: 1 when the ISP controller has completed booting
+ * @power: current sensor's power status
* @ctrl_sync: 1 when the control handler state is restored in H/W
- * @lock_ae: true means the Auto Exposure is locked
- * @lock_awb: true means the Aut WhiteBalance is locked
* @resolution: register value for current resolution
* @mode: register value for current operation mode
* @set_power: optional power callback to the board code
@@ -193,15 +194,27 @@ struct m5mols_info {
atomic_t irq_done;
struct v4l2_ctrl_handler handle;
+ struct {
+ /* exposure/exposure bias/auto exposure cluster */
+ struct v4l2_ctrl *auto_exposure;
+ struct v4l2_ctrl *exposure_bias;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *metering;
+ };
+ struct {
+ /* iso/auto iso cluster */
+ struct v4l2_ctrl *auto_iso;
+ struct v4l2_ctrl *iso;
+ };
+ struct v4l2_ctrl *auto_wb;
- /* Autoexposure/exposure control cluster */
- struct v4l2_ctrl *autoexposure;
- struct v4l2_ctrl *exposure;
-
- struct v4l2_ctrl *autowb;
+ struct v4l2_ctrl *lock_3a;
struct v4l2_ctrl *colorfx;
struct v4l2_ctrl *saturation;
struct v4l2_ctrl *zoom;
+ struct v4l2_ctrl *wdr;
+ struct v4l2_ctrl *stabilization;
+ struct v4l2_ctrl *jpeg_quality;
struct m5mols_version ver;
struct m5mols_capture cap;
@@ -210,8 +223,6 @@ struct m5mols_info {
unsigned int power:1;
unsigned int ctrl_sync:1;
- bool lock_ae;
- bool lock_awb;
u8 resolution;
u8 mode;
@@ -282,7 +293,7 @@ int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask,
* The available executing order between each modes are as follows:
* PARAMETER <---> MONITOR <---> CAPTURE
*/
-int m5mols_mode(struct m5mols_info *info, u8 mode);
+int m5mols_set_mode(struct m5mols_info *info, u8 mode);
int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg);
int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 condition, u32 timeout);
@@ -291,9 +302,33 @@ int m5mols_start_capture(struct m5mols_info *info);
int m5mols_do_scenemode(struct m5mols_info *info, u8 mode);
int m5mols_lock_3a(struct m5mols_info *info, bool lock);
int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
+int m5mols_init_controls(struct v4l2_subdev *sd);
/* The firmware function */
int m5mols_update_fw(struct v4l2_subdev *sd,
int (*set_power)(struct m5mols_info *, bool));
+static inline struct m5mols_info *to_m5mols(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct m5mols_info, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ struct m5mols_info *info = container_of(ctrl->handler,
+ struct m5mols_info, handle);
+ return &info->sd;
+}
+
+static inline void m5mols_set_ctrl_mode(struct v4l2_ctrl *ctrl,
+ unsigned int mode)
+{
+ ctrl->priv = (void *)mode;
+}
+
+static inline unsigned int m5mols_get_ctrl_mode(struct v4l2_ctrl *ctrl)
+{
+ return (unsigned int)ctrl->priv;
+}
+
#endif /* M5MOLS_H */
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index ba25e8e2ba4c..cb243bd278ce 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -106,7 +106,6 @@ static int m5mols_capture_info(struct m5mols_info *info)
int m5mols_start_capture(struct m5mols_info *info)
{
struct v4l2_subdev *sd = &info->sd;
- u8 resolution = info->resolution;
int ret;
/*
@@ -114,22 +113,18 @@ int m5mols_start_capture(struct m5mols_info *info)
* format. The frame capture is initiated during switching from Monitor
* to Capture mode.
*/
- ret = m5mols_mode(info, REG_MONITOR);
+ ret = m5mols_set_mode(info, REG_MONITOR);
if (!ret)
ret = m5mols_restore_controls(info);
if (!ret)
ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
if (!ret)
- ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
+ ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, info->resolution);
if (!ret)
- ret = m5mols_lock_3a(info, true);
- if (!ret)
- ret = m5mols_mode(info, REG_CAPTURE);
+ ret = m5mols_set_mode(info, REG_CAPTURE);
if (!ret)
/* Wait until a frame is captured to ISP internal memory */
ret = m5mols_wait_interrupt(sd, REG_INT_CAPTURE, 2000);
- if (!ret)
- ret = m5mols_lock_3a(info, false);
if (ret)
return ret;
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
index d135d20d09cf..392a028730e2 100644
--- a/drivers/media/video/m5mols/m5mols_controls.c
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -139,7 +139,7 @@ int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
if (mode > REG_SCENE_CANDLE)
return -EINVAL;
- ret = m5mols_lock_3a(info, false);
+ ret = v4l2_ctrl_s_ctrl(info->lock_3a, 0);
if (!ret)
ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, mode);
if (!ret)
@@ -169,7 +169,7 @@ int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
if (!ret)
ret = m5mols_write(sd, AE_ISO, scenemode.iso);
if (!ret)
- ret = m5mols_mode(info, REG_CAPTURE);
+ ret = m5mols_set_mode(info, REG_CAPTURE);
if (!ret)
ret = m5mols_write(sd, CAPP_WDR_EN, scenemode.wdr);
if (!ret)
@@ -181,119 +181,448 @@ int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
if (!ret)
ret = m5mols_write(sd, CAPC_MODE, scenemode.capt_mode);
if (!ret)
- ret = m5mols_mode(info, REG_MONITOR);
+ ret = m5mols_set_mode(info, REG_MONITOR);
return ret;
}
-static int m5mols_lock_ae(struct m5mols_info *info, bool lock)
+static int m5mols_3a_lock(struct m5mols_info *info, struct v4l2_ctrl *ctrl)
{
+ bool af_lock = ctrl->val & V4L2_LOCK_FOCUS;
int ret = 0;
- if (info->lock_ae != lock)
- ret = m5mols_write(&info->sd, AE_LOCK,
- lock ? REG_AE_LOCK : REG_AE_UNLOCK);
- if (!ret)
- info->lock_ae = lock;
+ if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_EXPOSURE) {
+ bool ae_lock = ctrl->val & V4L2_LOCK_EXPOSURE;
+
+ ret = m5mols_write(&info->sd, AE_LOCK, ae_lock ?
+ REG_AE_LOCK : REG_AE_UNLOCK);
+ if (ret)
+ return ret;
+ }
+
+ if (((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_WHITE_BALANCE)
+ && info->auto_wb->val) {
+ bool awb_lock = ctrl->val & V4L2_LOCK_WHITE_BALANCE;
+
+ ret = m5mols_write(&info->sd, AWB_LOCK, awb_lock ?
+ REG_AWB_LOCK : REG_AWB_UNLOCK);
+ if (ret)
+ return ret;
+ }
+
+ if (!info->ver.af || !af_lock)
+ return ret;
+
+ if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
+ ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
+
+ return ret;
+}
+
+static int m5mols_set_metering_mode(struct m5mols_info *info, int mode)
+{
+ unsigned int metering;
+
+ switch (mode) {
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ metering = REG_AE_CENTER;
+ break;
+ case V4L2_EXPOSURE_METERING_SPOT:
+ metering = REG_AE_SPOT;
+ break;
+ default:
+ metering = REG_AE_ALL;
+ break;
+ }
+
+ return m5mols_write(&info->sd, AE_MODE, metering);
+}
+
+static int m5mols_set_exposure(struct m5mols_info *info, int exposure)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ int ret = 0;
+
+ if (exposure == V4L2_EXPOSURE_AUTO) {
+ /* Unlock auto exposure */
+ info->lock_3a->val &= ~V4L2_LOCK_EXPOSURE;
+ m5mols_3a_lock(info, info->lock_3a);
+
+ ret = m5mols_set_metering_mode(info, info->metering->val);
+ if (ret < 0)
+ return ret;
+
+ v4l2_dbg(1, m5mols_debug, sd,
+ "%s: exposure bias: %#x, metering: %#x\n",
+ __func__, info->exposure_bias->val,
+ info->metering->val);
+
+ return m5mols_write(sd, AE_INDEX, info->exposure_bias->val);
+ }
+
+ if (exposure == V4L2_EXPOSURE_MANUAL) {
+ ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
+ if (ret == 0)
+ ret = m5mols_write(sd, AE_MAN_GAIN_MON,
+ info->exposure->val);
+ if (ret == 0)
+ ret = m5mols_write(sd, AE_MAN_GAIN_CAP,
+ info->exposure->val);
+
+ v4l2_dbg(1, m5mols_debug, sd, "%s: exposure: %#x\n",
+ __func__, info->exposure->val);
+ }
+
+ return ret;
+}
+
+static int m5mols_set_white_balance(struct m5mols_info *info, int val)
+{
+ static const unsigned short wb[][2] = {
+ { V4L2_WHITE_BALANCE_INCANDESCENT, REG_AWB_INCANDESCENT },
+ { V4L2_WHITE_BALANCE_FLUORESCENT, REG_AWB_FLUORESCENT_1 },
+ { V4L2_WHITE_BALANCE_FLUORESCENT_H, REG_AWB_FLUORESCENT_2 },
+ { V4L2_WHITE_BALANCE_HORIZON, REG_AWB_HORIZON },
+ { V4L2_WHITE_BALANCE_DAYLIGHT, REG_AWB_DAYLIGHT },
+ { V4L2_WHITE_BALANCE_FLASH, REG_AWB_LEDLIGHT },
+ { V4L2_WHITE_BALANCE_CLOUDY, REG_AWB_CLOUDY },
+ { V4L2_WHITE_BALANCE_SHADE, REG_AWB_SHADE },
+ { V4L2_WHITE_BALANCE_AUTO, REG_AWB_AUTO },
+ };
+ int i;
+ struct v4l2_subdev *sd = &info->sd;
+ int ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(wb); i++) {
+ int awb;
+ if (wb[i][0] != val)
+ continue;
+
+ v4l2_dbg(1, m5mols_debug, sd,
+ "Setting white balance to: %#x\n", wb[i][0]);
+
+ awb = wb[i][0] == V4L2_WHITE_BALANCE_AUTO;
+ ret = m5mols_write(sd, AWB_MODE, awb ? REG_AWB_AUTO :
+ REG_AWB_PRESET);
+ if (ret < 0)
+ return ret;
+
+ if (!awb)
+ ret = m5mols_write(sd, AWB_MANUAL, wb[i][1]);
+ }
return ret;
}
-static int m5mols_lock_awb(struct m5mols_info *info, bool lock)
+static int m5mols_set_saturation(struct m5mols_info *info, int val)
+{
+ int ret = m5mols_write(&info->sd, MON_CHROMA_LVL, val);
+ if (ret < 0)
+ return ret;
+
+ return m5mols_write(&info->sd, MON_CHROMA_EN, REG_CHROMA_ON);
+}
+
+static int m5mols_set_color_effect(struct m5mols_info *info, int val)
{
+ unsigned int m_effect = REG_COLOR_EFFECT_OFF;
+ unsigned int p_effect = REG_EFFECT_OFF;
+ unsigned int cfix_r = 0, cfix_b = 0;
+ struct v4l2_subdev *sd = &info->sd;
int ret = 0;
- if (info->lock_awb != lock)
- ret = m5mols_write(&info->sd, AWB_LOCK,
- lock ? REG_AWB_LOCK : REG_AWB_UNLOCK);
+ switch (val) {
+ case V4L2_COLORFX_BW:
+ m_effect = REG_COLOR_EFFECT_ON;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ p_effect = REG_EFFECT_NEGA;
+ break;
+ case V4L2_COLORFX_EMBOSS:
+ p_effect = REG_EFFECT_EMBOSS;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ m_effect = REG_COLOR_EFFECT_ON;
+ cfix_r = REG_CFIXR_SEPIA;
+ cfix_b = REG_CFIXB_SEPIA;
+ break;
+ }
+
+ ret = m5mols_write(sd, PARM_EFFECT, p_effect);
if (!ret)
- info->lock_awb = lock;
+ ret = m5mols_write(sd, MON_EFFECT, m_effect);
+
+ if (ret == 0 && m_effect == REG_COLOR_EFFECT_ON) {
+ ret = m5mols_write(sd, MON_CFIXR, cfix_r);
+ if (!ret)
+ ret = m5mols_write(sd, MON_CFIXB, cfix_b);
+ }
+
+ v4l2_dbg(1, m5mols_debug, sd,
+ "p_effect: %#x, m_effect: %#x, r: %#x, b: %#x (%d)\n",
+ p_effect, m_effect, cfix_r, cfix_b, ret);
return ret;
}
-/* m5mols_lock_3a() - Lock 3A(Auto Exposure, Auto Whitebalance, Auto Focus) */
-int m5mols_lock_3a(struct m5mols_info *info, bool lock)
+static int m5mols_set_iso(struct m5mols_info *info, int auto_iso)
+{
+ u32 iso = auto_iso ? 0 : info->iso->val + 1;
+
+ return m5mols_write(&info->sd, AE_ISO, iso);
+}
+
+static int m5mols_set_wdr(struct m5mols_info *info, int wdr)
{
int ret;
- ret = m5mols_lock_ae(info, lock);
- if (!ret)
- ret = m5mols_lock_awb(info, lock);
- /* Don't need to handle unlocking AF */
- if (!ret && is_available_af(info) && lock)
- ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
+ ret = m5mols_write(&info->sd, MON_TONE_CTL, wdr ? 9 : 5);
+ if (ret < 0)
+ return ret;
+
+ ret = m5mols_set_mode(info, REG_CAPTURE);
+ if (ret < 0)
+ return ret;
+
+ return m5mols_write(&info->sd, CAPP_WDR_EN, wdr);
+}
+
+static int m5mols_set_stabilization(struct m5mols_info *info, int val)
+{
+ struct v4l2_subdev *sd = &info->sd;
+ unsigned int evp = val ? 0xe : 0x0;
+ int ret;
+
+ ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, evp);
+ if (ret < 0)
+ return ret;
+
+ return m5mols_write(sd, AE_EV_PRESET_CAPTURE, evp);
+}
+
+static int m5mols_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct m5mols_info *info = to_m5mols(sd);
+ int ret = 0;
+ u8 status;
+
+ v4l2_dbg(1, m5mols_debug, sd, "%s: ctrl: %s (%d)\n",
+ __func__, ctrl->name, info->isp_ready);
+
+ if (!info->isp_ready)
+ return -EBUSY;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ ret = m5mols_read_u8(sd, AE_ISO, &status);
+ if (ret == 0)
+ ctrl->val = !status;
+ if (status != REG_ISO_AUTO)
+ info->iso->val = status - 1;
+ break;
+
+ case V4L2_CID_3A_LOCK:
+ ctrl->val &= ~0x7;
+
+ ret = m5mols_read_u8(sd, AE_LOCK, &status);
+ if (ret)
+ return ret;
+ if (status)
+ info->lock_3a->val |= V4L2_LOCK_EXPOSURE;
+
+ ret = m5mols_read_u8(sd, AWB_LOCK, &status);
+ if (ret)
+ return ret;
+ if (status)
+ info->lock_3a->val |= V4L2_LOCK_WHITE_BALANCE;
+
+ ret = m5mols_read_u8(sd, AF_EXECUTE, &status);
+ if (!status)
+ info->lock_3a->val |= V4L2_LOCK_FOCUS;
+ break;
+ }
return ret;
}
-/* m5mols_set_ctrl() - The main s_ctrl function called by m5mols_set_ctrl() */
-int m5mols_set_ctrl(struct v4l2_ctrl *ctrl)
+static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ unsigned int ctrl_mode = m5mols_get_ctrl_mode(ctrl);
struct v4l2_subdev *sd = to_sd(ctrl);
struct m5mols_info *info = to_m5mols(sd);
- int ret;
+ int last_mode = info->mode;
+ int ret = 0;
+
+ /*
+ * If needed, defer restoring the controls until
+ * the device is fully initialized.
+ */
+ if (!info->isp_ready) {
+ info->ctrl_sync = 0;
+ return 0;
+ }
+
+ v4l2_dbg(1, m5mols_debug, sd, "%s: %s, val: %d, priv: %#x\n",
+ __func__, ctrl->name, ctrl->val, (int)ctrl->priv);
+
+ if (ctrl_mode && ctrl_mode != info->mode) {
+ ret = m5mols_set_mode(info, ctrl_mode);
+ if (ret < 0)
+ return ret;
+ }
switch (ctrl->id) {
+ case V4L2_CID_3A_LOCK:
+ ret = m5mols_3a_lock(info, ctrl);
+ break;
+
case V4L2_CID_ZOOM_ABSOLUTE:
- return m5mols_write(sd, MON_ZOOM, ctrl->val);
+ ret = m5mols_write(sd, MON_ZOOM, ctrl->val);
+ break;
case V4L2_CID_EXPOSURE_AUTO:
- ret = m5mols_lock_ae(info,
- ctrl->val == V4L2_EXPOSURE_AUTO ? false : true);
- if (!ret && ctrl->val == V4L2_EXPOSURE_AUTO)
- ret = m5mols_write(sd, AE_MODE, REG_AE_ALL);
- if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) {
- int val = info->exposure->val;
- ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
- if (!ret)
- ret = m5mols_write(sd, AE_MAN_GAIN_MON, val);
- if (!ret)
- ret = m5mols_write(sd, AE_MAN_GAIN_CAP, val);
- }
- return ret;
+ ret = m5mols_set_exposure(info, ctrl->val);
+ break;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = m5mols_lock_awb(info, ctrl->val ? false : true);
- if (!ret)
- ret = m5mols_write(sd, AWB_MODE, ctrl->val ?
- REG_AWB_AUTO : REG_AWB_PRESET);
- return ret;
+ case V4L2_CID_ISO_SENSITIVITY:
+ ret = m5mols_set_iso(info, ctrl->val);
+ break;
+
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ ret = m5mols_set_white_balance(info, ctrl->val);
+ break;
case V4L2_CID_SATURATION:
- ret = m5mols_write(sd, MON_CHROMA_LVL, ctrl->val);
- if (!ret)
- ret = m5mols_write(sd, MON_CHROMA_EN, REG_CHROMA_ON);
- return ret;
+ ret = m5mols_set_saturation(info, ctrl->val);
+ break;
case V4L2_CID_COLORFX:
- /*
- * This control uses two kinds of registers: normal & color.
- * The normal effect belongs to category 1, while the color
- * one belongs to category 2.
- *
- * The normal effect uses one register: CAT1_EFFECT.
- * The color effect uses three registers:
- * CAT2_COLOR_EFFECT, CAT2_CFIXR, CAT2_CFIXB.
- */
- ret = m5mols_write(sd, PARM_EFFECT,
- ctrl->val == V4L2_COLORFX_NEGATIVE ? REG_EFFECT_NEGA :
- ctrl->val == V4L2_COLORFX_EMBOSS ? REG_EFFECT_EMBOSS :
- REG_EFFECT_OFF);
- if (!ret)
- ret = m5mols_write(sd, MON_EFFECT,
- ctrl->val == V4L2_COLORFX_SEPIA ?
- REG_COLOR_EFFECT_ON : REG_COLOR_EFFECT_OFF);
- if (!ret)
- ret = m5mols_write(sd, MON_CFIXR,
- ctrl->val == V4L2_COLORFX_SEPIA ?
- REG_CFIXR_SEPIA : 0);
- if (!ret)
- ret = m5mols_write(sd, MON_CFIXB,
- ctrl->val == V4L2_COLORFX_SEPIA ?
- REG_CFIXB_SEPIA : 0);
+ ret = m5mols_set_color_effect(info, ctrl->val);
+ break;
+
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ ret = m5mols_set_wdr(info, ctrl->val);
+ break;
+
+ case V4L2_CID_IMAGE_STABILIZATION:
+ ret = m5mols_set_stabilization(info, ctrl->val);
+ break;
+
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ret = m5mols_write(sd, CAPP_JPEG_RATIO, ctrl->val);
+ break;
+ }
+
+ if (ret == 0 && info->mode != last_mode)
+ ret = m5mols_set_mode(info, last_mode);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
+ .g_volatile_ctrl = m5mols_g_volatile_ctrl,
+ .s_ctrl = m5mols_s_ctrl,
+};
+
+/* Supported manual ISO values */
+static const s64 iso_qmenu[] = {
+ /* AE_ISO: 0x01...0x07 */
+ 50, 100, 200, 400, 800, 1600, 3200
+};
+
+/* Supported Exposure Bias values, -2.0EV...+2.0EV */
+static const s64 ev_bias_qmenu[] = {
+ /* AE_INDEX: 0x00...0x08 */
+ -2000, -1500, -1000, -500, 0, 500, 1000, 1500, 2000
+};
+
+int m5mols_init_controls(struct v4l2_subdev *sd)
+{
+ struct m5mols_info *info = to_m5mols(sd);
+ u16 exposure_max;
+ u16 zoom_step;
+ int ret;
+
+ /* Determine the firmware dependent control range and step values */
+ ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max);
+ if (ret < 0)
+ return ret;
+
+ zoom_step = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
+ v4l2_ctrl_handler_init(&info->handle, 20);
+
+ info->auto_wb = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ 9, ~0x3fe, V4L2_WHITE_BALANCE_AUTO);
+
+ /* Exposure control cluster */
+ info->auto_exposure = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
+ 1, ~0x03, V4L2_EXPOSURE_AUTO);
+
+ info->exposure = v4l2_ctrl_new_std(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
+ 0, exposure_max, 1, exposure_max / 2);
+
+ info->exposure_bias = v4l2_ctrl_new_int_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_AUTO_EXPOSURE_BIAS,
+ ARRAY_SIZE(ev_bias_qmenu) - 1,
+ ARRAY_SIZE(ev_bias_qmenu)/2 - 1,
+ ev_bias_qmenu);
+
+ info->metering = v4l2_ctrl_new_std_menu(&info->handle,
+ &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_METERING,
+ 2, ~0x7, V4L2_EXPOSURE_METERING_AVERAGE);
+
+ /* ISO control cluster */
+ info->auto_iso = v4l2_ctrl_new_std_menu(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_ISO_SENSITIVITY_AUTO, 1, ~0x03, 1);
+
+ info->iso = v4l2_ctrl_new_int_menu(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_ISO_SENSITIVITY, ARRAY_SIZE(iso_qmenu) - 1,
+ ARRAY_SIZE(iso_qmenu)/2 - 1, iso_qmenu);
+
+ info->saturation = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_SATURATION, 1, 5, 1, 3);
+
+ info->zoom = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_ZOOM_ABSOLUTE, 1, 70, zoom_step, 1);
+
+ info->colorfx = v4l2_ctrl_new_std_menu(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_COLORFX, 4, 0, V4L2_COLORFX_NONE);
+
+ info->wdr = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_WIDE_DYNAMIC_RANGE, 0, 1, 1, 0);
+
+ info->stabilization = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_IMAGE_STABILIZATION, 0, 1, 1, 0);
+
+ info->jpeg_quality = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 1, 100, 1, 80);
+
+ info->lock_3a = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops,
+ V4L2_CID_3A_LOCK, 0, 0x7, 0, 0);
+
+ if (info->handle.error) {
+ int ret = info->handle.error;
+ v4l2_err(sd, "Failed to initialize controls: %d\n", ret);
+ v4l2_ctrl_handler_free(&info->handle);
return ret;
}
- return -EINVAL;
+ v4l2_ctrl_auto_cluster(4, &info->auto_exposure, 1, false);
+ info->auto_iso->flags |= V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_UPDATE;
+ v4l2_ctrl_auto_cluster(2, &info->auto_iso, 0, false);
+
+ info->lock_3a->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ m5mols_set_ctrl_mode(info->auto_exposure, REG_PARAMETER);
+ m5mols_set_ctrl_mode(info->auto_wb, REG_PARAMETER);
+ m5mols_set_ctrl_mode(info->colorfx, REG_MONITOR);
+
+ sd->ctrl_handler = &info->handle;
+
+ return 0;
}
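Note: the v4l2_ctrl_auto_cluster() calls at the end of m5mols_init_controls() depend on the control pointers sitting consecutively in memory with the auto control first, which is what the anonymous structs added to struct m5mols_info guarantee. A generic sketch of the same idiom (invented foo names, not m5mols code):

#include <media/v4l2-ctrls.h>

struct foo_state {
	struct v4l2_ctrl_handler handler;
	struct {	/* auto/manual gain cluster, auto control first */
		struct v4l2_ctrl *auto_gain;
		struct v4l2_ctrl *gain;
	};
};

static int foo_init_controls(struct foo_state *foo,
			     const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(&foo->handler, 2);

	foo->auto_gain = v4l2_ctrl_new_std(&foo->handler, ops,
					   V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	foo->gain = v4l2_ctrl_new_std(&foo->handler, ops,
				      V4L2_CID_GAIN, 0, 255, 1, 32);
	if (foo->handler.error)
		return foo->handler.error;

	/* 2 controls; auto_gain == 0 means manual; don't mark gain volatile */
	v4l2_ctrl_auto_cluster(2, &foo->auto_gain, 0, false);
	return 0;
}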
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index d718aee01c77..ac7d28b6ddf2 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -362,14 +362,14 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
}
/**
- * m5mols_mode - manage the M-5MOLS's mode
+ * m5mols_set_mode - set the M-5MOLS controller mode
* @mode: the required operation mode
*
* The commands of M-5MOLS are grouped into specific modes. Each functionality
- * can be guaranteed only when the sensor is operating in mode which which
- * a command belongs to.
+ * can be guaranteed only when the sensor is operating in the mode to which
+ * a command belongs.
*/
-int m5mols_mode(struct m5mols_info *info, u8 mode)
+int m5mols_set_mode(struct m5mols_info *info, u8 mode)
{
struct v4l2_subdev *sd = &info->sd;
int ret = -EINVAL;
@@ -645,13 +645,13 @@ static int m5mols_start_monitor(struct m5mols_info *info)
struct v4l2_subdev *sd = &info->sd;
int ret;
- ret = m5mols_mode(info, REG_PARAMETER);
+ ret = m5mols_set_mode(info, REG_PARAMETER);
if (!ret)
ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution);
if (!ret)
ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30);
if (!ret)
- ret = m5mols_mode(info, REG_MONITOR);
+ ret = m5mols_set_mode(info, REG_MONITOR);
if (!ret)
ret = m5mols_restore_controls(info);
@@ -674,42 +674,13 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
- return m5mols_mode(info, REG_PARAMETER);
+ return m5mols_set_mode(info, REG_PARAMETER);
}
static const struct v4l2_subdev_video_ops m5mols_video_ops = {
.s_stream = m5mols_s_stream,
};
-static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_subdev *sd = to_sd(ctrl);
- struct m5mols_info *info = to_m5mols(sd);
- int ispstate = info->mode;
- int ret;
-
- /*
- * If needed, defer restoring the controls until
- * the device is fully initialized.
- */
- if (!info->isp_ready) {
- info->ctrl_sync = 0;
- return 0;
- }
-
- ret = m5mols_mode(info, REG_PARAMETER);
- if (ret < 0)
- return ret;
- ret = m5mols_set_ctrl(ctrl);
- if (ret < 0)
- return ret;
- return m5mols_mode(info, ispstate);
-}
-
-static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
- .s_ctrl = m5mols_s_ctrl,
-};
-
static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
{
struct v4l2_subdev *sd = &info->sd;
@@ -802,52 +773,6 @@ static int m5mols_fw_start(struct v4l2_subdev *sd)
return ret;
}
-static int m5mols_init_controls(struct m5mols_info *info)
-{
- struct v4l2_subdev *sd = &info->sd;
- u16 max_exposure;
- u16 step_zoom;
- int ret;
-
- /* Determine value's range & step of controls for various FW version */
- ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &max_exposure);
- if (!ret)
- step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
- if (ret)
- return ret;
-
- v4l2_ctrl_handler_init(&info->handle, 6);
- info->autowb = v4l2_ctrl_new_std(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE,
- 0, 1, 1, 0);
- info->saturation = v4l2_ctrl_new_std(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_SATURATION,
- 1, 5, 1, 3);
- info->zoom = v4l2_ctrl_new_std(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE,
- 1, 70, step_zoom, 1);
- info->exposure = v4l2_ctrl_new_std(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
- 0, max_exposure, 1, (int)max_exposure/2);
- info->colorfx = v4l2_ctrl_new_std_menu(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_COLORFX,
- 4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE);
- info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle,
- &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
- 1, 0, V4L2_EXPOSURE_AUTO);
-
- sd->ctrl_handler = &info->handle;
- if (info->handle.error) {
- v4l2_err(sd, "Failed to initialize controls: %d\n", ret);
- v4l2_ctrl_handler_free(&info->handle);
- return info->handle.error;
- }
-
- v4l2_ctrl_cluster(2, &info->autoexposure);
-
- return 0;
-}
-
/**
* m5mols_s_power - Main sensor power control function
*
@@ -868,7 +793,7 @@ static int m5mols_s_power(struct v4l2_subdev *sd, int on)
}
if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) {
- ret = m5mols_mode(info, REG_MONITOR);
+ ret = m5mols_set_mode(info, REG_MONITOR);
if (!ret)
ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP);
if (!ret)
@@ -1010,7 +935,7 @@ static int __devinit m5mols_probe(struct i2c_client *client,
ret = m5mols_fw_start(sd);
if (!ret)
- ret = m5mols_init_controls(info);
+ ret = m5mols_init_controls(sd);
m5mols_sensor_power(info, false);
if (!ret)
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
index ae4aced0f9b2..14d4be72aeff 100644
--- a/drivers/media/video/m5mols/m5mols_reg.h
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -310,6 +310,7 @@
#define REG_JPEG 0x10
#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, 0x01, 1)
+#define CAPP_JPEG_RATIO I2C_REG(CAT_CAPT_PARM, 0x17, 1)
#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, 0x1d, 1)
#define REG_MCC_OFF 0x00
diff --git a/drivers/media/video/marvell-ccic/mcam-core.c b/drivers/media/video/marvell-ccic/mcam-core.c
index 996ac34d9a89..ce2b7b4788d6 100644
--- a/drivers/media/video/marvell-ccic/mcam-core.c
+++ b/drivers/media/video/marvell-ccic/mcam-core.c
@@ -1356,7 +1356,6 @@ static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
goto out;
}
mcam_set_config_needed(cam, 1);
- ret = 0;
out:
mutex_unlock(&cam->s_mutex);
return ret;
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 12897e8a3314..3945556f5733 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -40,7 +40,7 @@ MODULE_VERSION("0.1.1");
#define MIN_H 32
#define MAX_W 640
#define MAX_H 480
-#define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */
+#define DIM_ALIGN_MASK 7 /* 8-byte alignment for line length */
/* Flags that indicate a format can be used for capture/output */
#define MEM2MEM_CAPTURE (1 << 0)
@@ -110,22 +110,6 @@ enum {
V4L2_M2M_DST = 1,
};
-/* Source and destination queue data */
-static struct m2mtest_q_data q_data[2];
-
-static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
-{
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return &q_data[V4L2_M2M_SRC];
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return &q_data[V4L2_M2M_DST];
- default:
- BUG();
- }
- return NULL;
-}
-
#define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE
#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1)
@@ -198,8 +182,26 @@ struct m2mtest_ctx {
int aborting;
struct v4l2_m2m_ctx *m2m_ctx;
+
+ /* Source and destination queue data */
+ struct m2mtest_q_data q_data[2];
};
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+
static struct v4l2_queryctrl *get_ctrl(int id)
{
int i;
@@ -223,7 +225,7 @@ static int device_process(struct m2mtest_ctx *ctx,
int tile_w, bytes_left;
int width, height, bytesperline;
- q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
width = q_data->width;
height = q_data->height;
@@ -436,7 +438,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
if (!vq)
return -EINVAL;
- q_data = get_q_data(f->type);
+ q_data = get_q_data(ctx, f->type);
f->fmt.pix.width = q_data->width;
f->fmt.pix.height = q_data->height;
@@ -535,7 +537,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
if (!vq)
return -EINVAL;
- q_data = get_q_data(f->type);
+ q_data = get_q_data(ctx, f->type);
if (!q_data)
return -EINVAL;
@@ -747,7 +749,7 @@ static int m2mtest_queue_setup(struct vb2_queue *vq,
struct m2mtest_q_data *q_data;
unsigned int size, count = *nbuffers;
- q_data = get_q_data(vq->type);
+ q_data = get_q_data(ctx, vq->type);
size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
@@ -775,7 +777,7 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
- q_data = get_q_data(vb->vb2_queue->type);
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
@@ -860,6 +862,9 @@ static int m2mtest_open(struct file *file)
ctx->transtime = MEM2MEM_DEF_TRANSTIME;
ctx->num_processed = 0;
+ ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+ ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
@@ -958,6 +963,10 @@ static int m2mtest_probe(struct platform_device *pdev)
}
*vfd = m2mtest_videodev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->lock = &dev->dev_mutex;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
@@ -982,9 +991,6 @@ static int m2mtest_probe(struct platform_device *pdev)
goto err_m2m;
}
- q_data[V4L2_M2M_SRC].fmt = &formats[0];
- q_data[V4L2_M2M_DST].fmt = &formats[0];
-
return 0;
v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index b09a3c80a15e..7bc775219f97 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1570,7 +1570,7 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
return meyeioc_stilljcapt((int *) arg);
default:
- return -EINVAL;
+ return -ENOTTY;
}
}
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index 82ce50721de3..aeb22be7dcbd 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -597,19 +597,23 @@ static int msp_log_status(struct v4l2_subdev *sd)
return 0;
}
-static int msp_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int msp_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
v4l_dbg(1, msp_debug, client, "suspend\n");
msp_reset(client);
return 0;
}
-static int msp_resume(struct i2c_client *client)
+static int msp_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
v4l_dbg(1, msp_debug, client, "resume\n");
msp_wake_thread(client);
return 0;
}
+#endif
/* ----------------------------------------------------------------------- */
@@ -863,6 +867,10 @@ static int msp_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
+static const struct dev_pm_ops msp3400_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msp_suspend, msp_resume)
+};
+
static const struct i2c_device_id msp_id[] = {
{ "msp3400", 0 },
{ }
@@ -873,11 +881,10 @@ static struct i2c_driver msp_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "msp3400",
+ .pm = &msp3400_pm_ops,
},
.probe = msp_probe,
.remove = msp_remove,
- .suspend = msp_suspend,
- .resume = msp_resume,
.id_table = msp_id,
};
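Note: the msp3400 change above is the usual conversion from the legacy I2C .suspend/.resume callbacks to dev_pm_ops: the handlers take a struct device, recover the client with to_i2c_client(), are wrapped in CONFIG_PM_SLEEP so they compile away when sleep support is disabled, and are wired up through SET_SYSTEM_SLEEP_PM_OPS(). A stripped-down sketch with invented names:

#include <linux/i2c.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "suspend\n");	/* quiesce the chip here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "resume\n");	/* re-program the chip here */
	return 0;
}
#endif

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};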
diff --git a/drivers/media/video/mt9m032.c b/drivers/media/video/mt9m032.c
index 645973c5feb0..3c1e626139b7 100644
--- a/drivers/media/video/mt9m032.c
+++ b/drivers/media/video/mt9m032.c
@@ -838,9 +838,9 @@ static int mt9m032_remove(struct i2c_client *client)
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9m032 *sensor = to_mt9m032(subdev);
- v4l2_device_unregister_subdev(&sensor->subdev);
+ v4l2_device_unregister_subdev(subdev);
v4l2_ctrl_handler_free(&sensor->ctrls);
- media_entity_cleanup(&sensor->subdev.entity);
+ media_entity_cleanup(&subdev->entity);
mutex_destroy(&sensor->lock);
kfree(sensor);
return 0;
diff --git a/drivers/media/video/mt9p031.c b/drivers/media/video/mt9p031.c
index c81eaf4fbe01..8f061d9ac443 100644
--- a/drivers/media/video/mt9p031.c
+++ b/drivers/media/video/mt9p031.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/log2.h>
@@ -90,7 +91,14 @@
#define MT9P031_GLOBAL_GAIN_MAX 1024
#define MT9P031_GLOBAL_GAIN_DEF 8
#define MT9P031_GLOBAL_GAIN_MULT (1 << 6)
+#define MT9P031_ROW_BLACK_TARGET 0x49
#define MT9P031_ROW_BLACK_DEF_OFFSET 0x4b
+#define MT9P031_GREEN1_OFFSET 0x60
+#define MT9P031_GREEN2_OFFSET 0x61
+#define MT9P031_BLACK_LEVEL_CALIBRATION 0x62
+#define MT9P031_BLC_MANUAL_BLC (1 << 0)
+#define MT9P031_RED_OFFSET 0x63
+#define MT9P031_BLUE_OFFSET 0x64
#define MT9P031_TEST_PATTERN 0xa0
#define MT9P031_TEST_PATTERN_SHIFT 3
#define MT9P031_TEST_PATTERN_ENABLE (1 << 0)
@@ -99,17 +107,27 @@
#define MT9P031_TEST_PATTERN_RED 0xa2
#define MT9P031_TEST_PATTERN_BLUE 0xa3
+enum mt9p031_model {
+ MT9P031_MODEL_COLOR,
+ MT9P031_MODEL_MONOCHROME,
+};
+
struct mt9p031 {
struct v4l2_subdev subdev;
struct media_pad pad;
struct v4l2_rect crop; /* Sensor window */
struct v4l2_mbus_framefmt format;
- struct v4l2_ctrl_handler ctrls;
struct mt9p031_platform_data *pdata;
struct mutex power_lock; /* lock to protect power_count */
int power_count;
+ enum mt9p031_model model;
struct aptina_pll pll;
+ int reset;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *blc_auto;
+ struct v4l2_ctrl *blc_offset;
/* Registers cache */
u16 output_control;
@@ -241,8 +259,8 @@ static inline int mt9p031_pll_disable(struct mt9p031 *mt9p031)
static int mt9p031_power_on(struct mt9p031 *mt9p031)
{
/* Ensure RESET_BAR is low */
- if (mt9p031->pdata->reset) {
- mt9p031->pdata->reset(&mt9p031->subdev, 1);
+ if (mt9p031->reset != -1) {
+ gpio_set_value(mt9p031->reset, 0);
usleep_range(1000, 2000);
}
@@ -252,8 +270,8 @@ static int mt9p031_power_on(struct mt9p031 *mt9p031)
mt9p031->pdata->ext_freq);
/* Now RESET_BAR must be high */
- if (mt9p031->pdata->reset) {
- mt9p031->pdata->reset(&mt9p031->subdev, 0);
+ if (mt9p031->reset != -1) {
+ gpio_set_value(mt9p031->reset, 1);
usleep_range(1000, 2000);
}
@@ -262,8 +280,8 @@ static int mt9p031_power_on(struct mt9p031 *mt9p031)
static void mt9p031_power_off(struct mt9p031 *mt9p031)
{
- if (mt9p031->pdata->reset) {
- mt9p031->pdata->reset(&mt9p031->subdev, 1);
+ if (mt9p031->reset != -1) {
+ gpio_set_value(mt9p031->reset, 0);
usleep_range(1000, 2000);
}
@@ -557,6 +575,10 @@ static int mt9p031_set_crop(struct v4l2_subdev *subdev,
*/
#define V4L2_CID_TEST_PATTERN (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_BLC_AUTO (V4L2_CID_USER_BASE | 0x1002)
+#define V4L2_CID_BLC_TARGET_LEVEL (V4L2_CID_USER_BASE | 0x1003)
+#define V4L2_CID_BLC_ANALOG_OFFSET (V4L2_CID_USER_BASE | 0x1004)
+#define V4L2_CID_BLC_DIGITAL_OFFSET (V4L2_CID_USER_BASE | 0x1005)
static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
{
@@ -621,11 +643,17 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_TEST_PATTERN:
if (!ctrl->val) {
- ret = mt9p031_set_mode2(mt9p031,
- 0, MT9P031_READ_MODE_2_ROW_BLC);
- if (ret < 0)
- return ret;
-
+ /* Restore the black level compensation settings. */
+ if (mt9p031->blc_auto->cur.val != 0) {
+ ret = mt9p031_s_ctrl(mt9p031->blc_auto);
+ if (ret < 0)
+ return ret;
+ }
+ if (mt9p031->blc_offset->cur.val != 0) {
+ ret = mt9p031_s_ctrl(mt9p031->blc_offset);
+ if (ret < 0)
+ return ret;
+ }
return mt9p031_write(client, MT9P031_TEST_PATTERN,
MT9P031_TEST_PATTERN_DISABLE);
}
@@ -640,10 +668,14 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret < 0)
return ret;
+ /* Disable digital black level compensation when using a test
+ * pattern.
+ */
ret = mt9p031_set_mode2(mt9p031, MT9P031_READ_MODE_2_ROW_BLC,
0);
if (ret < 0)
return ret;
+
ret = mt9p031_write(client, MT9P031_ROW_BLACK_DEF_OFFSET, 0);
if (ret < 0)
return ret;
@@ -651,7 +683,40 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
return mt9p031_write(client, MT9P031_TEST_PATTERN,
((ctrl->val - 1) << MT9P031_TEST_PATTERN_SHIFT)
| MT9P031_TEST_PATTERN_ENABLE);
+
+ case V4L2_CID_BLC_AUTO:
+ ret = mt9p031_set_mode2(mt9p031,
+ ctrl->val ? 0 : MT9P031_READ_MODE_2_ROW_BLC,
+ ctrl->val ? MT9P031_READ_MODE_2_ROW_BLC : 0);
+ if (ret < 0)
+ return ret;
+
+ return mt9p031_write(client, MT9P031_BLACK_LEVEL_CALIBRATION,
+ ctrl->val ? 0 : MT9P031_BLC_MANUAL_BLC);
+
+ case V4L2_CID_BLC_TARGET_LEVEL:
+ return mt9p031_write(client, MT9P031_ROW_BLACK_TARGET,
+ ctrl->val);
+
+ case V4L2_CID_BLC_ANALOG_OFFSET:
+ data = ctrl->val & ((1 << 9) - 1);
+
+ ret = mt9p031_write(client, MT9P031_GREEN1_OFFSET, data);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_GREEN2_OFFSET, data);
+ if (ret < 0)
+ return ret;
+ ret = mt9p031_write(client, MT9P031_RED_OFFSET, data);
+ if (ret < 0)
+ return ret;
+ return mt9p031_write(client, MT9P031_BLUE_OFFSET, data);
+
+ case V4L2_CID_BLC_DIGITAL_OFFSET:
+ return mt9p031_write(client, MT9P031_ROW_BLACK_DEF_OFFSET,
+ ctrl->val & ((1 << 12) - 1));
}
+
return 0;
}
@@ -685,6 +750,46 @@ static const struct v4l2_ctrl_config mt9p031_ctrls[] = {
.flags = 0,
.menu_skip_mask = 0,
.qmenu = mt9p031_test_pattern_menu,
+ }, {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_BLC_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "BLC, Auto",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+ .flags = 0,
+ }, {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_BLC_TARGET_LEVEL,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "BLC Target Level",
+ .min = 0,
+ .max = 4095,
+ .step = 1,
+ .def = 168,
+ .flags = 0,
+ }, {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_BLC_ANALOG_OFFSET,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "BLC Analog Offset",
+ .min = -255,
+ .max = 255,
+ .step = 1,
+ .def = 32,
+ .flags = 0,
+ }, {
+ .ops = &mt9p031_ctrl_ops,
+ .id = V4L2_CID_BLC_DIGITAL_OFFSET,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "BLC Digital Offset",
+ .min = -2048,
+ .max = 2047,
+ .step = 1,
+ .def = 40,
+ .flags = 0,
}
};
@@ -764,7 +869,7 @@ static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
format = v4l2_subdev_get_try_format(fh, 0);
- if (mt9p031->pdata->version == MT9P031_MONOCHROME_VERSION)
+ if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
format->code = V4L2_MBUS_FMT_Y12_1X12;
else
format->code = V4L2_MBUS_FMT_SGRBG12_1X12;
@@ -842,6 +947,8 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->pdata = pdata;
mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
+ mt9p031->model = did->driver_data;
+ mt9p031->reset = -1;
v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 4);
@@ -862,9 +969,16 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->subdev.ctrl_handler = &mt9p031->ctrls;
- if (mt9p031->ctrls.error)
+ if (mt9p031->ctrls.error) {
printk(KERN_INFO "%s: control initialization error %d\n",
__func__, mt9p031->ctrls.error);
+ ret = mt9p031->ctrls.error;
+ goto done;
+ }
+
+ mt9p031->blc_auto = v4l2_ctrl_find(&mt9p031->ctrls, V4L2_CID_BLC_AUTO);
+ mt9p031->blc_offset = v4l2_ctrl_find(&mt9p031->ctrls,
+ V4L2_CID_BLC_DIGITAL_OFFSET);
mutex_init(&mt9p031->power_lock);
v4l2_i2c_subdev_init(&mt9p031->subdev, client, &mt9p031_subdev_ops);
@@ -882,7 +996,7 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->crop.left = MT9P031_COLUMN_START_DEF;
mt9p031->crop.top = MT9P031_ROW_START_DEF;
- if (mt9p031->pdata->version == MT9P031_MONOCHROME_VERSION)
+ if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
mt9p031->format.code = V4L2_MBUS_FMT_Y12_1X12;
else
mt9p031->format.code = V4L2_MBUS_FMT_SGRBG12_1X12;
@@ -892,10 +1006,22 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->format.field = V4L2_FIELD_NONE;
mt9p031->format.colorspace = V4L2_COLORSPACE_SRGB;
+ if (pdata->reset != -1) {
+ ret = gpio_request_one(pdata->reset, GPIOF_OUT_INIT_LOW,
+ "mt9p031_rst");
+ if (ret < 0)
+ goto done;
+
+ mt9p031->reset = pdata->reset;
+ }
+
ret = mt9p031_pll_setup(mt9p031);
done:
if (ret < 0) {
+ if (mt9p031->reset != -1)
+ gpio_free(mt9p031->reset);
+
v4l2_ctrl_handler_free(&mt9p031->ctrls);
media_entity_cleanup(&mt9p031->subdev.entity);
kfree(mt9p031);
@@ -912,13 +1038,16 @@ static int mt9p031_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&mt9p031->ctrls);
v4l2_device_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
+ if (mt9p031->reset != -1)
+ gpio_free(mt9p031->reset);
kfree(mt9p031);
return 0;
}
static const struct i2c_device_id mt9p031_id[] = {
- { "mt9p031", 0 },
+ { "mt9p031", MT9P031_MODEL_COLOR },
+ { "mt9p031m", MT9P031_MODEL_MONOCHROME },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9p031_id);
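Note: two patterns in the mt9p031 changes above are worth calling out. The sensor variant now comes from the .driver_data field of the matching i2c_device_id entry (color vs. monochrome), and the reset line is a plain GPIO number taken from platform data with -1 meaning "not wired". A sketch of the optional-reset handling in isolation (foo names invented, the -1 convention copied from the hunk):

#include <linux/gpio.h>

struct foo_sensor {
	int reset;	/* GPIO number, or -1 when the board has no reset line */
};

static int foo_request_reset(struct foo_sensor *foo, int gpio)
{
	int ret;

	foo->reset = -1;
	if (gpio == -1)
		return 0;	/* reset line is optional */

	/* claim the GPIO driven low so the sensor starts held in reset */
	ret = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "foo_rst");
	if (ret < 0)
		return ret;

	foo->reset = gpio;
	return 0;
}

static void foo_release_reset(struct foo_sensor *foo)
{
	if (foo->reset != -1)
		gpio_free(foo->reset);
}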
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index 8d1445f12708..e1ae46a7ee96 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -453,6 +453,7 @@ static int mt9t112_init_pll(const struct i2c_client *client)
* I2C Master Clock Divider
*/
mt9t112_reg_write(ret, client, 0x0014, 0x3046);
+ mt9t112_reg_write(ret, client, 0x0016, 0x0400); /* JPEG initialization workaround */
mt9t112_reg_write(ret, client, 0x0022, 0x0190);
mt9t112_reg_write(ret, client, 0x3B84, 0x0212);
diff --git a/drivers/media/video/mt9v032.c b/drivers/media/video/mt9v032.c
index 75e253a343c5..4ba4884c016e 100644
--- a/drivers/media/video/mt9v032.c
+++ b/drivers/media/video/mt9v032.c
@@ -481,7 +481,7 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_EXPOSURE_AUTO:
return mt9v032_update_aec_agc(mt9v032, MT9V032_AEC_ENABLE,
- ctrl->val);
+ !ctrl->val);
case V4L2_CID_EXPOSURE:
return mt9v032_write(client, MT9V032_TOTAL_SHUTTER_WIDTH,
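Note: the one-character mt9v032 change above fixes an inverted test. In the V4L2_CID_EXPOSURE_AUTO menu the automatic mode is value 0, so the AEC enable bit must be set when the control value is zero, hence !ctrl->val. The relevant menu values from videodev2.h, annotated:

enum v4l2_exposure_auto_type {
	V4L2_EXPOSURE_AUTO   = 0,	/* !ctrl->val == 1 -> AEC enabled  */
	V4L2_EXPOSURE_MANUAL = 1,	/* !ctrl->val == 0 -> AEC disabled */
	/* ... */
};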
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 055d11ddb038..4296a8350298 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -126,13 +126,8 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- *size = bytes_per_line * icd->user_height;
+ *size = icd->sizeimage;
if (!*count)
*count = 32;
@@ -171,11 +166,6 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
struct soc_camera_device *icd = vq->priv_data;
struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
int ret;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -202,7 +192,7 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = bytes_per_line * vb->height;
+ vb->size = icd->sizeimage;
if (0 != vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
goto out;
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 18afaeeadb7b..637bde8aca28 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -22,6 +22,7 @@
#include <linux/gcd.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
@@ -82,6 +83,7 @@
#define CSICR1_INV_DATA (1 << 3)
#define CSICR1_INV_PCLK (1 << 2)
#define CSICR1_REDGE (1 << 1)
+#define CSICR1_FMT_MASK (CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
#define SHIFT_STATFF_LEVEL 22
#define SHIFT_RXFF_LEVEL 19
@@ -229,6 +231,7 @@ struct mx2_prp_cfg {
u32 src_pixel;
u32 ch1_pixel;
u32 irq_flags;
+ u32 csicr1;
};
/* prp resizing parameters */
@@ -329,6 +332,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
.ch1_pixel = 0x2ca00565, /* RGB565 */
.irq_flags = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+ .csicr1 = 0,
}
},
{
@@ -342,6 +346,21 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
.irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
PRP_INTR_CH2FC | PRP_INTR_LBOVF |
PRP_INTR_CH2OVF,
+ .csicr1 = CSICR1_PACK_DIR,
+ }
+ },
+ {
+ .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8,
+ .out_fmt = V4L2_PIX_FMT_YUV420,
+ .cfg = {
+ .channel = 2,
+ .in_fmt = PRP_CNTL_DATA_IN_YUV422,
+ .out_fmt = PRP_CNTL_CH2_OUT_YUV420,
+ .src_pixel = 0x22000888, /* YUV422 (YUYV) */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+ PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+ PRP_INTR_CH2OVF,
+ .csicr1 = CSICR1_SWAP16_EN,
}
},
};
@@ -525,8 +544,6 @@ static int mx2_videobuf_setup(struct vb2_queue *vq,
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
dev_dbg(icd->parent, "count=%d, size=%d\n", *count, sizes[0]);
@@ -534,12 +551,9 @@ static int mx2_videobuf_setup(struct vb2_queue *vq,
if (fmt != NULL)
return -ENOTTY;
- if (bytes_per_line < 0)
- return bytes_per_line;
-
alloc_ctxs[0] = pcdev->alloc_ctx;
- sizes[0] = bytes_per_line * icd->user_height;
+ sizes[0] = icd->sizeimage;
if (0 == *count)
*count = 32;
@@ -555,16 +569,11 @@ static int mx2_videobuf_setup(struct vb2_queue *vq,
static int mx2_videobuf_prepare(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
int ret = 0;
dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
- if (bytes_per_line < 0)
- return bytes_per_line;
-
#ifdef DEBUG
/*
* This can be useful if you want to see if we actually fill
@@ -574,7 +583,7 @@ static int mx2_videobuf_prepare(struct vb2_buffer *vb)
0xaa, vb2_get_plane_payload(vb, 0));
#endif
- vb2_set_plane_payload(vb, 0, bytes_per_line * icd->user_height);
+ vb2_set_plane_payload(vb, 0, icd->sizeimage);
if (vb2_plane_vaddr(vb, 0) &&
vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
ret = -EINVAL;
@@ -1024,14 +1033,14 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
return ret;
}
+ csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
+
if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_SOF_POL;
if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_HSYNC_POL;
- if (pcdev->platform_flags & MX2_CAMERA_SWAP16)
- csicr1 |= CSICR1_SWAP16_EN;
if (pcdev->platform_flags & MX2_CAMERA_EXT_VSYNC)
csicr1 |= CSICR1_EXT_VSYNC;
if (pcdev->platform_flags & MX2_CAMERA_CCIR)
@@ -1042,8 +1051,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
csicr1 |= CSICR1_GCLK_MODE;
if (pcdev->platform_flags & MX2_CAMERA_INV_DATA)
csicr1 |= CSICR1_INV_DATA;
- if (pcdev->platform_flags & MX2_CAMERA_PACK_DIR_MSB)
- csicr1 |= CSICR1_PACK_DIR;
pcdev->csicr1 = csicr1;
@@ -1118,7 +1125,8 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
return 0;
}
- if (code == V4L2_MBUS_FMT_YUYV8_2X8) {
+ if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ code == V4L2_MBUS_FMT_UYVY8_2X8) {
formats++;
if (xlate) {
/*
@@ -1363,17 +1371,20 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
xlate->host_fmt);
if (pix->bytesperline < 0)
return pix->bytesperline;
- pix->sizeimage = pix->height * pix->bytesperline;
+ pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
+ pix->bytesperline, pix->height);
/* Check against the CSIRXCNT limit */
if (pix->sizeimage > 4 * 0x3ffff) {
/* Adjust geometry, preserve aspect ratio */
- unsigned int new_height = int_sqrt(4 * 0x3ffff *
- pix->height / pix->bytesperline);
+ unsigned int new_height = int_sqrt(div_u64(0x3ffffULL *
+ 4 * pix->height, pix->bytesperline));
pix->width = new_height * pix->width / pix->height;
pix->height = new_height;
pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
xlate->host_fmt);
BUG_ON(pix->bytesperline < 0);
+ pix->sizeimage = soc_mbus_image_size(xlate->host_fmt,
+ pix->bytesperline, pix->height);
}
}
@@ -1752,6 +1763,8 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
pcdev->soc_host.priv = pcdev;
pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
pcdev->soc_host.nr = pdev->id;
+ if (cpu_is_mx25())
+ pcdev->soc_host.capabilities = SOCAM_HOST_CAP_STRIDE;
pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(pcdev->alloc_ctx)) {
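Note: the mx2 buffer-setup hunks above replace bytesperline * height with soc_mbus_image_size() because the newly added planar YUV420 output no longer fits the "one line length times the number of lines" assumption. Rough arithmetic for an assumed 640x480 frame:

	packed YUYV  : bytesperline = 1280, size = 1280 * 480         = 614400 bytes
	planar YUV420: bytesperline =  640, size =  640 * 480 * 3 / 2 = 460800 bytes

so the image size has to come from the format's layout, not from the stride alone.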
diff --git a/drivers/media/video/mx2_emmaprp.c b/drivers/media/video/mx2_emmaprp.c
index ba89a7401c8c..0bd5815de369 100644
--- a/drivers/media/video/mx2_emmaprp.c
+++ b/drivers/media/video/mx2_emmaprp.c
@@ -755,7 +755,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_MMAP;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &emmaprp_qops;
@@ -767,7 +767,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- dst_vq->io_modes = VB2_MMAP;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &emmaprp_qops;
@@ -904,6 +904,10 @@ static int emmaprp_probe(struct platform_device *pdev)
}
*vfd = emmaprp_videodev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->lock = &pcdev->dev_mutex;
video_set_drvdata(vfd, pcdev);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 93c35ef5f0ad..f13643d31353 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -199,8 +199,6 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- int bytes_per_line;
- unsigned int height;
if (!mx3_cam->idmac_channel[0])
return -EINVAL;
@@ -208,21 +206,29 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
if (fmt) {
const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
fmt->fmt.pix.pixelformat);
+ unsigned int bytes_per_line;
+ int ret;
+
if (!xlate)
return -EINVAL;
- bytes_per_line = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
- xlate->host_fmt);
- height = fmt->fmt.pix.height;
+
+ ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+ fmt->fmt.pix.height);
+ if (ret < 0)
+ return ret;
+
+ sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
} else {
/* Called from VIDIOC_REQBUFS or in compatibility mode */
- bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- height = icd->user_height;
+ sizes[0] = icd->sizeimage;
}
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- sizes[0] = bytes_per_line * height;
alloc_ctxs[0] = mx3_cam->alloc_ctx;
@@ -267,14 +273,11 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
struct idmac_video_param *video = &ichan->params.video;
const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, host_fmt);
unsigned long flags;
dma_cookie_t cookie;
size_t new_size;
- BUG_ON(bytes_per_line <= 0);
-
- new_size = bytes_per_line * icd->user_height;
+ new_size = icd->sizeimage;
if (vb2_plane_size(vb, 0) < new_size) {
dev_err(icd->parent, "Buffer #%d too small (%lu < %zu)\n",
@@ -314,9 +317,9 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
* horizontal parameters in this case are expressed in bytes,
* not in pixels.
*/
- video->out_width = bytes_per_line;
+ video->out_width = icd->bytesperline;
video->out_height = icd->user_height;
- video->out_stride = bytes_per_line;
+ video->out_stride = icd->bytesperline;
} else {
/*
* For IPU known formats the pixel unit will be managed
@@ -508,7 +511,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
/* ipu_csi_init_interface() */
csi_reg_write(mx3_cam, conf, CSI_SENS_CONF);
- clk_enable(mx3_cam->clk);
+ clk_prepare_enable(mx3_cam->clk);
rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
if (rate)
@@ -549,7 +552,7 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd)
*ichan = NULL;
}
- clk_disable(mx3_cam->clk);
+ clk_disable_unprepare(mx3_cam->clk);
mx3_cam->icd = NULL;
@@ -642,12 +645,14 @@ static const struct soc_mbus_pixelfmt mx3_camera_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
}, {
.fourcc = V4L2_PIX_FMT_GREY,
.name = "Monochrome 8 bit",
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
};
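Note: the mx3 host switches from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), the standard conversion for the common clock framework where a clock must be prepared (a possibly sleeping step) before it can be enabled. In isolation, with invented names:

#include <linux/clk.h>

struct foo_host {
	struct clk *clk;
};

static int foo_activate(struct foo_host *host)
{
	return clk_prepare_enable(host->clk);	/* was: clk_enable() */
}

static void foo_deactivate(struct foo_host *host)
{
	clk_disable_unprepare(host->clk);	/* was: clk_disable() */
}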
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 2e4131748438..b520a45cb3f3 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -31,10 +31,11 @@
#include <media/saa7115.h>
#include <linux/module.h>
-#include "mxb.h"
#include "tea6415c.h"
#include "tea6420.h"
+#define MXB_AUDIOS 6
+
#define I2C_SAA7111A 0x24
#define I2C_TDA9840 0x42
#define I2C_TEA6415C 0x43
@@ -62,10 +63,14 @@ MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off).");
enum { TUNER, AUX1, AUX3, AUX3_YC };
static struct v4l2_input mxb_inputs[MXB_INPUTS] = {
- { TUNER, "Tuner", V4L2_INPUT_TYPE_TUNER, 1, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { AUX1, "AUX1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { AUX3, "AUX3 Composite", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
- { AUX3_YC, "AUX3 S-Video", V4L2_INPUT_TYPE_CAMERA, 4, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
+ { TUNER, "Tuner", V4L2_INPUT_TYPE_TUNER, 0x3f, 0,
+ V4L2_STD_PAL_BG | V4L2_STD_PAL_I, 0, V4L2_IN_CAP_STD },
+ { AUX1, "AUX1", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
+ V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { AUX3, "AUX3 Composite", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
+ V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
+ { AUX3_YC, "AUX3 S-Video", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
+ V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
};
/* this array holds the information, which port of the saa7146 each
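Note: in the mxb_inputs table above, the fourth initializer is the audioset bitmask; 0x3f makes all six audio inputs (the mxb_audios table added in the next hunk) selectable for every video input. One entry rewritten with designated initializers to make the field meanings explicit (sketch only, not part of the patch):

static const struct v4l2_input example_input = {
	.index        = TUNER,
	.name         = "Tuner",
	.type         = V4L2_INPUT_TYPE_TUNER,
	.audioset     = 0x3f,	/* audio inputs 0..5 are all valid */
	.tuner        = 0,
	.std          = V4L2_STD_PAL_BG | V4L2_STD_PAL_I,
	.status       = 0,
	.capabilities = V4L2_IN_CAP_STD,
};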
@@ -90,6 +95,36 @@ struct mxb_routing {
u32 output;
};
+/* these are the available audio sources, which can be switched
+ to the line- and cd-output individually */
+static struct v4l2_audio mxb_audios[MXB_AUDIOS] = {
+ {
+ .index = 0,
+ .name = "Tuner",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 1,
+ .name = "AUX1",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 2,
+ .name = "AUX2",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 3,
+ .name = "AUX3",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 4,
+ .name = "Radio (X9)",
+ .capability = V4L2_AUDCAP_STEREO,
+ } , {
+ .index = 5,
+ .name = "CD-ROM (X10)",
+ .capability = V4L2_AUDCAP_STEREO,
+ }
+};
+
/* These are the necessary input-output-pins for bringing one audio source
(see above) to the CD-output. Note that gain is set to 0 in this table. */
static struct mxb_routing TEA6420_cd[MXB_AUDIOS + 1][2] = {
@@ -114,11 +149,6 @@ static struct mxb_routing TEA6420_line[MXB_AUDIOS + 1][2] = {
{ { 6, 3 }, { 6, 2 } } /* Mute */
};
-#define MAXCONTROLS 1
-static struct v4l2_queryctrl mxb_controls[] = {
- { V4L2_CID_AUDIO_MUTE, V4L2_CTRL_TYPE_BOOLEAN, "Mute", 0, 1, 1, 0, 0 },
-};
-
struct mxb
{
struct video_device *video_dev;
@@ -135,6 +165,7 @@ struct mxb
int cur_mode; /* current audio mode (mono, stereo, ...) */
int cur_input; /* current input */
+ int cur_audinput; /* current audio input */
int cur_mute; /* current mute status */
struct v4l2_frequency cur_freq; /* current frequency the tuner is tuned to */
};
@@ -150,16 +181,21 @@ struct mxb
#define call_all(dev, o, f, args...) \
v4l2_device_call_until_err(&dev->v4l2_dev, 0, o, f, ##args)
-static inline void tea6420_route_cd(struct mxb *mxb, int idx)
+static void mxb_update_audmode(struct mxb *mxb)
+{
+ struct v4l2_tuner t = {
+ .audmode = mxb->cur_mode,
+ };
+
+ tda9840_call(mxb, tuner, s_tuner, &t);
+}
+
+static inline void tea6420_route(struct mxb *mxb, int idx)
{
v4l2_subdev_call(mxb->tea6420_1, audio, s_routing,
TEA6420_cd[idx][0].input, TEA6420_cd[idx][0].output, 0);
v4l2_subdev_call(mxb->tea6420_2, audio, s_routing,
TEA6420_cd[idx][1].input, TEA6420_cd[idx][1].output, 0);
-}
-
-static inline void tea6420_route_line(struct mxb *mxb, int idx)
-{
v4l2_subdev_call(mxb->tea6420_1, audio, s_routing,
TEA6420_line[idx][0].input, TEA6420_line[idx][0].output, 0);
v4l2_subdev_call(mxb->tea6420_2, audio, s_routing,
@@ -168,16 +204,45 @@ static inline void tea6420_route_line(struct mxb *mxb, int idx)
static struct saa7146_extension extension;
+static int mxb_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct saa7146_dev *dev = container_of(ctrl->handler,
+ struct saa7146_dev, ctrl_handler);
+ struct mxb *mxb = dev->ext_priv;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ mxb->cur_mute = ctrl->val;
+ /* switch the audio-source */
+ tea6420_route(mxb, ctrl->val ? 6 :
+ video_audio_connect[mxb->cur_input]);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mxb_ctrl_ops = {
+ .s_ctrl = mxb_s_ctrl,
+};
+
static int mxb_probe(struct saa7146_dev *dev)
{
+ struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
struct mxb *mxb = NULL;
+ v4l2_ctrl_new_std(hdl, &mxb_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ if (hdl->error)
+ return hdl->error;
mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
if (mxb == NULL) {
DEB_D("not enough kernel memory\n");
return -ENOMEM;
}
+
snprintf(mxb->i2c_adapter.name, sizeof(mxb->i2c_adapter.name), "mxb%d", mxb_num);
saa7146_i2c_adapter_prepare(dev, &mxb->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
@@ -214,6 +279,8 @@ static int mxb_probe(struct saa7146_dev *dev)
/* we store the pointer in our private data field */
dev->ext_priv = mxb;
+ v4l2_ctrl_handler_setup(hdl);
+
return 0;
}
@@ -286,6 +353,9 @@ static int mxb_init_done(struct saa7146_dev* dev)
int i = 0, err = 0;
+ /* mute audio on tea6420s */
+ tea6420_route(mxb, 6);
+
/* select video mode in saa7111a */
saa7111a_call(mxb, core, s_std, std);
@@ -306,12 +376,12 @@ static int mxb_init_done(struct saa7146_dev* dev)
tuner_call(mxb, tuner, s_frequency, &mxb->cur_freq);
/* set a default video standard */
+ /* These two gpio calls set the GPIO pins that control the tda9820 */
+ saa7146_write(dev, GPIO_CTRL, 0x00404050);
+ saa7111a_call(mxb, core, s_gpio, 1);
+ saa7111a_call(mxb, core, s_std, std);
tuner_call(mxb, core, s_std, std);
- /* mute audio on tea6420s */
- tea6420_route_line(mxb, 6);
- tea6420_route_cd(mxb, 6);
-
/* switch to tuner-channel on tea6415c */
tea6415c_call(mxb, video, s_routing, 3, 17, 0);
@@ -320,9 +390,11 @@ static int mxb_init_done(struct saa7146_dev* dev)
/* the rest for mxb */
mxb->cur_input = 0;
+ mxb->cur_audinput = video_audio_connect[mxb->cur_input];
mxb->cur_mute = 1;
mxb->cur_mode = V4L2_TUNER_MODE_STEREO;
+ mxb_update_audmode(mxb);
/* check if the saa7740 (aka 'sound arena module') is present
on the mxb. if so, we must initialize it. due to lack of
@@ -385,69 +457,6 @@ void mxb_irq_bh(struct saa7146_dev* dev, u32* irq_mask)
}
*/
-static int vidioc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- int i;
-
- for (i = MAXCONTROLS - 1; i >= 0; i--) {
- if (mxb_controls[i].id == qc->id) {
- *qc = mxb_controls[i];
- DEB_D("VIDIOC_QUERYCTRL %d\n", qc->id);
- return 0;
- }
- }
- return dev->ext_vv_data->core_ops->vidioc_queryctrl(file, fh, qc);
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct mxb *mxb = (struct mxb *)dev->ext_priv;
- int i;
-
- for (i = MAXCONTROLS - 1; i >= 0; i--) {
- if (mxb_controls[i].id == vc->id)
- break;
- }
-
- if (i < 0)
- return dev->ext_vv_data->core_ops->vidioc_g_ctrl(file, fh, vc);
-
- if (vc->id == V4L2_CID_AUDIO_MUTE) {
- vc->value = mxb->cur_mute;
- DEB_D("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d\n", vc->value);
- return 0;
- }
-
- DEB_EE("VIDIOC_G_CTRL V4L2_CID_AUDIO_MUTE:%d\n", vc->value);
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct mxb *mxb = (struct mxb *)dev->ext_priv;
- int i = 0;
-
- for (i = MAXCONTROLS - 1; i >= 0; i--) {
- if (mxb_controls[i].id == vc->id)
- break;
- }
-
- if (i < 0)
- return dev->ext_vv_data->core_ops->vidioc_s_ctrl(file, fh, vc);
-
- if (vc->id == V4L2_CID_AUDIO_MUTE) {
- mxb->cur_mute = vc->value;
- /* switch the audio-source */
- tea6420_route_line(mxb, vc->value ? 6 :
- video_audio_connect[mxb->cur_input]);
- DEB_EE("VIDIOC_S_CTRL, V4L2_CID_AUDIO_MUTE: %d\n", vc->value);
- }
- return 0;
-}
-
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
@@ -519,9 +528,12 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
if (saa7111a_call(mxb, video, s_routing, i, SAA7111_FMT_CCIR, 0))
pr_err("VIDIOC_S_INPUT: could not address saa7111a\n");
+ mxb->cur_audinput = video_audio_connect[input];
/* switch the audio-source only if necessary */
if (0 == mxb->cur_mute)
- tea6420_route_line(mxb, video_audio_connect[input]);
+ tea6420_route(mxb, mxb->cur_audinput);
+ if (mxb->cur_audinput == 0)
+ mxb_update_audmode(mxb);
return 0;
}
@@ -563,17 +575,20 @@ static int vidioc_s_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
return call_all(dev, tuner, s_tuner, t);
}
+static int vidioc_querystd(struct file *file, void *fh, v4l2_std_id *norm)
+{
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+
+ return call_all(dev, video, querystd, norm);
+}
+
static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *f)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct mxb *mxb = (struct mxb *)dev->ext_priv;
- if (mxb->cur_input) {
- DEB_D("VIDIOC_G_FREQ: channel %d does not have a tuner!\n",
- mxb->cur_input);
+ if (f->tuner)
return -EINVAL;
- }
-
*f = mxb->cur_freq;
DEB_EE("VIDIOC_G_FREQ: freq:0x%08x\n", mxb->cur_freq.frequency);
@@ -592,17 +607,18 @@ static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency
if (V4L2_TUNER_ANALOG_TV != f->type)
return -EINVAL;
- if (mxb->cur_input) {
- DEB_D("VIDIOC_S_FREQ: channel %d does not have a tuner!\n",
- mxb->cur_input);
- return -EINVAL;
- }
-
- mxb->cur_freq = *f;
DEB_EE("VIDIOC_S_FREQUENCY: freq:0x%08x\n", mxb->cur_freq.frequency);
/* tune in desired frequency */
- tuner_call(mxb, tuner, s_frequency, &mxb->cur_freq);
+ tuner_call(mxb, tuner, s_frequency, f);
+ /* let the tuner subdev clamp the frequency to the tuner range */
+ tuner_call(mxb, tuner, g_frequency, f);
+ mxb->cur_freq = *f;
+ if (mxb->cur_audinput == 0)
+ mxb_update_audmode(mxb);
+
+ if (mxb->cur_input)
+ return 0;
/* hack: changing the frequency should invalidate the vbi-counter (=> alevt) */
spin_lock(&dev->slock);
@@ -612,25 +628,40 @@ static int vidioc_s_frequency(struct file *file, void *fh, struct v4l2_frequency
return 0;
}
+static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
+{
+ if (a->index >= MXB_AUDIOS)
+ return -EINVAL;
+ *a = mxb_audios[a->index];
+ return 0;
+}
+
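
The new vidioc_enumaudio handler simply indexes the mxb_audios table, so userspace can walk the audio inputs with VIDIOC_ENUMAUDIO. A minimal userspace sketch follows (not part of the patch; the device path and error handling are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_audio a;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical device node */

	if (fd < 0)
		return 1;
	for (unsigned int i = 0; ; i++) {
		memset(&a, 0, sizeof(a));
		a.index = i;
		if (ioctl(fd, VIDIOC_ENUMAUDIO, &a) < 0)
			break;			/* driver returns -EINVAL past the last entry */
		printf("%u: %s\n", a.index, (char *)a.name);
	}
	close(fd);
	return 0;
}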
static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct mxb *mxb = (struct mxb *)dev->ext_priv;
- if (a->index > MXB_INPUTS) {
- DEB_D("VIDIOC_G_AUDIO %d out of range\n", a->index);
- return -EINVAL;
- }
-
- DEB_EE("VIDIOC_G_AUDIO %d\n", a->index);
- memcpy(a, &mxb_audios[video_audio_connect[mxb->cur_input]], sizeof(struct v4l2_audio));
+ DEB_EE("VIDIOC_G_AUDIO\n");
+ *a = mxb_audios[mxb->cur_audinput];
return 0;
}
static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
+ struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
+ struct mxb *mxb = (struct mxb *)dev->ext_priv;
+
DEB_D("VIDIOC_S_AUDIO %d\n", a->index);
- return 0;
+ if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) {
+ if (mxb->cur_audinput != a->index) {
+ mxb->cur_audinput = a->index;
+ tea6420_route(mxb, a->index);
+ if (mxb->cur_audinput == 0)
+ mxb_update_audmode(mxb);
+ }
+ return 0;
+ }
+ return -EINVAL;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -638,60 +669,31 @@ static int vidioc_g_register(struct file *file, void *fh, struct v4l2_dbg_regist
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- return call_all(dev, core, g_register, reg);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (v4l2_chip_match_host(&reg->match)) {
+ reg->val = saa7146_read(dev, reg->reg);
+ reg->size = 4;
+ return 0;
+ }
+ call_all(dev, core, g_register, reg);
+ return 0;
}
static int vidioc_s_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- return call_all(dev, core, s_register, reg);
-}
-#endif
-
-static long vidioc_default(struct file *file, void *fh, bool valid_prio,
- int cmd, void *arg)
-{
- struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
- struct mxb *mxb = (struct mxb *)dev->ext_priv;
-
- switch (cmd) {
- case MXB_S_AUDIO_CD:
- {
- int i = *(int *)arg;
-
- if (i < 0 || i >= MXB_AUDIOS) {
- DEB_D("invalid argument to MXB_S_AUDIO_CD: i:%d\n", i);
- return -EINVAL;
- }
-
- DEB_EE("MXB_S_AUDIO_CD: i:%d\n", i);
-
- tea6420_route_cd(mxb, i);
- return 0;
- }
- case MXB_S_AUDIO_LINE:
- {
- int i = *(int *)arg;
-
- if (i < 0 || i >= MXB_AUDIOS) {
- DEB_D("invalid argument to MXB_S_AUDIO_LINE: i:%d\n",
- i);
- return -EINVAL;
- }
-
- DEB_EE("MXB_S_AUDIO_LINE: i:%d\n", i);
- tea6420_route_line(mxb, i);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (v4l2_chip_match_host(&reg->match)) {
+ saa7146_write(dev, reg->reg, reg->val);
+ reg->size = 4;
return 0;
}
- default:
-/*
- DEB2(pr_err("does not handle this ioctl\n"));
-*/
- return -ENOIOCTLCMD;
- }
- return 0;
+ return call_all(dev, core, s_register, reg);
}
+#endif
static struct saa7146_ext_vv vv_data;
@@ -709,23 +711,21 @@ static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data
}
mxb = (struct mxb *)dev->ext_priv;
- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
- vv_data.ops.vidioc_g_input = vidioc_g_input;
- vv_data.ops.vidioc_s_input = vidioc_s_input;
- vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
- vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
- vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
- vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
- vv_data.ops.vidioc_g_audio = vidioc_g_audio;
- vv_data.ops.vidioc_s_audio = vidioc_s_audio;
+ vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
+ vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
+ vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
+ vv_data.vid_ops.vidioc_querystd = vidioc_querystd;
+ vv_data.vid_ops.vidioc_g_tuner = vidioc_g_tuner;
+ vv_data.vid_ops.vidioc_s_tuner = vidioc_s_tuner;
+ vv_data.vid_ops.vidioc_g_frequency = vidioc_g_frequency;
+ vv_data.vid_ops.vidioc_s_frequency = vidioc_s_frequency;
+ vv_data.vid_ops.vidioc_enumaudio = vidioc_enumaudio;
+ vv_data.vid_ops.vidioc_g_audio = vidioc_g_audio;
+ vv_data.vid_ops.vidioc_s_audio = vidioc_s_audio;
#ifdef CONFIG_VIDEO_ADV_DEBUG
- vv_data.ops.vidioc_g_register = vidioc_g_register;
- vv_data.ops.vidioc_s_register = vidioc_s_register;
+ vv_data.vid_ops.vidioc_g_register = vidioc_g_register;
+ vv_data.vid_ops.vidioc_s_register = vidioc_s_register;
#endif
- vv_data.ops.vidioc_default = vidioc_default;
if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
ERR("cannot register capture v4l2 device. skipping.\n");
saa7146_vv_release(dev);
@@ -752,6 +752,9 @@ static int mxb_detach(struct saa7146_dev *dev)
DEB_EE("dev:%p\n", dev);
+ /* mute audio on tea6420s */
+ tea6420_route(mxb, 6);
+
saa7146_unregister_device(&mxb->video_dev,dev);
if (MXB_BOARD_CAN_DO_VBI(dev))
saa7146_unregister_device(&mxb->vbi_dev, dev);
@@ -773,20 +776,24 @@ static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *standa
v4l2_std_id std = V4L2_STD_PAL_I;
DEB_D("VIDIOC_S_STD: setting mxb for PAL_I\n");
- /* set the 7146 gpio register -- I don't know what this does exactly */
+ /* These two gpio calls set the GPIO pins that control the tda9820 */
saa7146_write(dev, GPIO_CTRL, 0x00404050);
- /* unset the 7111 gpio register -- I don't know what this does exactly */
saa7111a_call(mxb, core, s_gpio, 0);
- tuner_call(mxb, core, s_std, std);
+ saa7111a_call(mxb, core, s_std, std);
+ if (mxb->cur_input == 0)
+ tuner_call(mxb, core, s_std, std);
} else {
v4l2_std_id std = V4L2_STD_PAL_BG;
+ if (mxb->cur_input)
+ std = standard->id;
DEB_D("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM\n");
- /* set the 7146 gpio register -- I don't know what this does exactly */
+ /* These two gpio calls set the GPIO pins that control the tda9820 */
saa7146_write(dev, GPIO_CTRL, 0x00404050);
- /* set the 7111 gpio register -- I don't know what this does exactly */
saa7111a_call(mxb, core, s_gpio, 1);
- tuner_call(mxb, core, s_std, std);
+ saa7111a_call(mxb, core, s_std, std);
+ if (mxb->cur_input == 0)
+ tuner_call(mxb, core, s_std, std);
}
return 0;
}
@@ -836,14 +843,14 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_ext_vv vv_data = {
.inputs = MXB_INPUTS,
- .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE,
+ .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_AUDIO,
.stds = &standard[0],
.num_stds = sizeof(standard)/sizeof(struct saa7146_standard),
.std_callback = &std_callback,
};
static struct saa7146_extension extension = {
- .name = MXB_IDENTIFIER,
+ .name = "Multimedia eXtension Board",
.flags = SAA7146_USE_I2C_IRQ,
.pci_tbl = &pci_tbl[0],
diff --git a/drivers/media/video/mxb.h b/drivers/media/video/mxb.h
deleted file mode 100644
index 400a57ba62ec..000000000000
--- a/drivers/media/video/mxb.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef __MXB__
-#define __MXB__
-
-#define BASE_VIDIOC_MXB 10
-
-#define MXB_S_AUDIO_CD _IOW ('V', BASE_VIDIOC_PRIVATE+BASE_VIDIOC_MXB+0, int)
-#define MXB_S_AUDIO_LINE _IOW ('V', BASE_VIDIOC_PRIVATE+BASE_VIDIOC_MXB+1, int)
-
-#define MXB_IDENTIFIER "Multimedia eXtension Board"
-
-#define MXB_AUDIOS 6
-
-/* these are the available audio sources, which can switched
- to the line- and cd-output individually */
-static struct v4l2_audio mxb_audios[MXB_AUDIOS] = {
- {
- .index = 0,
- .name = "Tuner",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 1,
- .name = "AUX1",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 2,
- .name = "AUX2",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 3,
- .name = "AUX3",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 4,
- .name = "Radio (X9)",
- .capability = V4L2_AUDCAP_STEREO,
- } , {
- .index = 5,
- .name = "CD-ROM (X10)",
- .capability = V4L2_AUDCAP_STEREO,
- }
-};
-#endif
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index c20f5ecd6790..c7e41145041f 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -206,15 +206,10 @@ static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- *size = bytes_per_line * icd->user_height;
+ *size = icd->sizeimage;
if (!*count || *count < OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode))
*count = OMAP1_CAMERA_MIN_BUF_COUNT(pcdev->vb_mode);
@@ -256,15 +251,10 @@ static int omap1_videobuf_prepare(struct videobuf_queue *vq,
{
struct soc_camera_device *icd = vq->priv_data;
struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
int ret;
- if (bytes_per_line < 0)
- return bytes_per_line;
-
WARN_ON(!list_empty(&vb->queue));
BUG_ON(NULL == icd->current_fmt);
@@ -281,7 +271,7 @@ static int omap1_videobuf_prepare(struct videobuf_queue *vq,
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = bytes_per_line * vb->height;
+ vb->size = icd->sizeimage;
if (vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
@@ -999,6 +989,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_VYUY8_2X8,
@@ -1008,6 +999,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YUYV8_2X8,
@@ -1017,6 +1009,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YVYU8_2X8,
@@ -1026,6 +1019,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
@@ -1035,6 +1029,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
@@ -1044,6 +1039,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_BE,
@@ -1053,6 +1049,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_LE,
@@ -1062,6 +1059,7 @@ static const struct soc_mbus_lookup omap1_cam_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
},
};
diff --git a/drivers/media/video/omap24xxcam-dma.c b/drivers/media/video/omap24xxcam-dma.c
index 3ea38a8def8e..b5ae170de4a5 100644
--- a/drivers/media/video/omap24xxcam-dma.c
+++ b/drivers/media/video/omap24xxcam-dma.c
@@ -38,7 +38,7 @@
*/
/* Ack all interrupt on CSR and IRQSTATUS_L0 */
-static void omap24xxcam_dmahw_ack_all(unsigned long base)
+static void omap24xxcam_dmahw_ack_all(void __iomem *base)
{
u32 csr;
int i;
@@ -52,7 +52,7 @@ static void omap24xxcam_dmahw_ack_all(unsigned long base)
}
/* Ack dmach on CSR and IRQSTATUS_L0 */
-static u32 omap24xxcam_dmahw_ack_ch(unsigned long base, int dmach)
+static u32 omap24xxcam_dmahw_ack_ch(void __iomem *base, int dmach)
{
u32 csr;
@@ -65,12 +65,12 @@ static u32 omap24xxcam_dmahw_ack_ch(unsigned long base, int dmach)
return csr;
}
-static int omap24xxcam_dmahw_running(unsigned long base, int dmach)
+static int omap24xxcam_dmahw_running(void __iomem *base, int dmach)
{
return omap24xxcam_reg_in(base, CAMDMA_CCR(dmach)) & CAMDMA_CCR_ENABLE;
}
-static void omap24xxcam_dmahw_transfer_setup(unsigned long base, int dmach,
+static void omap24xxcam_dmahw_transfer_setup(void __iomem *base, int dmach,
dma_addr_t start, u32 len)
{
omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
@@ -112,7 +112,7 @@ static void omap24xxcam_dmahw_transfer_setup(unsigned long base, int dmach,
| CAMDMA_CICR_DROP_IE);
}
-static void omap24xxcam_dmahw_transfer_start(unsigned long base, int dmach)
+static void omap24xxcam_dmahw_transfer_start(void __iomem *base, int dmach)
{
omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
CAMDMA_CCR_SEL_SRC_DST_SYNC
@@ -124,7 +124,7 @@ static void omap24xxcam_dmahw_transfer_start(unsigned long base, int dmach)
| CAMDMA_CCR_SYNCHRO_CAMERA);
}
-static void omap24xxcam_dmahw_transfer_chain(unsigned long base, int dmach,
+static void omap24xxcam_dmahw_transfer_chain(void __iomem *base, int dmach,
int free_dmach)
{
int prev_dmach, ch;
@@ -160,7 +160,7 @@ static void omap24xxcam_dmahw_transfer_chain(unsigned long base, int dmach,
* controller may not be idle after this routine completes, because
* the completion routines might start new transfers.
*/
-static void omap24xxcam_dmahw_abort_ch(unsigned long base, int dmach)
+static void omap24xxcam_dmahw_abort_ch(void __iomem *base, int dmach)
{
/* mask all interrupts from this channel */
omap24xxcam_reg_out(base, CAMDMA_CICR(dmach), 0);
@@ -171,7 +171,7 @@ static void omap24xxcam_dmahw_abort_ch(unsigned long base, int dmach)
omap24xxcam_reg_merge(base, CAMDMA_CCR(dmach), 0, CAMDMA_CCR_ENABLE);
}
-static void omap24xxcam_dmahw_init(unsigned long base)
+static void omap24xxcam_dmahw_init(void __iomem *base)
{
omap24xxcam_reg_out(base, CAMDMA_OCP_SYSCONFIG,
CAMDMA_OCP_SYSCONFIG_MIDLEMODE_FSTANDBY
@@ -362,7 +362,7 @@ void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma)
}
static void omap24xxcam_dma_init(struct omap24xxcam_dma *dma,
- unsigned long base)
+ void __iomem *base)
{
int ch;
@@ -577,7 +577,7 @@ void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma)
}
void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
- unsigned long base,
+ void __iomem *base,
void (*reset_callback)(unsigned long data),
unsigned long reset_callback_data)
{
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 7d3864144368..e5015b0d5508 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -1776,8 +1776,7 @@ static int __devinit omap24xxcam_probe(struct platform_device *pdev)
cam->mmio_size = resource_size(mem);
/* map the region */
- cam->mmio_base = (unsigned long)
- ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
+ cam->mmio_base = ioremap_nocache(cam->mmio_base_phys, cam->mmio_size);
if (!cam->mmio_base) {
dev_err(cam->dev, "cannot map camera register I/O region\n");
goto err;
diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
index 2ce67f5a48d5..d59727afe894 100644
--- a/drivers/media/video/omap24xxcam.h
+++ b/drivers/media/video/omap24xxcam.h
@@ -429,7 +429,7 @@ struct sgdma_state {
struct omap24xxcam_dma {
spinlock_t lock; /* Lock for the whole structure. */
- unsigned long base; /* base address for dma controller */
+ void __iomem *base; /* base address for dma controller */
/* While dma_stop!=0, an attempt to start a new DMA transfer will
* fail.
@@ -491,7 +491,7 @@ struct omap24xxcam_device {
/*** hardware resources ***/
unsigned int irq;
- unsigned long mmio_base;
+ void __iomem *mmio_base;
unsigned long mmio_base_phys;
unsigned long mmio_size;
@@ -544,22 +544,22 @@ struct omap24xxcam_fh {
*
*/
-static inline u32 omap24xxcam_reg_in(unsigned long base, u32 offset)
+static inline u32 omap24xxcam_reg_in(u32 __iomem *base, u32 offset)
{
return readl(base + offset);
}
-static inline u32 omap24xxcam_reg_out(unsigned long base, u32 offset,
+static inline u32 omap24xxcam_reg_out(u32 __iomem *base, u32 offset,
u32 val)
{
writel(val, base + offset);
return val;
}
-static inline u32 omap24xxcam_reg_merge(unsigned long base, u32 offset,
+static inline u32 omap24xxcam_reg_merge(u32 __iomem *base, u32 offset,
u32 val, u32 mask)
{
- u32 addr = base + offset;
+ u32 __iomem *addr = base + offset;
u32 new_val = (readl(addr) & ~mask) | (val & mask);
writel(new_val, addr);
@@ -585,7 +585,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
int len, sgdma_callback_t callback, void *arg);
void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma *sgdma);
void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma *sgdma,
- unsigned long base,
+ void __iomem *base,
void (*reset_callback)(unsigned long data),
unsigned long reset_callback_data);
void omap24xxcam_sgdma_exit(struct omap24xxcam_sgdma *sgdma);
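
The omap24xxcam changes above replace unsigned long MMIO bases with void __iomem * so that sparse can type-check register accesses. A generic sketch of that accessor pattern, for illustration only (MY_REG and the wrapper function are hypothetical, not taken from this driver):

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_REG 0x10	/* hypothetical register offset */

static int my_map_and_poke(phys_addr_t phys, size_t size)
{
	void __iomem *base = ioremap_nocache(phys, size);
	u32 val;

	if (!base)
		return -ENOMEM;
	writel(0x1, base + MY_REG);	/* arithmetic stays on the __iomem cookie */
	val = readl(base + MY_REG);
	iounmap(base);
	return val ? 0 : -EIO;
}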
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 12d5f923e1d0..1c347633e663 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -329,19 +329,6 @@ void omap3isp_configure_bridge(struct isp_device *isp,
isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
}
-/**
- * isp_set_pixel_clock - Configures the ISP pixel clock
- * @isp: OMAP3 ISP device
- * @pixelclk: Average pixel clock in Hz
- *
- * Set the average pixel clock required by the sensor. The ISP will use the
- * lowest possible memory bandwidth settings compatible with the clock.
- **/
-static void isp_set_pixel_clock(struct isp_device *isp, unsigned int pixelclk)
-{
- isp->isp_ccdc.vpcfg.pixelclk = pixelclk;
-}
-
void omap3isp_hist_dma_done(struct isp_device *isp)
{
if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
@@ -739,6 +726,17 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
unsigned long flags;
int ret;
+ /* If the preview engine crashed it might not respond to read/write
+ * operations on the L4 bus. This would result in a bus fault and a
+ * kernel oops. Refuse to start streaming in that case. This check must
+ * be performed before the loop below to avoid starting entities if the
+ * pipeline won't start anyway (those entities would then likely fail to
+ * stop, making the problem worse).
+ */
+ if ((pipe->entities & isp->crashed) &
+ (1U << isp->isp_prev.subdev.entity.id))
+ return -EIO;
+
spin_lock_irqsave(&pipe->lock, flags);
pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
spin_unlock_irqrestore(&pipe->lock, flags);
@@ -774,14 +772,6 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
}
}
- /* Frame number propagation. In continuous streaming mode the number
- * is incremented in the frame start ISR. In mem-to-mem mode
- * singleshot is used and frame start IRQs are not available.
- * Thus we have to increment the number here.
- */
- if (pipe->do_propagation && mode == ISP_PIPELINE_STREAM_SINGLESHOT)
- atomic_inc(&pipe->frame_number);
-
return 0;
}
@@ -879,13 +869,15 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
if (ret) {
dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
+ /* If the entity failed to stop, assume it has
+ * crashed. Mark it as such; the ISP will be reset when
+ * applications release it.
+ */
+ isp->crashed |= 1U << subdev->entity.id;
failure = -ETIMEDOUT;
}
}
- if (failure < 0)
- isp->needs_reset = true;
-
return failure;
}
@@ -1069,6 +1061,7 @@ static int isp_reset(struct isp_device *isp)
udelay(1);
}
+ isp->crashed = 0;
return 0;
}
@@ -1495,11 +1488,13 @@ void omap3isp_put(struct isp_device *isp)
BUG_ON(isp->ref_count == 0);
if (--isp->ref_count == 0) {
isp_disable_interrupts(isp);
- isp_save_ctx(isp);
- if (isp->needs_reset) {
+ if (isp->domain)
+ isp_save_ctx(isp);
+ /* Reset the ISP if an entity has failed to stop. This is the
+ * only way to recover from such conditions.
+ */
+ if (isp->crashed)
isp_reset(isp);
- isp->needs_reset = false;
- }
isp_disable_clocks(isp);
}
mutex_unlock(&isp->isp_mutex);
@@ -1970,7 +1965,7 @@ error_csiphy:
*
* Always returns 0.
*/
-static int isp_remove(struct platform_device *pdev)
+static int __devexit isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
int i;
@@ -1981,6 +1976,7 @@ static int isp_remove(struct platform_device *pdev)
omap3isp_get(isp);
iommu_detach_device(isp->domain, &pdev->dev);
iommu_domain_free(isp->domain);
+ isp->domain = NULL;
omap3isp_put(isp);
free_irq(isp->irq_num, isp);
@@ -2050,7 +2046,7 @@ static int isp_map_mem_resource(struct platform_device *pdev,
* -EINVAL if couldn't install ISR,
* or clk_get return error value.
*/
-static int isp_probe(struct platform_device *pdev)
+static int __devinit isp_probe(struct platform_device *pdev)
{
struct isp_platform_data *pdata = pdev->dev.platform_data;
struct isp_device *isp;
@@ -2068,7 +2064,6 @@ static int isp_probe(struct platform_device *pdev)
isp->autoidle = autoidle;
isp->platform_cb.set_xclk = isp_set_xclk;
- isp->platform_cb.set_pixel_clock = isp_set_pixel_clock;
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
@@ -2218,7 +2213,7 @@ MODULE_DEVICE_TABLE(platform, omap3isp_id_table);
static struct platform_driver omap3isp_driver = {
.probe = isp_probe,
- .remove = isp_remove,
+ .remove = __devexit_p(isp_remove),
.id_table = omap3isp_id_table,
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index d96603eb0d17..fc7af3e32efd 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -129,7 +129,6 @@ struct isp_platform_callback {
int (*csiphy_config)(struct isp_csiphy *phy,
struct isp_csiphy_dphy_cfg *dphy,
struct isp_csiphy_lanes_cfg *lanes);
- void (*set_pixel_clock)(struct isp_device *isp, unsigned int pixelclk);
};
/*
@@ -145,6 +144,7 @@ struct isp_platform_callback {
* @raw_dmamask: Raw DMA mask
* @stat_lock: Spinlock for handling statistics
* @isp_mutex: Mutex for serializing requests to ISP.
+ * @crashed: Bitmask of crashed entities (indexed by entity ID)
* @has_context: Context has been saved at least once and can be restored.
* @ref_count: Reference count for handling multiple ISP requests.
* @cam_ick: Pointer to camera interface clock structure.
@@ -184,7 +184,7 @@ struct isp_device {
/* ISP Obj */
spinlock_t stat_lock; /* common lock for statistic drivers */
struct mutex isp_mutex; /* For handling ref_count field */
- bool needs_reset;
+ u32 crashed;
int has_context;
int ref_count;
unsigned int autoidle;
@@ -237,10 +237,6 @@ void omap3isp_configure_bridge(struct isp_device *isp,
const struct isp_parallel_platform_data *pdata,
unsigned int shift);
-#define ISP_XCLK_NONE 0
-#define ISP_XCLK_A 1
-#define ISP_XCLK_B 2
-
struct isp_device *omap3isp_get(struct isp_device *isp);
void omap3isp_put(struct isp_device *isp);
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index eaabc27f0fa2..7e32331b60fb 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -38,6 +38,9 @@
#include "ispreg.h"
#include "ispccdc.h"
+#define CCDC_MIN_WIDTH 32
+#define CCDC_MIN_HEIGHT 32
+
static struct v4l2_mbus_framefmt *
__ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
unsigned int pad, enum v4l2_subdev_format_whence which);
@@ -836,8 +839,8 @@ static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
if (pipe->input)
div = DIV_ROUND_UP(l3_ick, pipe->max_rate);
- else if (ccdc->vpcfg.pixelclk)
- div = l3_ick / ccdc->vpcfg.pixelclk;
+ else if (pipe->external_rate)
+ div = l3_ick / pipe->external_rate;
div = clamp(div, 2U, max_div);
fmtcfg_vp |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;
@@ -1118,6 +1121,7 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
struct isp_parallel_platform_data *pdata = NULL;
struct v4l2_subdev *sensor;
struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
const struct isp_format_info *fmt_info;
struct v4l2_subdev_format fmt_src;
unsigned int depth_out;
@@ -1211,14 +1215,14 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT);
/* CCDC_PAD_SOURCE_OF */
- format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
+ crop = &ccdc->crop;
- isp_reg_writel(isp, (0 << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
- ((format->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
+ isp_reg_writel(isp, (crop->left << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
+ ((crop->width - 1) << ISPCCDC_HORZ_INFO_NPH_SHIFT),
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
- isp_reg_writel(isp, 0 << ISPCCDC_VERT_START_SLV0_SHIFT,
+ isp_reg_writel(isp, crop->top << ISPCCDC_VERT_START_SLV0_SHIFT,
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
- isp_reg_writel(isp, (format->height - 1)
+ isp_reg_writel(isp, (crop->height - 1)
<< ISPCCDC_VERT_LINES_NLV_SHIFT,
OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);
@@ -1410,6 +1414,9 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
struct video_device *vdev = ccdc->subdev.devnode;
struct v4l2_event event;
+ /* Frame number propagation */
+ atomic_inc(&pipe->frame_number);
+
memset(&event, 0, sizeof(event));
event.type = V4L2_EVENT_FRAME_SYNC;
event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number);
@@ -1703,7 +1710,7 @@ static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
if (sub->id != 0)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS);
+ return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
}
static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
@@ -1790,6 +1797,16 @@ __ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
return &ccdc->formats[pad];
}
+static struct v4l2_rect *
+__ccdc_get_crop(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(fh, CCDC_PAD_SOURCE_OF);
+ else
+ return &ccdc->crop;
+}
+
/*
* ccdc_try_format - Try video format on a pad
* @ccdc: ISP CCDC device
@@ -1806,6 +1823,7 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
const struct isp_format_info *info;
unsigned int width = fmt->width;
unsigned int height = fmt->height;
+ struct v4l2_rect *crop;
unsigned int i;
switch (pad) {
@@ -1831,14 +1849,10 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, which);
memcpy(fmt, format, sizeof(*fmt));
- /* The data formatter truncates the number of horizontal output
- * pixels to a multiple of 16. To avoid clipping data, allow
- * callers to request an output size bigger than the input size
- * up to the nearest multiple of 16.
- */
- fmt->width = clamp_t(u32, width, 32, fmt->width + 15);
- fmt->width &= ~15;
- fmt->height = clamp_t(u32, height, 32, fmt->height);
+ /* Hardcode the output size to the crop rectangle size. */
+ crop = __ccdc_get_crop(ccdc, fh, which);
+ fmt->width = crop->width;
+ fmt->height = crop->height;
break;
case CCDC_PAD_SOURCE_VP:
@@ -1866,6 +1880,49 @@ ccdc_try_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
}
/*
+ * ccdc_try_crop - Validate a crop rectangle
+ * @ccdc: ISP CCDC device
+ * @sink: format on the sink pad
+ * @crop: crop rectangle to be validated
+ */
+static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
+ const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+ const struct isp_format_info *info;
+ unsigned int max_width;
+
+ /* For Bayer formats, restrict left/top and width/height to even values
+ * to keep the Bayer pattern.
+ */
+ info = omap3isp_video_format_info(sink->code);
+ if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+ crop->left &= ~1;
+ crop->top &= ~1;
+ }
+
+ crop->left = clamp_t(u32, crop->left, 0, sink->width - CCDC_MIN_WIDTH);
+ crop->top = clamp_t(u32, crop->top, 0, sink->height - CCDC_MIN_HEIGHT);
+
+ /* The data formatter truncates the number of horizontal output pixels
+ * to a multiple of 16. To avoid clipping data, allow callers to request
+ * an output size bigger than the input size up to the nearest multiple
+ * of 16.
+ */
+ max_width = (sink->width - crop->left + 15) & ~15;
+ crop->width = clamp_t(u32, crop->width, CCDC_MIN_WIDTH, max_width)
+ & ~15;
+ crop->height = clamp_t(u32, crop->height, CCDC_MIN_HEIGHT,
+ sink->height - crop->top);
+
+ /* Odd width/height values don't make sense for Bayer formats. */
+ if (info->flavor != V4L2_MBUS_FMT_Y8_1X8) {
+ crop->width &= ~1;
+ crop->height &= ~1;
+ }
+}
+
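+
The new ccdc_try_crop() folds several constraints together: even left/top and width/height for Bayer data, a 32-pixel minimum, and a width truncated to a multiple of 16. A small standalone sketch of the same arithmetic (userspace, not kernel code; struct and function names are illustrative) shows how an arbitrary request is rounded:

#include <stdio.h>

#define MIN_W 32
#define MIN_H 32

struct rect { unsigned int left, top, width, height; };

static unsigned int clampu(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Mirror of the ccdc_try_crop() rules for a Bayer sink format. */
static void try_crop(unsigned int sink_w, unsigned int sink_h, struct rect *r)
{
	unsigned int max_width;

	r->left &= ~1u;			/* keep the Bayer phase */
	r->top &= ~1u;
	r->left = clampu(r->left, 0, sink_w - MIN_W);
	r->top = clampu(r->top, 0, sink_h - MIN_H);
	max_width = (sink_w - r->left + 15) & ~15u;
	r->width = clampu(r->width, MIN_W, max_width) & ~15u;
	r->height = clampu(r->height, MIN_H, sink_h - r->top) & ~1u;
}

int main(void)
{
	struct rect r = { .left = 3, .top = 5, .width = 643, .height = 201 };

	try_crop(1296, 972, &r);
	printf("%u,%u %ux%u\n", r.left, r.top, r.width, r.height);	/* 2,4 640x200 */
	return 0;
}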
+/*
* ccdc_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
* @fh : V4L2 subdev file handle
@@ -1937,6 +1994,93 @@ static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
}
/*
+ * ccdc_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the output formatter
+ * source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != CCDC_PAD_SOURCE_OF)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+ ccdc_try_crop(ccdc, format, &sel->r);
+ break;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * ccdc_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP CCDC V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the output
+ * formatter source pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ sel->pad != CCDC_PAD_SOURCE_OF)
+ return -EINVAL;
+
+ /* The crop rectangle can't be changed while streaming. */
+ if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
+ return -EBUSY;
+
+ /* Modifying the crop rectangle always changes the format on the source
+ * pad. If the KEEP_CONFIG flag is set, just return the current crop
+ * rectangle.
+ */
+ if (sel->flags & V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
+ return 0;
+ }
+
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SINK, sel->which);
+ ccdc_try_crop(ccdc, format, &sel->r);
+ *__ccdc_get_crop(ccdc, fh, sel->which) = sel->r;
+
+ /* Update the source format. */
+ format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF, sel->which);
+ ccdc_try_format(ccdc, fh, CCDC_PAD_SOURCE_OF, format, sel->which);
+
+ return 0;
+}
+
+/*
* ccdc_get_format - Retrieve the video format on a pad
* @sd : ISP CCDC V4L2 subdevice
* @fh : V4L2 subdev file handle
@@ -1973,6 +2117,7 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
{
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
format = __ccdc_get_format(ccdc, fh, fmt->pad, fmt->which);
if (format == NULL)
@@ -1983,6 +2128,16 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
/* Propagate the format from sink to source */
if (fmt->pad == CCDC_PAD_SINK) {
+ /* Reset the crop rectangle. */
+ crop = __ccdc_get_crop(ccdc, fh, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+
+ ccdc_try_crop(ccdc, &fmt->format, crop);
+
+ /* Update the source formats. */
format = __ccdc_get_format(ccdc, fh, CCDC_PAD_SOURCE_OF,
fmt->which);
*format = fmt->format;
@@ -2000,6 +2155,69 @@ static int ccdc_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
}
/*
+ * Decide whether the desired output pixel code can be obtained with
+ * the lane shifter by shifting the input pixel code.
+ * @in: input pixelcode to shifter
+ * @out: output pixelcode from shifter
+ * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
+ *
+ * return true if the combination is possible
+ * return false otherwise
+ */
+static bool ccdc_is_shiftable(enum v4l2_mbus_pixelcode in,
+ enum v4l2_mbus_pixelcode out,
+ unsigned int additional_shift)
+{
+ const struct isp_format_info *in_info, *out_info;
+
+ if (in == out)
+ return true;
+
+ in_info = omap3isp_video_format_info(in);
+ out_info = omap3isp_video_format_info(out);
+
+ if ((in_info->flavor == 0) || (out_info->flavor == 0))
+ return false;
+
+ if (in_info->flavor != out_info->flavor)
+ return false;
+
+ return in_info->bpp - out_info->bpp + additional_shift <= 6;
+}
+
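+
Once the two codes share a flavor, ccdc_is_shiftable() reduces to a bit-depth budget: the input depth minus the output depth, plus the board-level shift, must not exceed the 6 bits the lane shifter can drop. A tiny standalone illustration (not kernel code; names are made up):

#include <stdbool.h>
#include <stdio.h>

/* Only the depth rule is modelled; flavor matching is assumed. */
static bool shiftable(unsigned int in_bpp, unsigned int out_bpp,
		      unsigned int extra_shift)
{
	return in_bpp - out_bpp + extra_shift <= 6;
}

int main(void)
{
	/* 12-bit sensor to 10-bit sink, no board shift: 2 <= 6, allowed. */
	printf("%d\n", shiftable(12, 10, 0));
	/* 12-bit sensor to 8-bit sink with data_lane_shift = 2 (4 bits): 8 > 6, rejected. */
	printf("%d\n", shiftable(12, 8, 4));
	return 0;
}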
+static int ccdc_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
+ unsigned long parallel_shift;
+
+ /* Check if the two ends match */
+ if (source_fmt->format.width != sink_fmt->format.width ||
+ source_fmt->format.height != sink_fmt->format.height)
+ return -EPIPE;
+
+ /* We've got a parallel sensor here. */
+ if (ccdc->input == CCDC_INPUT_PARALLEL) {
+ struct isp_parallel_platform_data *pdata =
+ &((struct isp_v4l2_subdevs_group *)
+ media_entity_to_v4l2_subdev(link->source->entity)
+ ->host_priv)->bus.parallel;
+ parallel_shift = pdata->data_lane_shift * 2;
+ } else {
+ parallel_shift = 0;
+ }
+
+ /* Lane shifter may be used to drop bits on CCDC sink pad */
+ if (!ccdc_is_shiftable(source_fmt->format.code,
+ sink_fmt->format.code, parallel_shift))
+ return -EPIPE;
+
+ return 0;
+}
+
+/*
* ccdc_init_formats - Initialize formats on all pads
* @sd: ISP CCDC V4L2 subdevice
* @fh: V4L2 subdev file handle
@@ -2041,6 +2259,9 @@ static const struct v4l2_subdev_pad_ops ccdc_v4l2_pad_ops = {
.enum_frame_size = ccdc_enum_frame_size,
.get_fmt = ccdc_get_format,
.set_fmt = ccdc_set_format,
+ .get_selection = ccdc_get_selection,
+ .set_selection = ccdc_set_selection,
+ .link_validate = ccdc_link_validate,
};
/* V4L2 subdev operations */
@@ -2150,6 +2371,7 @@ static int ccdc_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations ccdc_media_ops = {
.link_setup = ccdc_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc)
@@ -2276,8 +2498,6 @@ int omap3isp_ccdc_init(struct isp_device *isp)
ccdc->clamp.oblen = 0;
ccdc->clamp.dcsubval = 0;
- ccdc->vpcfg.pixelclk = 0;
-
ccdc->update = OMAP3ISP_CCDC_BLCLAMP;
ccdc_apply_controls(ccdc);
diff --git a/drivers/media/video/omap3isp/ispccdc.h b/drivers/media/video/omap3isp/ispccdc.h
index 6d0264bab75b..890f6b3a68fd 100644
--- a/drivers/media/video/omap3isp/ispccdc.h
+++ b/drivers/media/video/omap3isp/ispccdc.h
@@ -80,14 +80,6 @@ struct ispccdc_syncif {
u8 bt_r656_en;
};
-/*
- * struct ispccdc_vp - Structure for Video Port parameters
- * @pixelclk: Input pixel clock in Hz
- */
-struct ispccdc_vp {
- unsigned int pixelclk;
-};
-
enum ispccdc_lsc_state {
LSC_STATE_STOPPED = 0,
LSC_STATE_STOPPING = 1,
@@ -147,6 +139,7 @@ struct ispccdc_lsc {
* @subdev: V4L2 subdevice
* @pads: Sink and source media entity pads
* @formats: Active video formats
+ * @crop: Active crop rectangle on the OF source pad
* @input: Active input
* @output: Active outputs
* @video_out: Output video node
@@ -161,7 +154,6 @@ struct ispccdc_lsc {
* @update: Bitmask of controls to update during the next interrupt
* @shadow_update: Controls update in progress by userspace
* @syncif: Interface synchronization configuration
- * @vpcfg: Video port configuration
* @underrun: A buffer underrun occurred and a new buffer has been queued
* @state: Streaming state
* @lock: Serializes shadow_update with interrupt handler
@@ -173,6 +165,7 @@ struct isp_ccdc_device {
struct v4l2_subdev subdev;
struct media_pad pads[CCDC_PADS_NUM];
struct v4l2_mbus_framefmt formats[CCDC_PADS_NUM];
+ struct v4l2_rect crop;
enum ccdc_input_entity input;
unsigned int output;
@@ -190,7 +183,6 @@ struct isp_ccdc_device {
unsigned int shadow_update;
struct ispccdc_syncif syncif;
- struct ispccdc_vp vpcfg;
unsigned int underrun:1;
enum isp_pipeline_stream_state state;
diff --git a/drivers/media/video/omap3isp/ispccp2.c b/drivers/media/video/omap3isp/ispccp2.c
index 70ddbf35b223..85f0de85f37c 100644
--- a/drivers/media/video/omap3isp/ispccp2.c
+++ b/drivers/media/video/omap3isp/ispccp2.c
@@ -161,7 +161,6 @@ static void ccp2_pwr_cfg(struct isp_ccp2_device *ccp2)
static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
{
struct isp_device *isp = to_isp_device(ccp2);
- struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
int i;
if (enable && ccp2->vdds_csib)
@@ -178,19 +177,6 @@ static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN,
enable ? (ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN) : 0);
- /* For frame count propagation */
- if (pipe->do_propagation) {
- /* We may want the Frame Start IRQ from LC0 */
- if (enable)
- isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2,
- ISPCCP2_LC01_IRQENABLE,
- ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ);
- else
- isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCP2,
- ISPCCP2_LC01_IRQENABLE,
- ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ);
- }
-
if (!enable && ccp2->vdds_csib)
regulator_disable(ccp2->vdds_csib);
}
@@ -350,7 +336,6 @@ static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
- ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
@@ -613,14 +598,6 @@ void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
if (omap3isp_module_sync_is_stopping(&ccp2->wait, &ccp2->stopping))
return;
- /* Frame number propagation */
- if (lcx_irqstatus & ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ) {
- struct isp_pipeline *pipe =
- to_isp_pipeline(&ccp2->subdev.entity);
- if (pipe->do_propagation)
- atomic_inc(&pipe->frame_number);
- }
-
/* Handle queued buffers on frame end interrupts */
if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_EOF_IRQ)
ccp2_isr_buffer(ccp2);
@@ -1021,6 +998,7 @@ static int ccp2_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations ccp2_media_ops = {
.link_setup = ccp2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
/*
diff --git a/drivers/media/video/omap3isp/ispcsi2.c b/drivers/media/video/omap3isp/ispcsi2.c
index fcb5168996a7..a1724362b6d5 100644
--- a/drivers/media/video/omap3isp/ispcsi2.c
+++ b/drivers/media/video/omap3isp/ispcsi2.c
@@ -378,21 +378,17 @@ static void csi2_timing_config(struct isp_device *isp,
static void csi2_irq_ctx_set(struct isp_device *isp,
struct isp_csi2_device *csi2, int enable)
{
- u32 reg = ISPCSI2_CTX_IRQSTATUS_FE_IRQ;
int i;
- if (csi2->use_fs_irq)
- reg |= ISPCSI2_CTX_IRQSTATUS_FS_IRQ;
-
for (i = 0; i < 8; i++) {
- isp_reg_writel(isp, reg, csi2->regs1,
+ isp_reg_writel(isp, ISPCSI2_CTX_IRQSTATUS_FE_IRQ, csi2->regs1,
ISPCSI2_CTX_IRQSTATUS(i));
if (enable)
isp_reg_set(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
- reg);
+ ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
else
isp_reg_clr(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
- reg);
+ ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
}
}
@@ -690,14 +686,6 @@ static void csi2_isr_ctx(struct isp_csi2_device *csi2,
status = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
isp_reg_writel(isp, status, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
- /* Propagate frame number */
- if (status & ISPCSI2_CTX_IRQSTATUS_FS_IRQ) {
- struct isp_pipeline *pipe =
- to_isp_pipeline(&csi2->subdev.entity);
- if (pipe->do_propagation)
- atomic_inc(&pipe->frame_number);
- }
-
if (!(status & ISPCSI2_CTX_IRQSTATUS_FE_IRQ))
return;
@@ -1047,14 +1035,12 @@ static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
{
struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct isp_device *isp = csi2->isp;
- struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
struct isp_video *video_out = &csi2->video_out;
switch (enable) {
case ISP_PIPELINE_STREAM_CONTINUOUS:
if (omap3isp_csiphy_acquire(csi2->phy) < 0)
return -ENODEV;
- csi2->use_fs_irq = pipe->do_propagation;
if (csi2->output & CSI2_OUTPUT_MEMORY)
omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
csi2_configure(csi2);
@@ -1181,6 +1167,7 @@ static int csi2_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations csi2_media_ops = {
.link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2)
diff --git a/drivers/media/video/omap3isp/ispcsi2.h b/drivers/media/video/omap3isp/ispcsi2.h
index 885ad79a7678..c57729b7e86e 100644
--- a/drivers/media/video/omap3isp/ispcsi2.h
+++ b/drivers/media/video/omap3isp/ispcsi2.h
@@ -145,7 +145,6 @@ struct isp_csi2_device {
u32 output; /* output to CCDC, memory or both? */
bool dpcm_decompress;
unsigned int frame_skip;
- bool use_fs_irq;
struct isp_csiphy *phy;
struct isp_csi2_ctx_cfg contexts[ISP_CSI2_MAX_CTX_NUM + 1];
diff --git a/drivers/media/video/omap3isp/ispcsiphy.c b/drivers/media/video/omap3isp/ispcsiphy.c
index 5be37ce7d0c2..348f67ebbbc9 100644
--- a/drivers/media/video/omap3isp/ispcsiphy.c
+++ b/drivers/media/video/omap3isp/ispcsiphy.c
@@ -186,7 +186,9 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
if (rval < 0)
goto done;
- omap3isp_csi2_reset(phy->csi2);
+ rval = omap3isp_csi2_reset(phy->csi2);
+ if (rval < 0)
+ goto done;
csiphy_dphy_config(phy);
csiphy_lanes_config(phy);
diff --git a/drivers/media/video/omap3isp/ispcsiphy.h b/drivers/media/video/omap3isp/ispcsiphy.h
index 9596dc6830a6..e93a661e65d9 100644
--- a/drivers/media/video/omap3isp/ispcsiphy.h
+++ b/drivers/media/video/omap3isp/ispcsiphy.h
@@ -27,22 +27,11 @@
#ifndef OMAP3_ISP_CSI_PHY_H
#define OMAP3_ISP_CSI_PHY_H
+#include <media/omap3isp.h>
+
struct isp_csi2_device;
struct regulator;
-struct csiphy_lane {
- u8 pos;
- u8 pol;
-};
-
-#define ISP_CSIPHY2_NUM_DATA_LANES 2
-#define ISP_CSIPHY1_NUM_DATA_LANES 1
-
-struct isp_csiphy_lanes_cfg {
- struct csiphy_lane data[ISP_CSIPHY2_NUM_DATA_LANES];
- struct csiphy_lane clk;
-};
-
struct isp_csiphy_dphy_cfg {
u8 ths_term;
u8 ths_settle;
diff --git a/drivers/media/video/omap3isp/isppreview.c b/drivers/media/video/omap3isp/isppreview.c
index 6d0fb2c8c26d..dd91da26f1b0 100644
--- a/drivers/media/video/omap3isp/isppreview.c
+++ b/drivers/media/video/omap3isp/isppreview.c
@@ -441,23 +441,6 @@ preview_enable_dcor(struct isp_prev_device *prev, u8 enable)
}
/*
- * preview_enable_cfa - Enable/Disable the CFA Interpolation.
- * @enable: 1 - Enables the CFA.
- */
-static void
-preview_enable_cfa(struct isp_prev_device *prev, u8 enable)
-{
- struct isp_device *isp = to_isp_device(prev);
-
- if (enable)
- isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_CFAEN);
- else
- isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
- ISPPRV_PCR_CFAEN);
-}
-
-/*
* preview_enable_gammabypass - Enables/Disables the GammaByPass
* @enable: 1 - Bypasses Gamma - 10bit input is cropped to 8MSB.
* 0 - Goes through Gamma Correction. input and output is 10bit.
@@ -608,12 +591,12 @@ preview_config_rgb_blending(struct isp_prev_device *prev, const void *rgb2rgb)
}
/*
- * Configures the RGB-YCbYCr conversion matrix
+ * Configures the color space conversion (RGB to YCbYCr) matrix
* @prev_csc: Structure containing the RGB to YCbYCr matrix and the
* YCbCr offset.
*/
static void
-preview_config_rgb_to_ycbcr(struct isp_prev_device *prev, const void *prev_csc)
+preview_config_csc(struct isp_prev_device *prev, const void *prev_csc)
{
struct isp_device *isp = to_isp_device(prev);
const struct omap3isp_prev_csc *csc = prev_csc;
@@ -649,12 +632,18 @@ preview_config_rgb_to_ycbcr(struct isp_prev_device *prev, const void *prev_csc)
static void
preview_update_contrast(struct isp_prev_device *prev, u8 contrast)
{
- struct prev_params *params = &prev->params;
+ struct prev_params *params;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ params = (prev->params.active & OMAP3ISP_PREV_CONTRAST)
+ ? &prev->params.params[0] : &prev->params.params[1];
if (params->contrast != (contrast * ISPPRV_CONTRAST_UNITS)) {
params->contrast = contrast * ISPPRV_CONTRAST_UNITS;
- prev->update |= PREV_CONTRAST;
+ params->update |= OMAP3ISP_PREV_CONTRAST;
}
+ spin_unlock_irqrestore(&prev->params.lock, flags);
}
/*
@@ -681,12 +670,18 @@ preview_config_contrast(struct isp_prev_device *prev, const void *params)
static void
preview_update_brightness(struct isp_prev_device *prev, u8 brightness)
{
- struct prev_params *params = &prev->params;
+ struct prev_params *params;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ params = (prev->params.active & OMAP3ISP_PREV_BRIGHTNESS)
+ ? &prev->params.params[0] : &prev->params.params[1];
if (params->brightness != (brightness * ISPPRV_BRIGHT_UNITS)) {
params->brightness = brightness * ISPPRV_BRIGHT_UNITS;
- prev->update |= PREV_BRIGHTNESS;
+ params->update |= OMAP3ISP_PREV_BRIGHTNESS;
}
+ spin_unlock_irqrestore(&prev->params.lock, flags);
}
/*
@@ -721,159 +716,188 @@ preview_config_yc_range(struct isp_prev_device *prev, const void *yclimit)
OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC);
}
+static u32
+preview_params_lock(struct isp_prev_device *prev, u32 update, bool shadow)
+{
+ u32 active = prev->params.active;
+
+ if (shadow) {
+ /* Mark all shadow parameters we are going to touch as busy. */
+ prev->params.params[0].busy |= ~active & update;
+ prev->params.params[1].busy |= active & update;
+ } else {
+ /* Mark all active parameters we are going to touch as busy. */
+ update = (prev->params.params[0].update & active)
+ | (prev->params.params[1].update & ~active);
+
+ prev->params.params[0].busy |= active & update;
+ prev->params.params[1].busy |= ~active & update;
+ }
+
+ return update;
+}
+
+static void
+preview_params_unlock(struct isp_prev_device *prev, u32 update, bool shadow)
+{
+ u32 active = prev->params.active;
+
+ if (shadow) {
+ /* Set the update flag for shadow parameters that have been
+ * updated and clear the busy flag for all shadow parameters.
+ */
+ prev->params.params[0].update |= (~active & update);
+ prev->params.params[1].update |= (active & update);
+ prev->params.params[0].busy &= active;
+ prev->params.params[1].busy &= ~active;
+ } else {
+ /* Clear the update flag for active parameters that have been
+ * applied and the busy flag for all active parameters.
+ */
+ prev->params.params[0].update &= ~(active & update);
+ prev->params.params[1].update &= ~(~active & update);
+ prev->params.params[0].busy &= ~active;
+ prev->params.params[1].busy &= active;
+ }
+}
+
+static void preview_params_switch(struct isp_prev_device *prev)
+{
+ u32 to_switch;
+
+ /* Switch active parameters with updated shadow parameters when the
+ * shadow parameter has been updated and neither the active nor the
+ * shadow parameter is busy.
+ */
+ to_switch = (prev->params.params[0].update & ~prev->params.active)
+ | (prev->params.params[1].update & prev->params.active);
+ to_switch &= ~(prev->params.params[0].busy |
+ prev->params.params[1].busy);
+ if (to_switch == 0)
+ return;
+
+ prev->params.active ^= to_switch;
+
+ /* Remove the update flag for the shadow copy of parameters we have
+ * switched.
+ */
+ prev->params.params[0].update &= ~(~prev->params.active & to_switch);
+ prev->params.params[1].update &= ~(prev->params.active & to_switch);
+}
+
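+
The new params[0]/params[1] pair plus the active, update and busy bitmasks implement per-feature double buffering: userspace writes into the shadow copy while the ISR applies the active one, and preview_params_switch() flips a feature only when its shadow copy has been updated and neither copy is busy. A compact standalone model of that switch logic (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

struct copy { uint32_t update, busy; };

struct state {
	struct copy params[2];
	uint32_t active;	/* bit set: params[0] holds the active copy */
};

static void params_switch(struct state *s)
{
	uint32_t to_switch;

	/* Shadow copy updated and neither copy busy? */
	to_switch = (s->params[0].update & ~s->active)
		  | (s->params[1].update & s->active);
	to_switch &= ~(s->params[0].busy | s->params[1].busy);
	if (!to_switch)
		return;

	s->active ^= to_switch;
	/* Drop the update flag on what is now the shadow copy. */
	s->params[0].update &= ~(~s->active & to_switch);
	s->params[1].update &= ~(s->active & to_switch);
}

int main(void)
{
	struct state s = { .params = { { .update = 0x1 }, { 0, 0 } } };

	params_switch(&s);
	printf("active=%#x\n", s.active);	/* active=0x1: feature 0 switched */
	return 0;
}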
/* preview parameters update structure */
struct preview_update {
- int cfg_bit;
- int feature_bit;
void (*config)(struct isp_prev_device *, const void *);
void (*enable)(struct isp_prev_device *, u8);
+ unsigned int param_offset;
+ unsigned int param_size;
+ unsigned int config_offset;
+ bool skip;
};
-static struct preview_update update_attrs[] = {
- {OMAP3ISP_PREV_LUMAENH, PREV_LUMA_ENHANCE,
+/* Keep the array indexed by the OMAP3ISP_PREV_* bit number. */
+static const struct preview_update update_attrs[] = {
+ /* OMAP3ISP_PREV_LUMAENH */ {
preview_config_luma_enhancement,
- preview_enable_luma_enhancement},
- {OMAP3ISP_PREV_INVALAW, PREV_INVERSE_ALAW,
+ preview_enable_luma_enhancement,
+ offsetof(struct prev_params, luma),
+ FIELD_SIZEOF(struct prev_params, luma),
+ offsetof(struct omap3isp_prev_update_config, luma),
+ }, /* OMAP3ISP_PREV_INVALAW */ {
NULL,
- preview_enable_invalaw},
- {OMAP3ISP_PREV_HRZ_MED, PREV_HORZ_MEDIAN_FILTER,
+ preview_enable_invalaw,
+ }, /* OMAP3ISP_PREV_HRZ_MED */ {
preview_config_hmed,
- preview_enable_hmed},
- {OMAP3ISP_PREV_CFA, PREV_CFA,
+ preview_enable_hmed,
+ offsetof(struct prev_params, hmed),
+ FIELD_SIZEOF(struct prev_params, hmed),
+ offsetof(struct omap3isp_prev_update_config, hmed),
+ }, /* OMAP3ISP_PREV_CFA */ {
preview_config_cfa,
- preview_enable_cfa},
- {OMAP3ISP_PREV_CHROMA_SUPP, PREV_CHROMA_SUPPRESS,
+ NULL,
+ offsetof(struct prev_params, cfa),
+ FIELD_SIZEOF(struct prev_params, cfa),
+ offsetof(struct omap3isp_prev_update_config, cfa),
+ }, /* OMAP3ISP_PREV_CHROMA_SUPP */ {
preview_config_chroma_suppression,
- preview_enable_chroma_suppression},
- {OMAP3ISP_PREV_WB, PREV_WB,
+ preview_enable_chroma_suppression,
+ offsetof(struct prev_params, csup),
+ FIELD_SIZEOF(struct prev_params, csup),
+ offsetof(struct omap3isp_prev_update_config, csup),
+ }, /* OMAP3ISP_PREV_WB */ {
preview_config_whitebalance,
- NULL},
- {OMAP3ISP_PREV_BLKADJ, PREV_BLKADJ,
+ NULL,
+ offsetof(struct prev_params, wbal),
+ FIELD_SIZEOF(struct prev_params, wbal),
+ offsetof(struct omap3isp_prev_update_config, wbal),
+ }, /* OMAP3ISP_PREV_BLKADJ */ {
preview_config_blkadj,
- NULL},
- {OMAP3ISP_PREV_RGB2RGB, PREV_RGB2RGB,
+ NULL,
+ offsetof(struct prev_params, blkadj),
+ FIELD_SIZEOF(struct prev_params, blkadj),
+ offsetof(struct omap3isp_prev_update_config, blkadj),
+ }, /* OMAP3ISP_PREV_RGB2RGB */ {
preview_config_rgb_blending,
- NULL},
- {OMAP3ISP_PREV_COLOR_CONV, PREV_COLOR_CONV,
- preview_config_rgb_to_ycbcr,
- NULL},
- {OMAP3ISP_PREV_YC_LIMIT, PREV_YCLIMITS,
+ NULL,
+ offsetof(struct prev_params, rgb2rgb),
+ FIELD_SIZEOF(struct prev_params, rgb2rgb),
+ offsetof(struct omap3isp_prev_update_config, rgb2rgb),
+ }, /* OMAP3ISP_PREV_COLOR_CONV */ {
+ preview_config_csc,
+ NULL,
+ offsetof(struct prev_params, csc),
+ FIELD_SIZEOF(struct prev_params, csc),
+ offsetof(struct omap3isp_prev_update_config, csc),
+ }, /* OMAP3ISP_PREV_YC_LIMIT */ {
preview_config_yc_range,
- NULL},
- {OMAP3ISP_PREV_DEFECT_COR, PREV_DEFECT_COR,
+ NULL,
+ offsetof(struct prev_params, yclimit),
+ FIELD_SIZEOF(struct prev_params, yclimit),
+ offsetof(struct omap3isp_prev_update_config, yclimit),
+ }, /* OMAP3ISP_PREV_DEFECT_COR */ {
preview_config_dcor,
- preview_enable_dcor},
- {OMAP3ISP_PREV_GAMMABYPASS, PREV_GAMMA_BYPASS,
+ preview_enable_dcor,
+ offsetof(struct prev_params, dcor),
+ FIELD_SIZEOF(struct prev_params, dcor),
+ offsetof(struct omap3isp_prev_update_config, dcor),
+ }, /* OMAP3ISP_PREV_GAMMABYPASS */ {
NULL,
- preview_enable_gammabypass},
- {OMAP3ISP_PREV_DRK_FRM_CAPTURE, PREV_DARK_FRAME_CAPTURE,
+ preview_enable_gammabypass,
+ }, /* OMAP3ISP_PREV_DRK_FRM_CAPTURE */ {
NULL,
- preview_enable_drkframe_capture},
- {OMAP3ISP_PREV_DRK_FRM_SUBTRACT, PREV_DARK_FRAME_SUBTRACT,
+ preview_enable_drkframe_capture,
+ }, /* OMAP3ISP_PREV_DRK_FRM_SUBTRACT */ {
NULL,
- preview_enable_drkframe},
- {OMAP3ISP_PREV_LENS_SHADING, PREV_LENS_SHADING,
+ preview_enable_drkframe,
+ }, /* OMAP3ISP_PREV_LENS_SHADING */ {
preview_config_drkf_shadcomp,
- preview_enable_drkframe},
- {OMAP3ISP_PREV_NF, PREV_NOISE_FILTER,
+ preview_enable_drkframe,
+ }, /* OMAP3ISP_PREV_NF */ {
preview_config_noisefilter,
- preview_enable_noisefilter},
- {OMAP3ISP_PREV_GAMMA, PREV_GAMMA,
+ preview_enable_noisefilter,
+ offsetof(struct prev_params, nf),
+ FIELD_SIZEOF(struct prev_params, nf),
+ offsetof(struct omap3isp_prev_update_config, nf),
+ }, /* OMAP3ISP_PREV_GAMMA */ {
preview_config_gammacorrn,
- NULL},
- {-1, PREV_CONTRAST,
+ NULL,
+ offsetof(struct prev_params, gamma),
+ FIELD_SIZEOF(struct prev_params, gamma),
+ offsetof(struct omap3isp_prev_update_config, gamma),
+ }, /* OMAP3ISP_PREV_CONTRAST */ {
preview_config_contrast,
- NULL},
- {-1, PREV_BRIGHTNESS,
+ NULL,
+ offsetof(struct prev_params, contrast),
+ 0, 0, true,
+ }, /* OMAP3ISP_PREV_BRIGHTNESS */ {
preview_config_brightness,
- NULL},
+ NULL,
+ offsetof(struct prev_params, brightness),
+ 0, 0, true,
+ },
};
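/*
 * Editorial sketch (not part of the patch): how the table above is meant to
 * be consumed. For a feature bit index i, the stored offsets locate both the
 * kernel-side parameter member and the matching __user pointer inside
 * struct omap3isp_prev_update_config, so a single generic copy_from_user()
 * replaces the old per-feature __preview_get_ptrs() switch. The helper name
 * is hypothetical; preview_config() below open-codes the same logic.
 */
static int __maybe_unused example_copy_one(struct omap3isp_prev_update_config *cfg,
					   struct prev_params *params,
					   unsigned int i)
{
	const struct preview_update *attr = &update_attrs[i];
	void __user *from;
	void *to;

	/* Enable-only features and ioctl-less parameters carry no blob. */
	if (attr->skip || !attr->param_size)
		return 0;

	from = *(void __user **)((void *)cfg + attr->config_offset);
	to = (void *)params + attr->param_offset;

	return copy_from_user(to, from, attr->param_size) ? -EFAULT : 0;
}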
/*
- * __preview_get_ptrs - helper function which return pointers to members
- * of params and config structures.
- * @params - pointer to preview_params structure.
- * @param - return pointer to appropriate structure field.
- * @configs - pointer to update config structure.
- * @config - return pointer to appropriate structure field.
- * @bit - for which feature to return pointers.
- * Return size of corresponding prev_params member
- */
-static u32
-__preview_get_ptrs(struct prev_params *params, void **param,
- struct omap3isp_prev_update_config *configs,
- void __user **config, u32 bit)
-{
-#define CHKARG(cfgs, cfg, field) \
- if (cfgs && cfg) { \
- *(cfg) = (cfgs)->field; \
- }
-
- switch (bit) {
- case PREV_HORZ_MEDIAN_FILTER:
- *param = &params->hmed;
- CHKARG(configs, config, hmed)
- return sizeof(params->hmed);
- case PREV_NOISE_FILTER:
- *param = &params->nf;
- CHKARG(configs, config, nf)
- return sizeof(params->nf);
- break;
- case PREV_CFA:
- *param = &params->cfa;
- CHKARG(configs, config, cfa)
- return sizeof(params->cfa);
- case PREV_LUMA_ENHANCE:
- *param = &params->luma;
- CHKARG(configs, config, luma)
- return sizeof(params->luma);
- case PREV_CHROMA_SUPPRESS:
- *param = &params->csup;
- CHKARG(configs, config, csup)
- return sizeof(params->csup);
- case PREV_DEFECT_COR:
- *param = &params->dcor;
- CHKARG(configs, config, dcor)
- return sizeof(params->dcor);
- case PREV_BLKADJ:
- *param = &params->blk_adj;
- CHKARG(configs, config, blkadj)
- return sizeof(params->blk_adj);
- case PREV_YCLIMITS:
- *param = &params->yclimit;
- CHKARG(configs, config, yclimit)
- return sizeof(params->yclimit);
- case PREV_RGB2RGB:
- *param = &params->rgb2rgb;
- CHKARG(configs, config, rgb2rgb)
- return sizeof(params->rgb2rgb);
- case PREV_COLOR_CONV:
- *param = &params->rgb2ycbcr;
- CHKARG(configs, config, csc)
- return sizeof(params->rgb2ycbcr);
- case PREV_WB:
- *param = &params->wbal;
- CHKARG(configs, config, wbal)
- return sizeof(params->wbal);
- case PREV_GAMMA:
- *param = &params->gamma;
- CHKARG(configs, config, gamma)
- return sizeof(params->gamma);
- case PREV_CONTRAST:
- *param = &params->contrast;
- return 0;
- case PREV_BRIGHTNESS:
- *param = &params->brightness;
- return 0;
- default:
- *param = NULL;
- *config = NULL;
- break;
- }
- return 0;
-}
-
-/*
* preview_config - Copy and update local structure with userspace preview
* configuration.
* @prev: ISP preview engine
@@ -885,84 +909,103 @@ __preview_get_ptrs(struct prev_params *params, void **param,
static int preview_config(struct isp_prev_device *prev,
struct omap3isp_prev_update_config *cfg)
{
- struct prev_params *params;
- struct preview_update *attr;
- int i, bit, rval = 0;
+ unsigned long flags;
+ unsigned int i;
+ int rval = 0;
+ u32 update;
+ u32 active;
- params = &prev->params;
+ if (cfg->update == 0)
+ return 0;
- if (prev->state != ISP_PIPELINE_STREAM_STOPPED) {
- unsigned long flags;
+ /* Mark the shadow parameters we're going to update as busy. */
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_lock(prev, cfg->update, true);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
- spin_lock_irqsave(&prev->lock, flags);
- prev->shadow_update = 1;
- spin_unlock_irqrestore(&prev->lock, flags);
- }
+ update = 0;
for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
- attr = &update_attrs[i];
- bit = 0;
+ const struct preview_update *attr = &update_attrs[i];
+ struct prev_params *params;
+ unsigned int bit = 1 << i;
- if (!(cfg->update & attr->cfg_bit))
+ if (attr->skip || !(cfg->update & bit))
continue;
- bit = cfg->flag & attr->cfg_bit;
- if (bit) {
- void *to = NULL, __user *from = NULL;
- unsigned long sz = 0;
+ params = &prev->params.params[!!(active & bit)];
+
+ if (cfg->flag & bit) {
+ void __user *from = *(void * __user *)
+ ((void *)cfg + attr->config_offset);
+ void *to = (void *)params + attr->param_offset;
+ size_t size = attr->param_size;
- sz = __preview_get_ptrs(params, &to, cfg, &from,
- bit);
- if (to && from && sz) {
- if (copy_from_user(to, from, sz)) {
+ if (to && from && size) {
+ if (copy_from_user(to, from, size)) {
rval = -EFAULT;
break;
}
}
- params->features |= attr->feature_bit;
+ params->features |= bit;
} else {
- params->features &= ~attr->feature_bit;
+ params->features &= ~bit;
}
- prev->update |= attr->feature_bit;
+ update |= bit;
}
- prev->shadow_update = 0;
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, true);
+ preview_params_switch(prev);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+
return rval;
}
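/*
 * Editorial note (sketch, not from the patch): the two index expressions used
 * by this driver pick opposite copies on purpose. With the convention that a
 * set bit in params.active means set 0 is active for that feature:
 *
 *	shadow copy:  &prev->params.params[!!(active & bit)]
 *	active copy:  &prev->params.params[!(active & bit)]
 *
 * preview_config() above writes user data into the shadow copy, while
 * preview_setup_hw() below programs the hardware from the active copy.
 */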
/*
* preview_setup_hw - Setup preview registers and/or internal memory
* @prev: pointer to preview private structure
+ * @update: Bitmask of parameters to set up
+ * @active: Bitmask of parameters active in set 0
* Note: can be called from interrupt context
* Return none
*/
-static void preview_setup_hw(struct isp_prev_device *prev)
+static void preview_setup_hw(struct isp_prev_device *prev, u32 update,
+ u32 active)
{
- struct prev_params *params = &prev->params;
- struct preview_update *attr;
- int i, bit;
- void *param_ptr;
+ unsigned int i;
+ u32 features;
+
+ if (update == 0)
+ return;
+
+ features = (prev->params.params[0].features & active)
+ | (prev->params.params[1].features & ~active);
for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
- attr = &update_attrs[i];
+ const struct preview_update *attr = &update_attrs[i];
+ struct prev_params *params;
+ unsigned int bit = 1 << i;
+ void *param_ptr;
- if (!(prev->update & attr->feature_bit))
+ if (!(update & bit))
continue;
- bit = params->features & attr->feature_bit;
- if (bit) {
+
+ params = &prev->params.params[!(active & bit)];
+
+ if (params->features & bit) {
if (attr->config) {
- __preview_get_ptrs(params, &param_ptr, NULL,
- NULL, bit);
+ param_ptr = (void *)params + attr->param_offset;
attr->config(prev, param_ptr);
}
if (attr->enable)
attr->enable(prev, 1);
- } else
+ } else {
if (attr->enable)
attr->enable(prev, 0);
-
- prev->update &= ~attr->feature_bit;
+ }
}
}
@@ -1000,13 +1043,17 @@ preview_config_ycpos(struct isp_prev_device *prev,
static void preview_config_averager(struct isp_prev_device *prev, u8 average)
{
struct isp_device *isp = to_isp_device(prev);
+ struct prev_params *params;
int reg = 0;
- if (prev->params.cfa.format == OMAP3ISP_CFAFMT_BAYER)
+ params = (prev->params.active & OMAP3ISP_PREV_CFA)
+ ? &prev->params.params[0] : &prev->params.params[1];
+
+ if (params->cfa.format == OMAP3ISP_CFAFMT_BAYER)
reg = ISPPRV_AVE_EVENDIST_2 << ISPPRV_AVE_EVENDIST_SHIFT |
ISPPRV_AVE_ODDDIST_2 << ISPPRV_AVE_ODDDIST_SHIFT |
average;
- else if (prev->params.cfa.format == OMAP3ISP_CFAFMT_RGBFOVEON)
+ else if (params->cfa.format == OMAP3ISP_CFAFMT_RGBFOVEON)
reg = ISPPRV_AVE_EVENDIST_3 << ISPPRV_AVE_EVENDIST_SHIFT |
ISPPRV_AVE_ODDDIST_3 << ISPPRV_AVE_ODDDIST_SHIFT |
average;
@@ -1014,6 +1061,27 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
}
/*
+ * preview_config_input_format - Configure the input format
+ * @prev: The preview engine
+ * @format: Format on the preview engine sink pad
+ *
+ * Enable CFA interpolation for Bayer formats and disable it for greyscale
+ * formats.
+ */
+static void preview_config_input_format(struct isp_prev_device *prev,
+ const struct v4l2_mbus_framefmt *format)
+{
+ struct isp_device *isp = to_isp_device(prev);
+
+ if (format->code != V4L2_MBUS_FMT_Y10_1X10)
+ isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_CFAEN);
+ else
+ isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
+ ISPPRV_PCR_CFAEN);
+}
+
+/*
* preview_config_input_size - Configure the input frame size
*
* The preview engine crops several rows and columns internally depending on
@@ -1024,32 +1092,37 @@ static void preview_config_averager(struct isp_prev_device *prev, u8 average)
*
* See the explanation at the PREV_MARGIN_* definitions for more details.
*/
-static void preview_config_input_size(struct isp_prev_device *prev)
+static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
{
+ const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
struct isp_device *isp = to_isp_device(prev);
- struct prev_params *params = &prev->params;
unsigned int sph = prev->crop.left;
unsigned int eph = prev->crop.left + prev->crop.width - 1;
unsigned int slv = prev->crop.top;
unsigned int elv = prev->crop.top + prev->crop.height - 1;
+ u32 features;
- if (params->features & PREV_CFA) {
+ if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
elv += 2;
}
- if (params->features & (PREV_DEFECT_COR | PREV_NOISE_FILTER)) {
+
+ features = (prev->params.params[0].features & active)
+ | (prev->params.params[1].features & ~active);
+
+ if (features & (OMAP3ISP_PREV_DEFECT_COR | OMAP3ISP_PREV_NF)) {
sph -= 2;
eph += 2;
slv -= 2;
elv += 2;
}
- if (params->features & PREV_HORZ_MEDIAN_FILTER) {
+ if (features & OMAP3ISP_PREV_HRZ_MED) {
sph -= 2;
eph += 2;
}
- if (params->features & (PREV_CHROMA_SUPPRESS | PREV_LUMA_ENHANCE))
+ if (features & (OMAP3ISP_PREV_CHROMA_SUPP | OMAP3ISP_PREV_LUMAENH))
sph -= 2;
isp_reg_writel(isp, (sph << ISPPRV_HORZ_INFO_SPH_SHIFT) | eph,
@@ -1184,8 +1257,16 @@ int omap3isp_preview_busy(struct isp_prev_device *prev)
*/
void omap3isp_preview_restore_context(struct isp_device *isp)
{
- isp->isp_prev.update = PREV_FEATURES_END - 1;
- preview_setup_hw(&isp->isp_prev);
+ struct isp_prev_device *prev = &isp->isp_prev;
+ const u32 update = OMAP3ISP_PREV_FEATURES_END - 1;
+
+ prev->params.params[0].update = prev->params.active & update;
+ prev->params.params[1].update = ~prev->params.active & update;
+
+ preview_setup_hw(prev, update, prev->params.active);
+
+ prev->params.params[0].update = 0;
+ prev->params.params[1].update = 0;
}
/*
@@ -1244,12 +1325,21 @@ static void preview_print_status(struct isp_prev_device *prev)
/*
* preview_init_params - init image processing parameters.
* @prev: pointer to previewer private structure
- * return none
*/
static void preview_init_params(struct isp_prev_device *prev)
{
- struct prev_params *params = &prev->params;
- int i = 0;
+ struct prev_params *params;
+ unsigned int i;
+
+ spin_lock_init(&prev->params.lock);
+
+ prev->params.active = ~0;
+ prev->params.params[0].busy = 0;
+ prev->params.params[0].update = OMAP3ISP_PREV_FEATURES_END - 1;
+ prev->params.params[1].busy = 0;
+ prev->params.params[1].update = 0;
+
+ params = &prev->params.params[0];
/* Init values */
params->contrast = ISPPRV_CONTRAST_DEF * ISPPRV_CONTRAST_UNITS;
@@ -1277,22 +1367,22 @@ static void preview_init_params(struct isp_prev_device *prev)
params->wbal.coef1 = FLR_WBAL_COEF;
params->wbal.coef2 = FLR_WBAL_COEF;
params->wbal.coef3 = FLR_WBAL_COEF;
- params->blk_adj.red = FLR_BLKADJ_RED;
- params->blk_adj.green = FLR_BLKADJ_GREEN;
- params->blk_adj.blue = FLR_BLKADJ_BLUE;
+ params->blkadj.red = FLR_BLKADJ_RED;
+ params->blkadj.green = FLR_BLKADJ_GREEN;
+ params->blkadj.blue = FLR_BLKADJ_BLUE;
params->rgb2rgb = flr_rgb2rgb;
- params->rgb2ycbcr = flr_prev_csc;
+ params->csc = flr_prev_csc;
params->yclimit.minC = ISPPRV_YC_MIN;
params->yclimit.maxC = ISPPRV_YC_MAX;
params->yclimit.minY = ISPPRV_YC_MIN;
params->yclimit.maxY = ISPPRV_YC_MAX;
- params->features = PREV_CFA | PREV_DEFECT_COR | PREV_NOISE_FILTER
- | PREV_GAMMA | PREV_BLKADJ | PREV_YCLIMITS
- | PREV_RGB2RGB | PREV_COLOR_CONV | PREV_WB
- | PREV_BRIGHTNESS | PREV_CONTRAST;
-
- prev->update = PREV_FEATURES_END - 1;
+ params->features = OMAP3ISP_PREV_CFA | OMAP3ISP_PREV_DEFECT_COR
+ | OMAP3ISP_PREV_NF | OMAP3ISP_PREV_GAMMA
+ | OMAP3ISP_PREV_BLKADJ | OMAP3ISP_PREV_YC_LIMIT
+ | OMAP3ISP_PREV_RGB2RGB | OMAP3ISP_PREV_COLOR_CONV
+ | OMAP3ISP_PREV_WB | OMAP3ISP_PREV_BRIGHTNESS
+ | OMAP3ISP_PREV_CONTRAST;
}
/*
@@ -1321,8 +1411,17 @@ static void preview_configure(struct isp_prev_device *prev)
{
struct isp_device *isp = to_isp_device(prev);
struct v4l2_mbus_framefmt *format;
+ unsigned long flags;
+ u32 update;
+ u32 active;
- preview_setup_hw(prev);
+ spin_lock_irqsave(&prev->params.lock, flags);
+ /* Mark all active parameters we are going to touch as busy. */
+ update = preview_params_lock(prev, 0, false);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
+
+ preview_setup_hw(prev, update, active);
if (prev->output & PREVIEW_OUTPUT_MEMORY)
isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
@@ -1343,7 +1442,8 @@ static void preview_configure(struct isp_prev_device *prev)
preview_adjust_bandwidth(prev);
- preview_config_input_size(prev);
+ preview_config_input_format(prev, format);
+ preview_config_input_size(prev, active);
if (prev->input == PREVIEW_INPUT_CCDC)
preview_config_inlineoffset(prev, 0);
@@ -1360,6 +1460,10 @@ static void preview_configure(struct isp_prev_device *prev)
preview_config_averager(prev, 0);
preview_config_ycpos(prev, format->code);
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, false);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
}
/* -----------------------------------------------------------------------------
@@ -1448,25 +1552,30 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
void omap3isp_preview_isr(struct isp_prev_device *prev)
{
unsigned long flags;
+ u32 update;
+ u32 active;
if (omap3isp_module_sync_is_stopping(&prev->wait, &prev->stopping))
return;
- spin_lock_irqsave(&prev->lock, flags);
- if (prev->shadow_update)
- goto done;
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_switch(prev);
+ update = preview_params_lock(prev, 0, false);
+ active = prev->params.active;
+ spin_unlock_irqrestore(&prev->params.lock, flags);
- preview_setup_hw(prev);
- preview_config_input_size(prev);
-
-done:
- spin_unlock_irqrestore(&prev->lock, flags);
+ preview_setup_hw(prev, update, active);
+ preview_config_input_size(prev, active);
if (prev->input == PREVIEW_INPUT_MEMORY ||
prev->output & PREVIEW_OUTPUT_MEMORY)
preview_isr_buffer(prev);
else if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS)
preview_enable_oneshot(prev);
+
+ spin_lock_irqsave(&prev->params.lock, flags);
+ preview_params_unlock(prev, update, false);
+ spin_unlock_irqrestore(&prev->params.lock, flags);
}
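/*
 * Editorial summary (not part of the patch): the per-frame sequence above is
 * switch (promote any updated, non-busy shadow copies), lock (mark the
 * now-active copies busy so concurrent preview_config() calls land in the
 * shadows), program the hardware, then unlock. The spinlock is only held for
 * the bitmask bookkeeping, never while registers are being written.
 */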
/* -----------------------------------------------------------------------------
@@ -1552,7 +1661,6 @@ static int preview_set_stream(struct v4l2_subdev *sd, int enable)
struct isp_video *video_out = &prev->video_out;
struct isp_device *isp = to_isp_device(prev);
struct device *dev = to_device(prev);
- unsigned long flags;
if (prev->state == ISP_PIPELINE_STREAM_STOPPED) {
if (enable == ISP_PIPELINE_STREAM_STOPPED)
@@ -1589,11 +1697,9 @@ static int preview_set_stream(struct v4l2_subdev *sd, int enable)
if (omap3isp_module_sync_idle(&sd->entity, &prev->wait,
&prev->stopping))
dev_dbg(dev, "%s: stop timeout.\n", sd->name);
- spin_lock_irqsave(&prev->lock, flags);
omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
- spin_unlock_irqrestore(&prev->lock, flags);
isp_video_dmaqueue_flags_clr(video_out);
break;
}
@@ -1624,6 +1730,7 @@ __preview_get_crop(struct isp_prev_device *prev, struct v4l2_subdev_fh *fh,
/* previewer format descriptions */
static const unsigned int preview_input_fmts[] = {
+ V4L2_MBUS_FMT_Y10_1X10,
V4L2_MBUS_FMT_SGRBG10_1X10,
V4L2_MBUS_FMT_SRGGB10_1X10,
V4L2_MBUS_FMT_SBGGR10_1X10,
@@ -1822,55 +1929,89 @@ static int preview_enum_frame_size(struct v4l2_subdev *sd,
}
/*
- * preview_get_crop - Retrieve the crop rectangle on a pad
+ * preview_get_selection - Retrieve a selection rectangle on a pad
* @sd: ISP preview V4L2 subdevice
* @fh: V4L2 subdev file handle
- * @crop: crop rectangle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the sink pad.
*
* Return 0 on success or a negative error code otherwise.
*/
-static int preview_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int preview_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != PREV_PAD_SINK)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ format = __preview_get_format(prev, fh, PREV_PAD_SINK,
+ sel->which);
+ preview_try_crop(prev, format, &sel->r);
+ break;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ sel->r = *__preview_get_crop(prev, fh, sel->which);
+ break;
- /* Cropping is only supported on the sink pad. */
- if (crop->pad != PREV_PAD_SINK)
+ default:
return -EINVAL;
+ }
- crop->rect = *__preview_get_crop(prev, fh, crop->which);
return 0;
}
/*
- * preview_set_crop - Retrieve the crop rectangle on a pad
+ * preview_set_selection - Set a selection rectangle on a pad
* @sd: ISP preview V4L2 subdevice
* @fh: V4L2 subdev file handle
- * @crop: crop rectangle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the sink pad.
*
* Return 0 on success or a negative error code otherwise.
*/
-static int preview_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int preview_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- /* Cropping is only supported on the sink pad. */
- if (crop->pad != PREV_PAD_SINK)
+ if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ sel->pad != PREV_PAD_SINK)
return -EINVAL;
/* The crop rectangle can't be changed while streaming. */
if (prev->state != ISP_PIPELINE_STREAM_STOPPED)
return -EBUSY;
- format = __preview_get_format(prev, fh, PREV_PAD_SINK, crop->which);
- preview_try_crop(prev, format, &crop->rect);
- *__preview_get_crop(prev, fh, crop->which) = crop->rect;
+ /* Modifying the crop rectangle always changes the format on the source
+ * pad. If the KEEP_CONFIG flag is set, just return the current crop
+ * rectangle.
+ */
+ if (sel->flags & V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = *__preview_get_crop(prev, fh, sel->which);
+ return 0;
+ }
+
+ format = __preview_get_format(prev, fh, PREV_PAD_SINK, sel->which);
+ preview_try_crop(prev, format, &sel->r);
+ *__preview_get_crop(prev, fh, sel->which) = sel->r;
/* Update the source format. */
- format = __preview_get_format(prev, fh, PREV_PAD_SOURCE, crop->which);
- preview_try_format(prev, fh, PREV_PAD_SOURCE, format, crop->which);
+ format = __preview_get_format(prev, fh, PREV_PAD_SOURCE, sel->which);
+ preview_try_format(prev, fh, PREV_PAD_SOURCE, format, sel->which);
return 0;
}
@@ -1979,8 +2120,8 @@ static const struct v4l2_subdev_pad_ops preview_v4l2_pad_ops = {
.enum_frame_size = preview_enum_frame_size,
.get_fmt = preview_get_format,
.set_fmt = preview_set_format,
- .get_crop = preview_get_crop,
- .set_crop = preview_set_crop,
+ .get_selection = preview_get_selection,
+ .set_selection = preview_set_selection,
};
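/*
 * Hedged user-space sketch (not part of the patch): exercising the new
 * selection pad ops through the preview subdev node. The device path is an
 * assumption; on a real system the node must be located via the media
 * controller, and the sink pad index 0 corresponds to PREV_PAD_SINK.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/v4l2-subdev.h>

int main(void)
{
	struct v4l2_subdev_selection sel;
	int fd = open("/dev/v4l-subdev2", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	/* Query the crop bounds on the sink pad. */
	memset(&sel, 0, sizeof(sel));
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.pad = 0;
	sel.target = V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS;
	if (ioctl(fd, VIDIOC_SUBDEV_G_SELECTION, &sel) == 0)
		printf("bounds: %ux%u\n", sel.r.width, sel.r.height);

	/* Set the actual crop rectangle; the driver may adjust sel.r. */
	sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
	sel.r.left = 0;
	sel.r.top = 0;
	sel.r.width = 1024;
	sel.r.height = 768;
	ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel);
	return 0;
}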
/* subdev operations */
@@ -2076,6 +2217,7 @@ static int preview_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations preview_media_ops = {
.link_setup = preview_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
void omap3isp_preview_unregister_entities(struct isp_prev_device *prev)
@@ -2201,7 +2343,7 @@ error_video_in:
}
/*
- * isp_preview_init - Previewer initialization.
+ * omap3isp_preview_init - Previewer initialization.
* @dev : Pointer to ISP device
* return -ENOMEM or zero on success
*/
@@ -2209,8 +2351,8 @@ int omap3isp_preview_init(struct isp_device *isp)
{
struct isp_prev_device *prev = &isp->isp_prev;
- spin_lock_init(&prev->lock);
init_waitqueue_head(&prev->wait);
+
preview_init_params(prev);
return preview_init_entities(prev);
diff --git a/drivers/media/video/omap3isp/isppreview.h b/drivers/media/video/omap3isp/isppreview.h
index 09686607973c..6663ab64e4b1 100644
--- a/drivers/media/video/omap3isp/isppreview.h
+++ b/drivers/media/video/omap3isp/isppreview.h
@@ -45,29 +45,10 @@
#define ISPPRV_CONTRAST_HIGH 0xFF
#define ISPPRV_CONTRAST_UNITS 0x1
-/* Features list */
-#define PREV_LUMA_ENHANCE OMAP3ISP_PREV_LUMAENH
-#define PREV_INVERSE_ALAW OMAP3ISP_PREV_INVALAW
-#define PREV_HORZ_MEDIAN_FILTER OMAP3ISP_PREV_HRZ_MED
-#define PREV_CFA OMAP3ISP_PREV_CFA
-#define PREV_CHROMA_SUPPRESS OMAP3ISP_PREV_CHROMA_SUPP
-#define PREV_WB OMAP3ISP_PREV_WB
-#define PREV_BLKADJ OMAP3ISP_PREV_BLKADJ
-#define PREV_RGB2RGB OMAP3ISP_PREV_RGB2RGB
-#define PREV_COLOR_CONV OMAP3ISP_PREV_COLOR_CONV
-#define PREV_YCLIMITS OMAP3ISP_PREV_YC_LIMIT
-#define PREV_DEFECT_COR OMAP3ISP_PREV_DEFECT_COR
-#define PREV_GAMMA_BYPASS OMAP3ISP_PREV_GAMMABYPASS
-#define PREV_DARK_FRAME_CAPTURE OMAP3ISP_PREV_DRK_FRM_CAPTURE
-#define PREV_DARK_FRAME_SUBTRACT OMAP3ISP_PREV_DRK_FRM_SUBTRACT
-#define PREV_LENS_SHADING OMAP3ISP_PREV_LENS_SHADING
-#define PREV_NOISE_FILTER OMAP3ISP_PREV_NF
-#define PREV_GAMMA OMAP3ISP_PREV_GAMMA
-
-#define PREV_CONTRAST (1 << 17)
-#define PREV_BRIGHTNESS (1 << 18)
-#define PREV_AVERAGER (1 << 19)
-#define PREV_FEATURES_END (1 << 20)
+/* Additional features not listed in linux/omap3isp.h */
+#define OMAP3ISP_PREV_CONTRAST (1 << 17)
+#define OMAP3ISP_PREV_BRIGHTNESS (1 << 18)
+#define OMAP3ISP_PREV_FEATURES_END (1 << 19)
enum preview_input_entity {
PREVIEW_INPUT_NONE,
@@ -88,6 +69,8 @@ enum preview_ycpos_mode {
/*
* struct prev_params - Structure for all configuration
+ * @busy: Bitmask of busy parameters (being updated or used)
+ * @update: Bitmask of the parameters to be updated
* @features: Set of features enabled.
* @cfa: CFA coefficients.
* @csup: Chroma suppression coefficients.
@@ -96,15 +79,17 @@ enum preview_ycpos_mode {
* @dcor: Noise filter coefficients.
* @gamma: Gamma coefficients.
* @wbal: White Balance parameters.
- * @blk_adj: Black adjustment parameters.
+ * @blkadj: Black adjustment parameters.
* @rgb2rgb: RGB blending parameters.
- * @rgb2ycbcr: RGB to ycbcr parameters.
+ * @csc: Color space conversion (RGB to YCbCr) parameters.
* @hmed: Horizontal median filter.
* @yclimit: YC limits parameters.
* @contrast: Contrast.
* @brightness: Brightness.
*/
struct prev_params {
+ u32 busy;
+ u32 update;
u32 features;
struct omap3isp_prev_cfa cfa;
struct omap3isp_prev_csup csup;
@@ -113,35 +98,15 @@ struct prev_params {
struct omap3isp_prev_dcor dcor;
struct omap3isp_prev_gtables gamma;
struct omap3isp_prev_wbal wbal;
- struct omap3isp_prev_blkadj blk_adj;
+ struct omap3isp_prev_blkadj blkadj;
struct omap3isp_prev_rgbtorgb rgb2rgb;
- struct omap3isp_prev_csc rgb2ycbcr;
+ struct omap3isp_prev_csc csc;
struct omap3isp_prev_hmed hmed;
struct omap3isp_prev_yclimit yclimit;
u8 contrast;
u8 brightness;
};
-/*
- * struct isptables_update - Structure for Table Configuration.
- * @update: Specifies which tables should be updated.
- * @flag: Specifies which tables should be enabled.
- * @nf: Pointer to structure for Noise Filter
- * @lsc: Pointer to LSC gain table. (currently not used)
- * @gamma: Pointer to gamma correction tables.
- * @cfa: Pointer to color filter array configuration.
- * @wbal: Pointer to colour and digital gain configuration.
- */
-struct isptables_update {
- u32 update;
- u32 flag;
- struct omap3isp_prev_nf *nf;
- u32 *lsc;
- struct omap3isp_prev_gtables *gamma;
- struct omap3isp_prev_cfa *cfa;
- struct omap3isp_prev_wbal *wbal;
-};
-
/* Sink and source previewer pads */
#define PREV_PAD_SINK 0
#define PREV_PAD_SOURCE 1
@@ -157,12 +122,11 @@ struct isptables_update {
* @output: Bitmask of the active output
* @video_in: Input video entity
* @video_out: Output video entity
- * @params: Module configuration data
- * @shadow_update: If set, update the hardware configured in the next interrupt
+ * @params.params: Active and shadow parameter sets
+ * @params.active: Bitmask of parameters active in set 0
+ * @params.lock: Parameters lock, protects params.active and params.params
* @underrun: Whether the preview entity has queued buffers on the output
* @state: Current preview pipeline state
- * @lock: Shadow update lock
- * @update: Bitmask of the parameters to be updated
*
* This structure is used to store the OMAP ISP Preview module Information.
*/
@@ -179,13 +143,15 @@ struct isp_prev_device {
struct isp_video video_in;
struct isp_video video_out;
- struct prev_params params;
- unsigned int shadow_update:1;
+ struct {
+ struct prev_params params[2];
+ u32 active;
+ spinlock_t lock;
+ } params;
+
enum isp_pipeline_stream_state state;
wait_queue_head_t wait;
atomic_t stopping;
- spinlock_t lock;
- u32 update;
};
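/*
 * Editorial sketch (not part of the patch): helpers that make the set
 * selection explicit under the convention documented above (a set bit in
 * params.active means set 0 is active for that feature). The names are
 * hypothetical; the driver open-codes the index expressions instead.
 */
static inline struct prev_params *
prev_active_copy(struct isp_prev_device *prev, u32 bit)
{
	return &prev->params.params[!(prev->params.active & bit)];
}

static inline struct prev_params *
prev_shadow_copy(struct isp_prev_device *prev, u32 bit)
{
	return &prev->params.params[!!(prev->params.active & bit)];
}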
struct isp_device;
diff --git a/drivers/media/video/omap3isp/ispqueue.h b/drivers/media/video/omap3isp/ispqueue.h
index 92c5a12157d5..908dfd712e8e 100644
--- a/drivers/media/video/omap3isp/ispqueue.h
+++ b/drivers/media/video/omap3isp/ispqueue.h
@@ -90,7 +90,7 @@ struct isp_video_buffer {
void *vaddr;
/* For userspace buffers. */
- unsigned long vm_flags;
+ vm_flags_t vm_flags;
unsigned long offset;
unsigned int npages;
struct page **pages;
diff --git a/drivers/media/video/omap3isp/ispresizer.c b/drivers/media/video/omap3isp/ispresizer.c
index 6958a9e3dc22..14041c9c8643 100644
--- a/drivers/media/video/omap3isp/ispresizer.c
+++ b/drivers/media/video/omap3isp/ispresizer.c
@@ -1188,32 +1188,6 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
}
/*
- * resizer_g_crop - handle get crop subdev operation
- * @sd : pointer to v4l2 subdev structure
- * @pad : subdev pad
- * @crop : pointer to crop structure
- * @which : active or try format
- * return zero
- */
-static int resizer_g_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
-{
- struct isp_res_device *res = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *format;
- struct resizer_ratio ratio;
-
- /* Only sink pad has crop capability */
- if (crop->pad != RESZ_PAD_SINK)
- return -EINVAL;
-
- format = __resizer_get_format(res, fh, RESZ_PAD_SOURCE, crop->which);
- crop->rect = *__resizer_get_crop(res, fh, crop->which);
- resizer_calc_ratios(res, &crop->rect, format, &ratio);
-
- return 0;
-}
-
-/*
* resizer_try_crop - mangles crop parameters.
*/
static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
@@ -1223,7 +1197,7 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
const unsigned int spv = DEFAULT_PHASE;
const unsigned int sph = DEFAULT_PHASE;
- /* Crop rectangle is constrained to the output size so that zoom ratio
+ /* Crop rectangle is constrained by the output size so that zoom ratio
* cannot exceed +/-4.0.
*/
unsigned int min_width =
@@ -1248,51 +1222,115 @@ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink,
}
/*
- * resizer_s_crop - handle set crop subdev operation
- * @sd : pointer to v4l2 subdev structure
- * @pad : subdev pad
- * @crop : pointer to crop structure
- * @which : active or try format
- * return -EINVAL or zero when succeed
+ * resizer_get_selection - Retrieve a selection rectangle on a pad
+ * @sd: ISP resizer V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangles are the crop rectangles on the sink pad.
+ *
+ * Return 0 on success or a negative error code otherwise.
*/
-static int resizer_s_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int resizer_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct isp_res_device *res = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format_source;
+ struct v4l2_mbus_framefmt *format_sink;
+ struct resizer_ratio ratio;
+
+ if (sel->pad != RESZ_PAD_SINK)
+ return -EINVAL;
+
+ format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
+ sel->which);
+ format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
+ sel->which);
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = INT_MAX;
+ sel->r.height = INT_MAX;
+
+ resizer_try_crop(format_sink, format_source, &sel->r);
+ resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+ break;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ sel->r = *__resizer_get_crop(res, fh, sel->which);
+ resizer_calc_ratios(res, &sel->r, format_source, &ratio);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * resizer_set_selection - Set a selection rectangle on a pad
+ * @sd: ISP resizer V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ * @sel: Selection rectangle
+ *
+ * The only supported rectangle is the actual crop rectangle on the sink pad.
+ *
+ * FIXME: This function currently behaves as if the KEEP_CONFIG selection flag
+ * was always set.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int resizer_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct isp_res_device *res = v4l2_get_subdevdata(sd);
struct isp_device *isp = to_isp_device(res);
struct v4l2_mbus_framefmt *format_sink, *format_source;
struct resizer_ratio ratio;
- /* Only sink pad has crop capability */
- if (crop->pad != RESZ_PAD_SINK)
+ if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ sel->pad != RESZ_PAD_SINK)
return -EINVAL;
format_sink = __resizer_get_format(res, fh, RESZ_PAD_SINK,
- crop->which);
+ sel->which);
format_source = __resizer_get_format(res, fh, RESZ_PAD_SOURCE,
- crop->which);
+ sel->which);
dev_dbg(isp->dev, "%s: L=%d,T=%d,W=%d,H=%d,which=%d\n", __func__,
- crop->rect.left, crop->rect.top, crop->rect.width,
- crop->rect.height, crop->which);
+ sel->r.left, sel->r.top, sel->r.width, sel->r.height,
+ sel->which);
dev_dbg(isp->dev, "%s: input=%dx%d, output=%dx%d\n", __func__,
format_sink->width, format_sink->height,
format_source->width, format_source->height);
- resizer_try_crop(format_sink, format_source, &crop->rect);
- *__resizer_get_crop(res, fh, crop->which) = crop->rect;
- resizer_calc_ratios(res, &crop->rect, format_source, &ratio);
+ /* Clamp the crop rectangle to the bounds, and then mangle it further to
+ * fulfill the TRM equations. Store the clamped but otherwise unmangled
+ * rectangle to avoid cropping the input multiple times: when an
+ * application sets the output format, the current crop rectangle is
+ * mangled during crop rectangle computation, which would lead to a new,
+ * smaller input crop rectangle every time the output size is set if we
+ * stored the mangled rectangle.
+ */
+ resizer_try_crop(format_sink, format_source, &sel->r);
+ *__resizer_get_crop(res, fh, sel->which) = sel->r;
+ resizer_calc_ratios(res, &sel->r, format_source, &ratio);
- if (crop->which == V4L2_SUBDEV_FORMAT_TRY)
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
return 0;
res->ratio = ratio;
- res->crop.active = crop->rect;
+ res->crop.active = sel->r;
/*
- * s_crop can be called while streaming is on. In this case
- * the crop values will be set in the next IRQ.
+ * set_selection can be called while streaming is on. In this case the
+ * crop values will be set in the next IRQ.
*/
if (res->state != ISP_PIPELINE_STREAM_STOPPED)
res->applycrop = 1;
@@ -1530,8 +1568,8 @@ static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
.enum_frame_size = resizer_enum_frame_size,
.get_fmt = resizer_get_format,
.set_fmt = resizer_set_format,
- .get_crop = resizer_g_crop,
- .set_crop = resizer_s_crop,
+ .get_selection = resizer_get_selection,
+ .set_selection = resizer_set_selection,
};
/* subdev operations */
@@ -1603,6 +1641,7 @@ static int resizer_link_setup(struct media_entity *entity,
/* media operations */
static const struct media_entity_operations resizer_media_ops = {
.link_setup = resizer_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
};
void omap3isp_resizer_unregister_entities(struct isp_res_device *res)
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 11871ecc6d25..b8640be692f1 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -1032,7 +1032,7 @@ int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
if (sub->type != stat->event_type)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub, STAT_NEVENTS);
+ return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
}
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index b02070057724..b37379d39cdd 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -46,6 +46,10 @@
* Helper functions
*/
+/*
+ * NOTE: When adding new media bus codes, always remember to add
+ * corresponding in-memory formats to the table below!!!
+ */
static struct isp_format_info formats[] = {
{ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
@@ -68,9 +72,18 @@ static struct isp_format_info formats[] = {
{ V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
V4L2_PIX_FMT_SRGGB8, 8, },
+ { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SBGGR10_1X10, 0,
+ V4L2_PIX_FMT_SBGGR10DPCM8, 8, },
+ { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SGBRG10_1X10, 0,
+ V4L2_PIX_FMT_SGBRG10DPCM8, 8, },
{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
V4L2_MBUS_FMT_SGRBG10_1X10, 0,
V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
+ { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
+ V4L2_MBUS_FMT_SRGGB10_1X10, 0,
+ V4L2_PIX_FMT_SRGGB10DPCM8, 8, },
{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
V4L2_PIX_FMT_SBGGR10, 10, },
@@ -117,37 +130,6 @@ omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
}
/*
- * Decide whether desired output pixel code can be obtained with
- * the lane shifter by shifting the input pixel code.
- * @in: input pixelcode to shifter
- * @out: output pixelcode from shifter
- * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
- *
- * return true if the combination is possible
- * return false otherwise
- */
-static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
- enum v4l2_mbus_pixelcode out,
- unsigned int additional_shift)
-{
- const struct isp_format_info *in_info, *out_info;
-
- if (in == out)
- return true;
-
- in_info = omap3isp_video_format_info(in);
- out_info = omap3isp_video_format_info(out);
-
- if ((in_info->flavor == 0) || (out_info->flavor == 0))
- return false;
-
- if (in_info->flavor != out_info->flavor)
- return false;
-
- return in_info->bpp - out_info->bpp + additional_shift <= 6;
-}
-
-/*
* isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
* @video: ISP video instance
* @mbus: v4l2_mbus_framefmt format (input)
@@ -242,8 +224,8 @@ isp_video_remote_subdev(struct isp_video *video, u32 *pad)
}
/* Return a pointer to the ISP video instance at the far end of the pipeline. */
-static struct isp_video *
-isp_video_far_end(struct isp_video *video)
+static int isp_video_get_graph_data(struct isp_video *video,
+ struct isp_pipeline *pipe)
{
struct media_entity_graph graph;
struct media_entity *entity = &video->video.entity;
@@ -254,21 +236,38 @@ isp_video_far_end(struct isp_video *video)
media_entity_graph_walk_start(&graph, entity);
while ((entity = media_entity_graph_walk_next(&graph))) {
+ struct isp_video *__video;
+
+ pipe->entities |= 1 << entity->id;
+
+ if (far_end != NULL)
+ continue;
+
if (entity == &video->video.entity)
continue;
if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
continue;
- far_end = to_isp_video(media_entity_to_video_device(entity));
- if (far_end->type != video->type)
- break;
-
- far_end = NULL;
+ __video = to_isp_video(media_entity_to_video_device(entity));
+ if (__video->type != video->type)
+ far_end = __video;
}
mutex_unlock(&mdev->graph_mutex);
- return far_end;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ pipe->input = far_end;
+ pipe->output = video;
+ } else {
+ if (far_end == NULL)
+ return -EPIPE;
+
+ pipe->input = video;
+ pipe->output = far_end;
+ }
+
+ return 0;
}
/*
@@ -285,52 +284,24 @@ isp_video_far_end(struct isp_video *video)
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
struct isp_device *isp = pipe->output->isp;
- struct v4l2_subdev_format fmt_source;
- struct v4l2_subdev_format fmt_sink;
struct media_pad *pad;
struct v4l2_subdev *subdev;
- int ret;
-
- pipe->max_rate = pipe->l3_ick;
subdev = isp_video_remote_subdev(pipe->output, NULL);
if (subdev == NULL)
return -EPIPE;
while (1) {
- unsigned int shifter_link;
/* Retrieve the sink format */
pad = &subdev->entity.pads[0];
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
- fmt_sink.pad = pad->index;
- fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return -EPIPE;
-
/* Update the maximum frame rate */
if (subdev == &isp->isp_res.subdev)
omap3isp_resizer_max_rate(&isp->isp_res,
&pipe->max_rate);
- /* Check ccdc maximum data rate when data comes from sensor
- * TODO: Include ccdc rate in pipe->max_rate and compare the
- * total pipe rate with the input data rate from sensor.
- */
- if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
- unsigned int rate = UINT_MAX;
-
- omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
- if (isp->isp_ccdc.vpcfg.pixelclk > rate)
- return -ENOSPC;
- }
-
- /* If sink pad is on CCDC, the link has the lane shifter
- * in the middle of it. */
- shifter_link = subdev == &isp->isp_ccdc.subdev;
-
/* Retrieve the source format. Return an error if no source
* entity can be found, and stop checking the pipeline if the
* source entity isn't a subdev.
@@ -343,32 +314,6 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
break;
subdev = media_entity_to_v4l2_subdev(pad->entity);
-
- fmt_source.pad = pad->index;
- fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return -EPIPE;
-
- /* Check if the two ends match */
- if (fmt_source.format.width != fmt_sink.format.width ||
- fmt_source.format.height != fmt_sink.format.height)
- return -EPIPE;
-
- if (shifter_link) {
- unsigned int parallel_shift = 0;
- if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
- struct isp_parallel_platform_data *pdata =
- &((struct isp_v4l2_subdevs_group *)
- subdev->host_priv)->bus.parallel;
- parallel_shift = pdata->data_lane_shift * 2;
- }
- if (!isp_video_is_shiftable(fmt_source.format.code,
- fmt_sink.format.code,
- parallel_shift))
- return -EPIPE;
- } else if (fmt_source.format.code != fmt_sink.format.code)
- return -EPIPE;
}
return 0;
@@ -923,6 +868,92 @@ isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
file->f_flags & O_NONBLOCK);
}
+static int isp_video_check_external_subdevs(struct isp_video *video,
+ struct isp_pipeline *pipe)
+{
+ struct isp_device *isp = video->isp;
+ struct media_entity *ents[] = {
+ &isp->isp_csi2a.subdev.entity,
+ &isp->isp_csi2c.subdev.entity,
+ &isp->isp_ccp2.subdev.entity,
+ &isp->isp_ccdc.subdev.entity
+ };
+ struct media_pad *source_pad;
+ struct media_entity *source = NULL;
+ struct media_entity *sink;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ents); i++) {
+ /* Is the entity part of the pipeline? */
+ if (!(pipe->entities & (1 << ents[i]->id)))
+ continue;
+
+ /* ISP entities always have sink pad == 0. Find the source. */
+ source_pad = media_entity_remote_source(&ents[i]->pads[0]);
+ if (source_pad == NULL)
+ continue;
+
+ source = source_pad->entity;
+ sink = ents[i];
+ break;
+ }
+
+ if (!source) {
+ dev_warn(isp->dev, "can't find source, failing now\n");
+ return ret;
+ }
+
+ if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return 0;
+
+ pipe->external = media_entity_to_v4l2_subdev(source);
+
+ fmt.pad = source_pad->index;
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
+ pad, get_fmt, NULL, &fmt);
+ if (unlikely(ret < 0)) {
+ dev_warn(isp->dev, "get_fmt returned null!\n");
+ return ret;
+ }
+
+ pipe->external_bpp = omap3isp_video_format_info(fmt.format.code)->bpp;
+
+ memset(&ctrls, 0, sizeof(ctrls));
+ memset(&ctrl, 0, sizeof(ctrl));
+
+ ctrl.id = V4L2_CID_PIXEL_RATE;
+
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+
+ ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
+ if (ret < 0) {
+ dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
+ pipe->external->name);
+ return ret;
+ }
+
+ pipe->external_rate = ctrl.value64;
+
+ if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
+ unsigned int rate = UINT_MAX;
+ /*
+ * Check that maximum allowed CCDC pixel rate isn't
+ * exceeded by the pixel rate.
+ */
+ omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
+ if (pipe->external_rate > rate)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
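/*
 * Hedged sketch (not part of the patch): the v4l2_g_ext_ctrls() query above
 * only succeeds if the external sensor subdev registers a V4L2_CID_PIXEL_RATE
 * control. A sensor driver would typically do something like the following in
 * its probe path (requires <media/v4l2-ctrls.h>); the helper name is an
 * assumption, and older kernels take 32-bit limits in v4l2_ctrl_new_std().
 */
static int __maybe_unused example_register_pixel_rate(struct v4l2_ctrl_handler *hdl,
						       const struct v4l2_ctrl_ops *ops,
						       s64 pixel_rate)
{
	v4l2_ctrl_handler_init(hdl, 1);
	/* Read-only in practice: min == max == default == the sensor rate. */
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
			  pixel_rate, pixel_rate, 1, pixel_rate);
	return hdl->error;
}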
+
/*
* Stream management
*
@@ -961,7 +992,6 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
struct isp_video *video = video_drvdata(file);
enum isp_pipeline_state state;
struct isp_pipeline *pipe;
- struct isp_video *far_end;
unsigned long flags;
int ret;
@@ -980,46 +1010,45 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
*/
pipe = video->video.entity.pipe
? to_isp_pipeline(&video->video.entity) : &video->pipe;
- media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+
+ pipe->entities = 0;
+
+ if (video->isp->pdata->set_constraints)
+ video->isp->pdata->set_constraints(video->isp, true);
+ pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
+ pipe->max_rate = pipe->l3_ick;
+
+ ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto err_pipeline_start;
/* Verify that the currently configured format matches the output of
* the connected subdev.
*/
ret = isp_video_check_format(video, vfh);
if (ret < 0)
- goto error;
+ goto err_check_format;
video->bpl_padding = ret;
video->bpl_value = vfh->format.fmt.pix.bytesperline;
- /* Find the ISP video node connected at the far end of the pipeline and
- * update the pipeline.
- */
- far_end = isp_video_far_end(video);
+ ret = isp_video_get_graph_data(video, pipe);
+ if (ret < 0)
+ goto err_check_format;
- if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
- pipe->input = far_end;
- pipe->output = video;
- } else {
- if (far_end == NULL) {
- ret = -EPIPE;
- goto error;
- }
-
+ else
state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
- pipe->input = video;
- pipe->output = far_end;
- }
- if (video->isp->pdata->set_constraints)
- video->isp->pdata->set_constraints(video->isp, true);
- pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
+ ret = isp_video_check_external_subdevs(video, pipe);
+ if (ret < 0)
+ goto err_check_format;
/* Validate the pipeline and update its state. */
ret = isp_video_validate_pipeline(pipe);
if (ret < 0)
- goto error;
+ goto err_check_format;
pipe->error = false;
@@ -1041,7 +1070,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
ret = omap3isp_video_queue_streamon(&vfh->queue);
if (ret < 0)
- goto error;
+ goto err_check_format;
/* In sensor-to-memory mode, the stream can be started synchronously
* to the stream on command. In memory-to-memory mode, it will be
@@ -1051,32 +1080,34 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
ret = omap3isp_pipeline_set_stream(pipe,
ISP_PIPELINE_STREAM_CONTINUOUS);
if (ret < 0)
- goto error;
+ goto err_set_stream;
spin_lock_irqsave(&video->queue->irqlock, flags);
if (list_empty(&video->dmaqueue))
video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
spin_unlock_irqrestore(&video->queue->irqlock, flags);
}
-error:
- if (ret < 0) {
- omap3isp_video_queue_streamoff(&vfh->queue);
- if (video->isp->pdata->set_constraints)
- video->isp->pdata->set_constraints(video->isp, false);
- media_entity_pipeline_stop(&video->video.entity);
- /* The DMA queue must be emptied here, otherwise CCDC interrupts
- * that will get triggered the next time the CCDC is powered up
- * will try to access buffers that might have been freed but
- * still present in the DMA queue. This can easily get triggered
- * if the above omap3isp_pipeline_set_stream() call fails on a
- * system with a free-running sensor.
- */
- INIT_LIST_HEAD(&video->dmaqueue);
- video->queue = NULL;
- }
+ video->streaming = 1;
+
+ mutex_unlock(&video->stream_lock);
+ return 0;
- if (!ret)
- video->streaming = 1;
+err_set_stream:
+ omap3isp_video_queue_streamoff(&vfh->queue);
+err_check_format:
+ media_entity_pipeline_stop(&video->video.entity);
+err_pipeline_start:
+ if (video->isp->pdata->set_constraints)
+ video->isp->pdata->set_constraints(video->isp, false);
+ /* The DMA queue must be emptied here, otherwise CCDC interrupts that
+ * will get triggered the next time the CCDC is powered up will try to
+ * access buffers that might have been freed but still present in the
+ * DMA queue. This can easily get triggered if the above
+ * omap3isp_pipeline_set_stream() call fails on a system with a
+ * free-running sensor.
+ */
+ INIT_LIST_HEAD(&video->dmaqueue);
+ video->queue = NULL;
mutex_unlock(&video->stream_lock);
return ret;
diff --git a/drivers/media/video/omap3isp/ispvideo.h b/drivers/media/video/omap3isp/ispvideo.h
index d91bdb919be0..5acc909500ec 100644
--- a/drivers/media/video/omap3isp/ispvideo.h
+++ b/drivers/media/video/omap3isp/ispvideo.h
@@ -88,6 +88,7 @@ enum isp_pipeline_state {
/*
* struct isp_pipeline - An ISP hardware pipeline
* @error: A hardware error occurred during capture
+ * @entities: Bitmask of entities in the pipeline (indexed by entity ID)
*/
struct isp_pipeline {
struct media_pipeline pipe;
@@ -96,12 +97,16 @@ struct isp_pipeline {
enum isp_pipeline_stream_state stream_state;
struct isp_video *input;
struct isp_video *output;
+ u32 entities;
unsigned long l3_ick;
unsigned int max_rate;
atomic_t frame_number;
bool do_propagation; /* of frame number */
bool error;
struct v4l2_fract max_timeperframe;
+ struct v4l2_subdev *external;
+ unsigned int external_rate;
+ unsigned int external_bpp;
};
#define to_isp_pipeline(__e) \
diff --git a/drivers/media/video/ov5642.c b/drivers/media/video/ov5642.c
index 80e07794ac8e..0bc93313d37a 100644
--- a/drivers/media/video/ov5642.c
+++ b/drivers/media/video/ov5642.c
@@ -1025,8 +1025,6 @@ static int ov5642_probe(struct i2c_client *client,
priv->crop_rect.height = OV5642_DEFAULT_HEIGHT;
priv->crop_rect.left = (OV5642_MAX_WIDTH - OV5642_DEFAULT_WIDTH) / 2;
priv->crop_rect.top = (OV5642_MAX_HEIGHT - OV5642_DEFAULT_HEIGHT) / 2;
- priv->crop_rect.width = OV5642_DEFAULT_WIDTH;
- priv->crop_rect.height = OV5642_DEFAULT_HEIGHT;
priv->total_width = OV5642_DEFAULT_WIDTH + BLANKING_EXTRA_WIDTH;
priv->total_height = BLANKING_MIN_HEIGHT;
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index e753b5e4d2ce..b4c679b3fb0f 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -26,19 +26,24 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
+#include <linux/isa.h>
#include <asm/io.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.0.4");
+MODULE_VERSION("0.0.5");
#define MOTOROLA 1
#define PHILIPS2 2 /* SAA7191 */
@@ -55,11 +61,11 @@ struct i2c_info {
struct pms {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler hdl;
int height;
int width;
int depth;
int input;
- s32 brightness, saturation, hue, contrast;
struct mutex lock;
int i2c_count;
struct i2c_info i2cinfo[64];
@@ -72,8 +78,6 @@ struct pms {
void __iomem *mem;
};
-static struct pms pms_card;
-
/*
* I/O ports and Shared Memory
*/
@@ -676,8 +680,10 @@ static int pms_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, dev->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Mediavision PMS", sizeof(vcap->card));
- strlcpy(vcap->bus_info, "ISA", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ snprintf(vcap->bus_info, sizeof(vcap->bus_info),
+ "ISA:%s", dev->v4l2_dev.name);
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -716,11 +722,9 @@ static int pms_s_input(struct file *file, void *fh, unsigned int inp)
if (inp > 3)
return -EINVAL;
- mutex_lock(&dev->lock);
dev->input = inp;
pms_videosource(dev, inp & 1);
pms_vcrinput(dev, inp >> 1);
- mutex_unlock(&dev->lock);
return 0;
}
@@ -738,7 +742,6 @@ static int pms_s_std(struct file *file, void *fh, v4l2_std_id *std)
int ret = 0;
dev->std = *std;
- mutex_lock(&dev->lock);
if (dev->std & V4L2_STD_NTSC) {
pms_framerate(dev, 30);
pms_secamcross(dev, 0);
@@ -762,81 +765,31 @@ static int pms_s_std(struct file *file, void *fh, v4l2_std_id *std)
pms_format(dev, 0);
break;
}*/
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static int pms_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 139);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 70);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 64);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 0);
- }
- return -EINVAL;
-}
-
-static int pms_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct pms *dev = video_drvdata(file);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = dev->brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = dev->contrast;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = dev->saturation;
- break;
- case V4L2_CID_HUE:
- ctrl->value = dev->hue;
- break;
- default:
- ret = -EINVAL;
- break;
- }
return ret;
}
-static int pms_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int pms_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct pms *dev = video_drvdata(file);
+ struct pms *dev = container_of(ctrl->handler, struct pms, hdl);
int ret = 0;
- mutex_lock(&dev->lock);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- dev->brightness = ctrl->value;
- pms_brightness(dev, dev->brightness);
+ pms_brightness(dev, ctrl->val);
break;
case V4L2_CID_CONTRAST:
- dev->contrast = ctrl->value;
- pms_contrast(dev, dev->contrast);
+ pms_contrast(dev, ctrl->val);
break;
case V4L2_CID_SATURATION:
- dev->saturation = ctrl->value;
- pms_saturation(dev, dev->saturation);
+ pms_saturation(dev, ctrl->val);
break;
case V4L2_CID_HUE:
- dev->hue = ctrl->value;
- pms_hue(dev, dev->hue);
+ pms_hue(dev, ctrl->val);
break;
default:
ret = -EINVAL;
break;
}
- mutex_unlock(&dev->lock);
return ret;
}
@@ -884,13 +837,11 @@ static int pms_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fm
if (ret)
return ret;
- mutex_lock(&dev->lock);
dev->width = pix->width;
dev->height = pix->height;
dev->depth = (pix->pixelformat == V4L2_PIX_FMT_RGB555) ? 15 : 16;
pms_resolution(dev, dev->width, dev->height);
/* Ok we figured out what to use from our wide choice */
- mutex_unlock(&dev->lock);
return 0;
}
@@ -901,7 +852,7 @@ static int pms_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc
"RGB 5:5:5", V4L2_PIX_FMT_RGB555,
{ 0, 0, 0, 0 }
},
- { 0, 0, 0,
+ { 1, 0, 0,
"RGB 5:6:5", V4L2_PIX_FMT_RGB565,
{ 0, 0, 0, 0 }
},
@@ -922,32 +873,43 @@ static ssize_t pms_read(struct file *file, char __user *buf,
struct pms *dev = video_drvdata(file);
int len;
- mutex_lock(&dev->lock);
len = pms_capture(dev, buf, (dev->depth == 15), count);
- mutex_unlock(&dev->lock);
return len;
}
+static unsigned int pms_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+ unsigned int res = POLLIN | POLLRDNORM;
+
+ if (v4l2_event_pending(fh))
+ res |= POLLPRI;
+ poll_wait(file, &fh->wait, wait);
+ return res;
+}
+
static const struct v4l2_file_operations pms_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = pms_poll,
.unlocked_ioctl = video_ioctl2,
.read = pms_read,
};
static const struct v4l2_ioctl_ops pms_ioctl_ops = {
- .vidioc_querycap = pms_querycap,
- .vidioc_g_input = pms_g_input,
- .vidioc_s_input = pms_s_input,
- .vidioc_enum_input = pms_enum_input,
- .vidioc_g_std = pms_g_std,
- .vidioc_s_std = pms_s_std,
- .vidioc_queryctrl = pms_queryctrl,
- .vidioc_g_ctrl = pms_g_ctrl,
- .vidioc_s_ctrl = pms_s_ctrl,
- .vidioc_enum_fmt_vid_cap = pms_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = pms_g_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = pms_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = pms_try_fmt_vid_cap,
+ .vidioc_querycap = pms_querycap,
+ .vidioc_g_input = pms_g_input,
+ .vidioc_s_input = pms_s_input,
+ .vidioc_enum_input = pms_enum_input,
+ .vidioc_g_std = pms_g_std,
+ .vidioc_s_std = pms_s_std,
+ .vidioc_enum_fmt_vid_cap = pms_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = pms_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = pms_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = pms_try_fmt_vid_cap,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*
@@ -956,7 +918,6 @@ static const struct v4l2_ioctl_ops pms_ioctl_ops = {
static int init_mediavision(struct pms *dev)
{
- int id;
int idec, decst;
int i;
static const unsigned char i2c_defs[] = {
@@ -988,7 +949,6 @@ static int init_mediavision(struct pms *dev)
outb(dev->io >> 4, 0x9a01); /* Set IO port */
- id = mvv_read(dev, 3);
decst = pms_i2c_stat(dev, 0x43);
if (decst != -1)
@@ -1068,76 +1028,125 @@ static int enable;
module_param(enable, int, 0);
#endif
-static int __init pms_init(void)
+static const struct v4l2_ctrl_ops pms_ctrl_ops = {
+ .s_ctrl = pms_s_ctrl,
+};
+
+static int pms_probe(struct device *pdev, unsigned int card)
{
- struct pms *dev = &pms_card;
- struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
+ struct pms *dev;
+ struct v4l2_device *v4l2_dev;
+ struct v4l2_ctrl_handler *hdl;
int res;
- strlcpy(v4l2_dev->name, "pms", sizeof(v4l2_dev->name));
-
- v4l2_info(v4l2_dev, "Mediavision Pro Movie Studio driver 0.03\n");
-
#ifndef MODULE
if (!enable) {
- v4l2_err(v4l2_dev,
- "PMS: not enabled, use pms.enable=1 to probe\n");
+ pr_err("PMS: not enabled, use pms.enable=1 to probe\n");
return -ENODEV;
}
#endif
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
dev->decoder = PHILIPS2;
dev->io = io_port;
dev->data = io_port + 1;
+ v4l2_dev = &dev->v4l2_dev;
+ hdl = &dev->hdl;
- if (init_mediavision(dev)) {
+ res = v4l2_device_register(pdev, v4l2_dev);
+ if (res < 0) {
+ v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ goto free_dev;
+ }
+ v4l2_info(v4l2_dev, "Mediavision Pro Movie Studio driver 0.05\n");
+
+ res = init_mediavision(dev);
+ if (res) {
v4l2_err(v4l2_dev, "Board not found.\n");
- return -ENODEV;
+ goto free_io;
}
- res = v4l2_device_register(NULL, v4l2_dev);
- if (res < 0) {
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- return res;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 139);
+ v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 70);
+ v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 64);
+ v4l2_ctrl_new_std(hdl, &pms_ctrl_ops,
+ V4L2_CID_HUE, 0, 255, 1, 0);
+ if (hdl->error) {
+ res = hdl->error;
+ goto free_hdl;
}
+ mutex_init(&dev->lock);
strlcpy(dev->vdev.name, v4l2_dev->name, sizeof(dev->vdev.name));
dev->vdev.v4l2_dev = v4l2_dev;
+ dev->vdev.ctrl_handler = hdl;
dev->vdev.fops = &pms_fops;
dev->vdev.ioctl_ops = &pms_ioctl_ops;
dev->vdev.release = video_device_release_empty;
+ dev->vdev.lock = &dev->lock;
+ dev->vdev.tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
+ set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
video_set_drvdata(&dev->vdev, dev);
- mutex_init(&dev->lock);
dev->std = V4L2_STD_NTSC_M;
dev->height = 240;
dev->width = 320;
- dev->depth = 15;
- dev->brightness = 139;
- dev->contrast = 70;
- dev->hue = 0;
- dev->saturation = 64;
+ dev->depth = 16;
pms_swsense(dev, 75);
pms_resolution(dev, 320, 240);
pms_videosource(dev, 0);
pms_vcrinput(dev, 0);
- if (video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
- v4l2_device_unregister(&dev->v4l2_dev);
- release_region(dev->io, 3);
- release_region(0x9a01, 1);
- iounmap(dev->mem);
- return -EINVAL;
- }
- return 0;
+ v4l2_ctrl_handler_setup(hdl);
+ res = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, video_nr);
+ if (res >= 0)
+ return 0;
+
+free_hdl:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
+free_io:
+ release_region(dev->io, 3);
+ release_region(0x9a01, 1);
+ iounmap(dev->mem);
+free_dev:
+ kfree(dev);
+ return res;
}
-static void __exit pms_exit(void)
+static int pms_remove(struct device *pdev, unsigned int card)
{
- struct pms *dev = &pms_card;
+ struct pms *dev = dev_get_drvdata(pdev);
video_unregister_device(&dev->vdev);
+ v4l2_ctrl_handler_free(&dev->hdl);
release_region(dev->io, 3);
release_region(0x9a01, 1);
iounmap(dev->mem);
+ return 0;
+}
+
+static struct isa_driver pms_driver = {
+ .probe = pms_probe,
+ .remove = pms_remove,
+ .driver = {
+ .name = "pms",
+ },
+};
+
+static int __init pms_init(void)
+{
+ return isa_register_driver(&pms_driver, 1);
+}
+
+static void __exit pms_exit(void)
+{
+ isa_unregister_driver(&pms_driver);
}
module_init(pms_init);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index 305e6aaa844a..036952f2a3cb 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -317,18 +317,16 @@ struct pvr2_hdw {
v4l2_std_id std_mask_eeprom; // Hardware supported selections
v4l2_std_id std_mask_avail; // Which standards we may select from
v4l2_std_id std_mask_cur; // Currently selected standard(s)
- unsigned int std_enum_cnt; // # of enumerated standards
int std_enum_cur; // selected standard enumeration value
int std_dirty; // True if std_mask_cur has changed
struct pvr2_ctl_info std_info_enum;
struct pvr2_ctl_info std_info_avail;
struct pvr2_ctl_info std_info_cur;
- struct v4l2_standard *std_defs;
- const char **std_enum_names;
+ struct pvr2_ctl_info std_info_detect;
// Generated string names, one per actual V4L2 standard
const char *std_mask_ptrs[32];
- char std_mask_names[32][10];
+ char std_mask_names[32][16];
int unit_number; /* ID for driver instance */
unsigned long serial_number; /* ID for hardware itself */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index ebc2c7e39233..fb828ba1dbbe 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -334,8 +334,6 @@ static void pvr2_hdw_state_log_state(struct pvr2_hdw *);
static int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl);
static int pvr2_hdw_commit_setup(struct pvr2_hdw *hdw);
static int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw);
-static void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw);
-static void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw);
static void pvr2_hdw_quiescent_timeout(unsigned long);
static void pvr2_hdw_decoder_stabilization_timeout(unsigned long);
static void pvr2_hdw_encoder_wait_timeout(unsigned long);
@@ -346,7 +344,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
void *write_data,unsigned int write_len,
void *read_data,unsigned int read_len);
static int pvr2_hdw_check_cropcap(struct pvr2_hdw *hdw);
-
+static v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw);
static void trace_stbit(const char *name,int val)
{
@@ -840,6 +838,12 @@ static int ctrl_hsm_get(struct pvr2_ctrl *cptr,int *vp)
return 0;
}
+static int ctrl_stddetect_get(struct pvr2_ctrl *cptr, int *vp)
+{
+ *vp = pvr2_hdw_get_detected_std(cptr->hdw);
+ return 0;
+}
+
static int ctrl_stdavail_get(struct pvr2_ctrl *cptr,int *vp)
{
*vp = cptr->hdw->std_mask_avail;
@@ -854,8 +858,7 @@ static int ctrl_stdavail_set(struct pvr2_ctrl *cptr,int m,int v)
ns = (ns & ~m) | (v & m);
if (ns == hdw->std_mask_avail) return 0;
hdw->std_mask_avail = ns;
- pvr2_hdw_internal_set_std_avail(hdw);
- pvr2_hdw_internal_find_stdenum(hdw);
+ hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
return 0;
}
@@ -895,7 +898,6 @@ static int ctrl_stdcur_set(struct pvr2_ctrl *cptr,int m,int v)
if (ns == hdw->std_mask_cur) return 0;
hdw->std_mask_cur = ns;
hdw->std_dirty = !0;
- pvr2_hdw_internal_find_stdenum(hdw);
return 0;
}
@@ -941,40 +943,6 @@ static int ctrl_audio_modes_present_get(struct pvr2_ctrl *cptr,int *vp)
}
-static int ctrl_stdenumcur_set(struct pvr2_ctrl *cptr,int m,int v)
-{
- struct pvr2_hdw *hdw = cptr->hdw;
- if (v < 0) return -EINVAL;
- if (v > hdw->std_enum_cnt) return -EINVAL;
- hdw->std_enum_cur = v;
- if (!v) return 0;
- v--;
- if (hdw->std_mask_cur == hdw->std_defs[v].id) return 0;
- hdw->std_mask_cur = hdw->std_defs[v].id;
- hdw->std_dirty = !0;
- return 0;
-}
-
-
-static int ctrl_stdenumcur_get(struct pvr2_ctrl *cptr,int *vp)
-{
- *vp = cptr->hdw->std_enum_cur;
- return 0;
-}
-
-
-static int ctrl_stdenumcur_is_dirty(struct pvr2_ctrl *cptr)
-{
- return cptr->hdw->std_dirty != 0;
-}
-
-
-static void ctrl_stdenumcur_clear_dirty(struct pvr2_ctrl *cptr)
-{
- cptr->hdw->std_dirty = 0;
-}
-
-
#define DEFINT(vmin,vmax) \
.type = pvr2_ctl_int, \
.def.type_int.min_value = vmin, \
@@ -1293,15 +1261,14 @@ static const struct pvr2_ctl_info control_defs[] = {
.sym_to_val = ctrl_std_sym_to_val,
.type = pvr2_ctl_bitmask,
},{
- .desc = "Video Standard Name",
- .name = "video_standard",
- .internal_id = PVR2_CID_STDENUM,
+ .desc = "Video Standards Detected Mask",
+ .name = "video_standard_mask_detected",
+ .internal_id = PVR2_CID_STDDETECT,
.skip_init = !0,
- .get_value = ctrl_stdenumcur_get,
- .set_value = ctrl_stdenumcur_set,
- .is_dirty = ctrl_stdenumcur_is_dirty,
- .clear_dirty = ctrl_stdenumcur_clear_dirty,
- .type = pvr2_ctl_enum,
+ .get_value = ctrl_stddetect_get,
+ .val_to_sym = ctrl_std_val_to_sym,
+ .sym_to_val = ctrl_std_sym_to_val,
+ .type = pvr2_ctl_bitmask,
}
};
@@ -1936,7 +1903,7 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
hdw->std_mask_avail |= std2;
}
- pvr2_hdw_internal_set_std_avail(hdw);
+ hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
if (std1) {
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std1);
@@ -1945,7 +1912,6 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
bcnt,buf);
hdw->std_mask_cur = std1;
hdw->std_dirty = !0;
- pvr2_hdw_internal_find_stdenum(hdw);
return;
}
if (std3) {
@@ -1955,7 +1921,6 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
" (determined by device type): %.*s",bcnt,buf);
hdw->std_mask_cur = std3;
hdw->std_dirty = !0;
- pvr2_hdw_internal_find_stdenum(hdw);
return;
}
@@ -1975,24 +1940,10 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
bcnt,buf);
hdw->std_mask_cur = std_eeprom_maps[idx].std;
hdw->std_dirty = !0;
- pvr2_hdw_internal_find_stdenum(hdw);
return;
}
}
- if (hdw->std_enum_cnt > 1) {
- // Autoselect the first listed standard
- hdw->std_enum_cur = 1;
- hdw->std_mask_cur = hdw->std_defs[hdw->std_enum_cur-1].id;
- hdw->std_dirty = !0;
- pvr2_trace(PVR2_TRACE_STD,
- "Initial video standard auto-selected to %s",
- hdw->std_defs[hdw->std_enum_cur-1].name);
- return;
- }
-
- pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Unable to select a viable initial video standard");
}
@@ -2594,14 +2545,6 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
cptr->info = ciptr;
}
- // Initialize video standard enum dynamic control
- cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDENUM);
- if (cptr) {
- memcpy(&hdw->std_info_enum,cptr->info,
- sizeof(hdw->std_info_enum));
- cptr->info = &hdw->std_info_enum;
-
- }
// Initialize control data regarding video standard masks
valid_std_mask = pvr2_std_get_usable();
for (idx = 0; idx < 32; idx++) {
@@ -2629,7 +2572,17 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
cptr->info = &hdw->std_info_cur;
hdw->std_info_cur.def.type_bitmask.bit_names =
hdw->std_mask_ptrs;
- hdw->std_info_avail.def.type_bitmask.valid_bits =
+ hdw->std_info_cur.def.type_bitmask.valid_bits =
+ valid_std_mask;
+ }
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDDETECT);
+ if (cptr) {
+ memcpy(&hdw->std_info_detect,cptr->info,
+ sizeof(hdw->std_info_detect));
+ cptr->info = &hdw->std_info_detect;
+ hdw->std_info_detect.def.type_bitmask.bit_names =
+ hdw->std_mask_ptrs;
+ hdw->std_info_detect.def.type_bitmask.valid_bits =
valid_std_mask;
}
@@ -2711,8 +2664,6 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
kfree(hdw->ctl_write_buffer);
kfree(hdw->controls);
kfree(hdw->mpeg_ctrl_info);
- kfree(hdw->std_defs);
- kfree(hdw->std_enum_names);
kfree(hdw);
}
return NULL;
@@ -2788,8 +2739,6 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
} while (0); mutex_unlock(&pvr2_unit_mtx);
kfree(hdw->controls);
kfree(hdw->mpeg_ctrl_info);
- kfree(hdw->std_defs);
- kfree(hdw->std_enum_names);
kfree(hdw);
}
@@ -2812,86 +2761,6 @@ void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
}
-// Attempt to autoselect an appropriate value for std_enum_cur given
-// whatever is currently in std_mask_cur
-static void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw)
-{
- unsigned int idx;
- for (idx = 1; idx < hdw->std_enum_cnt; idx++) {
- if (hdw->std_defs[idx-1].id == hdw->std_mask_cur) {
- hdw->std_enum_cur = idx;
- return;
- }
- }
- hdw->std_enum_cur = 0;
-}
-
-
-// Calculate correct set of enumerated standards based on currently known
-// set of available standards bits.
-static void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw)
-{
- struct v4l2_standard *newstd;
- unsigned int std_cnt;
- unsigned int idx;
-
- newstd = pvr2_std_create_enum(&std_cnt,hdw->std_mask_avail);
-
- if (hdw->std_defs) {
- kfree(hdw->std_defs);
- hdw->std_defs = NULL;
- }
- hdw->std_enum_cnt = 0;
- if (hdw->std_enum_names) {
- kfree(hdw->std_enum_names);
- hdw->std_enum_names = NULL;
- }
-
- if (!std_cnt) {
- pvr2_trace(
- PVR2_TRACE_ERROR_LEGS,
- "WARNING: Failed to identify any viable standards");
- }
-
- /* Set up the dynamic control for this standard */
- hdw->std_enum_names = kmalloc(sizeof(char *)*(std_cnt+1),GFP_KERNEL);
- if (hdw->std_enum_names) {
- hdw->std_enum_names[0] = "none";
- for (idx = 0; idx < std_cnt; idx++)
- hdw->std_enum_names[idx+1] = newstd[idx].name;
- hdw->std_info_enum.def.type_enum.value_names =
- hdw->std_enum_names;
- hdw->std_info_enum.def.type_enum.count = std_cnt+1;
- } else {
- pvr2_trace(
- PVR2_TRACE_ERROR_LEGS,
- "WARNING: Failed to alloc memory for names");
- hdw->std_info_enum.def.type_enum.value_names = NULL;
- hdw->std_info_enum.def.type_enum.count = 0;
- }
- hdw->std_defs = newstd;
- hdw->std_enum_cnt = std_cnt+1;
- hdw->std_enum_cur = 0;
- hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
-}
-
-
-int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,
- struct v4l2_standard *std,
- unsigned int idx)
-{
- int ret = -EINVAL;
- if (!idx) return ret;
- LOCK_TAKE(hdw->big_lock); do {
- if (idx >= hdw->std_enum_cnt) break;
- idx--;
- memcpy(std,hdw->std_defs+idx,sizeof(*std));
- ret = 0;
- } while (0); LOCK_GIVE(hdw->big_lock);
- return ret;
-}
-
-
/* Get the number of defined controls */
unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *hdw)
{
@@ -2995,11 +2864,13 @@ static void pvr2_subdev_set_control(struct pvr2_hdw *hdw, int id,
pvr2_subdev_set_control(hdw, id, #lab, (hdw)->lab##_val); \
}
-int pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw, v4l2_std_id *std)
+v4l2_std_id pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw)
{
+ v4l2_std_id std;
+ std = (v4l2_std_id)hdw->std_mask_avail;
v4l2_device_call_all(&hdw->v4l2_dev, 0,
- video, querystd, std);
- return 0;
+ video, querystd, &std);
+ return std;
}
/* Execute whatever commands are required to update the state of all the
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
index 66546580b17d..8060fc666eeb 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
@@ -28,7 +28,6 @@
/* Private internal control ids, look these up with
pvr2_hdw_get_ctrl_by_id() - these are NOT visible in V4L */
-#define PVR2_CID_STDENUM 1
#define PVR2_CID_STDCUR 2
#define PVR2_CID_STDAVAIL 3
#define PVR2_CID_INPUT 4
@@ -46,6 +45,7 @@
#define PVR2_CID_CROPCAPBT 16
#define PVR2_CID_CROPCAPBW 17
#define PVR2_CID_CROPCAPBH 18
+#define PVR2_CID_STDDETECT 19
/* Legal values for the INPUT state variable */
#define PVR2_CVAL_INPUT_TV 0
@@ -210,13 +210,6 @@ int pvr2_hdw_set_stream_type(struct pvr2_hdw *, enum pvr2_config);
/* Get handle to video output stream */
struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *);
-/* Emit a video standard struct */
-int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,struct v4l2_standard *std,
- unsigned int idx);
-
-/* Get the detected video standard */
-int pvr2_hdw_get_detected_std(struct pvr2_hdw *hdw, v4l2_std_id *std);
-
/* Enable / disable retrieval of CPU firmware or prom contents. This must
be enabled before pvr2_hdw_cpufw_get() will function. Note that doing
this may prevent the device from running (and leaving this mode may
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index e1111d968a3d..7bddfaeeafc3 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -107,7 +107,6 @@ static struct v4l2_fmtdesc pvr_fmtdesc [] = {
// This should really be V4L2_PIX_FMT_MPEG, but xawtv
// breaks when I do that.
.pixelformat = 0, // V4L2_PIX_FMT_MPEG,
- .reserved = { 0, 0, 0, 0 }
}
};
@@ -145,740 +144,739 @@ static struct v4l2_format pvr_format [] = {
.start = { 0, 0 },
.count = { 0, 0 },
.flags = 0,
- .reserved = { 0, 0 }
}
}
}
};
+
/*
- * pvr_ioctl()
- *
- * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
- *
+ * This is part of the Video 4 Linux API. These procedures handle ioctl() calls.
*/
-static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_v4l2 *vp = fh->vhead;
- struct pvr2_v4l2_dev *pdi = fh->pdi;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- long ret = -EINVAL;
- if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
- v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd);
- }
+ memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+ strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
+ sizeof(cap->bus_info));
+ strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+ return 0;
+}
- if (!pvr2_hdw_dev_ok(hdw)) {
- pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "ioctl failed - bad or no context");
- return -EFAULT;
- }
+static int pvr2_g_priority(struct file *file, void *priv, enum v4l2_priority *p)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2 *vp = fh->vhead;
- /* check priority */
- switch (cmd) {
- case VIDIOC_S_CTRL:
- case VIDIOC_S_STD:
- case VIDIOC_S_INPUT:
- case VIDIOC_S_TUNER:
- case VIDIOC_S_FREQUENCY:
- ret = v4l2_prio_check(&vp->prio, fh->prio);
- if (ret)
- return ret;
- }
+ *p = v4l2_prio_max(&vp->prio);
+ return 0;
+}
- switch (cmd) {
- case VIDIOC_QUERYCAP:
- {
- struct v4l2_capability *cap = arg;
+static int pvr2_s_priority(struct file *file, void *priv, enum v4l2_priority prio)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2 *vp = fh->vhead;
- memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
- strlcpy(cap->bus_info,pvr2_hdw_get_bus_info(hdw),
- sizeof(cap->bus_info));
- strlcpy(cap->card,pvr2_hdw_get_desc(hdw),sizeof(cap->card));
+ return v4l2_prio_change(&vp->prio, &fh->prio, prio);
+}
- ret = 0;
- break;
- }
+static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int ret;
- case VIDIOC_G_PRIORITY:
- {
- enum v4l2_priority *p = arg;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_STDCUR), &val);
+ *std = val;
+ return ret;
+}
- *p = v4l2_prio_max(&vp->prio);
- ret = 0;
- break;
- }
+static int pvr2_s_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- case VIDIOC_S_PRIORITY:
- {
- enum v4l2_priority *prio = arg;
+ return pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_STDCUR), *std);
+}
- ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio);
- break;
- }
+static int pvr2_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int ret;
- case VIDIOC_ENUMSTD:
- {
- struct v4l2_standard *vs = (struct v4l2_standard *)arg;
- int idx = vs->index;
- ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1);
- break;
- }
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_STDDETECT), &val);
+ *std = val;
+ return ret;
+}
- case VIDIOC_QUERYSTD:
- {
- v4l2_std_id *std = arg;
- *std = V4L2_STD_ALL;
- ret = pvr2_hdw_get_detected_std(hdw, std);
- break;
- }
+static int pvr2_enum_input(struct file *file, void *priv, struct v4l2_input *vi)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct pvr2_ctrl *cptr;
+ struct v4l2_input tmp;
+ unsigned int cnt;
+ int val;
+ int ret;
- case VIDIOC_G_STD:
- {
- int val = 0;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val);
- *(v4l2_std_id *)arg = val;
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT);
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.index = vi->index;
+ ret = 0;
+ if (vi->index >= fh->input_cnt)
+ return -EINVAL;
+ val = fh->input_map[vi->index];
+ switch (val) {
+ case PVR2_CVAL_INPUT_TV:
+ case PVR2_CVAL_INPUT_DTV:
+ case PVR2_CVAL_INPUT_RADIO:
+ tmp.type = V4L2_INPUT_TYPE_TUNER;
break;
- }
-
- case VIDIOC_S_STD:
- {
- ret = pvr2_ctrl_set_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),
- *(v4l2_std_id *)arg);
+ case PVR2_CVAL_INPUT_SVIDEO:
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ tmp.type = V4L2_INPUT_TYPE_CAMERA;
break;
+ default:
+ return -EINVAL;
}
- case VIDIOC_ENUMINPUT:
- {
- struct pvr2_ctrl *cptr;
- struct v4l2_input *vi = (struct v4l2_input *)arg;
- struct v4l2_input tmp;
- unsigned int cnt;
- int val;
+ cnt = 0;
+ pvr2_ctrl_get_valname(cptr, val,
+ tmp.name, sizeof(tmp.name) - 1, &cnt);
+ tmp.name[cnt] = 0;
- cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
+ /* Don't bother with audioset, since this driver currently
+ always switches the audio whenever the video is
+ switched. */
- memset(&tmp,0,sizeof(tmp));
- tmp.index = vi->index;
- ret = 0;
- if (vi->index >= fh->input_cnt) {
- ret = -EINVAL;
- break;
- }
- val = fh->input_map[vi->index];
- switch (val) {
- case PVR2_CVAL_INPUT_TV:
- case PVR2_CVAL_INPUT_DTV:
- case PVR2_CVAL_INPUT_RADIO:
- tmp.type = V4L2_INPUT_TYPE_TUNER;
- break;
- case PVR2_CVAL_INPUT_SVIDEO:
- case PVR2_CVAL_INPUT_COMPOSITE:
- tmp.type = V4L2_INPUT_TYPE_CAMERA;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret < 0) break;
-
- cnt = 0;
- pvr2_ctrl_get_valname(cptr,val,
- tmp.name,sizeof(tmp.name)-1,&cnt);
- tmp.name[cnt] = 0;
-
- /* Don't bother with audioset, since this driver currently
- always switches the audio whenever the video is
- switched. */
-
- /* Handling std is a tougher problem. It doesn't make
- sense in cases where a device might be multi-standard.
- We could just copy out the current value for the
- standard, but it can change over time. For now just
- leave it zero. */
-
- memcpy(vi, &tmp, sizeof(tmp));
-
- ret = 0;
- break;
- }
+ /* Handling std is a tougher problem. It doesn't make
+ sense in cases where a device might be multi-standard.
+ We could just copy out the current value for the
+ standard, but it can change over time. For now just
+ leave it zero. */
+ *vi = tmp;
+ return 0;
+}
- case VIDIOC_G_INPUT:
- {
- unsigned int idx;
- struct pvr2_ctrl *cptr;
- struct v4l2_input *vi = (struct v4l2_input *)arg;
- int val;
- cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
- val = 0;
- ret = pvr2_ctrl_get_value(cptr,&val);
- vi->index = 0;
- for (idx = 0; idx < fh->input_cnt; idx++) {
- if (fh->input_map[idx] == val) {
- vi->index = idx;
- break;
- }
- }
- break;
- }
+static int pvr2_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ unsigned int idx;
+ struct pvr2_ctrl *cptr;
+ int val;
+ int ret;
- case VIDIOC_S_INPUT:
- {
- struct v4l2_input *vi = (struct v4l2_input *)arg;
- if (vi->index >= fh->input_cnt) {
- ret = -ERANGE;
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT);
+ val = 0;
+ ret = pvr2_ctrl_get_value(cptr, &val);
+ *i = 0;
+ for (idx = 0; idx < fh->input_cnt; idx++) {
+ if (fh->input_map[idx] == val) {
+ *i = idx;
break;
}
- ret = pvr2_ctrl_set_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT),
- fh->input_map[vi->index]);
- break;
}
+ return ret;
+}
- case VIDIOC_ENUMAUDIO:
- {
- /* pkt: FIXME: We are returning one "fake" input here
- which could very well be called "whatever_we_like".
- This is for apps that want to see an audio input
- just to feel comfortable, as well as to test if
- it can do stereo or sth. There is actually no guarantee
- that the actual audio input cannot change behind the app's
- back, but most applications should not mind that either.
-
- Hopefully, mplayer people will work with us on this (this
- whole mess is to support mplayer pvr://), or Hans will come
- up with a more standard way to say "we have inputs but we
- don 't want you to change them independent of video" which
- will sort this mess.
- */
- struct v4l2_audio *vin = arg;
- ret = -EINVAL;
- if (vin->index > 0) break;
- strncpy(vin->name, "PVRUSB2 Audio",14);
- vin->capability = V4L2_AUDCAP_STEREO;
- ret = 0;
- break;
- break;
- }
+static int pvr2_s_input(struct file *file, void *priv, unsigned int inp)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- case VIDIOC_G_AUDIO:
- {
- /* pkt: FIXME: see above comment (VIDIOC_ENUMAUDIO) */
- struct v4l2_audio *vin = arg;
- memset(vin,0,sizeof(*vin));
- vin->index = 0;
- strncpy(vin->name, "PVRUSB2 Audio",14);
- vin->capability = V4L2_AUDCAP_STEREO;
- ret = 0;
- break;
- }
+ if (inp >= fh->input_cnt)
+ return -EINVAL;
+ return pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT),
+ fh->input_map[inp]);
+}
- case VIDIOC_G_TUNER:
- {
- struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
+static int pvr2_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin)
+{
+ /* pkt: FIXME: We are returning one "fake" input here
+ which could very well be called "whatever_we_like".
+ This is for apps that want to see an audio input
+ just to feel comfortable, as well as to test if
+ it can do stereo or sth. There is actually no guarantee
+ that the actual audio input cannot change behind the app's
+ back, but most applications should not mind that either.
+
+ Hopefully, mplayer people will work with us on this (this
+ whole mess is to support mplayer pvr://), or Hans will come
+ up with a more standard way to say "we have inputs but we
+ don't want you to change them independently of video" which
+ will sort this mess.
+ */
+
+ if (vin->index > 0)
+ return -EINVAL;
+ strncpy(vin->name, "PVRUSB2 Audio", 14);
+ vin->capability = V4L2_AUDCAP_STEREO;
+ return 0;
+}
- if (vt->index != 0) break; /* Only answer for the 1st tuner */
+static int pvr2_g_audio(struct file *file, void *priv, struct v4l2_audio *vin)
+{
+ /* pkt: FIXME: see above comment (VIDIOC_ENUMAUDIO) */
+ vin->index = 0;
+ strncpy(vin->name, "PVRUSB2 Audio", 14);
+ vin->capability = V4L2_AUDCAP_STEREO;
+ return 0;
+}
- pvr2_hdw_execute_tuner_poll(hdw);
- ret = pvr2_hdw_get_tuner_status(hdw,vt);
- break;
- }
+static int pvr2_s_audio(struct file *file, void *priv, struct v4l2_audio *vout)
+{
+ if (vout->index)
+ return -EINVAL;
+ return 0;
+}
- case VIDIOC_S_TUNER:
- {
- struct v4l2_tuner *vt=(struct v4l2_tuner *)arg;
+static int pvr2_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- if (vt->index != 0)
- break;
+ if (vt->index != 0)
+ return -EINVAL; /* Only answer for the 1st tuner */
- ret = pvr2_ctrl_set_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE),
+ pvr2_hdw_execute_tuner_poll(hdw);
+ return pvr2_hdw_get_tuner_status(hdw, vt);
+}
+
+static int pvr2_s_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+
+ if (vt->index != 0)
+ return -EINVAL;
+
+ return pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_AUDIOMODE),
vt->audmode);
- break;
- }
+}
- case VIDIOC_S_FREQUENCY:
- {
- const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
- unsigned long fv;
- struct v4l2_tuner vt;
- int cur_input;
- struct pvr2_ctrl *ctrlp;
- ret = pvr2_hdw_get_tuner_status(hdw,&vt);
- if (ret != 0) break;
- ctrlp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
- ret = pvr2_ctrl_get_value(ctrlp,&cur_input);
- if (ret != 0) break;
- if (vf->type == V4L2_TUNER_RADIO) {
- if (cur_input != PVR2_CVAL_INPUT_RADIO) {
- pvr2_ctrl_set_value(ctrlp,
- PVR2_CVAL_INPUT_RADIO);
- }
- } else {
- if (cur_input == PVR2_CVAL_INPUT_RADIO) {
- pvr2_ctrl_set_value(ctrlp,
- PVR2_CVAL_INPUT_TV);
- }
- }
- fv = vf->frequency;
- if (vt.capability & V4L2_TUNER_CAP_LOW) {
- fv = (fv * 125) / 2;
- } else {
- fv = fv * 62500;
- }
- ret = pvr2_ctrl_set_value(
+static int pvr2_s_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ unsigned long fv;
+ struct v4l2_tuner vt;
+ int cur_input;
+ struct pvr2_ctrl *ctrlp;
+ int ret;
+
+ ret = pvr2_hdw_get_tuner_status(hdw, &vt);
+ if (ret != 0)
+ return ret;
+ ctrlp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT);
+ ret = pvr2_ctrl_get_value(ctrlp, &cur_input);
+ if (ret != 0)
+ return ret;
+ if (vf->type == V4L2_TUNER_RADIO) {
+ if (cur_input != PVR2_CVAL_INPUT_RADIO)
+ pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_RADIO);
+ } else {
+ if (cur_input == PVR2_CVAL_INPUT_RADIO)
+ pvr2_ctrl_set_value(ctrlp, PVR2_CVAL_INPUT_TV);
+ }
+ fv = vf->frequency;
+ if (vt.capability & V4L2_TUNER_CAP_LOW)
+ fv = (fv * 125) / 2;
+ else
+ fv = fv * 62500;
+ return pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),fv);
- break;
- }
+}
- case VIDIOC_G_FREQUENCY:
- {
- struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
- int val = 0;
- int cur_input;
- struct v4l2_tuner vt;
- ret = pvr2_hdw_get_tuner_status(hdw,&vt);
- if (ret != 0) break;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),
+static int pvr2_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int cur_input;
+ struct v4l2_tuner vt;
+ int ret;
+
+ ret = pvr2_hdw_get_tuner_status(hdw, &vt);
+ if (ret != 0)
+ return ret;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_FREQUENCY),
&val);
- if (ret != 0) break;
- pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT),
+ if (ret != 0)
+ return ret;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT),
&cur_input);
- if (cur_input == PVR2_CVAL_INPUT_RADIO) {
- vf->type = V4L2_TUNER_RADIO;
- } else {
- vf->type = V4L2_TUNER_ANALOG_TV;
- }
- if (vt.capability & V4L2_TUNER_CAP_LOW) {
- val = (val * 2) / 125;
- } else {
- val /= 62500;
- }
- vf->frequency = val;
- break;
- }
+ if (cur_input == PVR2_CVAL_INPUT_RADIO)
+ vf->type = V4L2_TUNER_RADIO;
+ else
+ vf->type = V4L2_TUNER_ANALOG_TV;
+ if (vt.capability & V4L2_TUNER_CAP_LOW)
+ val = (val * 2) / 125;
+ else
+ val /= 62500;
+ vf->frequency = val;
+ return 0;
+}
- case VIDIOC_ENUM_FMT:
- {
- struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg;
+static int pvr2_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fd)
+{
+ /* Only one format is supported: mpeg. */
+ if (fd->index != 0)
+ return -EINVAL;
- /* Only one format is supported : mpeg.*/
- if (fd->index != 0)
- break;
+ memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc));
+ return 0;
+}
- memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc));
- ret = 0;
- break;
- }
+static int pvr2_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val;
- case VIDIOC_G_FMT:
- {
- struct v4l2_format *vf = (struct v4l2_format *)arg;
- int val;
- switch(vf->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
- sizeof(struct v4l2_format));
- val = 0;
- pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES),
- &val);
- vf->fmt.pix.width = val;
- val = 0;
- pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES),
- &val);
- vf->fmt.pix.height = val;
- ret = 0;
- break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- // ????? Still need to figure out to do VBI correctly
- ret = -EINVAL;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- break;
- }
+ memcpy(vf, &pvr_format[PVR_FORMAT_PIX], sizeof(struct v4l2_format));
+ val = 0;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_HRES),
+ &val);
+ vf->fmt.pix.width = val;
+ val = 0;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_VRES),
+ &val);
+ vf->fmt.pix.height = val;
+ return 0;
+}
- case VIDIOC_TRY_FMT:
- case VIDIOC_S_FMT:
- {
- struct v4l2_format *vf = (struct v4l2_format *)arg;
-
- ret = 0;
- switch(vf->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
- int lmin,lmax,ldef;
- struct pvr2_ctrl *hcp,*vcp;
- int h = vf->fmt.pix.height;
- int w = vf->fmt.pix.width;
- hcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES);
- vcp = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES);
-
- lmin = pvr2_ctrl_get_min(hcp);
- lmax = pvr2_ctrl_get_max(hcp);
- pvr2_ctrl_get_def(hcp, &ldef);
- if (w == -1) {
- w = ldef;
- } else if (w < lmin) {
- w = lmin;
- } else if (w > lmax) {
- w = lmax;
- }
- lmin = pvr2_ctrl_get_min(vcp);
- lmax = pvr2_ctrl_get_max(vcp);
- pvr2_ctrl_get_def(vcp, &ldef);
- if (h == -1) {
- h = ldef;
- } else if (h < lmin) {
- h = lmin;
- } else if (h > lmax) {
- h = lmax;
- }
+static int pvr2_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int lmin, lmax, ldef;
+ struct pvr2_ctrl *hcp, *vcp;
+ int h = vf->fmt.pix.height;
+ int w = vf->fmt.pix.width;
+
+ hcp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_HRES);
+ vcp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_VRES);
+
+ lmin = pvr2_ctrl_get_min(hcp);
+ lmax = pvr2_ctrl_get_max(hcp);
+ pvr2_ctrl_get_def(hcp, &ldef);
+ if (w == -1)
+ w = ldef;
+ else if (w < lmin)
+ w = lmin;
+ else if (w > lmax)
+ w = lmax;
+ lmin = pvr2_ctrl_get_min(vcp);
+ lmax = pvr2_ctrl_get_max(vcp);
+ pvr2_ctrl_get_def(vcp, &ldef);
+ if (h == -1)
+ h = ldef;
+ else if (h < lmin)
+ h = lmin;
+ else if (h > lmax)
+ h = lmax;
+
+ memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
+ sizeof(struct v4l2_format));
+ vf->fmt.pix.width = w;
+ vf->fmt.pix.height = h;
+ return 0;
+}
- memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
- sizeof(struct v4l2_format));
- vf->fmt.pix.width = w;
- vf->fmt.pix.height = h;
+static int pvr2_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct pvr2_ctrl *hcp, *vcp;
+ int ret = pvr2_try_fmt_vid_cap(file, fh, vf);
- if (cmd == VIDIOC_S_FMT) {
- pvr2_ctrl_set_value(hcp,vf->fmt.pix.width);
- pvr2_ctrl_set_value(vcp,vf->fmt.pix.height);
- }
- } break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- // ????? Still need to figure out to do VBI correctly
- ret = -EINVAL;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- break;
- }
+ if (ret)
+ return ret;
+ hcp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_HRES);
+ vcp = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_VRES);
+ pvr2_ctrl_set_value(hcp, vf->fmt.pix.width);
+ pvr2_ctrl_set_value(vcp, vf->fmt.pix.height);
+ return 0;
+}
- case VIDIOC_STREAMON:
- {
- if (!fh->pdi->stream) {
- /* No stream defined for this node. This means
- that we're not currently allowed to stream from
- this node. */
- ret = -EPERM;
- break;
- }
- ret = pvr2_hdw_set_stream_type(hdw,pdi->config);
- if (ret < 0) return ret;
- ret = pvr2_hdw_set_streaming(hdw,!0);
- break;
+static int pvr2_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct pvr2_v4l2_dev *pdi = fh->pdi;
+ int ret;
+
+ if (!fh->pdi->stream) {
+ /* No stream defined for this node. This means
+ that we're not currently allowed to stream from
+ this node. */
+ return -EPERM;
}
+ ret = pvr2_hdw_set_stream_type(hdw, pdi->config);
+ if (ret < 0)
+ return ret;
+ return pvr2_hdw_set_streaming(hdw, !0);
+}
- case VIDIOC_STREAMOFF:
- {
- if (!fh->pdi->stream) {
- /* No stream defined for this node. This means
- that we're not currently allowed to stream from
- this node. */
- ret = -EPERM;
- break;
- }
- ret = pvr2_hdw_set_streaming(hdw,0);
- break;
+static int pvr2_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+
+ if (!fh->pdi->stream) {
+ /* No stream defined for this node. This means
+ that we're not currently allowed to stream from
+ this node. */
+ return -EPERM;
}
+ return pvr2_hdw_set_streaming(hdw, 0);
+}
- case VIDIOC_QUERYCTRL:
- {
- struct pvr2_ctrl *cptr;
- int val;
- struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg;
- ret = 0;
- if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
- cptr = pvr2_hdw_get_ctrl_nextv4l(
- hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL));
- if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr);
- } else {
- cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id);
- }
- if (!cptr) {
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x not implemented here",
- vc->id);
- ret = -EINVAL;
- break;
- }
+static int pvr2_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *vc)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct pvr2_ctrl *cptr;
+ int val;
+ int ret;
+ ret = 0;
+ if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
+ cptr = pvr2_hdw_get_ctrl_nextv4l(
+ hdw, (vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL));
+ if (cptr)
+ vc->id = pvr2_ctrl_get_v4lid(cptr);
+ } else {
+ cptr = pvr2_hdw_get_ctrl_v4l(hdw, vc->id);
+ }
+ if (!cptr) {
pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x mapping name=%s (%s)",
- vc->id,pvr2_ctrl_get_name(cptr),
- pvr2_ctrl_get_desc(cptr));
- strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name));
- vc->flags = pvr2_ctrl_get_v4lflags(cptr);
- pvr2_ctrl_get_def(cptr, &val);
- vc->default_value = val;
- switch (pvr2_ctrl_get_type(cptr)) {
- case pvr2_ctl_enum:
- vc->type = V4L2_CTRL_TYPE_MENU;
- vc->minimum = 0;
- vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1;
- vc->step = 1;
- break;
- case pvr2_ctl_bool:
- vc->type = V4L2_CTRL_TYPE_BOOLEAN;
- vc->minimum = 0;
- vc->maximum = 1;
- vc->step = 1;
- break;
- case pvr2_ctl_int:
- vc->type = V4L2_CTRL_TYPE_INTEGER;
- vc->minimum = pvr2_ctrl_get_min(cptr);
- vc->maximum = pvr2_ctrl_get_max(cptr);
- vc->step = 1;
- break;
- default:
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x name=%s not mappable",
- vc->id,pvr2_ctrl_get_name(cptr));
- ret = -EINVAL;
- break;
- }
+ "QUERYCTRL id=0x%x not implemented here",
+ vc->id);
+ return -EINVAL;
+ }
+
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x mapping name=%s (%s)",
+ vc->id, pvr2_ctrl_get_name(cptr),
+ pvr2_ctrl_get_desc(cptr));
+ strlcpy(vc->name, pvr2_ctrl_get_desc(cptr), sizeof(vc->name));
+ vc->flags = pvr2_ctrl_get_v4lflags(cptr);
+ pvr2_ctrl_get_def(cptr, &val);
+ vc->default_value = val;
+ switch (pvr2_ctrl_get_type(cptr)) {
+ case pvr2_ctl_enum:
+ vc->type = V4L2_CTRL_TYPE_MENU;
+ vc->minimum = 0;
+ vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1;
+ vc->step = 1;
break;
- }
-
- case VIDIOC_QUERYMENU:
- {
- struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg;
- unsigned int cnt = 0;
- ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id),
- vm->index,
- vm->name,sizeof(vm->name)-1,
- &cnt);
- vm->name[cnt] = 0;
+ case pvr2_ctl_bool:
+ vc->type = V4L2_CTRL_TYPE_BOOLEAN;
+ vc->minimum = 0;
+ vc->maximum = 1;
+ vc->step = 1;
break;
- }
-
- case VIDIOC_G_CTRL:
- {
- struct v4l2_control *vc = (struct v4l2_control *)arg;
- int val = 0;
- ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
- &val);
- vc->value = val;
+ case pvr2_ctl_int:
+ vc->type = V4L2_CTRL_TYPE_INTEGER;
+ vc->minimum = pvr2_ctrl_get_min(cptr);
+ vc->maximum = pvr2_ctrl_get_max(cptr);
+ vc->step = 1;
break;
+ default:
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x name=%s not mappable",
+ vc->id, pvr2_ctrl_get_name(cptr));
+ return -EINVAL;
}
+ return 0;
+}
- case VIDIOC_S_CTRL:
- {
- struct v4l2_control *vc = (struct v4l2_control *)arg;
- ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
- vc->value);
- break;
- }
+static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *vm)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ unsigned int cnt = 0;
+ int ret;
- case VIDIOC_G_EXT_CTRLS:
- {
- struct v4l2_ext_controls *ctls =
- (struct v4l2_ext_controls *)arg;
- struct v4l2_ext_control *ctrl;
- unsigned int idx;
- int val;
- ret = 0;
- for (idx = 0; idx < ctls->count; idx++) {
- ctrl = ctls->controls + idx;
- ret = pvr2_ctrl_get_value(
- pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val);
- if (ret) {
- ctls->error_idx = idx;
- break;
- }
- /* Ensure that if read as a 64 bit value, the user
- will still get a hopefully sane value */
- ctrl->value64 = 0;
- ctrl->value = val;
+ ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw, vm->id),
+ vm->index,
+ vm->name, sizeof(vm->name) - 1,
+ &cnt);
+ vm->name[cnt] = 0;
+ return ret;
+}
+
+static int pvr2_g_ctrl(struct file *file, void *priv, struct v4l2_control *vc)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int ret;
+
+ ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw, vc->id),
+ &val);
+ vc->value = val;
+ return ret;
+}
+
+static int pvr2_s_ctrl(struct file *file, void *priv, struct v4l2_control *vc)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+
+ return pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw, vc->id),
+ vc->value);
+}
+
+static int pvr2_g_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctls)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_ext_control *ctrl;
+ unsigned int idx;
+ int val;
+ int ret;
+
+ ret = 0;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id), &val);
+ if (ret) {
+ ctls->error_idx = idx;
+ return ret;
}
- break;
+ /* Ensure that if read as a 64 bit value, the user
+ will still get a hopefully sane value */
+ ctrl->value64 = 0;
+ ctrl->value = val;
}
+ return 0;
+}
- case VIDIOC_S_EXT_CTRLS:
- {
- struct v4l2_ext_controls *ctls =
- (struct v4l2_ext_controls *)arg;
- struct v4l2_ext_control *ctrl;
- unsigned int idx;
- ret = 0;
- for (idx = 0; idx < ctls->count; idx++) {
- ctrl = ctls->controls + idx;
- ret = pvr2_ctrl_set_value(
- pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),
+static int pvr2_s_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctls)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_ext_control *ctrl;
+ unsigned int idx;
+ int ret;
+
+ ret = 0;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id),
ctrl->value);
- if (ret) {
- ctls->error_idx = idx;
- break;
- }
+ if (ret) {
+ ctls->error_idx = idx;
+ return ret;
}
- break;
}
+ return 0;
+}
- case VIDIOC_TRY_EXT_CTRLS:
- {
- struct v4l2_ext_controls *ctls =
- (struct v4l2_ext_controls *)arg;
- struct v4l2_ext_control *ctrl;
- struct pvr2_ctrl *pctl;
- unsigned int idx;
- /* For the moment just validate that the requested control
- actually exists. */
- ret = 0;
- for (idx = 0; idx < ctls->count; idx++) {
- ctrl = ctls->controls + idx;
- pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id);
- if (!pctl) {
- ret = -EINVAL;
- ctls->error_idx = idx;
- break;
- }
- }
- break;
- }
+static int pvr2_try_ext_ctrls(struct file *file, void *priv,
+ struct v4l2_ext_controls *ctls)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_ext_control *ctrl;
+ struct pvr2_ctrl *pctl;
+ unsigned int idx;
+ int ret;
- case VIDIOC_CROPCAP:
- {
- struct v4l2_cropcap *cap = (struct v4l2_cropcap *)arg;
- if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- ret = -EINVAL;
- break;
+ /* For the moment just validate that the requested control
+ actually exists. */
+ ret = 0;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ pctl = pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id);
+ if (!pctl) {
+ ctls->error_idx = idx;
+ return -EINVAL;
}
- ret = pvr2_hdw_get_cropcap(hdw, cap);
- cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* paranoia */
- break;
}
- case VIDIOC_G_CROP:
- {
- struct v4l2_crop *crop = (struct v4l2_crop *)arg;
- int val = 0;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_get_value(
+ return 0;
+}
+
+static int pvr2_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cap)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int ret;
+
+ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ ret = pvr2_hdw_get_cropcap(hdw, cap);
+ cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* paranoia */
+ return ret;
+}
+
+static int pvr2_g_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int val = 0;
+ int ret;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ ret = pvr2_ctrl_get_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL), &val);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- crop->c.left = val;
- ret = pvr2_ctrl_get_value(
+ if (ret != 0)
+ return -EINVAL;
+ crop->c.left = val;
+ ret = pvr2_ctrl_get_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT), &val);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- crop->c.top = val;
- ret = pvr2_ctrl_get_value(
+ if (ret != 0)
+ return -EINVAL;
+ crop->c.top = val;
+ ret = pvr2_ctrl_get_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW), &val);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- crop->c.width = val;
- ret = pvr2_ctrl_get_value(
+ if (ret != 0)
+ return -EINVAL;
+ crop->c.width = val;
+ ret = pvr2_ctrl_get_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH), &val);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- crop->c.height = val;
- }
- case VIDIOC_S_CROP:
- {
- struct v4l2_crop *crop = (struct v4l2_crop *)arg;
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_set_value(
+ if (ret != 0)
+ return -EINVAL;
+ crop->c.height = val;
+ return 0;
+}
+
+static int pvr2_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_cropcap cap;
+ int ret;
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL),
crop->c.left);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_set_value(
+ if (ret != 0)
+ return -EINVAL;
+ ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPT),
crop->c.top);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_set_value(
+ if (ret != 0)
+ return -EINVAL;
+ ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPW),
crop->c.width);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- ret = pvr2_ctrl_set_value(
+ if (ret != 0)
+ return -EINVAL;
+ ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPH),
crop->c.height);
- if (ret != 0) {
- ret = -EINVAL;
- break;
- }
- }
- case VIDIOC_LOG_STATUS:
- {
- pvr2_hdw_trigger_module_log(hdw);
- ret = 0;
- break;
- }
+ if (ret != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int pvr2_log_status(struct file *file, void *priv)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+
+ pvr2_hdw_trigger_module_log(hdw);
+ return 0;
+}
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
- case VIDIOC_DBG_S_REGISTER:
- case VIDIOC_DBG_G_REGISTER:
- {
- u64 val;
- struct v4l2_dbg_register *req = (struct v4l2_dbg_register *)arg;
- if (cmd == VIDIOC_DBG_S_REGISTER) val = req->val;
- ret = pvr2_hdw_register_access(
- hdw, &req->match, req->reg,
- cmd == VIDIOC_DBG_S_REGISTER, &val);
- if (cmd == VIDIOC_DBG_G_REGISTER) req->val = val;
- break;
- }
-#endif
+static int pvr2_g_register(struct file *file, void *priv, struct v4l2_dbg_register *req)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ u64 val;
+ int ret;
- default :
- ret = -ENOTTY;
- break;
- }
+ ret = pvr2_hdw_register_access(
+ hdw, &req->match, req->reg,
+ 0, &val);
+ req->val = val;
+ return ret;
+}
- pvr2_hdw_commit_ctl(hdw);
+static int pvr2_s_register(struct file *file, void *priv, struct v4l2_dbg_register *req)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ u64 val;
+ int ret;
- if (ret < 0) {
- if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "pvr2_v4l2_do_ioctl failure, ret=%ld", ret);
- } else {
- if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "pvr2_v4l2_do_ioctl failure, ret=%ld"
- " command was:", ret);
- v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),
- cmd);
- }
- }
- } else {
- pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "pvr2_v4l2_do_ioctl complete, ret=%ld (0x%lx)",
- ret, ret);
- }
+ val = req->val;
+ ret = pvr2_hdw_register_access(
+ hdw, &req->match, req->reg,
+ 1, &val);
return ret;
}
+#endif
+
+static const struct v4l2_ioctl_ops pvr2_ioctl_ops = {
+ .vidioc_querycap = pvr2_querycap,
+ .vidioc_g_priority = pvr2_g_priority,
+ .vidioc_s_priority = pvr2_s_priority,
+ .vidioc_s_audio = pvr2_s_audio,
+ .vidioc_g_audio = pvr2_g_audio,
+ .vidioc_enumaudio = pvr2_enumaudio,
+ .vidioc_enum_input = pvr2_enum_input,
+ .vidioc_cropcap = pvr2_cropcap,
+ .vidioc_s_crop = pvr2_s_crop,
+ .vidioc_g_crop = pvr2_g_crop,
+ .vidioc_g_input = pvr2_g_input,
+ .vidioc_s_input = pvr2_s_input,
+ .vidioc_g_frequency = pvr2_g_frequency,
+ .vidioc_s_frequency = pvr2_s_frequency,
+ .vidioc_s_tuner = pvr2_s_tuner,
+ .vidioc_g_tuner = pvr2_g_tuner,
+ .vidioc_g_std = pvr2_g_std,
+ .vidioc_s_std = pvr2_s_std,
+ .vidioc_querystd = pvr2_querystd,
+ .vidioc_log_status = pvr2_log_status,
+ .vidioc_enum_fmt_vid_cap = pvr2_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = pvr2_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = pvr2_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = pvr2_try_fmt_vid_cap,
+ .vidioc_streamon = pvr2_streamon,
+ .vidioc_streamoff = pvr2_streamoff,
+ .vidioc_queryctrl = pvr2_queryctrl,
+ .vidioc_querymenu = pvr2_querymenu,
+ .vidioc_g_ctrl = pvr2_g_ctrl,
+ .vidioc_s_ctrl = pvr2_s_ctrl,
+ .vidioc_g_ext_ctrls = pvr2_g_ext_ctrls,
+ .vidioc_s_ext_ctrls = pvr2_s_ext_ctrls,
+ .vidioc_try_ext_ctrls = pvr2_try_ext_ctrls,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = pvr2_g_register,
+ .vidioc_s_register = pvr2_s_register,
+#endif
+};
static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
{
@@ -961,7 +959,56 @@ static long pvr2_v4l2_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return video_usercopy(file, cmd, arg, pvr2_v4l2_do_ioctl);
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2 *vp = fh->vhead;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ long ret = -EINVAL;
+
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL)
+ v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw), cmd);
+
+ if (!pvr2_hdw_dev_ok(hdw)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "ioctl failed - bad or no context");
+ return -EFAULT;
+ }
+
+ /* check priority */
+ switch (cmd) {
+ case VIDIOC_S_CTRL:
+ case VIDIOC_S_STD:
+ case VIDIOC_S_INPUT:
+ case VIDIOC_S_TUNER:
+ case VIDIOC_S_FREQUENCY:
+ ret = v4l2_prio_check(&vp->prio, fh->prio);
+ if (ret)
+ return ret;
+ }
+
+ ret = video_ioctl2(file, cmd, arg);
+
+ pvr2_hdw_commit_ctl(hdw);
+
+ if (ret < 0) {
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl failure, ret=%ld", ret);
+ } else {
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl failure, ret=%ld"
+ " command was:", ret);
+ v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),
+ cmd);
+ }
+ }
+ } else {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl complete, ret=%ld (0x%lx)",
+ ret, ret);
+ }
+ return ret;
+
}
@@ -1262,10 +1309,12 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
struct usb_device *usbdev;
int mindevnum;
int unit_number;
+ struct pvr2_hdw *hdw;
int *nr_ptr = NULL;
dip->v4lp = vp;
- usbdev = pvr2_hdw_get_dev(vp->channel.mc_head->hdw);
+ hdw = vp->channel.mc_head->hdw;
+ usbdev = pvr2_hdw_get_dev(hdw);
dip->v4l_type = v4l_type;
switch (v4l_type) {
case VFL_TYPE_GRABBER:
@@ -1300,9 +1349,17 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
memcpy(&dip->devbase,&vdev_template,sizeof(vdev_template));
dip->devbase.release = pvr2_video_device_release;
+ dip->devbase.ioctl_ops = &pvr2_ioctl_ops;
+ {
+ int val;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,
+ PVR2_CID_STDAVAIL), &val);
+ dip->devbase.tvnorms = (v4l2_std_id)val;
+ }
mindevnum = -1;
- unit_number = pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw);
+ unit_number = pvr2_hdw_get_unit_number(hdw);
if (nr_ptr && (unit_number >= 0) && (unit_number < PVR_NUM)) {
mindevnum = nr_ptr[unit_number];
}
@@ -1319,7 +1376,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
video_device_node_name(&dip->devbase),
pvr2_config_get_name(dip->config));
- pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
+ pvr2_hdw_v4l_store_minor_number(hdw,
dip->minor_type,dip->devbase.minor);
}
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 122fbd0081eb..ec4e2ef54e65 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -357,6 +357,7 @@ handler_end:
PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i);
}
+/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
static int pwc_isoc_init(struct pwc_device *pdev)
{
struct usb_device *udev;
@@ -366,9 +367,6 @@ static int pwc_isoc_init(struct pwc_device *pdev)
struct usb_host_interface *idesc = NULL;
int compression = 0; /* 0..3 = uncompressed..high */
- if (pdev->iso_init)
- return 0;
-
pdev->vsync = 0;
pdev->vlast_packet_size = 0;
pdev->fill_buf = NULL;
@@ -418,7 +416,6 @@ retry:
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
if (urb == NULL) {
PWC_ERROR("Failed to allocate urb %d\n", i);
- pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
return -ENOMEM;
}
@@ -435,7 +432,6 @@ retry:
&urb->transfer_dma);
if (urb->transfer_buffer == NULL) {
PWC_ERROR("Failed to allocate urb buffer %d\n", i);
- pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
return -ENOMEM;
}
@@ -455,13 +451,11 @@ retry:
ret = usb_submit_urb(pdev->urbs[i], GFP_KERNEL);
if (ret == -ENOSPC && compression < 3) {
compression++;
- pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
goto retry;
}
if (ret) {
PWC_ERROR("isoc_init() submit_urb %d failed with error %d\n", i, ret);
- pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
return ret;
}
@@ -469,7 +463,6 @@ retry:
}
/* All is done... */
- pdev->iso_init = 1;
PWC_DEBUG_OPEN("<< pwc_isoc_init()\n");
return 0;
}
@@ -507,21 +500,19 @@ static void pwc_iso_free(struct pwc_device *pdev)
}
}
+/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
static void pwc_isoc_cleanup(struct pwc_device *pdev)
{
PWC_DEBUG_OPEN(">> pwc_isoc_cleanup()\n");
- if (pdev->iso_init == 0)
- return;
-
pwc_iso_stop(pdev);
pwc_iso_free(pdev);
usb_set_interface(pdev->udev, 0, 0);
- pdev->iso_init = 0;
PWC_DEBUG_OPEN("<< pwc_isoc_cleanup()\n");
}
+/* Must be called with vb_queue_lock held */
static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
{
unsigned long flags = 0;
@@ -573,18 +564,13 @@ static const char *pwc_sensor_type_to_string(unsigned int sensor_type)
int pwc_test_n_set_capt_file(struct pwc_device *pdev, struct file *file)
{
- int r = 0;
-
- mutex_lock(&pdev->capt_file_lock);
if (pdev->capt_file != NULL &&
- pdev->capt_file != file) {
- r = -EBUSY;
- goto leave;
- }
+ pdev->capt_file != file)
+ return -EBUSY;
+
pdev->capt_file = file;
-leave:
- mutex_unlock(&pdev->capt_file_lock);
- return r;
+
+ return 0;
}
static void pwc_video_release(struct v4l2_device *v)
@@ -592,6 +578,7 @@ static void pwc_video_release(struct v4l2_device *v)
struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
v4l2_ctrl_handler_free(&pdev->ctrl_handler);
+ v4l2_device_unregister(&pdev->v4l2_dev);
kfree(pdev->ctrl_buf);
kfree(pdev);
}
@@ -600,10 +587,25 @@ static int pwc_video_close(struct file *file)
{
struct pwc_device *pdev = video_drvdata(file);
+ /*
+ * If we're still streaming, vb2_queue_release will call stream_stop,
+ * so we must take both the v4l2_lock and the vb_queue_lock.
+ */
+ if (mutex_lock_interruptible(&pdev->v4l2_lock))
+ return -ERESTARTSYS;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock)) {
+ mutex_unlock(&pdev->v4l2_lock);
+ return -ERESTARTSYS;
+ }
+
if (pdev->capt_file == file) {
vb2_queue_release(&pdev->vb_queue);
pdev->capt_file = NULL;
}
+
+ mutex_unlock(&pdev->vb_queue_lock);
+ mutex_unlock(&pdev->v4l2_lock);
+
return v4l2_fh_release(file);
}
@@ -611,35 +613,81 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct pwc_device *pdev = video_drvdata(file);
+ int lock_v4l2 = 0;
+ ssize_t ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pwc_test_n_set_capt_file(pdev, file))
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret)
+ goto out;
- return vb2_read(&pdev->vb_queue, buf, count, ppos,
- file->f_flags & O_NONBLOCK);
+ /* stream_start will get called so we must take the v4l2_lock */
+ if (pdev->vb_queue.fileio == NULL)
+ lock_v4l2 = 1;
+
+ /* Use try_lock, since we're taking the locks in the *wrong* order! */
+ if (lock_v4l2 && !mutex_trylock(&pdev->v4l2_lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+ ret = vb2_read(&pdev->vb_queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (lock_v4l2)
+ mutex_unlock(&pdev->v4l2_lock);
+out:
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
{
struct pwc_device *pdev = video_drvdata(file);
+ struct vb2_queue *q = &pdev->vb_queue;
+ unsigned long req_events = poll_requested_events(wait);
+ unsigned int ret = POLL_ERR;
+ int lock_v4l2 = 0;
- if (!pdev->udev)
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
return POLL_ERR;
- return vb2_poll(&pdev->vb_queue, file, wait);
+ /* Will this start fileio and thus call start_stream? */
+ if ((req_events & (POLLIN | POLLRDNORM)) &&
+ q->num_buffers == 0 && !q->streaming && q->fileio == NULL) {
+ if (pwc_test_n_set_capt_file(pdev, file))
+ goto out;
+ lock_v4l2 = 1;
+ }
+
+ /* Use try_lock, since we're taking the locks in the *wrong* order! */
+ if (lock_v4l2 && !mutex_trylock(&pdev->v4l2_lock))
+ goto out;
+ ret = vb2_poll(&pdev->vb_queue, file, wait);
+ if (lock_v4l2)
+ mutex_unlock(&pdev->v4l2_lock);
+
+out:
+ if (!pdev->udev)
+ ret |= POLLHUP;
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (pdev->capt_file != file)
- return -EBUSY;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- return vb2_mmap(&pdev->vb_queue, vma);
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_mmap(&pdev->vb_queue, vma);
+
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
/***************************************************************************/
@@ -715,12 +763,14 @@ static void buffer_queue(struct vb2_buffer *vb)
struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
unsigned long flags = 0;
- spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
/* Check the device has not disconnected between prep and queuing */
- if (pdev->udev)
- list_add_tail(&buf->list, &pdev->queued_bufs);
- else
+ if (!pdev->udev) {
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &pdev->queued_bufs);
spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
}
@@ -729,11 +779,8 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
struct pwc_device *pdev = vb2_get_drv_priv(vq);
int r;
- mutex_lock(&pdev->udevlock);
- if (!pdev->udev) {
- r = -ENODEV;
- goto leave;
- }
+ if (!pdev->udev)
+ return -ENODEV;
/* Turn on camera and set LEDS on */
pwc_camera_power(pdev, 1);
@@ -747,8 +794,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* And cleanup any queued bufs!! */
pwc_cleanup_queued_bufs(pdev);
}
-leave:
- mutex_unlock(&pdev->udevlock);
+
return r;
}
@@ -756,19 +802,29 @@ static int stop_streaming(struct vb2_queue *vq)
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
- mutex_lock(&pdev->udevlock);
if (pdev->udev) {
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
pwc_isoc_cleanup(pdev);
}
- mutex_unlock(&pdev->udevlock);
pwc_cleanup_queued_bufs(pdev);
return 0;
}
+static void wait_prepare(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_unlock(&pdev->vb_queue_lock);
+}
+
+static void wait_finish(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_lock(&pdev->vb_queue_lock);
+}
+
static struct vb2_ops pwc_vb_queue_ops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
@@ -778,6 +834,8 @@ static struct vb2_ops pwc_vb_queue_ops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
+ .wait_prepare = wait_prepare,
+ .wait_finish = wait_finish,
};
/***************************************************************************/
@@ -1057,8 +1115,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->features = features;
pwc_construct(pdev); /* set min/max sizes correct */
- mutex_init(&pdev->capt_file_lock);
- mutex_init(&pdev->udevlock);
+ mutex_init(&pdev->v4l2_lock);
+ mutex_init(&pdev->vb_queue_lock);
spin_lock_init(&pdev->queued_bufs_lock);
INIT_LIST_HEAD(&pdev->queued_bufs);
@@ -1130,6 +1188,16 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
+ pdev->vdev.lock = &pdev->v4l2_lock;
+
+ /*
+ * Don't take v4l2_lock for these ioctls. This improves latency if
+ * v4l2_lock is taken for a long time, e.g. when changing a control
+ * value, and a new frame is ready to be dequeued.
+ */
+ v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_DQBUF);
+ v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_QBUF);
+ v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_QUERYBUF);
rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
if (rc < 0) {
@@ -1185,16 +1253,20 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
struct v4l2_device *v = usb_get_intfdata(intf);
struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
- mutex_lock(&pdev->udevlock);
+ mutex_lock(&pdev->v4l2_lock);
+
+ mutex_lock(&pdev->vb_queue_lock);
/* No need to keep the urbs around after disconnection */
- pwc_isoc_cleanup(pdev);
+ if (pdev->vb_queue.streaming)
+ pwc_isoc_cleanup(pdev);
pdev->udev = NULL;
- mutex_unlock(&pdev->udevlock);
-
pwc_cleanup_queued_bufs(pdev);
+ mutex_unlock(&pdev->vb_queue_lock);
+ v4l2_device_disconnect(&pdev->v4l2_dev);
video_unregister_device(&pdev->vdev);
- v4l2_device_unregister(&pdev->v4l2_dev);
+
+ mutex_unlock(&pdev->v4l2_lock);
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
if (pdev->button_dev)
@@ -1229,15 +1301,4 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("pwcx");
MODULE_VERSION( PWC_VERSION );
-static int __init usb_pwc_init(void)
-{
- return usb_register(&pwc_driver);
-}
-
-static void __exit usb_pwc_exit(void)
-{
- usb_deregister(&pwc_driver);
-}
-
-module_init(usb_pwc_init);
-module_exit(usb_pwc_exit);
+module_usb_driver(pwc_driver);
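/*
 * Minimal sketch, not part of the patch above: the lock-ordering rule the new
 * comments describe. v4l2_lock must always be taken before vb_queue_lock;
 * the read()/poll() paths already hold vb_queue_lock when they discover they
 * need v4l2_lock (to start fileio streaming), so they acquire it with
 * mutex_trylock() and back off rather than risk a deadlock, exactly as
 * pwc_video_read() does. The function name and the needs_v4l2 parameter are
 * hypothetical; struct pwc_device is assumed to match pwc.h as modified above.
 */
#include <linux/mutex.h>
#include <linux/errno.h>
#include "pwc.h"

static int pwc_both_locks_example(struct pwc_device *pdev, bool needs_v4l2)
{
	int ret = 0;

	if (mutex_lock_interruptible(&pdev->vb_queue_lock))
		return -ERESTARTSYS;

	/* Reverse-order acquisition: try, and give up instead of blocking */
	if (needs_v4l2 && !mutex_trylock(&pdev->v4l2_lock)) {
		mutex_unlock(&pdev->vb_queue_lock);
		return -ERESTARTSYS;
	}

	/* ... work that touches vb_queue (and the device when needs_v4l2) ... */

	if (needs_v4l2)
		mutex_unlock(&pdev->v4l2_lock);
	mutex_unlock(&pdev->vb_queue_lock);
	return ret;
}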
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index 2834e3e65b39..c691e29cc36e 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -464,26 +464,24 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
struct pwc_device *pdev = video_drvdata(file);
int ret, pixelformat, compression = 0;
- if (pwc_test_n_set_capt_file(pdev, file))
- return -EBUSY;
-
ret = pwc_vidioc_try_fmt(pdev, f);
if (ret < 0)
return ret;
- pixelformat = f->fmt.pix.pixelformat;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- mutex_lock(&pdev->udevlock);
- if (!pdev->udev) {
- ret = -ENODEV;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret)
goto leave;
- }
- if (pdev->iso_init) {
+ if (pdev->vb_queue.streaming) {
ret = -EBUSY;
goto leave;
}
+ pixelformat = f->fmt.pix.pixelformat;
+
PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
"format=%c%c%c%c\n",
f->fmt.pix.width, f->fmt.pix.height, pdev->vframes,
@@ -499,7 +497,7 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
leave:
- mutex_unlock(&pdev->udevlock);
+ mutex_unlock(&pdev->vb_queue_lock);
return ret;
}
@@ -507,9 +505,6 @@ static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap
{
struct pwc_device *pdev = video_drvdata(file);
- if (!pdev->udev)
- return -ENODEV;
-
strcpy(cap->driver, PWC_NAME);
strlcpy(cap->card, pdev->vdev.name, sizeof(cap->card));
usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info));
@@ -540,15 +535,12 @@ static int pwc_s_input(struct file *file, void *fh, unsigned int i)
return i ? -EINVAL : 0;
}
-static int pwc_g_volatile_ctrl_unlocked(struct v4l2_ctrl *ctrl)
+static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct pwc_device *pdev =
container_of(ctrl->handler, struct pwc_device, ctrl_handler);
int ret = 0;
- if (!pdev->udev)
- return -ENODEV;
-
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
if (pdev->color_bal_valid &&
@@ -615,18 +607,6 @@ static int pwc_g_volatile_ctrl_unlocked(struct v4l2_ctrl *ctrl)
return ret;
}
-static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct pwc_device *pdev =
- container_of(ctrl->handler, struct pwc_device, ctrl_handler);
- int ret;
-
- mutex_lock(&pdev->udevlock);
- ret = pwc_g_volatile_ctrl_unlocked(ctrl);
- mutex_unlock(&pdev->udevlock);
- return ret;
-}
-
static int pwc_set_awb(struct pwc_device *pdev)
{
int ret;
@@ -648,7 +628,7 @@ static int pwc_set_awb(struct pwc_device *pdev)
if (pdev->auto_white_balance->val == awb_indoor ||
pdev->auto_white_balance->val == awb_outdoor ||
pdev->auto_white_balance->val == awb_fl)
- pwc_g_volatile_ctrl_unlocked(pdev->auto_white_balance);
+ pwc_g_volatile_ctrl(pdev->auto_white_balance);
}
if (pdev->auto_white_balance->val != awb_manual)
return 0;
@@ -812,13 +792,6 @@ static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
container_of(ctrl->handler, struct pwc_device, ctrl_handler);
int ret = 0;
- mutex_lock(&pdev->udevlock);
-
- if (!pdev->udev) {
- ret = -ENODEV;
- goto leave;
- }
-
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
@@ -915,8 +888,6 @@ static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret)
PWC_ERROR("s_ctrl %s error %d\n", ctrl->name, ret);
-leave:
- mutex_unlock(&pdev->udevlock);
return ret;
}
@@ -949,11 +920,9 @@ static int pwc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- mutex_lock(&pdev->udevlock); /* To avoid race with s_fmt */
PWC_DEBUG_IOCTL("ioctl(VIDIOC_G_FMT) return size %dx%d\n",
pdev->width, pdev->height);
pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
- mutex_unlock(&pdev->udevlock);
return 0;
}
@@ -968,70 +937,98 @@ static int pwc_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *rb)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (pwc_test_n_set_capt_file(pdev, file))
- return -EBUSY;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- return vb2_reqbufs(&pdev->vb_queue, rb);
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_reqbufs(&pdev->vb_queue, rb);
+
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- return vb2_querybuf(&pdev->vb_queue, buf);
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
+
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_querybuf(&pdev->vb_queue, buf);
+
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pdev->capt_file != file)
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_qbuf(&pdev->vb_queue, buf);
- return vb2_qbuf(&pdev->vb_queue, buf);
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pdev->capt_file != file)
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_dqbuf(&pdev->vb_queue, buf,
+ file->f_flags & O_NONBLOCK);
- return vb2_dqbuf(&pdev->vb_queue, buf, file->f_flags & O_NONBLOCK);
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pdev->capt_file != file)
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_streamon(&pdev->vb_queue, i);
- return vb2_streamon(&pdev->vb_queue, i);
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
+ int ret;
- if (!pdev->udev)
- return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
- if (pdev->capt_file != file)
- return -EBUSY;
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret == 0)
+ ret = vb2_streamoff(&pdev->vb_queue, i);
- return vb2_streamoff(&pdev->vb_queue, i);
+ mutex_unlock(&pdev->vb_queue_lock);
+ return ret;
}
static int pwc_enum_framesizes(struct file *file, void *fh,
@@ -1119,19 +1116,17 @@ static int pwc_s_parm(struct file *file, void *fh,
parm->parm.capture.timeperframe.numerator == 0)
return -EINVAL;
- if (pwc_test_n_set_capt_file(pdev, file))
- return -EBUSY;
-
fps = parm->parm.capture.timeperframe.denominator /
parm->parm.capture.timeperframe.numerator;
- mutex_lock(&pdev->udevlock);
- if (!pdev->udev) {
- ret = -ENODEV;
+ if (mutex_lock_interruptible(&pdev->vb_queue_lock))
+ return -ERESTARTSYS;
+
+ ret = pwc_test_n_set_capt_file(pdev, file);
+ if (ret)
goto leave;
- }
- if (pdev->iso_init) {
+ if (pdev->vb_queue.streaming) {
ret = -EBUSY;
goto leave;
}
@@ -1142,7 +1137,7 @@ static int pwc_s_parm(struct file *file, void *fh,
pwc_g_parm(file, fh, parm);
leave:
- mutex_unlock(&pdev->udevlock);
+ mutex_unlock(&pdev->vb_queue_lock);
return ret;
}
@@ -1166,4 +1161,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_enum_frameintervals = pwc_enum_frameintervals,
.vidioc_g_parm = pwc_g_parm,
.vidioc_s_parm = pwc_s_parm,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index e4d4d711dd1f..d6b5b216b9d6 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -221,9 +221,17 @@ struct pwc_device
struct video_device vdev;
struct v4l2_device v4l2_dev;
- /* Pointer to our usb_device, may be NULL after unplug */
- struct usb_device *udev;
- struct mutex udevlock;
+ /* videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+
+ /* Note if taking both locks v4l2_lock must always be locked first! */
+ struct mutex v4l2_lock; /* Protects everything else */
+ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */
+
+ /* Pointer to our usb_device, will be NULL after unplug */
+ struct usb_device *udev; /* Both mutexes must be held when setting! */
/* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
int type;
@@ -232,7 +240,6 @@ struct pwc_device
/*** Video data ***/
struct file *capt_file; /* file doing video capture */
- struct mutex capt_file_lock;
int vendpoint; /* video isoc endpoint */
int vcinterface; /* video control interface */
int valternate; /* alternate interface needed */
@@ -251,12 +258,6 @@ struct pwc_device
unsigned char *ctrl_buf;
struct urb *urbs[MAX_ISO_BUFS];
- char iso_init;
-
- /* videobuf2 queue and queued buffers list */
- struct vb2_queue vb_queue;
- struct list_head queued_bufs;
- spinlock_t queued_bufs_lock;
/*
* Frame currently being filled, this only gets touched by the
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 5a413f4427e0..9c21e01f2c24 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -241,15 +241,10 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
- *size = bytes_per_line * icd->user_height;
+ *size = icd->sizeimage;
if (0 == *count)
*count = 32;
@@ -435,11 +430,6 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
int ret;
int size_y, size_u = 0, size_v = 0;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
-
- if (bytes_per_line < 0)
- return bytes_per_line;
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -474,7 +464,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = bytes_per_line * vb->height;
+ vb->size = icd->sizeimage;
if (0 != vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
goto out;
@@ -1244,6 +1234,7 @@ static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
},
};
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 4894cbb1c547..01c2179f0520 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -634,13 +634,11 @@ static void s2255_fillbuff(struct s2255_channel *channel,
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
- struct s2255_framei *frm;
if (!vbuf)
return;
last_frame = channel->last_frame;
if (last_frame != -1) {
- frm = &channel->buffer.frame[last_frame];
tmpbuf =
(const char *)channel->buffer.frame[last_frame].lpvbits;
switch (buf->fmt->fourcc) {
@@ -987,7 +985,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct videobuf_queue *q = &fh->vb_vidq;
struct s2255_mode mode;
int ret;
- int norm;
ret = vidioc_try_fmt_vid_cap(file, fh, f);
@@ -1018,7 +1015,6 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
channel->height = f->fmt.pix.height;
fh->vb_vidq.field = f->fmt.pix.field;
fh->type = f->type;
- norm = norm_minw(&channel->vdev);
if (channel->width > norm_minw(&channel->vdev)) {
if (channel->height > norm_minh(&channel->vdev)) {
if (channel->cap_parm.capturemode &
@@ -1826,8 +1822,7 @@ static void s2255_destroy(struct s2255_dev *dev)
usb_free_urb(dev->fw_data->fw_urb);
dev->fw_data->fw_urb = NULL;
}
- if (dev->fw_data->fw)
- release_firmware(dev->fw_data->fw);
+ release_firmware(dev->fw_data->fw);
kfree(dev->fw_data->pfw_data);
kfree(dev->fw_data);
/* reset the DSP so firmware can be reloaded next time */
@@ -1949,6 +1944,10 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
/* register 4 video devices */
channel->vdev = template;
channel->vdev.lock = &dev->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &channel->vdev.flags);
channel->vdev.v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(&channel->vdev, channel);
if (video_nr == -1)
diff --git a/drivers/media/video/s5p-fimc/Kconfig b/drivers/media/video/s5p-fimc/Kconfig
new file mode 100644
index 000000000000..a564f7eeb064
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/Kconfig
@@ -0,0 +1,48 @@
+
+config VIDEO_SAMSUNG_S5P_FIMC
+ bool "Samsung S5P/EXYNOS SoC camera interface driver (experimental)"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && PLAT_S5P && PM_RUNTIME
+ depends on EXPERIMENTAL
+ help
+ Say Y here to enable camera host interface devices for
+ Samsung S5P and EXYNOS SoC series.
+
+if VIDEO_SAMSUNG_S5P_FIMC
+
+config VIDEO_S5P_FIMC
+ tristate "S5P/EXYNOS4 FIMC/CAMIF camera interface driver"
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC camera host
+ interface and video postprocessor (FIMC and FIMC-LITE) devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s5p-fimc.
+
+config VIDEO_S5P_MIPI_CSIS
+ tristate "S5P/EXYNOS MIPI-CSI2 receiver (MIPI-CSIS) driver"
+ depends on REGULATOR
+ help
+ This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC MIPI-CSI2
+ receiver (MIPI-CSIS) devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s5p-csis.
+
+if ARCH_EXYNOS
+
+config VIDEO_EXYNOS_FIMC_LITE
+ tristate "EXYNOS FIMC-LITE camera interface driver"
+ depends on I2C
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ This is a V4L2 driver for Samsung EXYNOS4/5 SoC FIMC-LITE camera
+ host interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exynos-fimc-lite.
+endif
+
+endif # VIDEO_SAMSUNG_S5P_FIMC
diff --git a/drivers/media/video/s5p-fimc/Makefile b/drivers/media/video/s5p-fimc/Makefile
index 33dec7f890e7..46485143e1ca 100644
--- a/drivers/media/video/s5p-fimc/Makefile
+++ b/drivers/media/video/s5p-fimc/Makefile
@@ -1,5 +1,7 @@
-s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-capture.o fimc-mdevice.o
+s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-m2m.o fimc-capture.o fimc-mdevice.o
+exynos-fimc-lite-objs += fimc-lite-reg.o fimc-lite.o
s5p-csis-objs := mipi-csis.o
obj-$(CONFIG_VIDEO_S5P_MIPI_CSIS) += s5p-csis.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc.o
+obj-$(CONFIG_VIDEO_EXYNOS_FIMC_LITE) += exynos-fimc-lite.o
+obj-$(CONFIG_VIDEO_S5P_FIMC) += s5p-fimc.o
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 7e9b2c612b03..725812aa0c30 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -1,8 +1,8 @@
/*
* Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
*
- * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
- * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -29,20 +29,22 @@
#include "fimc-mdevice.h"
#include "fimc-core.h"
+#include "fimc-reg.h"
-static int fimc_init_capture(struct fimc_dev *fimc)
+static int fimc_capture_hw_init(struct fimc_dev *fimc)
{
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
+ struct fimc_pipeline *p = &fimc->pipeline;
struct fimc_sensor_info *sensor;
unsigned long flags;
int ret = 0;
- if (fimc->pipeline.sensor == NULL || ctx == NULL)
+ if (p->subdevs[IDX_SENSOR] == NULL || ctx == NULL)
return -ENXIO;
if (ctx->s_frame.fmt == NULL)
return -EINVAL;
- sensor = v4l2_get_subdev_hostdata(fimc->pipeline.sensor);
+ sensor = v4l2_get_subdev_hostdata(p->subdevs[IDX_SENSOR]);
spin_lock_irqsave(&fimc->slock, flags);
fimc_prepare_dma_offset(ctx, &ctx->d_frame);
@@ -60,7 +62,7 @@ static int fimc_init_capture(struct fimc_dev *fimc)
fimc_hw_set_mainscaler(ctx);
fimc_hw_set_target_format(ctx);
fimc_hw_set_rotation(ctx);
- fimc_hw_set_effect(ctx, false);
+ fimc_hw_set_effect(ctx);
fimc_hw_set_output_path(ctx);
fimc_hw_set_out_dma(ctx);
if (fimc->variant->has_alpha)
@@ -71,6 +73,14 @@ static int fimc_init_capture(struct fimc_dev *fimc)
return ret;
}
+/*
+ * Reinitialize the driver so it is ready to start the streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is being resumed. Also perform FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
{
struct fimc_vid_cap *cap = &fimc->vid_cap;
@@ -83,7 +93,9 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_SHUT |
1 << ST_CAPT_STREAM | 1 << ST_CAPT_ISP_STREAM);
- if (!suspend)
+ if (suspend)
+ fimc->state |= (1 << ST_CAPT_SUSPENDED);
+ else
fimc->state &= ~(1 << ST_CAPT_PEND | 1 << ST_CAPT_SUSPENDED);
/* Release unused buffers */
@@ -99,7 +111,6 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
else
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
- set_bit(ST_CAPT_SUSPENDED, &fimc->state);
fimc_hw_reset(fimc);
cap->buf_index = 0;
@@ -107,7 +118,7 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
spin_unlock_irqrestore(&fimc->slock, flags);
if (streaming)
- return fimc_pipeline_s_stream(fimc, 0);
+ return fimc_pipeline_s_stream(&fimc->pipeline, 0);
else
return 0;
}
@@ -138,32 +149,96 @@ static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend)
* spinlock held. It updates the camera pixel crop, rotation and
* image flip in H/W.
*/
-int fimc_capture_config_update(struct fimc_ctx *ctx)
+static int fimc_capture_config_update(struct fimc_ctx *ctx)
{
struct fimc_dev *fimc = ctx->fimc_dev;
int ret;
- if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
- return 0;
-
- spin_lock(&ctx->slock);
fimc_hw_set_camera_offset(fimc, &ctx->s_frame);
+
ret = fimc_set_scaler_info(ctx);
- if (ret == 0) {
- fimc_hw_set_prescaler(ctx);
- fimc_hw_set_mainscaler(ctx);
- fimc_hw_set_target_format(ctx);
- fimc_hw_set_rotation(ctx);
- fimc_prepare_dma_offset(ctx, &ctx->d_frame);
- fimc_hw_set_out_dma(ctx);
- if (fimc->variant->has_alpha)
- fimc_hw_set_rgb_alpha(ctx);
- clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
- }
- spin_unlock(&ctx->slock);
+ if (ret)
+ return ret;
+
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_prepare_dma_offset(ctx, &ctx->d_frame);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->variant->has_alpha)
+ fimc_hw_set_rgb_alpha(ctx);
+
+ clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
return ret;
}
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf)
+{
+ struct fimc_vid_cap *cap = &fimc->vid_cap;
+ struct fimc_vid_buffer *v_buf;
+ struct timeval *tv;
+ struct timespec ts;
+
+ if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto done;
+ }
+
+ if (!list_empty(&cap->active_buf_q) &&
+ test_bit(ST_CAPT_RUN, &fimc->state) && deq_buf) {
+ ktime_get_real_ts(&ts);
+
+ v_buf = fimc_active_queue_pop(cap);
+
+ tv = &v_buf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
+
+ vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
+ }
+
+ if (!list_empty(&cap->pending_buf_q)) {
+
+ v_buf = fimc_pending_queue_pop(cap);
+ fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
+ v_buf->index = cap->buf_index;
+
+ /* Move the buffer to the capture active queue */
+ fimc_active_queue_add(cap, v_buf);
+
+ dbg("next frame: %d, done frame: %d",
+ fimc_hw_get_frame_index(fimc), v_buf->index);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ }
+
+ if (cap->active_buf_cnt == 0) {
+ if (deq_buf)
+ clear_bit(ST_CAPT_RUN, &fimc->state);
+
+ if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
+ cap->buf_index = 0;
+ } else {
+ set_bit(ST_CAPT_RUN, &fimc->state);
+ }
+
+ if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+ fimc_capture_config_update(cap->ctx);
+done:
+ if (cap->active_buf_cnt == 1) {
+ fimc_deactivate_capture(fimc);
+ clear_bit(ST_CAPT_STREAM, &fimc->state);
+ }
+
+ dbg("frame: %d, active_buf_cnt: %d",
+ fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
+}
+
+
static int start_streaming(struct vb2_queue *q, unsigned int count)
{
struct fimc_ctx *ctx = q->drv_priv;
@@ -174,9 +249,11 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
vid_cap->frame_count = 0;
- ret = fimc_init_capture(fimc);
- if (ret)
- goto error;
+ ret = fimc_capture_hw_init(fimc);
+ if (ret) {
+ fimc_capture_state_cleanup(fimc, false);
+ return ret;
+ }
set_bit(ST_CAPT_PEND, &fimc->state);
@@ -187,13 +264,10 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
fimc_activate_capture(ctx);
if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
- fimc_pipeline_s_stream(fimc, 1);
+ fimc_pipeline_s_stream(&fimc->pipeline, 1);
}
return 0;
-error:
- fimc_capture_state_cleanup(fimc, false);
- return ret;
}
static int stop_streaming(struct vb2_queue *q)
@@ -214,7 +288,7 @@ int fimc_capture_suspend(struct fimc_dev *fimc)
int ret = fimc_stop_capture(fimc, suspend);
if (ret)
return ret;
- return fimc_pipeline_shutdown(fimc);
+ return fimc_pipeline_shutdown(&fimc->pipeline);
}
static void buffer_queue(struct vb2_buffer *vb);
@@ -230,9 +304,9 @@ int fimc_capture_resume(struct fimc_dev *fimc)
INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
vid_cap->buf_index = 0;
- fimc_pipeline_initialize(fimc, &fimc->vid_cap.vfd->entity,
+ fimc_pipeline_initialize(&fimc->pipeline, &vid_cap->vfd->entity,
false);
- fimc_init_capture(fimc);
+ fimc_capture_hw_init(fimc);
clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
@@ -276,7 +350,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
if (pixm)
sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
else
- sizes[i] = size;
+ sizes[i] = max_t(u32, size, frame->payload[i]);
+
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
@@ -347,7 +422,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&fimc->slock, flags);
if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state))
- fimc_pipeline_s_stream(fimc, 1);
+ fimc_pipeline_s_stream(&fimc->pipeline, 1);
return;
}
spin_unlock_irqrestore(&fimc->slock, flags);
@@ -389,15 +464,15 @@ int fimc_capture_ctrls_create(struct fimc_dev *fimc)
if (WARN_ON(vid_cap->ctx == NULL))
return -ENXIO;
- if (vid_cap->ctx->ctrls_rdy)
+ if (vid_cap->ctx->ctrls.ready)
return 0;
ret = fimc_ctrls_create(vid_cap->ctx);
- if (ret || vid_cap->user_subdev_api)
+ if (ret || vid_cap->user_subdev_api || !vid_cap->ctx->ctrls.ready)
return ret;
- return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrl_handler,
- fimc->pipeline.sensor->ctrl_handler);
+ return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrls.handler,
+ fimc->pipeline.subdevs[IDX_SENSOR]->ctrl_handler);
}
static int fimc_capture_set_default_format(struct fimc_dev *fimc);
@@ -405,37 +480,39 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc);
static int fimc_capture_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
- int ret = v4l2_fh_open(file);
-
- if (ret)
- return ret;
+ int ret;
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
- /* Return if the corresponding video mem2mem node is already opened. */
if (fimc_m2m_active(fimc))
return -EBUSY;
set_bit(ST_CAPT_BUSY, &fimc->state);
- pm_runtime_get_sync(&fimc->pdev->dev);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ return ret;
- if (++fimc->vid_cap.refcnt == 1) {
- ret = fimc_pipeline_initialize(fimc,
- &fimc->vid_cap.vfd->entity, true);
- if (ret < 0) {
- dev_err(&fimc->pdev->dev,
- "Video pipeline initialization failed\n");
- pm_runtime_put_sync(&fimc->pdev->dev);
- fimc->vid_cap.refcnt--;
- v4l2_fh_release(file);
- clear_bit(ST_CAPT_BUSY, &fimc->state);
- return ret;
- }
- ret = fimc_capture_ctrls_create(fimc);
+ ret = v4l2_fh_open(file);
+ if (ret)
+ return ret;
+
+ if (++fimc->vid_cap.refcnt != 1)
+ return 0;
- if (!ret && !fimc->vid_cap.user_subdev_api)
- ret = fimc_capture_set_default_format(fimc);
+ ret = fimc_pipeline_initialize(&fimc->pipeline,
+ &fimc->vid_cap.vfd->entity, true);
+ if (ret < 0) {
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ fimc->vid_cap.refcnt--;
+ v4l2_fh_release(file);
+ return ret;
}
+ ret = fimc_capture_ctrls_create(fimc);
+
+ if (!ret && !fimc->vid_cap.user_subdev_api)
+ ret = fimc_capture_set_default_format(fimc);
+
return ret;
}
@@ -448,7 +525,7 @@ static int fimc_capture_close(struct file *file)
if (--fimc->vid_cap.refcnt == 0) {
clear_bit(ST_CAPT_BUSY, &fimc->state);
fimc_stop_capture(fimc, false);
- fimc_pipeline_shutdown(fimc);
+ fimc_pipeline_shutdown(&fimc->pipeline);
clear_bit(ST_CAPT_SUSPENDED, &fimc->state);
}
@@ -495,7 +572,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
{
bool rotation = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct samsung_fimc_variant *var = fimc->variant;
+ struct fimc_variant *var = fimc->variant;
struct fimc_pix_limit *pl = var->pix_limit;
struct fimc_frame *dst = &ctx->d_frame;
u32 depth, min_w, max_w, min_h, align_h = 3;
@@ -537,8 +614,13 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
}
/* Apply the scaler and the output DMA constraints */
max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
- min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
- min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
+ if (ctx->state & FIMC_COMPOSE) {
+ min_w = dst->offs_h + dst->width;
+ min_h = dst->offs_v + dst->height;
+ } else {
+ min_w = var->min_out_pixsize;
+ min_h = var->min_out_pixsize;
+ }
if (var->min_vsize_align == 1 && !rotation)
align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
@@ -556,12 +638,13 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
return ffmt;
}
-static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
- int pad)
+static void fimc_capture_try_selection(struct fimc_ctx *ctx,
+ struct v4l2_rect *r,
+ int target)
{
bool rotate = ctx->rotation == 90 || ctx->rotation == 270;
struct fimc_dev *fimc = ctx->fimc_dev;
- struct samsung_fimc_variant *var = fimc->variant;
+ struct fimc_variant *var = fimc->variant;
struct fimc_pix_limit *pl = var->pix_limit;
struct fimc_frame *sink = &ctx->s_frame;
u32 max_w, max_h, min_w = 0, min_h = 0, min_sz;
@@ -575,7 +658,7 @@ static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
r->left = r->top = 0;
return;
}
- if (pad == FIMC_SD_PAD_SOURCE) {
+ if (target == V4L2_SEL_TGT_COMPOSE_ACTIVE) {
if (ctx->rotation != 90 && ctx->rotation != 270)
align_h = 1;
max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3));
@@ -589,8 +672,7 @@ static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
max_sc_h = max_sc_v = 1;
}
/*
- * For the crop rectangle at source pad the following constraints
- * must be met:
+ * For the compose rectangle the following constraints must be met:
* - it must fit in the sink pad format rectangle (f_width/f_height);
* - maximum downscaling ratio is 64;
* - maximum crop size depends if the rotator is used or not;
@@ -602,7 +684,8 @@ static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
rotate ? pl->out_rot_en_w : pl->out_rot_dis_w,
rotate ? sink->f_height : sink->f_width);
max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height);
- if (pad == FIMC_SD_PAD_SOURCE) {
+
+ if (target == V4L2_SEL_TGT_COMPOSE_ACTIVE) {
min_w = min_t(u32, max_w, sink->f_width / max_sc_h);
min_h = min_t(u32, max_h, sink->f_height / max_sc_v);
if (rotate) {
@@ -613,13 +696,13 @@ static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r,
v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1,
&r->height, min_h, max_h, align_h,
align_sz);
- /* Adjust left/top if cropping rectangle is out of bounds */
+ /* Adjust left/top if crop/compose rectangle is out of bounds */
r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width);
r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height);
r->left = round_down(r->left, var->hor_offs_align);
- dbg("pad%d: (%d,%d)/%dx%d, sink fmt: %dx%d",
- pad, r->left, r->top, r->width, r->height,
+ dbg("target %#x: (%d,%d)/%dx%d, sink fmt: %dx%d",
+ target, r->left, r->top, r->width, r->height,
sink->f_width, sink->f_height);
}
@@ -669,8 +752,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
bool set)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_subdev *sd = fimc->pipeline.sensor;
- struct v4l2_subdev *csis = fimc->pipeline.csis;
+ struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
+ struct v4l2_subdev *csis = fimc->pipeline.subdevs[IDX_CSIS];
struct v4l2_subdev_format sfmt;
struct v4l2_mbus_framefmt *mf = &sfmt.format;
struct fimc_fmt *ffmt = NULL;
@@ -738,9 +821,6 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
-
return fimc_fill_format(&ctx->d_frame, f);
}
@@ -753,9 +833,6 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_mbus_framefmt mf;
struct fimc_fmt *ffmt = NULL;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
-
if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
fimc_capture_try_format(ctx, &pix->width, &pix->height,
NULL, &pix->pixelformat,
@@ -807,8 +884,6 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
struct fimc_fmt *s_fmt = NULL;
int ret, i;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
if (vb2_is_busy(&fimc->vid_cap.vbq))
return -EBUSY;
@@ -844,14 +919,14 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
pix->width = mf->width;
pix->height = mf->height;
}
+
fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
for (i = 0; i < ff->fmt->colplanes; i++)
- ff->payload[i] =
- (pix->width * pix->height * ff->fmt->depth[i]) / 8;
+ ff->payload[i] = pix->plane_fmt[i].sizeimage;
set_frame_bounds(ff, pix->width, pix->height);
/* Reset the composition rectangle if not yet configured */
- if (!(ctx->state & FIMC_DST_CROP))
+ if (!(ctx->state & FIMC_COMPOSE))
set_frame_crop(ff, 0, 0, pix->width, pix->height);
fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ff->fmt->color));
@@ -878,7 +953,7 @@ static int fimc_cap_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
struct fimc_dev *fimc = video_drvdata(file);
- struct v4l2_subdev *sd = fimc->pipeline.sensor;
+ struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
if (i->index != 0)
return -EINVAL;
@@ -927,7 +1002,7 @@ static int fimc_pipeline_validate(struct fimc_dev *fimc)
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
/* Don't call FIMC subdev operation to avoid nested locking */
- if (sd == fimc->vid_cap.subdev) {
+ if (sd == &fimc->vid_cap.subdev) {
struct fimc_frame *ff = &vid_cap->ctx->s_frame;
sink_fmt.format.width = ff->f_width;
sink_fmt.format.height = ff->f_height;
@@ -965,17 +1040,22 @@ static int fimc_cap_streamon(struct file *file, void *priv,
{
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_pipeline *p = &fimc->pipeline;
+ struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
int ret;
if (fimc_capture_active(fimc))
return -EBUSY;
- media_entity_pipeline_start(&p->sensor->entity, p->pipe);
+ ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline);
+ if (ret < 0)
+ return ret;
if (fimc->vid_cap.user_subdev_api) {
ret = fimc_pipeline_validate(fimc);
- if (ret)
+ if (ret < 0) {
+ media_entity_pipeline_stop(&sd->entity);
return ret;
+ }
}
return vb2_streamon(&fimc->vid_cap.vbq, type);
}
@@ -984,7 +1064,7 @@ static int fimc_cap_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct fimc_dev *fimc = video_drvdata(file);
- struct v4l2_subdev *sd = fimc->pipeline.sensor;
+ struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
int ret;
ret = vb2_streamoff(&fimc->vid_cap.vbq, type);
@@ -1100,29 +1180,18 @@ static int fimc_cap_s_selection(struct file *file, void *fh,
struct v4l2_rect rect = s->r;
struct fimc_frame *f;
unsigned long flags;
- unsigned int pad;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- switch (s->target) {
- case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ if (s->target == V4L2_SEL_TGT_COMPOSE_ACTIVE)
f = &ctx->d_frame;
- pad = FIMC_SD_PAD_SOURCE;
- break;
- case V4L2_SEL_TGT_CROP_BOUNDS:
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_ACTIVE:
+ else if (s->target == V4L2_SEL_TGT_CROP_ACTIVE)
f = &ctx->s_frame;
- pad = FIMC_SD_PAD_SINK;
- break;
- default:
+ else
return -EINVAL;
- }
- fimc_capture_try_crop(ctx, &rect, pad);
+ fimc_capture_try_selection(ctx, &rect, s->target);
if (s->flags & V4L2_SEL_FLAG_LE &&
!enclosed_rectangle(&rect, &s->r))
@@ -1243,7 +1312,7 @@ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
struct fimc_vid_buffer, list);
vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg));
}
- fimc_capture_irq_handler(fimc, true);
+ fimc_capture_irq_handler(fimc, 1);
fimc_deactivate_capture(fimc);
spin_unlock_irqrestore(&fimc->slock, irq_flags);
}
@@ -1334,77 +1403,122 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
ff->fmt = ffmt;
/* Reset the crop rectangle if required. */
- if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_DST_CROP)))
+ if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_COMPOSE)))
set_frame_crop(ff, 0, 0, mf->width, mf->height);
if (fmt->pad == FIMC_SD_PAD_SINK)
- ctx->state &= ~FIMC_DST_CROP;
+ ctx->state &= ~FIMC_COMPOSE;
mutex_unlock(&fimc->lock);
return 0;
}
-static int fimc_subdev_get_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- struct v4l2_rect *r = &crop->rect;
- struct fimc_frame *ff;
+ struct fimc_frame *f = &ctx->s_frame;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
+
+ if (sel->pad != FIMC_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
- if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
- crop->rect = *v4l2_subdev_get_try_crop(fh, crop->pad);
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ f = &ctx->d_frame;
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ r->width = f->o_width;
+ r->height = f->o_height;
+ r->left = 0;
+ r->top = 0;
+ mutex_unlock(&fimc->lock);
return 0;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+ break;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+ f = &ctx->d_frame;
+ break;
+ default:
+ mutex_unlock(&fimc->lock);
+ return -EINVAL;
}
- ff = crop->pad == FIMC_SD_PAD_SINK ?
- &ctx->s_frame : &ctx->d_frame;
- mutex_lock(&fimc->lock);
- r->left = ff->offs_h;
- r->top = ff->offs_v;
- r->width = ff->width;
- r->height = ff->height;
- mutex_unlock(&fimc->lock);
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *try_sel;
+ } else {
+ r->left = f->offs_h;
+ r->top = f->offs_v;
+ r->width = f->width;
+ r->height = f->height;
+ }
- dbg("ff:%p, pad%d: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
- ff, crop->pad, r->left, r->top, r->width, r->height,
- ff->f_width, ff->f_height);
+ dbg("target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+ sel->pad, r->left, r->top, r->width, r->height,
+ f->f_width, f->f_height);
+ mutex_unlock(&fimc->lock);
return 0;
}
-static int fimc_subdev_set_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_fh *fh,
- struct v4l2_subdev_crop *crop)
+static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
{
struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- struct v4l2_rect *r = &crop->rect;
- struct fimc_frame *ff;
+ struct fimc_frame *f = &ctx->s_frame;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
unsigned long flags;
- dbg("(%d,%d)/%dx%d", r->left, r->top, r->width, r->height);
-
- ff = crop->pad == FIMC_SD_PAD_SOURCE ?
- &ctx->d_frame : &ctx->s_frame;
+ if (sel->pad != FIMC_SD_PAD_SINK)
+ return -EINVAL;
mutex_lock(&fimc->lock);
- fimc_capture_try_crop(ctx, r, crop->pad);
+ fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP_ACTIVE);
- if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ f = &ctx->d_frame;
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ r->width = f->o_width;
+ r->height = f->o_height;
+ r->left = 0;
+ r->top = 0;
mutex_unlock(&fimc->lock);
- *v4l2_subdev_get_try_crop(fh, crop->pad) = *r;
return 0;
+
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
+ break;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
+ f = &ctx->d_frame;
+ break;
+ default:
+ mutex_unlock(&fimc->lock);
+ return -EINVAL;
}
- spin_lock_irqsave(&fimc->slock, flags);
- set_frame_crop(ff, r->left, r->top, r->width, r->height);
- if (crop->pad == FIMC_SD_PAD_SOURCE)
- ctx->state |= FIMC_DST_CROP;
- set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
- spin_unlock_irqrestore(&fimc->slock, flags);
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *try_sel = sel->r;
+ } else {
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_frame_crop(f, r->left, r->top, r->width, r->height);
+ set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ if (sel->target == V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL)
+ ctx->state |= FIMC_COMPOSE;
+ }
- dbg("pad%d: (%d,%d)/%dx%d", crop->pad, r->left, r->top,
+ dbg("target %#x: (%d,%d)/%dx%d", sel->target, r->left, r->top,
r->width, r->height);
mutex_unlock(&fimc->lock);
@@ -1413,63 +1527,16 @@ static int fimc_subdev_set_crop(struct v4l2_subdev *sd,
static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = {
.enum_mbus_code = fimc_subdev_enum_mbus_code,
+ .get_selection = fimc_subdev_get_selection,
+ .set_selection = fimc_subdev_set_selection,
.get_fmt = fimc_subdev_get_fmt,
.set_fmt = fimc_subdev_set_fmt,
- .get_crop = fimc_subdev_get_crop,
- .set_crop = fimc_subdev_set_crop,
};
static struct v4l2_subdev_ops fimc_subdev_ops = {
.pad = &fimc_subdev_pad_ops,
};
-static int fimc_create_capture_subdev(struct fimc_dev *fimc,
- struct v4l2_device *v4l2_dev)
-{
- struct v4l2_subdev *sd;
- int ret;
-
- sd = kzalloc(sizeof(*sd), GFP_KERNEL);
- if (!sd)
- return -ENOMEM;
-
- v4l2_subdev_init(sd, &fimc_subdev_ops);
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
- snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->pdev->id);
-
- fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
- fimc->vid_cap.sd_pads, 0);
- if (ret)
- goto me_err;
- ret = v4l2_device_register_subdev(v4l2_dev, sd);
- if (ret)
- goto sd_err;
-
- fimc->vid_cap.subdev = sd;
- v4l2_set_subdevdata(sd, fimc);
- sd->entity.ops = &fimc_sd_media_ops;
- return 0;
-sd_err:
- media_entity_cleanup(&sd->entity);
-me_err:
- kfree(sd);
- return ret;
-}
-
-static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
-{
- struct v4l2_subdev *sd = fimc->vid_cap.subdev;
-
- if (!sd)
- return;
- media_entity_cleanup(&sd->entity);
- v4l2_device_unregister_subdev(sd);
- kfree(sd);
- fimc->vid_cap.subdev = NULL;
-}
-
/* Set default format at the sensor and host interface */
static int fimc_capture_set_default_format(struct fimc_dev *fimc)
{
@@ -1488,7 +1555,7 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc)
}
/* fimc->lock must be already initialized */
-int fimc_register_capture_device(struct fimc_dev *fimc,
+static int fimc_register_capture_device(struct fimc_dev *fimc,
struct v4l2_device *v4l2_dev)
{
struct video_device *vfd;
@@ -1502,11 +1569,11 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
return -ENOMEM;
ctx->fimc_dev = fimc;
- ctx->in_path = FIMC_CAMERA;
- ctx->out_path = FIMC_DMA;
+ ctx->in_path = FIMC_IO_CAMERA;
+ ctx->out_path = FIMC_IO_DMA;
ctx->state = FIMC_CTX_CAP;
ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
- ctx->d_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0);
+ ctx->d_frame.fmt = ctx->s_frame.fmt;
vfd = video_device_alloc();
if (!vfd) {
@@ -1514,8 +1581,7 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
goto err_vd_alloc;
}
- snprintf(vfd->name, sizeof(vfd->name), "%s.capture",
- dev_name(&fimc->pdev->dev));
+ snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.capture", fimc->id);
vfd->fops = &fimc_capture_fops;
vfd->ioctl_ops = &fimc_capture_ioctl_ops;
@@ -1523,6 +1589,10 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
vfd->minor = -1;
vfd->release = video_device_release;
vfd->lock = &fimc->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
video_set_drvdata(vfd, fimc);
vid_cap = &fimc->vid_cap;
@@ -1533,7 +1603,6 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
INIT_LIST_HEAD(&vid_cap->pending_buf_q);
INIT_LIST_HEAD(&vid_cap->active_buf_q);
- spin_lock_init(&ctx->slock);
vid_cap->ctx = ctx;
q = &fimc->vid_cap.vbq;
@@ -1547,18 +1616,22 @@ int fimc_register_capture_device(struct fimc_dev *fimc,
vb2_queue_init(q);
- fimc->vid_cap.vd_pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_init(&vfd->entity, 1, &fimc->vid_cap.vd_pad, 0);
+ vid_cap->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &vid_cap->vd_pad, 0);
if (ret)
goto err_ent;
- ret = fimc_create_capture_subdev(fimc, v4l2_dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret)
- goto err_sd_reg;
+ goto err_vd;
- vfd->ctrl_handler = &ctx->ctrl_handler;
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+
+ vfd->ctrl_handler = &ctx->ctrls.handler;
return 0;
-err_sd_reg:
+err_vd:
media_entity_cleanup(&vfd->entity);
err_ent:
video_device_release(vfd);
@@ -1567,17 +1640,73 @@ err_vd_alloc:
return ret;
}
-void fimc_unregister_capture_device(struct fimc_dev *fimc)
+static int fimc_capture_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = fimc_register_m2m_device(fimc, sd->v4l2_dev);
+ if (ret)
+ return ret;
+
+ ret = fimc_register_capture_device(fimc, sd->v4l2_dev);
+ if (ret)
+ fimc_unregister_m2m_device(fimc);
+
+ return ret;
+}
+
+static void fimc_capture_subdev_unregistered(struct v4l2_subdev *sd)
{
- struct video_device *vfd = fimc->vid_cap.vfd;
+ struct fimc_dev *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc == NULL)
+ return;
- if (vfd) {
- media_entity_cleanup(&vfd->entity);
- /* Can also be called if video device was
- not registered */
- video_unregister_device(vfd);
+ fimc_unregister_m2m_device(fimc);
+
+ if (fimc->vid_cap.vfd) {
+ media_entity_cleanup(&fimc->vid_cap.vfd->entity);
+ video_unregister_device(fimc->vid_cap.vfd);
+ fimc->vid_cap.vfd = NULL;
}
- fimc_destroy_capture_subdev(fimc);
+
kfree(fimc->vid_cap.ctx);
fimc->vid_cap.ctx = NULL;
}
+
+static const struct v4l2_subdev_internal_ops fimc_capture_sd_internal_ops = {
+ .registered = fimc_capture_subdev_registered,
+ .unregistered = fimc_capture_subdev_unregistered,
+};
+
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &fimc_subdev_ops);
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->pdev->id);
+
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM,
+ fimc->vid_cap.sd_pads, 0);
+ if (ret)
+ return ret;
+
+ sd->entity.ops = &fimc_sd_media_ops;
+ sd->internal_ops = &fimc_capture_sd_internal_ops;
+ v4l2_set_subdevdata(sd, fimc);
+ return 0;
+}
+
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_set_subdevdata(sd, NULL);
+}
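/*
 * Minimal sketch, assumption only (not shown in this diff): with
 * fimc_capture_sd_internal_ops installed by fimc_initialize_capture_subdev(),
 * it is v4l2_device_register_subdev() that invokes the .registered() hook,
 * which in turn creates the m2m and capture video nodes. The caller below is
 * hypothetical and would sit in the media-device registration path.
 */
static int fimc_attach_to_v4l2_dev(struct fimc_dev *fimc,
				   struct v4l2_device *v4l2_dev)
{
	int ret = fimc_initialize_capture_subdev(fimc);

	if (ret)
		return ret;

	/* Calls fimc_capture_subdev_registered() on success */
	return v4l2_device_register_subdev(v4l2_dev, &fimc->vid_cap.subdev);
}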
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index e09ba7b0076e..a4646ca1d56f 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1,8 +1,8 @@
/*
- * Samsung S5P/EXYNOS4 SoC series camera interface (video postprocessor) driver
+ * Samsung S5P/EXYNOS4 SoC series FIMC (CAMIF) driver
*
- * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
- * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010-2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -28,6 +28,7 @@
#include <media/videobuf2-dma-contig.h>
#include "fimc-core.h"
+#include "fimc-reg.h"
#include "fimc-mdevice.h"
static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
@@ -39,7 +40,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "RGB565",
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = { 16 },
- .color = S5P_FIMC_RGB565,
+ .color = FIMC_FMT_RGB565,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M,
@@ -47,7 +48,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "BGR666",
.fourcc = V4L2_PIX_FMT_BGR666,
.depth = { 32 },
- .color = S5P_FIMC_RGB666,
+ .color = FIMC_FMT_RGB666,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M,
@@ -55,7 +56,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "ARGB8888, 32 bpp",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = { 32 },
- .color = S5P_FIMC_RGB888,
+ .color = FIMC_FMT_RGB888,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M | FMT_HAS_ALPHA,
@@ -63,7 +64,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "ARGB1555",
.fourcc = V4L2_PIX_FMT_RGB555,
.depth = { 16 },
- .color = S5P_FIMC_RGB555,
+ .color = FIMC_FMT_RGB555,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
@@ -71,7 +72,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "ARGB4444",
.fourcc = V4L2_PIX_FMT_RGB444,
.depth = { 16 },
- .color = S5P_FIMC_RGB444,
+ .color = FIMC_FMT_RGB444,
.memplanes = 1,
.colplanes = 1,
.flags = FMT_FLAGS_M2M_OUT | FMT_HAS_ALPHA,
@@ -79,7 +80,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 packed, YCbYCr",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = { 16 },
- .color = S5P_FIMC_YCBYCR422,
+ .color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
.colplanes = 1,
.mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
@@ -88,7 +89,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 packed, CbYCrY",
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = { 16 },
- .color = S5P_FIMC_CBYCRY422,
+ .color = FIMC_FMT_CBYCRY422,
.memplanes = 1,
.colplanes = 1,
.mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
@@ -97,7 +98,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 packed, CrYCbY",
.fourcc = V4L2_PIX_FMT_VYUY,
.depth = { 16 },
- .color = S5P_FIMC_CRYCBY422,
+ .color = FIMC_FMT_CRYCBY422,
.memplanes = 1,
.colplanes = 1,
.mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
@@ -106,7 +107,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 packed, YCrYCb",
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = { 16 },
- .color = S5P_FIMC_YCRYCB422,
+ .color = FIMC_FMT_YCRYCB422,
.memplanes = 1,
.colplanes = 1,
.mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
@@ -115,7 +116,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = { 12 },
- .color = S5P_FIMC_YCBYCR422,
+ .color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
@@ -123,7 +124,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV16,
.depth = { 16 },
- .color = S5P_FIMC_YCBYCR422,
+ .color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
@@ -131,7 +132,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:2 planar, Y/CrCb",
.fourcc = V4L2_PIX_FMT_NV61,
.depth = { 16 },
- .color = S5P_FIMC_YCRYCB422,
+ .color = FIMC_FMT_YCRYCB422,
.memplanes = 1,
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
@@ -139,7 +140,7 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:0 planar, YCbCr",
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = { 12 },
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.memplanes = 1,
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
@@ -147,30 +148,30 @@ static struct fimc_fmt fimc_formats[] = {
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
.depth = { 12 },
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.memplanes = 1,
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12M,
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
.memplanes = 2,
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV420M,
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.depth = { 8, 2, 2 },
.memplanes = 3,
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+ .name = "YUV 4:2:0 non-contig. 2p, tiled",
.fourcc = V4L2_PIX_FMT_NV12MT,
- .color = S5P_FIMC_YCBCR420,
+ .color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
.memplanes = 2,
.colplanes = 2,
@@ -178,7 +179,7 @@ static struct fimc_fmt fimc_formats[] = {
}, {
.name = "JPEG encoded data",
.fourcc = V4L2_PIX_FMT_JPEG,
- .color = S5P_FIMC_JPEG,
+ .color = FIMC_FMT_JPEG,
.depth = { 8 },
.memplanes = 1,
.colplanes = 1,
@@ -187,12 +188,12 @@ static struct fimc_fmt fimc_formats[] = {
},
};
-static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
+struct fimc_fmt *fimc_get_format(unsigned int index)
{
- if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- return FMT_FLAGS_M2M_IN;
- else
- return FMT_FLAGS_M2M_OUT;
+ if (index >= ARRAY_SIZE(fimc_formats))
+ return NULL;
+
+ return &fimc_formats[index];
}
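
With fimc_formats[] now private to fimc-core.c, fimc_get_format() becomes the bounds-checked accessor that other files (e.g. the capture code) are expected to use. A minimal caller sketch follows; the loop and the NV12M lookup are illustrative only and not part of this patch:

	struct fimc_fmt *fmt;
	unsigned int i = 0;

	/* fimc_get_format() returns NULL once the index runs past the table. */
	while ((fmt = fimc_get_format(i++)) != NULL) {
		if (fmt->fourcc == V4L2_PIX_FMT_NV12M)
			break;	/* two-plane, non-contiguous 4:2:0 entry */
	}
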
int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
@@ -230,7 +231,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
int fimc_set_scaler_info(struct fimc_ctx *ctx)
{
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct fimc_variant *variant = ctx->fimc_dev->variant;
struct device *dev = &ctx->fimc_dev->pdev->dev;
struct fimc_scaler *sc = &ctx->scaler;
struct fimc_frame *s_frame = &ctx->s_frame;
@@ -293,126 +294,9 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
return 0;
}
-static void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
-{
- struct vb2_buffer *src_vb, *dst_vb;
-
- if (!ctx || !ctx->m2m_ctx)
- return;
-
- src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
-
- if (src_vb && dst_vb) {
- v4l2_m2m_buf_done(src_vb, vb_state);
- v4l2_m2m_buf_done(dst_vb, vb_state);
- v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
- ctx->m2m_ctx);
- }
-}
-
-/* Complete the transaction which has been scheduled for execution. */
-static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
-{
- struct fimc_dev *fimc = ctx->fimc_dev;
- int ret;
-
- if (!fimc_m2m_pending(fimc))
- return 0;
-
- fimc_ctx_state_lock_set(FIMC_CTX_SHUT, ctx);
-
- ret = wait_event_timeout(fimc->irq_queue,
- !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
- FIMC_SHUTDOWN_TIMEOUT);
-
- return ret == 0 ? -ETIMEDOUT : ret;
-}
-
-static int start_streaming(struct vb2_queue *q, unsigned int count)
-{
- struct fimc_ctx *ctx = q->drv_priv;
- int ret;
-
- ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
- return ret > 0 ? 0 : ret;
-}
-
-static int stop_streaming(struct vb2_queue *q)
-{
- struct fimc_ctx *ctx = q->drv_priv;
- int ret;
-
- ret = fimc_m2m_shutdown(ctx);
- if (ret == -ETIMEDOUT)
- fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
-
- pm_runtime_put(&ctx->fimc_dev->pdev->dev);
- return 0;
-}
-
-void fimc_capture_irq_handler(struct fimc_dev *fimc, bool final)
-{
- struct fimc_vid_cap *cap = &fimc->vid_cap;
- struct fimc_vid_buffer *v_buf;
- struct timeval *tv;
- struct timespec ts;
-
- if (test_and_clear_bit(ST_CAPT_SHUT, &fimc->state)) {
- wake_up(&fimc->irq_queue);
- return;
- }
-
- if (!list_empty(&cap->active_buf_q) &&
- test_bit(ST_CAPT_RUN, &fimc->state) && final) {
- ktime_get_real_ts(&ts);
-
- v_buf = fimc_active_queue_pop(cap);
-
- tv = &v_buf->vb.v4l2_buf.timestamp;
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
-
- vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
- }
-
- if (!list_empty(&cap->pending_buf_q)) {
-
- v_buf = fimc_pending_queue_pop(cap);
- fimc_hw_set_output_addr(fimc, &v_buf->paddr, cap->buf_index);
- v_buf->index = cap->buf_index;
-
- /* Move the buffer to the capture active queue */
- fimc_active_queue_add(cap, v_buf);
-
- dbg("next frame: %d, done frame: %d",
- fimc_hw_get_frame_index(fimc), v_buf->index);
-
- if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
- cap->buf_index = 0;
- }
-
- if (cap->active_buf_cnt == 0) {
- if (final)
- clear_bit(ST_CAPT_RUN, &fimc->state);
-
- if (++cap->buf_index >= FIMC_MAX_OUT_BUFS)
- cap->buf_index = 0;
- } else {
- set_bit(ST_CAPT_RUN, &fimc->state);
- }
-
- fimc_capture_config_update(cap->ctx);
-
- dbg("frame: %d, active_buf_cnt: %d",
- fimc_hw_get_frame_index(fimc), cap->active_buf_cnt);
-}
-
static irqreturn_t fimc_irq_handler(int irq, void *priv)
{
struct fimc_dev *fimc = priv;
- struct fimc_vid_cap *cap = &fimc->vid_cap;
struct fimc_ctx *ctx;
fimc_hw_clear_irq(fimc);
@@ -430,21 +314,16 @@ static irqreturn_t fimc_irq_handler(int irq, void *priv)
spin_unlock(&fimc->slock);
fimc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
- spin_lock(&ctx->slock);
if (ctx->state & FIMC_CTX_SHUT) {
ctx->state &= ~FIMC_CTX_SHUT;
wake_up(&fimc->irq_queue);
}
- spin_unlock(&ctx->slock);
+ return IRQ_HANDLED;
}
- return IRQ_HANDLED;
} else if (test_bit(ST_CAPT_PEND, &fimc->state)) {
- fimc_capture_irq_handler(fimc,
- !test_bit(ST_CAPT_JPEG, &fimc->state));
- if (cap->active_buf_cnt == 1) {
- fimc_deactivate_capture(fimc);
- clear_bit(ST_CAPT_STREAM, &fimc->state);
- }
+ int last_buf = test_bit(ST_CAPT_JPEG, &fimc->state) &&
+ fimc->vid_cap.reqbufs_count == 1;
+ fimc_capture_irq_handler(fimc, !last_buf);
}
out:
spin_unlock(&fimc->slock);
@@ -482,7 +361,7 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
case 3:
paddr->cb = (u32)(paddr->y + pix_size);
/* decompose Y into Y/Cb/Cr */
- if (S5P_FIMC_YCBCR420 == frame->fmt->color)
+ if (FIMC_FMT_YCBCR420 == frame->fmt->color)
paddr->cr = (u32)(paddr->cb
+ (pix_size >> 2));
else /* 422 */
@@ -510,40 +389,40 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
void fimc_set_yuv_order(struct fimc_ctx *ctx)
{
/* The one only mode supported in SoC. */
- ctx->in_order_2p = S5P_FIMC_LSB_CRCB;
- ctx->out_order_2p = S5P_FIMC_LSB_CRCB;
+ ctx->in_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
+ ctx->out_order_2p = FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB;
/* Set order for 1 plane input formats. */
switch (ctx->s_frame.fmt->color) {
- case S5P_FIMC_YCRYCB422:
- ctx->in_order_1p = S5P_MSCTRL_ORDER422_CBYCRY;
+ case FIMC_FMT_YCRYCB422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CBYCRY;
break;
- case S5P_FIMC_CBYCRY422:
- ctx->in_order_1p = S5P_MSCTRL_ORDER422_YCRYCB;
+ case FIMC_FMT_CBYCRY422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCRYCB;
break;
- case S5P_FIMC_CRYCBY422:
- ctx->in_order_1p = S5P_MSCTRL_ORDER422_YCBYCR;
+ case FIMC_FMT_CRYCBY422:
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_YCBYCR;
break;
- case S5P_FIMC_YCBYCR422:
+ case FIMC_FMT_YCBYCR422:
default:
- ctx->in_order_1p = S5P_MSCTRL_ORDER422_CRYCBY;
+ ctx->in_order_1p = FIMC_REG_MSCTRL_ORDER422_CRYCBY;
break;
}
dbg("ctx->in_order_1p= %d", ctx->in_order_1p);
switch (ctx->d_frame.fmt->color) {
- case S5P_FIMC_YCRYCB422:
- ctx->out_order_1p = S5P_CIOCTRL_ORDER422_CBYCRY;
+ case FIMC_FMT_YCRYCB422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CBYCRY;
break;
- case S5P_FIMC_CBYCRY422:
- ctx->out_order_1p = S5P_CIOCTRL_ORDER422_YCRYCB;
+ case FIMC_FMT_CBYCRY422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCRYCB;
break;
- case S5P_FIMC_CRYCBY422:
- ctx->out_order_1p = S5P_CIOCTRL_ORDER422_YCBYCR;
+ case FIMC_FMT_CRYCBY422:
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_YCBYCR;
break;
- case S5P_FIMC_YCBYCR422:
+ case FIMC_FMT_YCBYCR422:
default:
- ctx->out_order_1p = S5P_CIOCTRL_ORDER422_CRYCBY;
+ ctx->out_order_1p = FIMC_REG_CIOCTRL_ORDER422_CRYCBY;
break;
}
dbg("ctx->out_order_1p= %d", ctx->out_order_1p);
@@ -551,7 +430,7 @@ void fimc_set_yuv_order(struct fimc_ctx *ctx)
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
{
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct fimc_variant *variant = ctx->fimc_dev->variant;
u32 i, depth = 0;
for (i = 0; i < f->fmt->colplanes; i++)
@@ -574,7 +453,7 @@ void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
f->dma_offset.cb_h >>= 1;
f->dma_offset.cr_h >>= 1;
}
- if (f->fmt->color == S5P_FIMC_YCBCR420) {
+ if (f->fmt->color == FIMC_FMT_YCBCR420) {
f->dma_offset.cb_v >>= 1;
f->dma_offset.cr_v >>= 1;
}
@@ -584,203 +463,58 @@ void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
}
-/**
- * fimc_prepare_config - check dimensions, operation and color mode
- * and pre-calculate offset and the scaling coefficients.
- *
- * @ctx: hardware context information
- * @flags: flags indicating which parameters to check/update
- *
- * Return: 0 if dimensions are valid or non zero otherwise.
- */
-int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags)
-{
- struct fimc_frame *s_frame, *d_frame;
- struct vb2_buffer *vb = NULL;
- int ret = 0;
-
- s_frame = &ctx->s_frame;
- d_frame = &ctx->d_frame;
-
- if (flags & FIMC_PARAMS) {
- /* Prepare the DMA offset ratios for scaler. */
- fimc_prepare_dma_offset(ctx, &ctx->s_frame);
- fimc_prepare_dma_offset(ctx, &ctx->d_frame);
-
- if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) ||
- s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) {
- err("out of scaler range");
- return -EINVAL;
- }
- fimc_set_yuv_order(ctx);
- }
-
- if (flags & FIMC_SRC_ADDR) {
- vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, vb, s_frame, &s_frame->paddr);
- if (ret)
- return ret;
- }
-
- if (flags & FIMC_DST_ADDR) {
- vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- ret = fimc_prepare_addr(ctx, vb, d_frame, &d_frame->paddr);
- }
-
- return ret;
-}
-
-static void fimc_dma_run(void *priv)
-{
- struct fimc_ctx *ctx = priv;
- struct fimc_dev *fimc;
- unsigned long flags;
- u32 ret;
-
- if (WARN(!ctx, "null hardware context\n"))
- return;
-
- fimc = ctx->fimc_dev;
- spin_lock_irqsave(&fimc->slock, flags);
- set_bit(ST_M2M_PEND, &fimc->state);
-
- spin_lock(&ctx->slock);
- ctx->state |= (FIMC_SRC_ADDR | FIMC_DST_ADDR);
- ret = fimc_prepare_config(ctx, ctx->state);
- if (ret)
- goto dma_unlock;
-
- /* Reconfigure hardware if the context has changed. */
- if (fimc->m2m.ctx != ctx) {
- ctx->state |= FIMC_PARAMS;
- fimc->m2m.ctx = ctx;
- }
- fimc_hw_set_input_addr(fimc, &ctx->s_frame.paddr);
-
- if (ctx->state & FIMC_PARAMS) {
- fimc_hw_set_input_path(ctx);
- fimc_hw_set_in_dma(ctx);
- ret = fimc_set_scaler_info(ctx);
- if (ret) {
- spin_unlock(&fimc->slock);
- goto dma_unlock;
- }
- fimc_hw_set_prescaler(ctx);
- fimc_hw_set_mainscaler(ctx);
- fimc_hw_set_target_format(ctx);
- fimc_hw_set_rotation(ctx);
- fimc_hw_set_effect(ctx, false);
- }
-
- fimc_hw_set_output_path(ctx);
- if (ctx->state & (FIMC_DST_ADDR | FIMC_PARAMS))
- fimc_hw_set_output_addr(fimc, &ctx->d_frame.paddr, -1);
-
- if (ctx->state & FIMC_PARAMS) {
- fimc_hw_set_out_dma(ctx);
- if (fimc->variant->has_alpha)
- fimc_hw_set_rgb_alpha(ctx);
- }
-
- fimc_activate_capture(ctx);
-
- ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
- FIMC_SRC_FMT | FIMC_DST_FMT);
- fimc_hw_activate_input_dma(fimc, true);
-dma_unlock:
- spin_unlock(&ctx->slock);
- spin_unlock_irqrestore(&fimc->slock, flags);
-}
-
-static void fimc_job_abort(void *priv)
+int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx)
{
- fimc_m2m_shutdown(priv);
-}
-
-static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
- unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *allocators[])
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- struct fimc_frame *f;
- int i;
+ struct fimc_effect *effect = &ctx->effect;
- f = ctx_get_frame(ctx, vq->type);
- if (IS_ERR(f))
- return PTR_ERR(f);
- /*
- * Return number of non-contigous planes (plane buffers)
- * depending on the configured color format.
- */
- if (!f->fmt)
+ switch (colorfx) {
+ case V4L2_COLORFX_NONE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+ break;
+ case V4L2_COLORFX_BW:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = 128;
+ effect->pat_cr = 128;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = 115;
+ effect->pat_cr = 145;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_NEGATIVE;
+ break;
+ case V4L2_COLORFX_EMBOSS:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_EMBOSSING;
+ break;
+ case V4L2_COLORFX_ART_FREEZE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARTFREEZE;
+ break;
+ case V4L2_COLORFX_SILHOUETTE:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_SILHOUETTE;
+ break;
+ case V4L2_COLORFX_SET_CBCR:
+ effect->type = FIMC_REG_CIIMGEFF_FIN_ARBITRARY;
+ effect->pat_cb = ctx->ctrls.colorfx_cbcr->val >> 8;
+ effect->pat_cr = ctx->ctrls.colorfx_cbcr->val & 0xff;
+ break;
+ default:
return -EINVAL;
-
- *num_planes = f->fmt->memplanes;
- for (i = 0; i < f->fmt->memplanes; i++) {
- sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
- allocators[i] = ctx->fimc_dev->alloc_ctx;
}
- return 0;
-}
-
-static int fimc_buf_prepare(struct vb2_buffer *vb)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct fimc_frame *frame;
- int i;
-
- frame = ctx_get_frame(ctx, vb->vb2_queue->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- for (i = 0; i < frame->fmt->memplanes; i++)
- vb2_set_plane_payload(vb, i, frame->payload[i]);
return 0;
}
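
fimc_set_color_effect() maps the standard V4L2 color effect menu onto the CIIMGEFF modes; for V4L2_COLORFX_SET_CBCR the Cb value is taken from bits 15:8 and Cr from bits 7:0 of the V4L2_CID_COLORFX_CBCR control, as the shift/mask above shows. A hedged userspace sketch (headers and error handling omitted, /dev/video0 assumed to be a FIMC node):

	struct v4l2_control ctrl;
	int fd = open("/dev/video0", O_RDWR);

	ctrl.id = V4L2_CID_COLORFX_CBCR;
	ctrl.value = (115 << 8) | 145;		/* Cb = 115, Cr = 145 */
	ioctl(fd, VIDIOC_S_CTRL, &ctrl);

	ctrl.id = V4L2_CID_COLORFX;
	ctrl.value = V4L2_COLORFX_SET_CBCR;	/* apply the fixed Cb/Cr pair */
	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
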
-static void fimc_buf_queue(struct vb2_buffer *vb)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
-
- dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
-
- if (ctx->m2m_ctx)
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
-}
-
-static void fimc_lock(struct vb2_queue *vq)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_lock(&ctx->fimc_dev->lock);
-}
-
-static void fimc_unlock(struct vb2_queue *vq)
-{
- struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_unlock(&ctx->fimc_dev->lock);
-}
-
-static struct vb2_ops fimc_qops = {
- .queue_setup = fimc_queue_setup,
- .buf_prepare = fimc_buf_prepare,
- .buf_queue = fimc_buf_queue,
- .wait_prepare = fimc_unlock,
- .wait_finish = fimc_lock,
- .stop_streaming = stop_streaming,
- .start_streaming = start_streaming,
-};
-
/*
* V4L2 controls handling
*/
#define ctrl_to_ctx(__ctrl) \
- container_of((__ctrl)->handler, struct fimc_ctx, ctrl_handler)
+ container_of((__ctrl)->handler, struct fimc_ctx, ctrls.handler)
static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct samsung_fimc_variant *variant = fimc->variant;
+ struct fimc_variant *variant = fimc->variant;
unsigned int flags = FIMC_DST_FMT | FIMC_SRC_FMT;
int ret = 0;
@@ -815,7 +549,14 @@ static int __fimc_s_ctrl(struct fimc_ctx *ctx, struct v4l2_ctrl *ctrl)
case V4L2_CID_ALPHA_COMPONENT:
ctx->d_frame.alpha = ctrl->val;
break;
+
+ case V4L2_CID_COLORFX:
+ ret = fimc_set_color_effect(ctx, ctrl->val);
+ if (ret)
+ return ret;
+ break;
}
+
ctx->state |= FIMC_PARAMS;
set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
return 0;
@@ -827,9 +568,9 @@ static int fimc_s_ctrl(struct v4l2_ctrl *ctrl)
unsigned long flags;
int ret;
- spin_lock_irqsave(&ctx->slock, flags);
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
ret = __fimc_s_ctrl(ctx, ctrl);
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
return ret;
}
@@ -840,71 +581,93 @@ static const struct v4l2_ctrl_ops fimc_ctrl_ops = {
int fimc_ctrls_create(struct fimc_ctx *ctx)
{
- struct samsung_fimc_variant *variant = ctx->fimc_dev->variant;
+ struct fimc_variant *variant = ctx->fimc_dev->variant;
unsigned int max_alpha = fimc_get_alpha_mask(ctx->d_frame.fmt);
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+ struct v4l2_ctrl_handler *handler = &ctrls->handler;
- if (ctx->ctrls_rdy)
+ if (ctx->ctrls.ready)
return 0;
- v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
- ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ v4l2_ctrl_handler_init(handler, 6);
+
+ ctrls->rotate = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
V4L2_CID_ROTATE, 0, 270, 90, 0);
- ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ ctrls->hflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
- ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
+ ctrls->vflip = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
+
if (variant->has_alpha)
- ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
- &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
- 0, max_alpha, 1, 0);
+ ctrls->alpha = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, max_alpha, 1, 0);
else
- ctx->ctrl_alpha = NULL;
+ ctrls->alpha = NULL;
- ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+ ctrls->colorfx = v4l2_ctrl_new_std_menu(handler, &fimc_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
+ ~0x983f, V4L2_COLORFX_NONE);
- return ctx->ctrl_handler.error;
+ ctrls->colorfx_cbcr = v4l2_ctrl_new_std(handler, &fimc_ctrl_ops,
+ V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
+
+ ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
+
+ if (!handler->error) {
+ v4l2_ctrl_cluster(2, &ctrls->colorfx);
+ ctrls->ready = true;
+ }
+
+ return handler->error;
}
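
v4l2_ctrl_cluster(2, &ctrls->colorfx) treats &ctrls->colorfx as the start of an array of two control pointers, so colorfx and colorfx_cbcr must sit next to each other in memory; that is what the anonymous struct added to struct fimc_ctrls (see the fimc-core.h hunk below) guarantees. Clustering also makes the framework hand both controls to s_ctrl as a unit, with colorfx acting as the master control, so the Cb/Cr pattern is re-evaluated whenever the effect menu changes.
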
void fimc_ctrls_delete(struct fimc_ctx *ctx)
{
- if (ctx->ctrls_rdy) {
- v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- ctx->ctrls_rdy = false;
- ctx->ctrl_alpha = NULL;
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
+
+ if (ctrls->ready) {
+ v4l2_ctrl_handler_free(&ctrls->handler);
+ ctrls->ready = false;
+ ctrls->alpha = NULL;
}
}
void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
{
unsigned int has_alpha = ctx->d_frame.fmt->flags & FMT_HAS_ALPHA;
+ struct fimc_ctrls *ctrls = &ctx->ctrls;
- if (!ctx->ctrls_rdy)
+ if (!ctrls->ready)
return;
- mutex_lock(&ctx->ctrl_handler.lock);
- v4l2_ctrl_activate(ctx->ctrl_rotate, active);
- v4l2_ctrl_activate(ctx->ctrl_hflip, active);
- v4l2_ctrl_activate(ctx->ctrl_vflip, active);
- if (ctx->ctrl_alpha)
- v4l2_ctrl_activate(ctx->ctrl_alpha, active && has_alpha);
+ mutex_lock(ctrls->handler.lock);
+ v4l2_ctrl_activate(ctrls->rotate, active);
+ v4l2_ctrl_activate(ctrls->hflip, active);
+ v4l2_ctrl_activate(ctrls->vflip, active);
+ v4l2_ctrl_activate(ctrls->colorfx, active);
+ if (ctrls->alpha)
+ v4l2_ctrl_activate(ctrls->alpha, active && has_alpha);
if (active) {
- ctx->rotation = ctx->ctrl_rotate->val;
- ctx->hflip = ctx->ctrl_hflip->val;
- ctx->vflip = ctx->ctrl_vflip->val;
+ fimc_set_color_effect(ctx, ctrls->colorfx->cur.val);
+ ctx->rotation = ctrls->rotate->val;
+ ctx->hflip = ctrls->hflip->val;
+ ctx->vflip = ctrls->vflip->val;
} else {
+ ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
ctx->rotation = 0;
ctx->hflip = 0;
ctx->vflip = 0;
}
- mutex_unlock(&ctx->ctrl_handler.lock);
+ mutex_unlock(ctrls->handler.lock);
}
/* Update maximum value of the alpha color control */
void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_ctrl *ctrl = ctx->ctrl_alpha;
+ struct v4l2_ctrl *ctrl = ctx->ctrls.alpha;
if (ctrl == NULL || !fimc->variant->has_alpha)
return;
@@ -918,39 +681,6 @@ void fimc_alpha_ctrl_update(struct fimc_ctx *ctx)
v4l2_ctrl_unlock(ctrl);
}
-/*
- * V4L2 ioctl handlers
- */
-static int fimc_m2m_querycap(struct file *file, void *fh,
- struct v4l2_capability *cap)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_dev *fimc = ctx->fimc_dev;
-
- strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
- strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
- cap->bus_info[0] = 0;
- cap->capabilities = V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
-
- return 0;
-}
-
-static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct fimc_fmt *fmt;
-
- fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
- f->index);
- if (!fmt)
- return -EINVAL;
-
- strncpy(f->description, fmt->name, sizeof(f->description) - 1);
- f->pixelformat = fmt->fourcc;
- return 0;
-}
-
int fimc_fill_format(struct fimc_frame *frame, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
@@ -1011,8 +741,8 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
pix->width = width;
for (i = 0; i < pix->num_planes; ++i) {
- u32 bpl = pix->plane_fmt[i].bytesperline;
- u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
+ struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+ u32 bpl = plane_fmt->bytesperline;
if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
bpl = pix->width; /* Planar */
@@ -1024,23 +754,12 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
if (i == 0) /* Same bytesperline for each plane. */
bytesperline = bpl;
- pix->plane_fmt[i].bytesperline = bytesperline;
- *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
+ plane_fmt->bytesperline = bytesperline;
+ plane_fmt->sizeimage = max((pix->width * pix->height *
+ fmt->depth[i]) / 8, plane_fmt->sizeimage);
}
}
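
The sizeimage change keeps any larger value the caller already supplied rather than always overwriting it with the computed minimum. Worked numbers (illustrative, not from the patch): for V4L2_PIX_FMT_NV12M at 640x480 with depth { 8, 4 }, the minima are 640 * 480 * 8 / 8 = 307200 bytes for plane 0 and 640 * 480 * 4 / 8 = 153600 bytes for plane 1, so a caller requesting sizeimage = 512000 for plane 0 retains that value.
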
-static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
-
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- return fimc_fill_format(frame, f);
-}
-
/**
* fimc_find_format - lookup fimc color format by fourcc or media bus format
* @pixelformat: fourcc to match, ignored if null
@@ -1073,535 +792,10 @@ struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
return def_fmt;
}
-static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
-{
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct samsung_fimc_variant *variant = fimc->variant;
- struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct fimc_fmt *fmt;
- u32 max_w, mod_x, mod_y;
-
- if (!IS_M2M(f->type))
- return -EINVAL;
-
- dbg("w: %d, h: %d", pix->width, pix->height);
-
- fmt = fimc_find_format(&pix->pixelformat, NULL,
- get_m2m_fmt_flags(f->type), 0);
- if (WARN(fmt == NULL, "Pixel format lookup failed"))
- return -EINVAL;
-
- if (pix->field == V4L2_FIELD_ANY)
- pix->field = V4L2_FIELD_NONE;
- else if (pix->field != V4L2_FIELD_NONE)
- return -EINVAL;
-
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- max_w = variant->pix_limit->scaler_dis_w;
- mod_x = ffs(variant->min_inp_pixsize) - 1;
- } else {
- max_w = variant->pix_limit->out_rot_dis_w;
- mod_x = ffs(variant->min_out_pixsize) - 1;
- }
-
- if (tiled_fmt(fmt)) {
- mod_x = 6; /* 64 x 32 pixels tile */
- mod_y = 5;
- } else {
- if (variant->min_vsize_align == 1)
- mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
- else
- mod_y = ffs(variant->min_vsize_align) - 1;
- }
-
- v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
- &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
-
- fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
- return 0;
-}
-
-static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return fimc_try_fmt_mplane(ctx, f);
-}
-
-static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
- struct v4l2_format *f)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct vb2_queue *vq;
- struct fimc_frame *frame;
- struct v4l2_pix_format_mplane *pix;
- int i, ret = 0;
-
- ret = fimc_try_fmt_mplane(ctx, f);
- if (ret)
- return ret;
-
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
-
- if (vb2_is_busy(vq)) {
- v4l2_err(fimc->m2m.vfd, "queue (%d) busy\n", f->type);
- return -EBUSY;
- }
-
- if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- frame = &ctx->s_frame;
- else
- frame = &ctx->d_frame;
-
- pix = &f->fmt.pix_mp;
- frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
- get_m2m_fmt_flags(f->type), 0);
- if (!frame->fmt)
- return -EINVAL;
-
- /* Update RGB Alpha control state and value range */
- fimc_alpha_ctrl_update(ctx);
-
- for (i = 0; i < frame->fmt->colplanes; i++) {
- frame->payload[i] =
- (pix->width * pix->height * frame->fmt->depth[i]) / 8;
- }
-
- fimc_fill_frame(frame, f);
-
- ctx->scaler.enabled = 1;
-
- if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- fimc_ctx_state_lock_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
- else
- fimc_ctx_state_lock_set(FIMC_PARAMS | FIMC_SRC_FMT, ctx);
-
- dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
-
- return 0;
-}
-
-static int fimc_m2m_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int fimc_m2m_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_qbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_dqbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int fimc_m2m_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- /* The source and target color format need to be set */
- if (V4L2_TYPE_IS_OUTPUT(type)) {
- if (!fimc_ctx_state_is_set(FIMC_SRC_FMT, ctx))
- return -EINVAL;
- } else if (!fimc_ctx_state_is_set(FIMC_DST_FMT, ctx)) {
- return -EINVAL;
- }
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int fimc_m2m_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
-static int fimc_m2m_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cr)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame;
-
- frame = ctx_get_frame(ctx, cr->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- cr->bounds.left = 0;
- cr->bounds.top = 0;
- cr->bounds.width = frame->o_width;
- cr->bounds.height = frame->o_height;
- cr->defrect = cr->bounds;
-
- return 0;
-}
-
-static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame;
-
- frame = ctx_get_frame(ctx, cr->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- cr->c.left = frame->offs_h;
- cr->c.top = frame->offs_v;
- cr->c.width = frame->width;
- cr->c.height = frame->height;
-
- return 0;
-}
-
-static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
-{
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_frame *f;
- u32 min_size, halign, depth = 0;
- int i;
-
- if (cr->c.top < 0 || cr->c.left < 0) {
- v4l2_err(fimc->m2m.vfd,
- "doesn't support negative values for top & left\n");
- return -EINVAL;
- }
- if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- f = &ctx->d_frame;
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- f = &ctx->s_frame;
- else
- return -EINVAL;
-
- min_size = (f == &ctx->s_frame) ?
- fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
-
- /* Get pixel alignment constraints. */
- if (fimc->variant->min_vsize_align == 1)
- halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
- else
- halign = ffs(fimc->variant->min_vsize_align) - 1;
-
- for (i = 0; i < f->fmt->colplanes; i++)
- depth += f->fmt->depth[i];
-
- v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
- ffs(min_size) - 1,
- &cr->c.height, min_size, f->o_height,
- halign, 64/(ALIGN(depth, 8)));
-
- /* adjust left/top if cropping rectangle is out of bounds */
- if (cr->c.left + cr->c.width > f->o_width)
- cr->c.left = f->o_width - cr->c.width;
- if (cr->c.top + cr->c.height > f->o_height)
- cr->c.top = f->o_height - cr->c.height;
-
- cr->c.left = round_down(cr->c.left, min_size);
- cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
-
- dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
- cr->c.left, cr->c.top, cr->c.width, cr->c.height,
- f->f_width, f->f_height);
-
- return 0;
-}
-
-static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_dev *fimc = ctx->fimc_dev;
- struct fimc_frame *f;
- int ret;
-
- ret = fimc_m2m_try_crop(ctx, cr);
- if (ret)
- return ret;
-
- f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
- &ctx->s_frame : &ctx->d_frame;
-
- /* Check to see if scaling ratio is within supported range */
- if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
- if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- ret = fimc_check_scaler_ratio(ctx, cr->c.width,
- cr->c.height, ctx->d_frame.width,
- ctx->d_frame.height, ctx->rotation);
- } else {
- ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
- ctx->s_frame.height, cr->c.width,
- cr->c.height, ctx->rotation);
- }
- if (ret) {
- v4l2_err(fimc->m2m.vfd, "Out of scaler range\n");
- return -EINVAL;
- }
- }
-
- f->offs_h = cr->c.left;
- f->offs_v = cr->c.top;
- f->width = cr->c.width;
- f->height = cr->c.height;
-
- fimc_ctx_state_lock_set(FIMC_PARAMS, ctx);
-
- return 0;
-}
-
-static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
- .vidioc_querycap = fimc_m2m_querycap,
-
- .vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
- .vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
-
- .vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
- .vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
-
- .vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
- .vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
-
- .vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
- .vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
-
- .vidioc_reqbufs = fimc_m2m_reqbufs,
- .vidioc_querybuf = fimc_m2m_querybuf,
-
- .vidioc_qbuf = fimc_m2m_qbuf,
- .vidioc_dqbuf = fimc_m2m_dqbuf,
-
- .vidioc_streamon = fimc_m2m_streamon,
- .vidioc_streamoff = fimc_m2m_streamoff,
-
- .vidioc_g_crop = fimc_m2m_g_crop,
- .vidioc_s_crop = fimc_m2m_s_crop,
- .vidioc_cropcap = fimc_m2m_cropcap
-
-};
-
-static int queue_init(void *priv, struct vb2_queue *src_vq,
- struct vb2_queue *dst_vq)
-{
- struct fimc_ctx *ctx = priv;
- int ret;
-
- memset(src_vq, 0, sizeof(*src_vq));
- src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- src_vq->drv_priv = ctx;
- src_vq->ops = &fimc_qops;
- src_vq->mem_ops = &vb2_dma_contig_memops;
- src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
-
- ret = vb2_queue_init(src_vq);
- if (ret)
- return ret;
-
- memset(dst_vq, 0, sizeof(*dst_vq));
- dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- dst_vq->drv_priv = ctx;
- dst_vq->ops = &fimc_qops;
- dst_vq->mem_ops = &vb2_dma_contig_memops;
- dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
-
- return vb2_queue_init(dst_vq);
-}
-
-static int fimc_m2m_open(struct file *file)
-{
- struct fimc_dev *fimc = video_drvdata(file);
- struct fimc_ctx *ctx;
- int ret;
-
- dbg("pid: %d, state: 0x%lx, refcnt: %d",
- task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
-
- /*
- * Return if the corresponding video capture node
- * is already opened.
- */
- if (fimc->vid_cap.refcnt > 0)
- return -EBUSY;
-
- ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- v4l2_fh_init(&ctx->fh, fimc->m2m.vfd);
- ctx->fimc_dev = fimc;
-
- /* Default color format */
- ctx->s_frame.fmt = &fimc_formats[0];
- ctx->d_frame.fmt = &fimc_formats[0];
-
- ret = fimc_ctrls_create(ctx);
- if (ret)
- goto error_fh;
-
- /* Use separate control handler per file handle */
- ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
-
- /* Setup the device context for memory-to-memory mode */
- ctx->state = FIMC_CTX_M2M;
- ctx->flags = 0;
- ctx->in_path = FIMC_DMA;
- ctx->out_path = FIMC_DMA;
- spin_lock_init(&ctx->slock);
-
- ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
- goto error_c;
- }
-
- if (fimc->m2m.refcnt++ == 0)
- set_bit(ST_M2M_RUN, &fimc->state);
- return 0;
-
-error_c:
- fimc_ctrls_delete(ctx);
-error_fh:
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
- kfree(ctx);
- return ret;
-}
-
-static int fimc_m2m_release(struct file *file)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
- struct fimc_dev *fimc = ctx->fimc_dev;
-
- dbg("pid: %d, state: 0x%lx, refcnt= %d",
- task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
-
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
- fimc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
- v4l2_fh_exit(&ctx->fh);
-
- if (--fimc->m2m.refcnt <= 0)
- clear_bit(ST_M2M_RUN, &fimc->state);
- kfree(ctx);
- return 0;
-}
-
-static unsigned int fimc_m2m_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
-
- return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
-}
-
-
-static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
-
- return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
-}
-
-static const struct v4l2_file_operations fimc_m2m_fops = {
- .owner = THIS_MODULE,
- .open = fimc_m2m_open,
- .release = fimc_m2m_release,
- .poll = fimc_m2m_poll,
- .unlocked_ioctl = video_ioctl2,
- .mmap = fimc_m2m_mmap,
-};
-
-static struct v4l2_m2m_ops m2m_ops = {
- .device_run = fimc_dma_run,
- .job_abort = fimc_job_abort,
-};
-
-int fimc_register_m2m_device(struct fimc_dev *fimc,
- struct v4l2_device *v4l2_dev)
-{
- struct video_device *vfd;
- struct platform_device *pdev;
- int ret = 0;
-
- if (!fimc)
- return -ENODEV;
-
- pdev = fimc->pdev;
- fimc->v4l2_dev = v4l2_dev;
-
- vfd = video_device_alloc();
- if (!vfd) {
- v4l2_err(v4l2_dev, "Failed to allocate video device\n");
- return -ENOMEM;
- }
-
- vfd->fops = &fimc_m2m_fops;
- vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
- vfd->v4l2_dev = v4l2_dev;
- vfd->minor = -1;
- vfd->release = video_device_release;
- vfd->lock = &fimc->lock;
-
- snprintf(vfd->name, sizeof(vfd->name), "%s.m2m", dev_name(&pdev->dev));
- video_set_drvdata(vfd, fimc);
-
- fimc->m2m.vfd = vfd;
- fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
- if (IS_ERR(fimc->m2m.m2m_dev)) {
- v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
- ret = PTR_ERR(fimc->m2m.m2m_dev);
- goto err_init;
- }
-
- ret = media_entity_init(&vfd->entity, 0, NULL, 0);
- if (!ret)
- return 0;
-
- v4l2_m2m_release(fimc->m2m.m2m_dev);
-err_init:
- video_device_release(fimc->m2m.vfd);
- return ret;
-}
-
-void fimc_unregister_m2m_device(struct fimc_dev *fimc)
-{
- if (!fimc)
- return;
-
- if (fimc->m2m.m2m_dev)
- v4l2_m2m_release(fimc->m2m.m2m_dev);
- if (fimc->m2m.vfd) {
- media_entity_cleanup(&fimc->m2m.vfd->entity);
- /* Can also be called if video device wasn't registered */
- video_unregister_device(fimc->m2m.vfd);
- }
-}
-
static void fimc_clk_put(struct fimc_dev *fimc)
{
int i;
- for (i = 0; i < fimc->num_clocks; i++) {
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
if (IS_ERR_OR_NULL(fimc->clock[i]))
continue;
clk_unprepare(fimc->clock[i]);
@@ -1614,7 +808,7 @@ static int fimc_clk_get(struct fimc_dev *fimc)
{
int i, ret;
- for (i = 0; i < fimc->num_clocks; i++) {
+ for (i = 0; i < MAX_FIMC_CLOCKS; i++) {
fimc->clock[i] = clk_get(&fimc->pdev->dev, fimc_clocks[i]);
if (IS_ERR(fimc->clock[i]))
goto err;
@@ -1672,15 +866,12 @@ static int fimc_m2m_resume(struct fimc_dev *fimc)
static int fimc_probe(struct platform_device *pdev)
{
+ struct fimc_drvdata *drv_data = fimc_get_drvdata(pdev);
+ struct s5p_platform_fimc *pdata;
struct fimc_dev *fimc;
struct resource *res;
- struct samsung_fimc_driverdata *drv_data;
- struct s5p_platform_fimc *pdata;
int ret = 0;
- drv_data = (struct samsung_fimc_driverdata *)
- platform_get_device_id(pdev)->driver_data;
-
if (pdev->id >= drv_data->num_entities) {
dev_err(&pdev->dev, "Invalid platform device id: %d\n",
pdev->id);
@@ -1714,28 +905,29 @@ static int fimc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get IRQ resource\n");
return -ENXIO;
}
- fimc->irq = res->start;
- fimc->num_clocks = MAX_FIMC_CLOCKS;
ret = fimc_clk_get(fimc);
if (ret)
return ret;
clk_set_rate(fimc->clock[CLK_BUS], drv_data->lclk_frequency);
clk_enable(fimc->clock[CLK_BUS]);
- platform_set_drvdata(pdev, fimc);
-
- ret = devm_request_irq(&pdev->dev, fimc->irq, fimc_irq_handler,
- 0, pdev->name, fimc);
+ ret = devm_request_irq(&pdev->dev, res->start, fimc_irq_handler,
+ 0, dev_name(&pdev->dev), fimc);
if (ret) {
dev_err(&pdev->dev, "failed to install irq (%d)\n", ret);
goto err_clk;
}
+ ret = fimc_initialize_capture_subdev(fimc);
+ if (ret)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, fimc);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
- goto err_clk;
+ goto err_sd;
/* Initialize contiguous memory allocator */
fimc->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(fimc->alloc_ctx)) {
@@ -1747,9 +939,10 @@ static int fimc_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
return 0;
-
err_pm:
pm_runtime_put(&pdev->dev);
+err_sd:
+ fimc_unregister_capture_subdev(fimc);
err_clk:
fimc_clk_put(fimc);
return ret;
@@ -1834,6 +1027,7 @@ static int __devexit fimc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
+ fimc_unregister_capture_subdev(fimc);
vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
clk_disable(fimc->clock[CLK_BUS]);
@@ -1879,7 +1073,7 @@ static struct fimc_pix_limit s5p_pix_limit[4] = {
},
};
-static struct samsung_fimc_variant fimc0_variant_s5p = {
+static struct fimc_variant fimc0_variant_s5p = {
.has_inp_rot = 1,
.has_out_rot = 1,
.has_cam_if = 1,
@@ -1891,17 +1085,17 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
.pix_limit = &s5p_pix_limit[0],
};
-static struct samsung_fimc_variant fimc2_variant_s5p = {
+static struct fimc_variant fimc2_variant_s5p = {
.has_cam_if = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 8,
.min_vsize_align = 16,
.out_buf_count = 4,
- .pix_limit = &s5p_pix_limit[1],
+ .pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
+static struct fimc_variant fimc0_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1914,7 +1108,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
+static struct fimc_variant fimc1_variant_s5pv210 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1928,7 +1122,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
+static struct fimc_variant fimc2_variant_s5pv210 = {
.has_cam_if = 1,
.pix_hoff = 1,
.min_inp_pixsize = 16,
@@ -1939,7 +1133,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-static struct samsung_fimc_variant fimc0_variant_exynos4 = {
+static struct fimc_variant fimc0_variant_exynos4 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1955,7 +1149,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
.pix_limit = &s5p_pix_limit[1],
};
-static struct samsung_fimc_variant fimc3_variant_exynos4 = {
+static struct fimc_variant fimc3_variant_exynos4 = {
.pix_hoff = 1,
.has_cam_if = 1,
.has_cistatus2 = 1,
@@ -1970,7 +1164,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
};
/* S5PC100 */
-static struct samsung_fimc_driverdata fimc_drvdata_s5p = {
+static struct fimc_drvdata fimc_drvdata_s5p = {
.variant = {
[0] = &fimc0_variant_s5p,
[1] = &fimc0_variant_s5p,
@@ -1981,7 +1175,7 @@ static struct samsung_fimc_driverdata fimc_drvdata_s5p = {
};
/* S5PV210, S5PC110 */
-static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = {
+static struct fimc_drvdata fimc_drvdata_s5pv210 = {
.variant = {
[0] = &fimc0_variant_s5pv210,
[1] = &fimc1_variant_s5pv210,
@@ -1991,8 +1185,8 @@ static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = {
.lclk_frequency = 166000000UL,
};
-/* S5PV310, S5PC210 */
-static struct samsung_fimc_driverdata fimc_drvdata_exynos4 = {
+/* EXYNOS4210, S5PV310, S5PC210 */
+static struct fimc_drvdata fimc_drvdata_exynos4 = {
.variant = {
[0] = &fimc0_variant_exynos4,
[1] = &fimc0_variant_exynos4,
@@ -2036,7 +1230,7 @@ static struct platform_driver fimc_driver = {
int __init fimc_register_driver(void)
{
- return platform_driver_probe(&fimc_driver, fimc_probe);
+ return platform_driver_register(&fimc_driver);
}
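
Presumably the switch from platform_driver_probe() to platform_driver_register() is made so the driver can bind to FIMC platform devices registered after driver initialization: platform_driver_probe() only probes devices that already exist at registration time and then disallows further binding, while platform_driver_register() keeps the probe path available.
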
void __exit fimc_unregister_driver(void)
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 84fd83550bd7..95b27ae5cf27 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/videodev2.h>
#include <linux/io.h>
+#include <asm/sizes.h>
#include <media/media-entity.h>
#include <media/videobuf2-core.h>
@@ -26,8 +27,6 @@
#include <media/v4l2-mediabus.h>
#include <media/s5p_fimc.h>
-#include "regs-fimc.h"
-
#define err(fmt, args...) \
printk(KERN_ERR "%s:%d: " fmt "\n", __func__, __LINE__, ##args)
@@ -78,26 +77,31 @@ enum fimc_dev_flags {
#define fimc_capture_busy(dev) test_bit(ST_CAPT_BUSY, &(dev)->state)
enum fimc_datapath {
- FIMC_CAMERA,
- FIMC_DMA,
- FIMC_LCDFIFO,
- FIMC_WRITEBACK
+ FIMC_IO_NONE,
+ FIMC_IO_CAMERA,
+ FIMC_IO_DMA,
+ FIMC_IO_LCDFIFO,
+ FIMC_IO_WRITEBACK,
+ FIMC_IO_ISP,
};
enum fimc_color_fmt {
- S5P_FIMC_RGB444 = 0x10,
- S5P_FIMC_RGB555,
- S5P_FIMC_RGB565,
- S5P_FIMC_RGB666,
- S5P_FIMC_RGB888,
- S5P_FIMC_RGB30_LOCAL,
- S5P_FIMC_YCBCR420 = 0x20,
- S5P_FIMC_YCBYCR422,
- S5P_FIMC_YCRYCB422,
- S5P_FIMC_CBYCRY422,
- S5P_FIMC_CRYCBY422,
- S5P_FIMC_YCBCR444_LOCAL,
- S5P_FIMC_JPEG = 0x40,
+ FIMC_FMT_RGB444 = 0x10,
+ FIMC_FMT_RGB555,
+ FIMC_FMT_RGB565,
+ FIMC_FMT_RGB666,
+ FIMC_FMT_RGB888,
+ FIMC_FMT_RGB30_LOCAL,
+ FIMC_FMT_YCBCR420 = 0x20,
+ FIMC_FMT_YCBYCR422,
+ FIMC_FMT_YCRYCB422,
+ FIMC_FMT_CBYCRY422,
+ FIMC_FMT_CRYCBY422,
+ FIMC_FMT_YCBCR444_LOCAL,
+ FIMC_FMT_JPEG = 0x40,
+ FIMC_FMT_RAW8 = 0x80,
+ FIMC_FMT_RAW10,
+ FIMC_FMT_RAW12,
};
#define fimc_fmt_is_rgb(x) (!!((x) & 0x10))
@@ -106,24 +110,11 @@ enum fimc_color_fmt {
#define IS_M2M(__strt) ((__strt) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE || \
__strt == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-/* Cb/Cr chrominance components order for 2 plane Y/CbCr 4:2:2 formats. */
-#define S5P_FIMC_LSB_CRCB S5P_CIOCTRL_ORDER422_2P_LSB_CRCB
-
-/* The embedded image effect selection */
-#define S5P_FIMC_EFFECT_ORIGINAL S5P_CIIMGEFF_FIN_BYPASS
-#define S5P_FIMC_EFFECT_ARBITRARY S5P_CIIMGEFF_FIN_ARBITRARY
-#define S5P_FIMC_EFFECT_NEGATIVE S5P_CIIMGEFF_FIN_NEGATIVE
-#define S5P_FIMC_EFFECT_ARTFREEZE S5P_CIIMGEFF_FIN_ARTFREEZE
-#define S5P_FIMC_EFFECT_EMBOSSING S5P_CIIMGEFF_FIN_EMBOSSING
-#define S5P_FIMC_EFFECT_SIKHOUETTE S5P_CIIMGEFF_FIN_SILHOUETTE
-
/* The hardware context state. */
#define FIMC_PARAMS (1 << 0)
-#define FIMC_SRC_ADDR (1 << 1)
-#define FIMC_DST_ADDR (1 << 2)
#define FIMC_SRC_FMT (1 << 3)
#define FIMC_DST_FMT (1 << 4)
-#define FIMC_DST_CROP (1 << 5)
+#define FIMC_COMPOSE (1 << 5)
#define FIMC_CTX_M2M (1 << 16)
#define FIMC_CTX_CAP (1 << 17)
#define FIMC_CTX_SHUT (1 << 18)
@@ -333,7 +324,7 @@ struct fimc_vid_cap {
struct fimc_ctx *ctx;
struct vb2_alloc_ctx *alloc_ctx;
struct video_device *vfd;
- struct v4l2_subdev *subdev;
+ struct v4l2_subdev subdev;
struct media_pad vd_pad;
struct v4l2_mbus_framefmt mf;
struct media_pad sd_pads[FIMC_SD_PADS_NUM];
@@ -370,8 +361,7 @@ struct fimc_pix_limit {
};
/**
- * struct samsung_fimc_variant - camera interface variant information
- *
+ * struct fimc_variant - FIMC device variant information
* @pix_hoff: indicate whether horizontal offset is in pixels or in bytes
* @has_inp_rot: set if has input rotator
* @has_out_rot: set if has output rotator
@@ -386,7 +376,7 @@ struct fimc_pix_limit {
* @min_vsize_align: minimum vertical pixel size alignment
* @out_buf_count: the number of buffers in output DMA sequence
*/
-struct samsung_fimc_variant {
+struct fimc_variant {
unsigned int pix_hoff:1;
unsigned int has_inp_rot:1;
unsigned int has_out_rot:1;
@@ -403,23 +393,19 @@ struct samsung_fimc_variant {
};
/**
- * struct samsung_fimc_driverdata - per device type driver data for init time.
- *
- * @variant: the variant information for this driver.
- * @dev_cnt: number of fimc sub-devices available in SoC
- * @lclk_frequency: fimc bus clock frequency
+ * struct fimc_drvdata - per device type driver data
+ * @variant: variant information for this device
+ * @num_entities: number of fimc instances available in a SoC
+ * @lclk_frequency: local bus clock frequency
*/
-struct samsung_fimc_driverdata {
- struct samsung_fimc_variant *variant[FIMC_MAX_DEVS];
- unsigned long lclk_frequency;
- int num_entities;
+struct fimc_drvdata {
+ struct fimc_variant *variant[FIMC_MAX_DEVS];
+ int num_entities;
+ unsigned long lclk_frequency;
};
-struct fimc_pipeline {
- struct media_pipeline *pipe;
- struct v4l2_subdev *sensor;
- struct v4l2_subdev *csis;
-};
+#define fimc_get_drvdata(_pdev) \
+ ((struct fimc_drvdata *) platform_get_device_id(_pdev)->driver_data)
struct fimc_ctx;
@@ -431,10 +417,8 @@ struct fimc_ctx;
* @pdata: pointer to the device platform data
* @variant: the IP variant information
* @id: FIMC device index (0..FIMC_MAX_DEVS)
- * @num_clocks: the number of clocks managed by this device instance
* @clock: clocks required for FIMC operation
* @regs: the mapped hardware registers
- * @irq: FIMC interrupt number
* @irq_queue: interrupt handler waitqueue
* @v4l2_dev: root v4l2_device
* @m2m: memory-to-memory V4L2 device information
@@ -448,12 +432,10 @@ struct fimc_dev {
struct mutex lock;
struct platform_device *pdev;
struct s5p_platform_fimc *pdata;
- struct samsung_fimc_variant *variant;
+ struct fimc_variant *variant;
u16 id;
- u16 num_clocks;
struct clk *clock[MAX_FIMC_CLOCKS];
void __iomem *regs;
- int irq;
wait_queue_head_t irq_queue;
struct v4l2_device *v4l2_dev;
struct fimc_m2m_device m2m;
@@ -464,8 +446,31 @@ struct fimc_dev {
};
/**
+ * struct fimc_ctrls - v4l2 controls structure
+ * @handler: the control handler
+ * @colorfx: image effect control
+ * @colorfx_cbcr: Cb/Cr coefficients control
+ * @rotate: image rotation control
+ * @hflip: horizontal flip control
+ * @vflip: vertical flip control
+ * @alpha: RGB alpha control
+ * @ready: true if @handler is initialized
+ */
+struct fimc_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *colorfx;
+ struct v4l2_ctrl *colorfx_cbcr;
+ };
+ struct v4l2_ctrl *rotate;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *alpha;
+ bool ready;
+};
+
+/**
* fimc_ctx - the device context data
- * @slock: spinlock protecting this data structure
* @s_frame: source frame properties
* @d_frame: destination frame properties
* @out_order_1p: output 1-plane YCBCR order
@@ -484,15 +489,9 @@ struct fimc_dev {
* @fimc_dev: the FIMC device this context applies to
* @m2m_ctx: memory-to-memory device context
* @fh: v4l2 file handle
- * @ctrl_handler: v4l2 controls handler
- * @ctrl_rotate image rotation control
- * @ctrl_hflip horizontal flip control
- * @ctrl_vflip vertical flip control
- * @ctrl_alpha RGB alpha control
- * @ctrls_rdy: true if the control handler is initialized
+ * @ctrls: v4l2 controls structure
*/
struct fimc_ctx {
- spinlock_t slock;
struct fimc_frame s_frame;
struct fimc_frame d_frame;
u32 out_order_1p;
@@ -511,12 +510,7 @@ struct fimc_ctx {
struct fimc_dev *fimc_dev;
struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_fh fh;
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *ctrl_rotate;
- struct v4l2_ctrl *ctrl_hflip;
- struct v4l2_ctrl *ctrl_vflip;
- struct v4l2_ctrl *ctrl_alpha;
- bool ctrls_rdy;
+ struct fimc_ctrls ctrls;
};
#define fh_to_ctx(__fh) container_of(__fh, struct fimc_ctx, fh)
@@ -560,13 +554,13 @@ static inline bool fimc_capture_active(struct fimc_dev *fimc)
return ret;
}
-static inline void fimc_ctx_state_lock_set(u32 state, struct fimc_ctx *ctx)
+static inline void fimc_ctx_state_set(u32 state, struct fimc_ctx *ctx)
{
unsigned long flags;
- spin_lock_irqsave(&ctx->slock, flags);
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
ctx->state |= state;
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
}
static inline bool fimc_ctx_state_is_set(u32 mask, struct fimc_ctx *ctx)
@@ -574,9 +568,9 @@ static inline bool fimc_ctx_state_is_set(u32 mask, struct fimc_ctx *ctx)
unsigned long flags;
bool ret;
- spin_lock_irqsave(&ctx->slock, flags);
+ spin_lock_irqsave(&ctx->fimc_dev->slock, flags);
ret = (ctx->state & mask) == mask;
- spin_unlock_irqrestore(&ctx->slock, flags);
+ spin_unlock_irqrestore(&ctx->fimc_dev->slock, flags);
return ret;
}
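
fimc_ctx_state_lock_set() is renamed to fimc_ctx_state_set() and, like fimc_ctx_state_is_set(), now serializes on the per-device fimc_dev->slock instead of the removed per-context slock, so device and context state share one lock. Call sites keep the same shape; a one-line sketch with an illustrative flag combination:

	fimc_ctx_state_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
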
@@ -589,61 +583,13 @@ static inline int tiled_fmt(struct fimc_fmt *fmt)
static inline int fimc_get_alpha_mask(struct fimc_fmt *fmt)
{
switch (fmt->color) {
- case S5P_FIMC_RGB444: return 0x0f;
- case S5P_FIMC_RGB555: return 0x01;
- case S5P_FIMC_RGB888: return 0xff;
+ case FIMC_FMT_RGB444: return 0x0f;
+ case FIMC_FMT_RGB555: return 0x01;
+ case FIMC_FMT_RGB888: return 0xff;
default: return 0;
};
}
-static inline void fimc_hw_clear_irq(struct fimc_dev *dev)
-{
- u32 cfg = readl(dev->regs + S5P_CIGCTRL);
- cfg |= S5P_CIGCTRL_IRQ_CLR;
- writel(cfg, dev->regs + S5P_CIGCTRL);
-}
-
-static inline void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
-{
- u32 cfg = readl(dev->regs + S5P_CISCCTRL);
- if (on)
- cfg |= S5P_CISCCTRL_SCALERSTART;
- else
- cfg &= ~S5P_CISCCTRL_SCALERSTART;
- writel(cfg, dev->regs + S5P_CISCCTRL);
-}
-
-static inline void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
-{
- u32 cfg = readl(dev->regs + S5P_MSCTRL);
- if (on)
- cfg |= S5P_MSCTRL_ENVID;
- else
- cfg &= ~S5P_MSCTRL_ENVID;
- writel(cfg, dev->regs + S5P_MSCTRL);
-}
-
-static inline void fimc_hw_dis_capture(struct fimc_dev *dev)
-{
- u32 cfg = readl(dev->regs + S5P_CIIMGCPT);
- cfg &= ~(S5P_CIIMGCPT_IMGCPTEN | S5P_CIIMGCPT_IMGCPTEN_SC);
- writel(cfg, dev->regs + S5P_CIIMGCPT);
-}
-
-/**
- * fimc_hw_set_dma_seq - configure output DMA buffer sequence
- * @mask: each bit corresponds to one of 32 output buffer registers set
- * 1 to include buffer in the sequence, 0 to disable
- *
- * This function mask output DMA ring buffers, i.e. it allows to configure
- * which of the output buffer address registers will be used by the DMA
- * engine.
- */
-static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
-{
- writel(mask, dev->regs + S5P_CIFCNTSEQ);
-}
-
static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
enum v4l2_buf_type type)
{
@@ -665,48 +611,6 @@ static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
return frame;
}
-/* Return an index to the buffer actually being written. */
-static inline u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
-{
- u32 reg;
-
- if (dev->variant->has_cistatus2) {
- reg = readl(dev->regs + S5P_CISTATUS2) & 0x3F;
- return reg > 0 ? --reg : reg;
- } else {
- reg = readl(dev->regs + S5P_CISTATUS);
- return (reg & S5P_CISTATUS_FRAMECNT_MASK) >>
- S5P_CISTATUS_FRAMECNT_SHIFT;
- }
-}
-
-/* -----------------------------------------------------*/
-/* fimc-reg.c */
-void fimc_hw_reset(struct fimc_dev *fimc);
-void fimc_hw_set_rotation(struct fimc_ctx *ctx);
-void fimc_hw_set_target_format(struct fimc_ctx *ctx);
-void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
-void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
-void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
-void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
-void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
-void fimc_hw_en_capture(struct fimc_ctx *ctx);
-void fimc_hw_set_effect(struct fimc_ctx *ctx, bool active);
-void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
-void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
-void fimc_hw_set_input_path(struct fimc_ctx *ctx);
-void fimc_hw_set_output_path(struct fimc_ctx *ctx);
-void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
-void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
- int index);
-int fimc_hw_set_camera_source(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
-int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
-int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
-int fimc_hw_set_camera_type(struct fimc_dev *fimc,
- struct s5p_fimc_isp_info *cam);
-
/* -----------------------------------------------------*/
/* fimc-core.c */
int fimc_vidioc_enum_fmt_mplane(struct file *file, void *priv,
@@ -720,6 +624,7 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
struct v4l2_pix_format_mplane *pix);
struct fimc_fmt *fimc_find_format(const u32 *pixelformat, const u32 *mbus_code,
unsigned int mask, int index);
+struct fimc_fmt *fimc_get_format(unsigned int index);
int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
int dw, int dh, int rotation);
@@ -730,7 +635,7 @@ int fimc_prepare_addr(struct fimc_ctx *ctx, struct vb2_buffer *vb,
void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f);
void fimc_set_yuv_order(struct fimc_ctx *ctx);
void fimc_fill_frame(struct fimc_frame *frame, struct v4l2_format *f);
-void fimc_capture_irq_handler(struct fimc_dev *fimc, bool done);
+void fimc_capture_irq_handler(struct fimc_dev *fimc, int deq_buf);
int fimc_register_m2m_device(struct fimc_dev *fimc,
struct v4l2_device *v4l2_dev);
@@ -739,33 +644,18 @@ int fimc_register_driver(void);
void fimc_unregister_driver(void);
/* -----------------------------------------------------*/
+/* fimc-m2m.c */
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state);
+
+/* -----------------------------------------------------*/
/* fimc-capture.c */
-int fimc_register_capture_device(struct fimc_dev *fimc,
- struct v4l2_device *v4l2_dev);
-void fimc_unregister_capture_device(struct fimc_dev *fimc);
+int fimc_initialize_capture_subdev(struct fimc_dev *fimc);
+void fimc_unregister_capture_subdev(struct fimc_dev *fimc);
int fimc_capture_ctrls_create(struct fimc_dev *fimc);
-int fimc_vid_cap_buf_queue(struct fimc_dev *fimc,
- struct fimc_vid_buffer *fimc_vb);
void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
void *arg);
int fimc_capture_suspend(struct fimc_dev *fimc);
int fimc_capture_resume(struct fimc_dev *fimc);
-int fimc_capture_config_update(struct fimc_ctx *ctx);
-
-/* Locking: the caller holds fimc->slock */
-static inline void fimc_activate_capture(struct fimc_ctx *ctx)
-{
- fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
- fimc_hw_en_capture(ctx);
-}
-
-static inline void fimc_deactivate_capture(struct fimc_dev *fimc)
-{
- fimc_hw_en_lastirq(fimc, true);
- fimc_hw_dis_capture(fimc);
- fimc_hw_enable_scaler(fimc, false);
- fimc_hw_en_lastirq(fimc, false);
-}
/*
* Buffer list manipulation functions. Must be called with fimc.slock held.
diff --git a/drivers/media/video/s5p-fimc/fimc-lite-reg.c b/drivers/media/video/s5p-fimc/fimc-lite-reg.c
new file mode 100644
index 000000000000..419adfb7cdf9
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-lite-reg.c
@@ -0,0 +1,300 @@
+/*
+ * Register interface file for EXYNOS FIMC-LITE (camera interface) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <media/s5p_fimc.h>
+
+#include "fimc-lite-reg.h"
+#include "fimc-lite.h"
+#include "fimc-core.h"
+
+#define FLITE_RESET_TIMEOUT 50 /* in ms */
+
+void flite_hw_reset(struct fimc_lite *dev)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(FLITE_RESET_TIMEOUT);
+ u32 cfg;
+
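+ /* Request a software reset and wait until the module signals it is ready */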
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg |= FLITE_REG_CIGCTRL_SWRST_REQ;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ while (time_is_after_jiffies(end)) {
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ if (cfg & FLITE_REG_CIGCTRL_SWRST_RDY)
+ break;
+ usleep_range(1000, 5000);
+ }
+
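+ /* Trigger the actual software reset */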
+ cfg |= FLITE_REG_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_clear_pending_irq(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS);
+ cfg &= ~FLITE_REG_CISTATUS_IRQ_CAM;
+ writel(cfg, dev->regs + FLITE_REG_CISTATUS);
+}
+
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev)
+{
+ u32 intsrc = readl(dev->regs + FLITE_REG_CISTATUS);
+ return intsrc & FLITE_REG_CISTATUS_IRQ_MASK;
+}
+
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CISTATUS2);
+ cfg &= ~FLITE_REG_CISTATUS2_LASTCAPEND;
+ writel(cfg, dev->regs + FLITE_REG_CISTATUS2);
+}
+
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev)
+{
+ u32 cfg, intsrc;
+
+ /* Select interrupts to be enabled for each output mode */
+ if (dev->out_path == FIMC_IO_DMA) {
+ intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+ FLITE_REG_CIGCTRL_IRQ_LASTEN |
+ FLITE_REG_CIGCTRL_IRQ_STARTEN;
+ } else {
+ /* An output to the FIMC-IS */
+ intsrc = FLITE_REG_CIGCTRL_IRQ_OVFEN |
+ FLITE_REG_CIGCTRL_IRQ_LASTEN;
+ }
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg |= FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK;
+ cfg &= ~intsrc;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+void flite_hw_capture_start(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+ cfg |= FLITE_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+void flite_hw_capture_stop(struct fimc_lite *dev)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIIMGCPT);
+ cfg &= ~FLITE_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FLITE_REG_CIIMGCPT);
+}
+
+/*
+ * Test pattern (color bars) enable/disable. External sensor
+ * pixel clock must be active for the test pattern to work.
+ */
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ if (on)
+ cfg |= FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+ else
+ cfg &= ~FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+}
+
+static const u32 src_pixfmt_map[8][3] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY,
+ FLITE_REG_CIGCTRL_YUV422_1P },
+ { V4L2_MBUS_FMT_SGRBG8_1X8, 0, FLITE_REG_CIGCTRL_RAW8 },
+ { V4L2_MBUS_FMT_SGRBG10_1X10, 0, FLITE_REG_CIGCTRL_RAW10 },
+ { V4L2_MBUS_FMT_SGRBG12_1X12, 0, FLITE_REG_CIGCTRL_RAW12 },
+ { V4L2_MBUS_FMT_JPEG_1X8, 0, FLITE_REG_CIGCTRL_USER(1) },
+};
+
+/* Set camera input pixel format and resolution */
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
+{
+ enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
+ unsigned int i;
+ u32 cfg;
+
+ for (i = 0; i < ARRAY_SIZE(src_pixfmt_map); i++)
+ if (src_pixfmt_map[i][0] == pixelcode)
+ break;
+
+ if (i == ARRAY_SIZE(src_pixfmt_map)) {
+ /* Fall back to the first (YUYV) entry */
+ i = 0;
+ v4l2_err(dev->vfd,
+ "Unsupported pixel code, falling back to %#08x\n",
+ src_pixfmt_map[i][0]);
+ }
+
+ cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ cfg &= ~FLITE_REG_CIGCTRL_FMT_MASK;
+ cfg |= src_pixfmt_map[i][2];
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ cfg = readl(dev->regs + FLITE_REG_CISRCSIZE);
+ cfg &= ~(FLITE_REG_CISRCSIZE_ORDER422_MASK |
+ FLITE_REG_CISRCSIZE_SIZE_CAM_MASK);
+ cfg |= (f->f_width << 16) | f->f_height;
+ cfg |= src_pixfmt_map[i][1];
+ writel(cfg, dev->regs + FLITE_REG_CISRCSIZE);
+}
+
+/* Set the camera host input window offsets (cropping) */
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 hoff2, voff2;
+ u32 cfg;
+
+ cfg = readl(dev->regs + FLITE_REG_CIWDOFST);
+ cfg &= ~FLITE_REG_CIWDOFST_OFST_MASK;
+ cfg |= (f->rect.left << 16) | f->rect.top;
+ cfg |= FLITE_REG_CIWDOFST_WINOFSEN;
+ writel(cfg, dev->regs + FLITE_REG_CIWDOFST);
+
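+ /* Offsets from the right and bottom edges of the full frame */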
+ hoff2 = f->f_width - f->rect.width - f->rect.left;
+ voff2 = f->f_height - f->rect.height - f->rect.top;
+
+ cfg = (hoff2 << 16) | voff2;
+ writel(cfg, dev->regs + FLITE_REG_CIWDOFST2);
+}
+
+/* Select camera port (A, B) */
+static void flite_hw_set_camera_port(struct fimc_lite *dev, int id)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGENERAL);
+ if (id == 0)
+ cfg &= ~FLITE_REG_CIGENERAL_CAM_B;
+ else
+ cfg |= FLITE_REG_CIGENERAL_CAM_B;
+ writel(cfg, dev->regs + FLITE_REG_CIGENERAL);
+}
+
+/* Select serial or parallel bus, camera port (A, B) and set signal polarity */
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+ struct s5p_fimc_isp_info *s_info)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+ unsigned int flags = s_info->flags;
+
+ if (s_info->bus_type != FIMC_MIPI_CSI2) {
+ cfg &= ~(FLITE_REG_CIGCTRL_SELCAM_MIPI |
+ FLITE_REG_CIGCTRL_INVPOLPCLK |
+ FLITE_REG_CIGCTRL_INVPOLVSYNC |
+ FLITE_REG_CIGCTRL_INVPOLHREF);
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLPCLK;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLVSYNC;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= FLITE_REG_CIGCTRL_INVPOLHREF;
+ } else {
+ cfg |= FLITE_REG_CIGCTRL_SELCAM_MIPI;
+ }
+
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ flite_hw_set_camera_port(dev, s_info->mux_id);
+}
+
+void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
+{
+ static const u32 pixcode[4][2] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FLITE_REG_CIODMAFMT_YCRYCB },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FLITE_REG_CIODMAFMT_CBYCRY },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
+ };
+ u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixcode); i++)
+ if (pixcode[i][0] == dev->fmt->mbus_code)
+ break;
+ /* Default to the YCbYCr order for non-YUV formats */
+ if (i == ARRAY_SIZE(pixcode))
+ i = 0;
+ cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
+ writel(cfg | pixcode[i][1], dev->regs + FLITE_REG_CIODMAFMT);
+}
+
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f)
+{
+ u32 cfg;
+
+ /* Maximum output pixel size */
+ cfg = readl(dev->regs + FLITE_REG_CIOCAN);
+ cfg &= ~FLITE_REG_CIOCAN_MASK;
+ cfg |= (f->f_height << 16) | f->f_width;
+ writel(cfg, dev->regs + FLITE_REG_CIOCAN);
+
+ /* DMA offsets */
+ cfg = readl(dev->regs + FLITE_REG_CIOOFF);
+ cfg &= ~FLITE_REG_CIOOFF_MASK;
+ cfg |= (f->rect.top << 16) | f->rect.left;
+ writel(cfg, dev->regs + FLITE_REG_CIOOFF);
+}
+
+/* Enable/disable output DMA, set output pixel size and offsets (composition) */
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+ bool enable)
+{
+ u32 cfg = readl(dev->regs + FLITE_REG_CIGCTRL);
+
+ if (!enable) {
+ cfg |= FLITE_REG_CIGCTRL_ODMA_DISABLE;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+ return;
+ }
+
+ cfg &= ~FLITE_REG_CIGCTRL_ODMA_DISABLE;
+ writel(cfg, dev->regs + FLITE_REG_CIGCTRL);
+
+ flite_hw_set_out_order(dev, f);
+ flite_hw_set_dma_window(dev, f);
+}
+
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CISRCSIZE" },
+ { 0x04, "CIGCTRL" },
+ { 0x08, "CIIMGCPT" },
+ { 0x0c, "CICPTSEQ" },
+ { 0x10, "CIWDOFST" },
+ { 0x14, "CIWDOFST2" },
+ { 0x18, "CIODMAFMT" },
+ { 0x20, "CIOCAN" },
+ { 0x24, "CIOOFF" },
+ { 0x30, "CIOSA" },
+ { 0x40, "CISTATUS" },
+ { 0x44, "CISTATUS2" },
+ { 0xf0, "CITHOLD" },
+ { 0xfc, "CIGENERAL" },
+ };
+ u32 i;
+
+ pr_info("--- %s ---\n", label);
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = readl(dev->regs + registers[i].offset);
+ pr_info("%s: %s:\t0x%08x\n", __func__, registers[i].name, cfg);
+ }
+}
diff --git a/drivers/media/video/s5p-fimc/fimc-lite-reg.h b/drivers/media/video/s5p-fimc/fimc-lite-reg.h
new file mode 100644
index 000000000000..adb9e9e6f3c2
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-lite-reg.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_REG_H_
+#define FIMC_LITE_REG_H_
+
+#include "fimc-lite.h"
+
+/* Camera Source size */
+#define FLITE_REG_CISRCSIZE 0x00
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCBYCR (0 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_YCRYCB (1 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CBYCRY (2 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_IN_CRYCBY (3 << 14)
+#define FLITE_REG_CISRCSIZE_ORDER422_MASK (0x3 << 14)
+#define FLITE_REG_CISRCSIZE_SIZE_CAM_MASK (0x3fff << 16 | 0x3fff)
+
+/* Global control */
+#define FLITE_REG_CIGCTRL 0x04
+#define FLITE_REG_CIGCTRL_YUV422_1P (0x1e << 24)
+#define FLITE_REG_CIGCTRL_RAW8 (0x2a << 24)
+#define FLITE_REG_CIGCTRL_RAW10 (0x2b << 24)
+#define FLITE_REG_CIGCTRL_RAW12 (0x2c << 24)
+#define FLITE_REG_CIGCTRL_RAW14 (0x2d << 24)
+/* User defined formats. x = 0...15 */
+#define FLITE_REG_CIGCTRL_USER(x) ((0x30 + (x) - 1) << 24)
+#define FLITE_REG_CIGCTRL_FMT_MASK (0x3f << 24)
+#define FLITE_REG_CIGCTRL_SHADOWMASK_DISABLE (1 << 21)
+#define FLITE_REG_CIGCTRL_ODMA_DISABLE (1 << 20)
+#define FLITE_REG_CIGCTRL_SWRST_REQ (1 << 19)
+#define FLITE_REG_CIGCTRL_SWRST_RDY (1 << 18)
+#define FLITE_REG_CIGCTRL_SWRST (1 << 17)
+#define FLITE_REG_CIGCTRL_TEST_PATTERN_COLORBAR (1 << 15)
+#define FLITE_REG_CIGCTRL_INVPOLPCLK (1 << 14)
+#define FLITE_REG_CIGCTRL_INVPOLVSYNC (1 << 13)
+#define FLITE_REG_CIGCTRL_INVPOLHREF (1 << 12)
+/* Interrupts mask bits (1 disables an interrupt) */
+#define FLITE_REG_CIGCTRL_IRQ_LASTEN (1 << 8)
+#define FLITE_REG_CIGCTRL_IRQ_ENDEN (1 << 7)
+#define FLITE_REG_CIGCTRL_IRQ_STARTEN (1 << 6)
+#define FLITE_REG_CIGCTRL_IRQ_OVFEN (1 << 5)
+#define FLITE_REG_CIGCTRL_IRQ_DISABLE_MASK (0xf << 5)
+#define FLITE_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+
+/* Image Capture Enable */
+#define FLITE_REG_CIIMGCPT 0x08
+#define FLITE_REG_CIIMGCPT_IMGCPTEN (1 << 31)
+#define FLITE_REG_CIIMGCPT_CPT_FREN (1 << 25)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FRCNT (1 << 18)
+#define FLITE_REG_CIIMGCPT_CPT_MOD_FREN (0 << 18)
+
+/* Capture Sequence */
+#define FLITE_REG_CICPTSEQ 0x0c
+
+/* Camera Window Offset */
+#define FLITE_REG_CIWDOFST 0x10
+#define FLITE_REG_CIWDOFST_WINOFSEN (1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVIY (1 << 31)
+#define FLITE_REG_CIWDOFST_CLROVFICB (1 << 15)
+#define FLITE_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FLITE_REG_CIWDOFST_OFST_MASK ((0x1fff << 16) | 0x1fff)
+
+/* Camera Window Offset2 */
+#define FLITE_REG_CIWDOFST2 0x14
+
+/* Camera Output DMA Format */
+#define FLITE_REG_CIODMAFMT 0x18
+#define FLITE_REG_CIODMAFMT_RAW_CON (1 << 15)
+#define FLITE_REG_CIODMAFMT_PACK12 (1 << 14)
+#define FLITE_REG_CIODMAFMT_CRYCBY (0 << 4)
+#define FLITE_REG_CIODMAFMT_CBYCRY (1 << 4)
+#define FLITE_REG_CIODMAFMT_YCRYCB (2 << 4)
+#define FLITE_REG_CIODMAFMT_YCBYCR (3 << 4)
+#define FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK (0x3 << 4)
+
+/* Camera Output Canvas */
+#define FLITE_REG_CIOCAN 0x20
+#define FLITE_REG_CIOCAN_MASK ((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Offset */
+#define FLITE_REG_CIOOFF 0x24
+#define FLITE_REG_CIOOFF_MASK ((0x3fff << 16) | 0x3fff)
+
+/* Camera Output DMA Start Address */
+#define FLITE_REG_CIOSA 0x30
+
+/* Camera Status */
+#define FLITE_REG_CISTATUS 0x40
+#define FLITE_REG_CISTATUS_MIPI_VVALID (1 << 22)
+#define FLITE_REG_CISTATUS_MIPI_HVALID (1 << 21)
+#define FLITE_REG_CISTATUS_MIPI_DVALID (1 << 20)
+#define FLITE_REG_CISTATUS_ITU_VSYNC (1 << 14)
+#define FLITE_REG_CISTATUS_ITU_HREFF (1 << 13)
+#define FLITE_REG_CISTATUS_OVFIY (1 << 10)
+#define FLITE_REG_CISTATUS_OVFICB (1 << 9)
+#define FLITE_REG_CISTATUS_OVFICR (1 << 8)
+#define FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW (1 << 7)
+#define FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND (1 << 6)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART (1 << 5)
+#define FLITE_REG_CISTATUS_IRQ_SRC_FRMEND (1 << 4)
+#define FLITE_REG_CISTATUS_IRQ_CAM (1 << 0)
+#define FLITE_REG_CISTATUS_IRQ_MASK (0xf << 4)
+
+/* Camera Status2 */
+#define FLITE_REG_CISTATUS2 0x44
+#define FLITE_REG_CISTATUS2_LASTCAPEND (1 << 1)
+#define FLITE_REG_CISTATUS2_FRMEND (1 << 0)
+
+/* Qos Threshold */
+#define FLITE_REG_CITHOLD 0xf0
+#define FLITE_REG_CITHOLD_W_QOS_EN (1 << 30)
+
+/* Camera General Purpose */
+#define FLITE_REG_CIGENERAL 0xfc
+/* b0: 1 - camera B, 0 - camera A */
+#define FLITE_REG_CIGENERAL_CAM_B (1 << 0)
+
+/* ----------------------------------------------------------------------------
+ * Function declarations
+ */
+void flite_hw_reset(struct fimc_lite *dev);
+void flite_hw_clear_pending_irq(struct fimc_lite *dev);
+u32 flite_hw_get_interrupt_source(struct fimc_lite *dev);
+void flite_hw_clear_last_capture_end(struct fimc_lite *dev);
+void flite_hw_set_interrupt_mask(struct fimc_lite *dev);
+void flite_hw_capture_start(struct fimc_lite *dev);
+void flite_hw_capture_stop(struct fimc_lite *dev);
+void flite_hw_set_camera_bus(struct fimc_lite *dev,
+ struct s5p_fimc_isp_info *s_info);
+void flite_hw_set_camera_polarity(struct fimc_lite *dev,
+ struct s5p_fimc_isp_info *cam);
+void flite_hw_set_window_offset(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f);
+
+void flite_hw_set_output_dma(struct fimc_lite *dev, struct flite_frame *f,
+ bool enable);
+void flite_hw_set_dma_window(struct fimc_lite *dev, struct flite_frame *f);
+void flite_hw_set_test_pattern(struct fimc_lite *dev, bool on);
+void flite_hw_dump_regs(struct fimc_lite *dev, const char *label);
+
+static inline void flite_hw_set_output_addr(struct fimc_lite *dev, u32 paddr)
+{
+ writel(paddr, dev->regs + FLITE_REG_CIOSA);
+}
+#endif /* FIMC_LITE_REG_H_ */
diff --git a/drivers/media/video/s5p-fimc/fimc-lite.c b/drivers/media/video/s5p-fimc/fimc-lite.c
new file mode 100644
index 000000000000..74ff310db30c
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-lite.c
@@ -0,0 +1,1607 @@
+/*
+ * Samsung EXYNOS FIMC-LITE (camera host interface) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-mdevice.h"
+#include "fimc-core.h"
+#include "fimc-lite-reg.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static const struct fimc_fmt fimc_lite_formats[] = {
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCBYCR422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = { 16 },
+ .color = FIMC_FMT_CBYCRY422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_UYVY8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = { 16 },
+ .color = FIMC_FMT_CRYCBY422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_VYUY8_2X8,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = { 16 },
+ .color = FIMC_FMT_YCRYCB422,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_YVYU8_2X8,
+ }, {
+ .name = "RAW8 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .depth = { 8 },
+ .color = FIMC_FMT_RAW8,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG8_1X8,
+ }, {
+ .name = "RAW10 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .depth = { 10 },
+ .color = FIMC_FMT_RAW10,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
+ }, {
+ .name = "RAW12 (GRBG)",
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .depth = { 12 },
+ .color = FIMC_FMT_RAW12,
+ .memplanes = 1,
+ .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
+ },
+};
+
+/**
+ * fimc_lite_find_format - lookup fimc color format by fourcc or media bus code
+ * @pixelformat: fourcc to match, ignored if null
+ * @mbus_code: media bus code to match, ignored if null
+ * @index: index to the fimc_lite_formats array, ignored if negative
+ */
+static const struct fimc_fmt *fimc_lite_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, int index)
+{
+ const struct fimc_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(fimc_lite_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(fimc_lite_formats); ++i) {
+ fmt = &fimc_lite_formats[i];
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+static int fimc_lite_hw_init(struct fimc_lite *fimc)
+{
+ struct fimc_pipeline *pipeline = &fimc->pipeline;
+ struct fimc_sensor_info *sensor;
+ unsigned long flags;
+
+ if (pipeline->subdevs[IDX_SENSOR] == NULL)
+ return -ENXIO;
+
+ if (fimc->fmt == NULL)
+ return -EINVAL;
+
+ sensor = v4l2_get_subdev_hostdata(pipeline->subdevs[IDX_SENSOR]);
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ flite_hw_set_camera_bus(fimc, sensor->pdata);
+ flite_hw_set_source_format(fimc, &fimc->inp_frame);
+ flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+ flite_hw_set_output_dma(fimc, &fimc->out_frame, true);
+ flite_hw_set_interrupt_mask(fimc);
+ flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+
+ if (debug > 0)
+ flite_hw_dump_regs(fimc, __func__);
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+}
+
+/*
+ * Reinitialize the driver so it is ready to start streaming again.
+ * Set fimc->state to indicate stream off and the hardware shut down state.
+ * If not suspending (@suspend is false), return any buffers to videobuf2.
+ * Otherwise put any owned buffers onto the pending buffers queue, so they
+ * can be re-spun when the device is resumed. Also perform a FIMC
+ * software reset and disable streaming on the whole pipeline if required.
+ */
+static int fimc_lite_reinit(struct fimc_lite *fimc, bool suspend)
+{
+ struct flite_buffer *buf;
+ unsigned long flags;
+ bool streaming;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ streaming = fimc->state & (1 << ST_SENSOR_STREAM);
+
+ fimc->state &= ~(1 << ST_FLITE_RUN | 1 << ST_FLITE_OFF |
+ 1 << ST_FLITE_STREAM | 1 << ST_SENSOR_STREAM);
+ if (suspend)
+ fimc->state |= (1 << ST_FLITE_SUSPENDED);
+ else
+ fimc->state &= ~(1 << ST_FLITE_PENDING |
+ 1 << ST_FLITE_SUSPENDED);
+
+ /* Release unused buffers */
+ while (!suspend && !list_empty(&fimc->pending_buf_q)) {
+ buf = fimc_lite_pending_queue_pop(fimc);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ /* When suspending, move the remaining active buffers onto the pending queue */
+ while (!list_empty(&fimc->active_buf_q)) {
+ buf = fimc_lite_active_queue_pop(fimc);
+ if (suspend)
+ fimc_lite_pending_queue_add(fimc, buf);
+ else
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ flite_hw_reset(fimc);
+
+ if (!streaming)
+ return 0;
+
+ return fimc_pipeline_s_stream(&fimc->pipeline, 0);
+}
+
+static int fimc_lite_stop_capture(struct fimc_lite *fimc, bool suspend)
+{
+ unsigned long flags;
+
+ if (!fimc_lite_active(fimc))
+ return 0;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ set_bit(ST_FLITE_OFF, &fimc->state);
+ flite_hw_capture_stop(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ wait_event_timeout(fimc->irq_queue,
+ !test_bit(ST_FLITE_OFF, &fimc->state),
+ (2*HZ/10)); /* 200 ms */
+
+ return fimc_lite_reinit(fimc, suspend);
+}
+
+/* Must be called with fimc.slock spinlock held. */
+static void fimc_lite_config_update(struct fimc_lite *fimc)
+{
+ flite_hw_set_window_offset(fimc, &fimc->inp_frame);
+ flite_hw_set_dma_window(fimc, &fimc->out_frame);
+ flite_hw_set_test_pattern(fimc, fimc->test_pattern->val);
+ clear_bit(ST_FLITE_CONFIG, &fimc->state);
+}
+
+static irqreturn_t flite_irq_handler(int irq, void *priv)
+{
+ struct fimc_lite *fimc = priv;
+ struct flite_buffer *vbuf;
+ unsigned long flags;
+ struct timeval *tv;
+ struct timespec ts;
+ u32 intsrc;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ intsrc = flite_hw_get_interrupt_source(fimc);
+ flite_hw_clear_pending_irq(fimc);
+
+ if (test_and_clear_bit(ST_FLITE_OFF, &fimc->state)) {
+ wake_up(&fimc->irq_queue);
+ goto done;
+ }
+
+ if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_OVERFLOW) {
+ clear_bit(ST_FLITE_RUN, &fimc->state);
+ fimc->events.data_overflow++;
+ }
+
+ if (intsrc & FLITE_REG_CISTATUS_IRQ_SRC_LASTCAPEND) {
+ flite_hw_clear_last_capture_end(fimc);
+ clear_bit(ST_FLITE_STREAM, &fimc->state);
+ wake_up(&fimc->irq_queue);
+ }
+
+ if (fimc->out_path != FIMC_IO_DMA)
+ goto done;
+
+ if ((intsrc & FLITE_REG_CISTATUS_IRQ_SRC_FRMSTART) &&
+ test_bit(ST_FLITE_RUN, &fimc->state) &&
+ !list_empty(&fimc->active_buf_q) &&
+ !list_empty(&fimc->pending_buf_q)) {
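+ /*
+ * Return the completed buffer to videobuf2 and program the
+ * DMA output address of the next pending buffer.
+ */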
+ vbuf = fimc_lite_active_queue_pop(fimc);
+ ktime_get_ts(&ts);
+ tv = &vbuf->vb.v4l2_buf.timestamp;
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ vbuf->vb.v4l2_buf.sequence = fimc->frame_count++;
+ vb2_buffer_done(&vbuf->vb, VB2_BUF_STATE_DONE);
+
+ vbuf = fimc_lite_pending_queue_pop(fimc);
+ flite_hw_set_output_addr(fimc, vbuf->paddr);
+ fimc_lite_active_queue_add(fimc, vbuf);
+ }
+
+ if (test_bit(ST_FLITE_CONFIG, &fimc->state))
+ fimc_lite_config_update(fimc);
+
+ if (list_empty(&fimc->pending_buf_q)) {
+ flite_hw_capture_stop(fimc);
+ clear_bit(ST_FLITE_STREAM, &fimc->state);
+ }
+done:
+ set_bit(ST_FLITE_RUN, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return IRQ_HANDLED;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_lite *fimc = q->drv_priv;
+ int ret;
+
+ fimc->frame_count = 0;
+
+ ret = fimc_lite_hw_init(fimc);
+ if (ret) {
+ fimc_lite_reinit(fimc, false);
+ return ret;
+ }
+
+ set_bit(ST_FLITE_PENDING, &fimc->state);
+
+ if (!list_empty(&fimc->active_buf_q) &&
+ !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+ flite_hw_capture_start(fimc);
+
+ if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+ fimc_pipeline_s_stream(&fimc->pipeline, 1);
+ }
+ if (debug > 0)
+ flite_hw_dump_regs(fimc, __func__);
+
+ return 0;
+}
+
+static int stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_lite *fimc = q->drv_priv;
+
+ if (!fimc_lite_active(fimc))
+ return -EINVAL;
+
+ return fimc_lite_stop_capture(fimc, false);
+}
+
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ const struct v4l2_pix_format_mplane *pixm = NULL;
+ struct fimc_lite *fimc = vq->drv_priv;
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = fimc->fmt;
+ unsigned long wh;
+ int i;
+
+ if (pfmt) {
+ pixm = &pfmt->fmt.pix_mp;
+ fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, -1);
+ wh = pixm->width * pixm->height;
+ } else {
+ wh = frame->f_width * frame->f_height;
+ }
+
+ if (fmt == NULL)
+ return -EINVAL;
+
+ *num_planes = fmt->memplanes;
+
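+ /* Plane size in bytes: width * height * bits per pixel / 8 */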
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) / 8;
+ if (pixm)
+ sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
+ else
+ sizes[i] = size;
+ allocators[i] = fimc->alloc_ctx;
+ }
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct fimc_lite *fimc = vq->drv_priv;
+ int i;
+
+ if (fimc->fmt == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < fimc->fmt->memplanes; i++) {
+ unsigned long size = fimc->payload[i];
+
+ if (vb2_plane_size(vb, i) < size) {
+ v4l2_err(fimc->vfd,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct flite_buffer *buf
+ = container_of(vb, struct flite_buffer, vb);
+ struct fimc_lite *fimc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ buf->paddr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
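+ /*
+ * If the device is idle and there is no active buffer yet,
+ * program this buffer's DMA address directly, otherwise add
+ * it to the pending queue.
+ */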
+ if (!test_bit(ST_FLITE_SUSPENDED, &fimc->state) &&
+ !test_bit(ST_FLITE_STREAM, &fimc->state) &&
+ list_empty(&fimc->active_buf_q)) {
+ flite_hw_set_output_addr(fimc, buf->paddr);
+ fimc_lite_active_queue_add(fimc, buf);
+ } else {
+ fimc_lite_pending_queue_add(fimc, buf);
+ }
+
+ if (vb2_is_streaming(&fimc->vb_queue) &&
+ !list_empty(&fimc->pending_buf_q) &&
+ !test_and_set_bit(ST_FLITE_STREAM, &fimc->state)) {
+ flite_hw_capture_start(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (!test_and_set_bit(ST_SENSOR_STREAM, &fimc->state))
+ fimc_pipeline_s_stream(&fimc->pipeline, 1);
+ return;
+ }
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_lock(struct vb2_queue *vq)
+{
+ struct fimc_lite *fimc = vb2_get_drv_priv(vq);
+ mutex_lock(&fimc->lock);
+}
+
+static void fimc_unlock(struct vb2_queue *vq)
+{
+ struct fimc_lite *fimc = vb2_get_drv_priv(vq);
+ mutex_unlock(&fimc->lock);
+}
+
+static const struct vb2_ops fimc_lite_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .wait_prepare = fimc_unlock,
+ .wait_finish = fimc_lock,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ memset(&fimc->events, 0, sizeof(fimc->events));
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static int fimc_lite_open(struct file *file)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ set_bit(ST_FLITE_IN_USE, &fimc->state);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ goto done;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto done;
+
+ if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+ ret = fimc_pipeline_initialize(&fimc->pipeline,
+ &fimc->vfd->entity, true);
+ if (ret < 0) {
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ fimc->ref_count--;
+ v4l2_fh_release(file);
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+ }
+
+ fimc_lite_clear_event_counters(fimc);
+ }
+done:
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static int fimc_lite_close(struct file *file)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+ fimc_lite_stop_capture(fimc, false);
+ fimc_pipeline_shutdown(&fimc->pipeline);
+ clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+ }
+
+ pm_runtime_put(&fimc->pdev->dev);
+
+ if (fimc->ref_count == 0)
+ vb2_queue_release(&fimc->vb_queue);
+
+ ret = v4l2_fh_release(file);
+
+ mutex_unlock(&fimc->lock);
+ return ret;
+}
+
+static unsigned int fimc_lite_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return POLLERR;
+
+ ret = vb2_poll(&fimc->vb_queue, file, wait);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
+}
+
+static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = vb2_mmap(&fimc->vb_queue, vma);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations fimc_lite_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_lite_open,
+ .release = fimc_lite_close,
+ .poll = fimc_lite_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = fimc_lite_mmap,
+};
+
+/*
+ * Format and crop negotiation helpers
+ */
+
+static const struct fimc_fmt *fimc_lite_try_format(struct fimc_lite *fimc,
+ u32 *width, u32 *height,
+ u32 *code, u32 *fourcc, int pad)
+{
+ struct flite_variant *variant = fimc->variant;
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_lite_find_format(fourcc, code, 0);
+ if (WARN_ON(!fmt))
+ return NULL;
+
+ if (code)
+ *code = fmt->mbus_code;
+ if (fourcc)
+ *fourcc = fmt->fourcc;
+
+ if (pad == FLITE_SD_PAD_SINK) {
+ v4l_bound_align_image(width, 8, variant->max_width,
+ ffs(variant->out_width_align) - 1,
+ height, 0, variant->max_height, 0, 0);
+ } else {
+ v4l_bound_align_image(width, 8, fimc->inp_frame.rect.width,
+ ffs(variant->out_width_align) - 1,
+ height, 0, fimc->inp_frame.rect.height,
+ 0, 0);
+ }
+
+ v4l2_dbg(1, debug, &fimc->subdev, "code: 0x%x, %dx%d\n",
+ code ? *code : 0, *width, *height);
+
+ return fmt;
+}
+
+static void fimc_lite_try_crop(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+ struct flite_frame *frame = &fimc->inp_frame;
+
+ v4l_bound_align_image(&r->width, 0, frame->f_width, 0,
+ &r->height, 0, frame->f_height, 0, 0);
+
+ /* Adjust left/top if cropping rectangle got out of bounds */
+ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+ r->left = round_down(r->left, fimc->variant->win_hor_offs_align);
+ r->top = clamp_t(u32, r->top, 0, frame->f_height - r->height);
+
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, sink fmt: %dx%d",
+ r->left, r->top, r->width, r->height,
+ frame->f_width, frame->f_height);
+}
+
+static void fimc_lite_try_compose(struct fimc_lite *fimc, struct v4l2_rect *r)
+{
+ struct flite_frame *frame = &fimc->out_frame;
+ struct v4l2_rect *crop_rect = &fimc->inp_frame.rect;
+
+ /*
+ * Scaling is not supported, so the compose rectangle size is
+ * forced to be equal to the sink crop rectangle size.
+ */
+ r->width = crop_rect->width;
+ r->height = crop_rect->height;
+
+ /* Adjust left/top if the composing rectangle got out of bounds */
+ r->left = clamp_t(u32, r->left, 0, frame->f_width - r->width);
+ r->left = round_down(r->left, fimc->variant->out_hor_offs_align);
+ r->top = clamp_t(u32, r->top, 0, fimc->out_frame.f_height - r->height);
+
+ v4l2_dbg(1, debug, &fimc->subdev, "(%d,%d)/%dx%d, source fmt: %dx%d",
+ r->left, r->top, r->width, r->height,
+ frame->f_width, frame->f_height);
+}
+
+/*
+ * Video node ioctl operations
+ */
+static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, FIMC_LITE_DRV_NAME, sizeof(cap->driver));
+ cap->bus_info[0] = 0;
+ cap->card[0] = 0;
+ cap->capabilities = V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int fimc_lite_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct fimc_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(fimc_lite_formats))
+ return -EINVAL;
+
+ fmt = &fimc_lite_formats[f->index];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int fimc_lite_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt = &pixm->plane_fmt[0];
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = fimc->fmt;
+
+ plane_fmt->bytesperline = (frame->f_width * fmt->depth[0]) / 8;
+ plane_fmt->sizeimage = plane_fmt->bytesperline * frame->f_height;
+
+ pixm->num_planes = fmt->memplanes;
+ pixm->pixelformat = fmt->fourcc;
+ pixm->width = frame->f_width;
+ pixm->height = frame->f_height;
+ pixm->field = V4L2_FIELD_NONE;
+ pixm->colorspace = V4L2_COLORSPACE_JPEG;
+ return 0;
+}
+
+static int fimc_lite_try_fmt(struct fimc_lite *fimc,
+ struct v4l2_pix_format_mplane *pixm,
+ const struct fimc_fmt **ffmt)
+{
+ struct flite_variant *variant = fimc->variant;
+ u32 bpl = pixm->plane_fmt[0].bytesperline;
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_lite_find_format(&pixm->pixelformat, NULL, 0);
+ if (WARN_ON(fmt == NULL))
+ return -EINVAL;
+ if (ffmt)
+ *ffmt = fmt;
+ v4l_bound_align_image(&pixm->width, 8, variant->max_width,
+ ffs(variant->out_width_align) - 1,
+ &pixm->height, 0, variant->max_height, 0, 0);
+
+ if ((bpl == 0 || ((bpl * 8) / fmt->depth[0]) < pixm->width))
+ pixm->plane_fmt[0].bytesperline = (pixm->width *
+ fmt->depth[0]) / 8;
+
+ if (pixm->plane_fmt[0].sizeimage == 0)
+ pixm->plane_fmt[0].sizeimage = (pixm->width * pixm->height *
+ fmt->depth[0]) / 8;
+ pixm->num_planes = fmt->memplanes;
+ pixm->pixelformat = fmt->fourcc;
+ pixm->colorspace = V4L2_COLORSPACE_JPEG;
+ pixm->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static int fimc_lite_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, NULL);
+}
+
+static int fimc_lite_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *frame = &fimc->out_frame;
+ const struct fimc_fmt *fmt = NULL;
+ int ret;
+
+ if (vb2_is_busy(&fimc->vb_queue))
+ return -EBUSY;
+
+ ret = fimc_lite_try_fmt(fimc, &f->fmt.pix_mp, &fmt);
+ if (ret < 0)
+ return ret;
+
+ fimc->fmt = fmt;
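+ /* Payload is the larger of the computed image size and the requested sizeimage */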
+ fimc->payload[0] = max((pixm->width * pixm->height * fmt->depth[0]) / 8,
+ pixm->plane_fmt[0].sizeimage);
+ frame->f_width = pixm->width;
+ frame->f_height = pixm->height;
+
+ return 0;
+}
+
+static int fimc_pipeline_validate(struct fimc_lite *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->subdev;
+ struct v4l2_subdev_format sink_fmt, src_fmt;
+ struct media_pad *pad;
+ int ret;
+
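+ /*
+ * Walk the pipeline from the FIMC-LITE sink pad towards the sensor
+ * and check the media bus format is the same at both ends of each
+ * enabled link.
+ */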
+ while (1) {
+ /* Retrieve format at the sink pad */
+ pad = &sd->entity.pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+ /* Don't call FIMC subdev operation to avoid nested locking */
+ if (sd == &fimc->subdev) {
+ struct flite_frame *ff = &fimc->out_frame;
+ sink_fmt.format.width = ff->f_width;
+ sink_fmt.format.height = ff->f_height;
+ sink_fmt.format.code = fimc->fmt->mbus_code;
+ } else {
+ sink_fmt.pad = pad->index;
+ sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
+ &sink_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+ }
+ /* Retrieve format at the source pad */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ src_fmt.pad = pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EPIPE;
+
+ if (src_fmt.format.width != sink_fmt.format.width ||
+ src_fmt.format.height != sink_fmt.format.height ||
+ src_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+ }
+ return 0;
+}
+
+static int fimc_lite_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct v4l2_subdev *sensor = fimc->pipeline.subdevs[IDX_SENSOR];
+ struct fimc_pipeline *p = &fimc->pipeline;
+ int ret;
+
+ if (fimc_lite_active(fimc))
+ return -EBUSY;
+
+ ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+ if (ret < 0)
+ return ret;
+
+ ret = fimc_pipeline_validate(fimc);
+ if (ret) {
+ media_entity_pipeline_stop(&sensor->entity);
+ return ret;
+ }
+
+ return vb2_streamon(&fimc->vb_queue, type);
+}
+
+static int fimc_lite_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct v4l2_subdev *sd = fimc->pipeline.subdevs[IDX_SENSOR];
+ int ret;
+
+ ret = vb2_streamoff(&fimc->vb_queue, type);
+ if (ret == 0)
+ media_entity_pipeline_stop(&sd->entity);
+ return ret;
+}
+
+static int fimc_lite_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ reqbufs->count = max_t(u32, FLITE_REQ_BUFS_MIN, reqbufs->count);
+ ret = vb2_reqbufs(&fimc->vb_queue, reqbufs);
+ if (!ret)
+ fimc->reqbufs_count = reqbufs->count;
+
+ return ret;
+}
+
+static int fimc_lite_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_querybuf(&fimc->vb_queue, buf);
+}
+
+static int fimc_lite_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_qbuf(&fimc->vb_queue, buf);
+}
+
+static int fimc_lite_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_dqbuf(&fimc->vb_queue, buf, file->f_flags & O_NONBLOCK);
+}
+
+static int fimc_lite_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_create_bufs(&fimc->vb_queue, create);
+}
+
+static int fimc_lite_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *b)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+
+ return vb2_prepare_buf(&fimc->vb_queue, b);
+}
+
+/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int fimc_lite_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *f = &fimc->out_frame;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = f->f_width;
+ sel->r.height = f->f_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ sel->r = f->rect;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int fimc_lite_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct fimc_lite *fimc = video_drvdata(file);
+ struct flite_frame *f = &fimc->out_frame;
+ struct v4l2_rect rect = sel->r;
+ unsigned long flags;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ sel->target != V4L2_SEL_TGT_COMPOSE_ACTIVE)
+ return -EINVAL;
+
+ fimc_lite_try_compose(fimc, &rect);
+
+ if ((sel->flags & V4L2_SEL_FLAG_LE) &&
+ !enclosed_rectangle(&rect, &sel->r))
+ return -ERANGE;
+
+ if ((sel->flags & V4L2_SEL_FLAG_GE) &&
+ !enclosed_rectangle(&sel->r, &rect))
+ return -ERANGE;
+
+ sel->r = rect;
+ spin_lock_irqsave(&fimc->slock, flags);
+ f->rect = rect;
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_lite_ioctl_ops = {
+ .vidioc_querycap = fimc_vidioc_querycap_capture,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_lite_enum_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_lite_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_lite_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_lite_g_fmt_mplane,
+ .vidioc_g_selection = fimc_lite_g_selection,
+ .vidioc_s_selection = fimc_lite_s_selection,
+ .vidioc_reqbufs = fimc_lite_reqbufs,
+ .vidioc_querybuf = fimc_lite_querybuf,
+ .vidioc_prepare_buf = fimc_lite_prepare_buf,
+ .vidioc_create_bufs = fimc_lite_create_bufs,
+ .vidioc_qbuf = fimc_lite_qbuf,
+ .vidioc_dqbuf = fimc_lite_dqbuf,
+ .vidioc_streamon = fimc_lite_streamon,
+ .vidioc_streamoff = fimc_lite_streamoff,
+};
+
+/* Capture subdev media entity operations */
+static int fimc_lite_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ unsigned int remote_ent_type = media_entity_type(remote->entity);
+
+ if (WARN_ON(fimc == NULL))
+ return 0;
+
+ v4l2_dbg(1, debug, sd, "%s: %s --> %s, flags: 0x%x. source_id: 0x%x",
+ __func__, local->entity->name, remote->entity->name,
+ flags, fimc->source_subdev_grp_id);
+
+ switch (local->index) {
+ case FLITE_SD_PAD_SINK:
+ if (remote_ent_type != MEDIA_ENT_T_V4L2_SUBDEV)
+ return -EINVAL;
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (fimc->source_subdev_grp_id != 0)
+ return -EBUSY;
+ fimc->source_subdev_grp_id = sd->grp_id;
+ return 0;
+ }
+
+ fimc->source_subdev_grp_id = 0;
+ break;
+
+ case FLITE_SD_PAD_SOURCE:
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ fimc->out_path = FIMC_IO_NONE;
+ return 0;
+ }
+ if (remote_ent_type == MEDIA_ENT_T_V4L2_SUBDEV)
+ fimc->out_path = FIMC_IO_ISP;
+ else
+ fimc->out_path = FIMC_IO_DMA;
+ break;
+
+ default:
+ v4l2_err(sd, "Invalid pad index\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations fimc_lite_subdev_media_ops = {
+ .link_setup = fimc_lite_link_setup,
+};
+
+static int fimc_lite_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct fimc_fmt *fmt;
+
+ fmt = fimc_lite_find_format(NULL, NULL, code->index);
+ if (!fmt)
+ return -EINVAL;
+ code->code = fmt->mbus_code;
+ return 0;
+}
+
+static int fimc_lite_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct flite_frame *f = &fimc->out_frame;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ fmt->format = *mf;
+ return 0;
+ }
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ mutex_lock(&fimc->lock);
+ mf->code = fimc->fmt->mbus_code;
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ /* full camera input frame size */
+ mf->width = f->f_width;
+ mf->height = f->f_height;
+ } else {
+ /* crop size */
+ mf->width = f->rect.width;
+ mf->height = f->rect.height;
+ }
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_lite_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct flite_frame *sink = &fimc->inp_frame;
+ const struct fimc_fmt *ffmt;
+
+ v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %dx%d",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mutex_lock(&fimc->lock);
+
+ if ((fimc->out_path == FIMC_IO_ISP && sd->entity.stream_count > 0) ||
+ (fimc->out_path == FIMC_IO_DMA && vb2_is_busy(&fimc->vb_queue))) {
+ mutex_unlock(&fimc->lock);
+ return -EBUSY;
+ }
+
+ ffmt = fimc_lite_try_format(fimc, &mf->width, &mf->height,
+ &mf->code, NULL, fmt->pad);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf = v4l2_subdev_get_try_format(fh, fmt->pad);
+ *mf = fmt->format;
+ mutex_unlock(&fimc->lock);
+ return 0;
+ }
+
+ if (fmt->pad == FLITE_SD_PAD_SINK) {
+ sink->f_width = mf->width;
+ sink->f_height = mf->height;
+ fimc->fmt = ffmt;
+ /* Set sink crop rectangle */
+ sink->rect.width = mf->width;
+ sink->rect.height = mf->height;
+ sink->rect.left = 0;
+ sink->rect.top = 0;
+ /* Reset source crop rectangle */
+ fimc->out_frame.rect = sink->rect;
+ } else {
+ /* Allow changing format only on sink pad */
+ mf->code = fimc->fmt->mbus_code;
+ mf->width = sink->rect.width;
+ mf->height = sink->rect.height;
+ }
+
+ mutex_unlock(&fimc->lock);
+ return 0;
+}
+
+static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct flite_frame *f = &fimc->inp_frame;
+
+ if ((sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL &&
+ sel->target != V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != FLITE_SD_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *v4l2_subdev_get_try_crop(fh, sel->pad);
+ return 0;
+ }
+
+ mutex_lock(&fimc->lock);
+ if (sel->target == V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL) {
+ sel->r = f->rect;
+ } else {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = f->f_width;
+ sel->r.height = f->f_height;
+ }
+ mutex_unlock(&fimc->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+ __func__, f->rect.left, f->rect.top, f->rect.width,
+ f->rect.height, f->f_width, f->f_height);
+
+ return 0;
+}
+
+static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct flite_frame *f = &fimc->inp_frame;
+ int ret = 0;
+
+ if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ sel->pad != FLITE_SD_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&fimc->lock);
+ fimc_lite_try_crop(fimc, &sel->r);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_crop(fh, sel->pad) = sel->r;
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&fimc->slock, flags);
+ f->rect = sel->r;
+ /* Same crop rectangle on the source pad */
+ fimc->out_frame.rect = sel->r;
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ }
+ mutex_unlock(&fimc->lock);
+
+ v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %d, f_h: %d",
+ __func__, f->rect.left, f->rect.top, f->rect.width,
+ f->rect.height, f->f_width, f->f_height);
+
+ return ret;
+}
+
+static int fimc_lite_subdev_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc->out_path == FIMC_IO_DMA)
+ return -ENOIOCTLCMD;
+
+ /* TODO: */
+
+ return 0;
+}
+
+static int fimc_lite_subdev_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc->out_path == FIMC_IO_DMA)
+ return -ENOIOCTLCMD;
+
+ /* TODO: */
+
+ return 0;
+}
+
+static int fimc_lite_log_status(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ flite_hw_dump_regs(fimc, __func__);
+ return 0;
+}
+
+static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+ struct vb2_queue *q = &fimc->vb_queue;
+ struct video_device *vfd;
+ int ret;
+
+ fimc->fmt = &fimc_lite_formats[0];
+ fimc->out_path = FIMC_IO_DMA;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(sd->v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ snprintf(vfd->name, sizeof(vfd->name), "fimc-lite.%d.capture",
+ fimc->index);
+
+ vfd->fops = &fimc_lite_fops;
+ vfd->ioctl_ops = &fimc_lite_ioctl_ops;
+ vfd->v4l2_dev = sd->v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ vfd->lock = &fimc->lock;
+ fimc->vfd = vfd;
+ fimc->ref_count = 0;
+ fimc->reqbufs_count = 0;
+
+ INIT_LIST_HEAD(&fimc->pending_buf_q);
+ INIT_LIST_HEAD(&fimc->active_buf_q);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = &fimc_lite_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct flite_buffer);
+ q->drv_priv = fimc;
+
+ vb2_queue_init(q);
+
+ fimc->vd_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_init(&vfd->entity, 1, &fimc->vd_pad, 0);
+ if (ret)
+ goto err;
+
+ video_set_drvdata(vfd, fimc);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vd;
+
+ v4l2_info(sd->v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+ err_vd:
+ media_entity_cleanup(&vfd->entity);
+ err:
+ video_device_release(vfd);
+ return ret;
+}
+
+static void fimc_lite_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
+
+ if (fimc == NULL)
+ return;
+
+ if (fimc->vfd) {
+ video_unregister_device(fimc->vfd);
+ media_entity_cleanup(&fimc->vfd->entity);
+ fimc->vfd = NULL;
+ }
+}
+
+static const struct v4l2_subdev_internal_ops fimc_lite_subdev_internal_ops = {
+ .registered = fimc_lite_subdev_registered,
+ .unregistered = fimc_lite_subdev_unregistered,
+};
+
+static const struct v4l2_subdev_pad_ops fimc_lite_subdev_pad_ops = {
+ .enum_mbus_code = fimc_lite_subdev_enum_mbus_code,
+ .get_selection = fimc_lite_subdev_get_selection,
+ .set_selection = fimc_lite_subdev_set_selection,
+ .get_fmt = fimc_lite_subdev_get_fmt,
+ .set_fmt = fimc_lite_subdev_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops fimc_lite_subdev_video_ops = {
+ .s_stream = fimc_lite_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_core_ops fimc_lite_core_ops = {
+ .s_power = fimc_lite_subdev_s_power,
+ .log_status = fimc_lite_log_status,
+};
+
+static struct v4l2_subdev_ops fimc_lite_subdev_ops = {
+ .core = &fimc_lite_core_ops,
+ .video = &fimc_lite_subdev_video_ops,
+ .pad = &fimc_lite_subdev_pad_ops,
+};
+
+static int fimc_lite_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fimc_lite *fimc = container_of(ctrl->handler, struct fimc_lite,
+ ctrl_handler);
+ set_bit(ST_FLITE_CONFIG, &fimc->state);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fimc_lite_ctrl_ops = {
+ .s_ctrl = fimc_lite_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config fimc_lite_ctrl = {
+ .ops = &fimc_lite_ctrl_ops,
+ .id = V4L2_CTRL_CLASS_USER | 0x1001,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Test Pattern 640x480",
+};
+
+static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
+{
+ struct v4l2_ctrl_handler *handler = &fimc->ctrl_handler;
+ struct v4l2_subdev *sd = &fimc->subdev;
+ int ret;
+
+ v4l2_subdev_init(sd, &fimc_lite_subdev_ops);
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(sd->name, sizeof(sd->name), "FIMC-LITE.%d", fimc->index);
+
+ fimc->subdev_pads[FLITE_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ fimc->subdev_pads[FLITE_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&sd->entity, FLITE_SD_PADS_NUM,
+ fimc->subdev_pads, 0);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_handler_init(handler, 1);
+ fimc->test_pattern = v4l2_ctrl_new_custom(handler, &fimc_lite_ctrl,
+ NULL);
+ if (handler->error) {
+ media_entity_cleanup(&sd->entity);
+ return handler->error;
+ }
+
+ sd->ctrl_handler = handler;
+ sd->internal_ops = &fimc_lite_subdev_internal_ops;
+ sd->entity.ops = &fimc_lite_subdev_media_ops;
+ v4l2_set_subdevdata(sd, fimc);
+
+ return 0;
+}
+
+static void fimc_lite_unregister_capture_subdev(struct fimc_lite *fimc)
+{
+ struct v4l2_subdev *sd = &fimc->subdev;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&fimc->ctrl_handler);
+ v4l2_set_subdevdata(sd, NULL);
+}
+
+static void fimc_lite_clk_put(struct fimc_lite *fimc)
+{
+ if (IS_ERR_OR_NULL(fimc->clock))
+ return;
+
+ clk_unprepare(fimc->clock);
+ clk_put(fimc->clock);
+ fimc->clock = NULL;
+}
+
+static int fimc_lite_clk_get(struct fimc_lite *fimc)
+{
+ int ret;
+
+ fimc->clock = clk_get(&fimc->pdev->dev, FLITE_CLK_NAME);
+ if (IS_ERR(fimc->clock))
+ return PTR_ERR(fimc->clock);
+
+ ret = clk_prepare(fimc->clock);
+ if (ret < 0) {
+ clk_put(fimc->clock);
+ fimc->clock = NULL;
+ }
+ return ret;
+}
+
+static int __devinit fimc_lite_probe(struct platform_device *pdev)
+{
+ struct flite_drvdata *drv_data = fimc_lite_get_drvdata(pdev);
+ struct fimc_lite *fimc;
+ struct resource *res;
+ int ret;
+
+ fimc = devm_kzalloc(&pdev->dev, sizeof(*fimc), GFP_KERNEL);
+ if (!fimc)
+ return -ENOMEM;
+
+ fimc->index = pdev->id;
+ fimc->variant = drv_data->variant[fimc->index];
+ fimc->pdev = pdev;
+
+ init_waitqueue_head(&fimc->irq_queue);
+ spin_lock_init(&fimc->slock);
+ mutex_init(&fimc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fimc->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (fimc->regs == NULL) {
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -ENOENT;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Failed to get IRQ resource\n");
+ return -ENXIO;
+ }
+
+ ret = fimc_lite_clk_get(fimc);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, res->start, flite_irq_handler,
+ 0, dev_name(&pdev->dev), fimc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
+ goto err_clk;
+ }
+
+ /* The video node will be created within the subdev's registered() op */
+ ret = fimc_lite_create_capture_subdev(fimc);
+ if (ret)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, fimc);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_sd;
+
+ fimc->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(fimc->alloc_ctx)) {
+ ret = PTR_ERR(fimc->alloc_ctx);
+ goto err_pm;
+ }
+ pm_runtime_put(&pdev->dev);
+
+ dev_dbg(&pdev->dev, "FIMC-LITE.%d registered successfully\n",
+ fimc->index);
+ return 0;
+err_pm:
+ pm_runtime_put(&pdev->dev);
+err_sd:
+ fimc_lite_unregister_capture_subdev(fimc);
+err_clk:
+ fimc_lite_clk_put(fimc);
+ return ret;
+}
+
+static int fimc_lite_runtime_resume(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+ clk_enable(fimc->clock);
+ return 0;
+}
+
+static int fimc_lite_runtime_suspend(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+
+ clk_disable(fimc->clock);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_lite_resume(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ struct flite_buffer *buf;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ if (!test_and_clear_bit(ST_FLITE_LPM, &fimc->state) ||
+ !test_bit(ST_FLITE_IN_USE, &fimc->state)) {
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return 0;
+ }
+ flite_hw_reset(fimc);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+
+ if (!test_and_clear_bit(ST_FLITE_SUSPENDED, &fimc->state))
+ return 0;
+
+ INIT_LIST_HEAD(&fimc->active_buf_q);
+ fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity, false);
+ fimc_lite_hw_init(fimc);
+ clear_bit(ST_FLITE_SUSPENDED, &fimc->state);
+
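+ /* Re-queue the buffers that were parked on the pending queue during suspend */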
+ for (i = 0; i < fimc->reqbufs_count; i++) {
+ if (list_empty(&fimc->pending_buf_q))
+ break;
+ buf = fimc_lite_pending_queue_pop(fimc);
+ buffer_queue(&buf->vb);
+ }
+ return 0;
+}
+
+static int fimc_lite_suspend(struct device *dev)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ bool suspend = test_bit(ST_FLITE_IN_USE, &fimc->state);
+ int ret;
+
+ if (test_and_set_bit(ST_FLITE_LPM, &fimc->state))
+ return 0;
+
+ ret = fimc_lite_stop_capture(fimc, suspend);
+ if (ret < 0 || !fimc_lite_active(fimc))
+ return ret;
+
+ return fimc_pipeline_shutdown(&fimc->pipeline);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int __devexit fimc_lite_remove(struct platform_device *pdev)
+{
+ struct fimc_lite *fimc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ fimc_lite_unregister_capture_subdev(fimc);
+ vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+ fimc_lite_clk_put(fimc);
+
+ dev_info(dev, "Driver unloaded\n");
+ return 0;
+}
+
+static struct flite_variant fimc_lite0_variant_exynos4 = {
+ .max_width = 8192,
+ .max_height = 8192,
+ .out_width_align = 8,
+ .win_hor_offs_align = 2,
+ .out_hor_offs_align = 8,
+};
+
+/* EXYNOS4212, EXYNOS4412 */
+static struct flite_drvdata fimc_lite_drvdata_exynos4 = {
+ .variant = {
+ [0] = &fimc_lite0_variant_exynos4,
+ [1] = &fimc_lite0_variant_exynos4,
+ },
+};
+
+static struct platform_device_id fimc_lite_driver_ids[] = {
+ {
+ .name = "exynos-fimc-lite",
+ .driver_data = (unsigned long)&fimc_lite_drvdata_exynos4,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, fimc_lite_driver_ids);
+
+static const struct dev_pm_ops fimc_lite_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_lite_suspend, fimc_lite_resume)
+ SET_RUNTIME_PM_OPS(fimc_lite_runtime_suspend, fimc_lite_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver fimc_lite_driver = {
+ .probe = fimc_lite_probe,
+ .remove = __devexit_p(fimc_lite_remove),
+ .id_table = fimc_lite_driver_ids,
+ .driver = {
+ .name = FIMC_LITE_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &fimc_lite_pm_ops,
+ }
+};
+module_platform_driver(fimc_lite_driver);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" FIMC_LITE_DRV_NAME);
diff --git a/drivers/media/video/s5p-fimc/fimc-lite.h b/drivers/media/video/s5p-fimc/fimc-lite.h
new file mode 100644
index 000000000000..44424eee81d8
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-lite.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_LITE_H_
+#define FIMC_LITE_H_
+
+#include <asm/sizes.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/s5p_fimc.h>
+
+#include "fimc-core.h"
+
+#define FIMC_LITE_DRV_NAME "exynos-fimc-lite"
+#define FLITE_CLK_NAME "flite"
+#define FIMC_LITE_MAX_DEVS 2
+#define FLITE_REQ_BUFS_MIN 2
+
+/* Bit index definitions for struct fimc_lite::state */
+enum {
+ ST_FLITE_LPM,
+ ST_FLITE_PENDING,
+ ST_FLITE_RUN,
+ ST_FLITE_STREAM,
+ ST_FLITE_SUSPENDED,
+ ST_FLITE_OFF,
+ ST_FLITE_IN_USE,
+ ST_FLITE_CONFIG,
+ ST_SENSOR_STREAM,
+};
+
+#define FLITE_SD_PAD_SINK 0
+#define FLITE_SD_PAD_SOURCE 1
+#define FLITE_SD_PADS_NUM 2
+
+struct flite_variant {
+ unsigned short max_width;
+ unsigned short max_height;
+ unsigned short out_width_align;
+ unsigned short win_hor_offs_align;
+ unsigned short out_hor_offs_align;
+};
+
+struct flite_drvdata {
+ struct flite_variant *variant[FIMC_LITE_MAX_DEVS];
+};
+
+#define fimc_lite_get_drvdata(_pdev) \
+ ((struct flite_drvdata *) platform_get_device_id(_pdev)->driver_data)
+
+struct fimc_lite_events {
+ unsigned int data_overflow;
+};
+
+#define FLITE_MAX_PLANES 1
+
+/**
+ * struct flite_frame - source/target frame properties
+ * @f_width: full pixel width
+ * @f_height: full pixel height
+ * @rect: crop/composition rectangle
+ */
+struct flite_frame {
+ u16 f_width;
+ u16 f_height;
+ struct v4l2_rect rect;
+};
+
+/**
+ * struct flite_buffer - video buffer structure
+ * @vb: vb2 buffer
+ * @list: list head for the buffers queue
+ * @paddr: precalculated physical address
+ */
+struct flite_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+ dma_addr_t paddr;
+};
+
+/**
+ * struct fimc_lite - fimc lite structure
+ * @pdev: pointer to FIMC-LITE platform device
+ * @variant: variant information for this IP
+ * @v4l2_dev: pointer to the top level v4l2_device
+ * @vfd: video device node
+ * @fh: v4l2 file handle
+ * @alloc_ctx: videobuf2 memory allocator context
+ * @subdev: FIMC-LITE subdev
+ * @vd_pad: media (sink) pad for the capture video node
+ * @subdev_pads: the subdev media pads
+ * @ctrl_handler: v4l2 control handler
+ * @test_pattern: test pattern controls
+ * @index: FIMC-LITE platform device index
+ * @pipeline: video capture pipeline data structure
+ * @slock: spinlock protecting this data structure and the hw registers
+ * @lock: mutex serializing video device and the subdev operations
+ * @clock: FIMC-LITE gate clock
+ * @regs: memory mapped io registers
+ * @irq_queue: interrupt handler waitqueue
+ * @fmt: pointer to color format description structure
+ * @payload: image size in bytes (w x h x bpp)
+ * @inp_frame: camera input frame structure
+ * @out_frame: DMA output frame structure
+ * @out_path: output data path (DMA or FIFO)
+ * @source_subdev_grp_id: source subdev group id
+ * @state: driver state flags
+ * @pending_buf_q: pending buffers queue head
+ * @active_buf_q: the queue head of buffers scheduled in hardware
+ * @vb_queue: vb2 buffers queue
+ * @active_buf_count: number of video buffers scheduled in hardware
+ * @frame_count: the captured frames counter
+ * @reqbufs_count: the number of buffers requested with REQBUFS ioctl
+ * @ref_count: driver's private reference counter
+ */
+struct fimc_lite {
+ struct platform_device *pdev;
+ struct flite_variant *variant;
+ struct v4l2_device *v4l2_dev;
+ struct video_device *vfd;
+ struct v4l2_fh fh;
+ struct vb2_alloc_ctx *alloc_ctx;
+ struct v4l2_subdev subdev;
+ struct media_pad vd_pad;
+ struct media_pad subdev_pads[FLITE_SD_PADS_NUM];
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *test_pattern;
+ u32 index;
+ struct fimc_pipeline pipeline;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ struct clk *clock;
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+
+ const struct fimc_fmt *fmt;
+ unsigned long payload[FLITE_MAX_PLANES];
+ struct flite_frame inp_frame;
+ struct flite_frame out_frame;
+ enum fimc_datapath out_path;
+ unsigned int source_subdev_grp_id;
+
+ unsigned long state;
+ struct list_head pending_buf_q;
+ struct list_head active_buf_q;
+ struct vb2_queue vb_queue;
+ unsigned int frame_count;
+ unsigned int reqbufs_count;
+ int ref_count;
+
+ struct fimc_lite_events events;
+};
+
+static inline bool fimc_lite_active(struct fimc_lite *fimc)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&fimc->slock, flags);
+ ret = fimc->state & (1 << ST_FLITE_RUN) ||
+ fimc->state & (1 << ST_FLITE_PENDING);
+ spin_unlock_irqrestore(&fimc->slock, flags);
+ return ret;
+}
+
+static inline void fimc_lite_active_queue_add(struct fimc_lite *dev,
+ struct flite_buffer *buf)
+{
+ list_add_tail(&buf->list, &dev->active_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_active_queue_pop(
+ struct fimc_lite *dev)
+{
+ struct flite_buffer *buf = list_entry(dev->active_buf_q.next,
+ struct flite_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+static inline void fimc_lite_pending_queue_add(struct fimc_lite *dev,
+ struct flite_buffer *buf)
+{
+ list_add_tail(&buf->list, &dev->pending_buf_q);
+}
+
+static inline struct flite_buffer *fimc_lite_pending_queue_pop(
+ struct fimc_lite *dev)
+{
+ struct flite_buffer *buf = list_entry(dev->pending_buf_q.next,
+ struct flite_buffer, list);
+ list_del(&buf->list);
+ return buf;
+}
+
+#endif /* FIMC_LITE_H_ */
diff --git a/drivers/media/video/s5p-fimc/fimc-m2m.c b/drivers/media/video/s5p-fimc/fimc-m2m.c
new file mode 100644
index 000000000000..4c58e0570962
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-m2m.c
@@ -0,0 +1,824 @@
+/*
+ * Samsung S5P/EXYNOS4 SoC series FIMC (video postprocessor) driver
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "fimc-core.h"
+#include "fimc-reg.h"
+#include "fimc-mdevice.h"
+
+
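+/* OUTPUT queue buffers carry the m2m input, CAPTURE queue buffers the output */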
+static unsigned int get_m2m_fmt_flags(unsigned int stream_type)
+{
+ if (stream_type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return FMT_FLAGS_M2M_IN;
+ else
+ return FMT_FLAGS_M2M_OUT;
+}
+
+void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
+{
+ struct vb2_buffer *src_vb, *dst_vb;
+
+ if (!ctx || !ctx->m2m_ctx)
+ return;
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ v4l2_m2m_buf_done(src_vb, vb_state);
+ v4l2_m2m_buf_done(dst_vb, vb_state);
+ v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
+ ctx->m2m_ctx);
+ }
+}
+
+/* Complete the transaction which has been scheduled for execution. */
+static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
+
+ if (!fimc_m2m_pending(fimc))
+ return 0;
+
+ fimc_ctx_state_set(FIMC_CTX_SHUT, ctx);
+
+ ret = wait_event_timeout(fimc->irq_queue,
+ !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
+ FIMC_SHUTDOWN_TIMEOUT);
+
+ return ret == 0 ? -ETIMEDOUT : ret;
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
+
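+ /* pm_runtime_get_sync() returns 1 when the device was already active */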
+ ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev);
+ return ret > 0 ? 0 : ret;
+}
+
+static int stop_streaming(struct vb2_queue *q)
+{
+ struct fimc_ctx *ctx = q->drv_priv;
+ int ret;
+
+ ret = fimc_m2m_shutdown(ctx);
+ if (ret == -ETIMEDOUT)
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ pm_runtime_put(&ctx->fimc_dev->pdev->dev);
+ return 0;
+}
+
+static void fimc_device_run(void *priv)
+{
+ struct vb2_buffer *vb = NULL;
+ struct fimc_ctx *ctx = priv;
+ struct fimc_frame *sf, *df;
+ struct fimc_dev *fimc;
+ unsigned long flags;
+ u32 ret;
+
+ if (WARN(!ctx, "Null context\n"))
+ return;
+
+ fimc = ctx->fimc_dev;
+ spin_lock_irqsave(&fimc->slock, flags);
+
+ set_bit(ST_M2M_PEND, &fimc->state);
+ sf = &ctx->s_frame;
+ df = &ctx->d_frame;
+
+ if (ctx->state & FIMC_PARAMS) {
+ /* Prepare the DMA offsets for scaler */
+ fimc_prepare_dma_offset(ctx, sf);
+ fimc_prepare_dma_offset(ctx, df);
+ }
+
+ vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ ret = fimc_prepare_addr(ctx, vb, sf, &sf->paddr);
+ if (ret)
+ goto dma_unlock;
+
+ vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ ret = fimc_prepare_addr(ctx, vb, df, &df->paddr);
+ if (ret)
+ goto dma_unlock;
+
+ /* Reconfigure hardware if the context has changed. */
+ if (fimc->m2m.ctx != ctx) {
+ ctx->state |= FIMC_PARAMS;
+ fimc->m2m.ctx = ctx;
+ }
+
+ if (ctx->state & FIMC_PARAMS) {
+ fimc_set_yuv_order(ctx);
+ fimc_hw_set_input_path(ctx);
+ fimc_hw_set_in_dma(ctx);
+ ret = fimc_set_scaler_info(ctx);
+ if (ret)
+ goto dma_unlock;
+ fimc_hw_set_prescaler(ctx);
+ fimc_hw_set_mainscaler(ctx);
+ fimc_hw_set_target_format(ctx);
+ fimc_hw_set_rotation(ctx);
+ fimc_hw_set_effect(ctx);
+ fimc_hw_set_out_dma(ctx);
+ if (fimc->variant->has_alpha)
+ fimc_hw_set_rgb_alpha(ctx);
+ fimc_hw_set_output_path(ctx);
+ }
+ fimc_hw_set_input_addr(fimc, &sf->paddr);
+ fimc_hw_set_output_addr(fimc, &df->paddr, -1);
+
+ fimc_activate_capture(ctx);
+ ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP |
+ FIMC_SRC_FMT | FIMC_DST_FMT);
+ fimc_hw_activate_input_dma(fimc, true);
+
+dma_unlock:
+ spin_unlock_irqrestore(&fimc->slock, flags);
+}
+
+static void fimc_job_abort(void *priv)
+{
+ fimc_m2m_shutdown(priv);
+}
+
+static int fimc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *allocators[])
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fimc_frame *f;
+ int i;
+
+ f = ctx_get_frame(ctx, vq->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ /*
+ * Return the number of non-contiguous planes (plane buffers)
+ * depending on the configured color format.
+ */
+ if (!f->fmt)
+ return -EINVAL;
+
+ *num_planes = f->fmt->memplanes;
+ for (i = 0; i < f->fmt->memplanes; i++) {
+ sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
+ allocators[i] = ctx->fimc_dev->alloc_ctx;
+ }
+ return 0;
+}
+
+static int fimc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fimc_frame *frame;
+ int i;
+
+ frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ for (i = 0; i < frame->fmt->memplanes; i++)
+ vb2_set_plane_payload(vb, i, frame->payload[i]);
+
+ return 0;
+}
+
+static void fimc_buf_queue(struct vb2_buffer *vb)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
+
+ if (ctx->m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+static void fimc_lock(struct vb2_queue *vq)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_lock(&ctx->fimc_dev->lock);
+}
+
+static void fimc_unlock(struct vb2_queue *vq)
+{
+ struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
+ mutex_unlock(&ctx->fimc_dev->lock);
+}
+
+static struct vb2_ops fimc_qops = {
+ .queue_setup = fimc_queue_setup,
+ .buf_prepare = fimc_buf_prepare,
+ .buf_queue = fimc_buf_queue,
+ .wait_prepare = fimc_unlock,
+ .wait_finish = fimc_lock,
+ .stop_streaming = stop_streaming,
+ .start_streaming = start_streaming,
+};
+
+/*
+ * V4L2 ioctl handlers
+ */
+static int fimc_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ return 0;
+}
+
+static int fimc_m2m_enum_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct fimc_fmt *fmt;
+
+ fmt = fimc_find_format(NULL, NULL, get_m2m_fmt_flags(f->type),
+ f->index);
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
+
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ return fimc_fill_format(frame, f);
+}
+
+static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_variant *variant = fimc->variant;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct fimc_fmt *fmt;
+ u32 max_w, mod_x, mod_y;
+
+ if (!IS_M2M(f->type))
+ return -EINVAL;
+
+ dbg("w: %d, h: %d", pix->width, pix->height);
+
+ fmt = fimc_find_format(&pix->pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (WARN(fmt == NULL, "Pixel format lookup failed"))
+ return -EINVAL;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+ else if (pix->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ max_w = variant->pix_limit->scaler_dis_w;
+ mod_x = ffs(variant->min_inp_pixsize) - 1;
+ } else {
+ max_w = variant->pix_limit->out_rot_dis_w;
+ mod_x = ffs(variant->min_out_pixsize) - 1;
+ }
+
+ if (tiled_fmt(fmt)) {
+ mod_x = 6; /* 64 x 32 pixels tile */
+ mod_y = 5;
+ } else {
+ if (variant->min_vsize_align == 1)
+ mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
+ else
+ mod_y = ffs(variant->min_vsize_align) - 1;
+ }
+
+ v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
+ &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
+
+ fimc_adjust_mplane_format(fmt, pix->width, pix->height, &f->fmt.pix_mp);
+ return 0;
+}
+
+static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return fimc_try_fmt_mplane(ctx, f);
+}
+
+static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct vb2_queue *vq;
+ struct fimc_frame *frame;
+ struct v4l2_pix_format_mplane *pix;
+ int i, ret = 0;
+
+ ret = fimc_try_fmt_mplane(ctx, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(fimc->m2m.vfd, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ frame = &ctx->s_frame;
+ else
+ frame = &ctx->d_frame;
+
+ pix = &f->fmt.pix_mp;
+ frame->fmt = fimc_find_format(&pix->pixelformat, NULL,
+ get_m2m_fmt_flags(f->type), 0);
+ if (!frame->fmt)
+ return -EINVAL;
+
+ /* Update RGB Alpha control state and value range */
+ fimc_alpha_ctrl_update(ctx);
+
+ for (i = 0; i < frame->fmt->colplanes; i++) {
+ frame->payload[i] =
+ (pix->width * pix->height * frame->fmt->depth[i]) / 8;
+ }
+
+ fimc_fill_frame(frame, f);
+
+ ctx->scaler.enabled = 1;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ fimc_ctx_state_set(FIMC_PARAMS | FIMC_DST_FMT, ctx);
+ else
+ fimc_ctx_state_set(FIMC_PARAMS | FIMC_SRC_FMT, ctx);
+
+ dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
+
+ return 0;
+}
+
+static int fimc_m2m_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int fimc_m2m_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_dqbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int fimc_m2m_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ /* The source and target color formats need to be set */
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ if (!fimc_ctx_state_is_set(FIMC_SRC_FMT, ctx))
+ return -EINVAL;
+ } else if (!fimc_ctx_state_is_set(FIMC_DST_FMT, ctx)) {
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int fimc_m2m_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+static int fimc_m2m_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->bounds.left = 0;
+ cr->bounds.top = 0;
+ cr->bounds.width = frame->o_width;
+ cr->bounds.height = frame->o_height;
+ cr->defrect = cr->bounds;
+
+ return 0;
+}
+
+static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_frame *frame;
+
+ frame = ctx_get_frame(ctx, cr->type);
+ if (IS_ERR(frame))
+ return PTR_ERR(frame);
+
+ cr->c.left = frame->offs_h;
+ cr->c.top = frame->offs_v;
+ cr->c.width = frame->width;
+ cr->c.height = frame->height;
+
+ return 0;
+}
+
+static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
+{
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_frame *f;
+ u32 min_size, halign, depth = 0;
+ int i;
+
+ if (cr->c.top < 0 || cr->c.left < 0) {
+ v4l2_err(fimc->m2m.vfd,
+ "doesn't support negative values for top & left\n");
+ return -EINVAL;
+ }
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ f = &ctx->d_frame;
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ f = &ctx->s_frame;
+ else
+ return -EINVAL;
+
+ min_size = (f == &ctx->s_frame) ?
+ fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
+
+ /* Get pixel alignment constraints. */
+ if (fimc->variant->min_vsize_align == 1)
+ halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
+ else
+ halign = ffs(fimc->variant->min_vsize_align) - 1;
+
+ for (i = 0; i < f->fmt->colplanes; i++)
+ depth += f->fmt->depth[i];
+
+ v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
+ ffs(min_size) - 1,
+ &cr->c.height, min_size, f->o_height,
+ halign, 64/(ALIGN(depth, 8)));
+
+ /* adjust left/top if cropping rectangle is out of bounds */
+ if (cr->c.left + cr->c.width > f->o_width)
+ cr->c.left = f->o_width - cr->c.width;
+ if (cr->c.top + cr->c.height > f->o_height)
+ cr->c.top = f->o_height - cr->c.height;
+
+ cr->c.left = round_down(cr->c.left, min_size);
+ cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
+
+ dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
+ cr->c.left, cr->c.top, cr->c.width, cr->c.height,
+ f->f_width, f->f_height);
+
+ return 0;
+}
+
+static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ struct fimc_frame *f;
+ int ret;
+
+ ret = fimc_m2m_try_crop(ctx, cr);
+ if (ret)
+ return ret;
+
+ f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+ &ctx->s_frame : &ctx->d_frame;
+
+ /* Check to see if scaling ratio is within supported range */
+ if (fimc_ctx_state_is_set(FIMC_DST_FMT | FIMC_SRC_FMT, ctx)) {
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = fimc_check_scaler_ratio(ctx, cr->c.width,
+ cr->c.height, ctx->d_frame.width,
+ ctx->d_frame.height, ctx->rotation);
+ } else {
+ ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
+ ctx->s_frame.height, cr->c.width,
+ cr->c.height, ctx->rotation);
+ }
+ if (ret) {
+ v4l2_err(fimc->m2m.vfd, "Out of scaler range\n");
+ return -EINVAL;
+ }
+ }
+
+ f->offs_h = cr->c.left;
+ f->offs_v = cr->c.top;
+ f->width = cr->c.width;
+ f->height = cr->c.height;
+
+ fimc_ctx_state_set(FIMC_PARAMS, ctx);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
+ .vidioc_querycap = fimc_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = fimc_m2m_enum_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = fimc_m2m_g_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = fimc_m2m_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = fimc_m2m_s_fmt_mplane,
+ .vidioc_reqbufs = fimc_m2m_reqbufs,
+ .vidioc_querybuf = fimc_m2m_querybuf,
+ .vidioc_qbuf = fimc_m2m_qbuf,
+ .vidioc_dqbuf = fimc_m2m_dqbuf,
+ .vidioc_streamon = fimc_m2m_streamon,
+ .vidioc_streamoff = fimc_m2m_streamoff,
+ .vidioc_g_crop = fimc_m2m_g_crop,
+ .vidioc_s_crop = fimc_m2m_s_crop,
+ .vidioc_cropcap = fimc_m2m_cropcap,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct fimc_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &fimc_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &fimc_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int fimc_m2m_open(struct file *file)
+{
+ struct fimc_dev *fimc = video_drvdata(file);
+ struct fimc_ctx *ctx;
+ int ret;
+
+ dbg("pid: %d, state: 0x%lx, refcnt: %d",
+ task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
+
+ /*
+ * Return if the corresponding video capture node
+ * is already opened.
+ */
+ if (fimc->vid_cap.refcnt > 0)
+ return -EBUSY;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ v4l2_fh_init(&ctx->fh, fimc->m2m.vfd);
+ ctx->fimc_dev = fimc;
+
+ /* Default color format */
+ ctx->s_frame.fmt = fimc_get_format(0);
+ ctx->d_frame.fmt = fimc_get_format(0);
+
+ ret = fimc_ctrls_create(ctx);
+ if (ret)
+ goto error_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrls.handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ /* Setup the device context for memory-to-memory mode */
+ ctx->state = FIMC_CTX_M2M;
+ ctx->flags = 0;
+ ctx->in_path = FIMC_IO_DMA;
+ ctx->out_path = FIMC_IO_DMA;
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_c;
+ }
+
+ if (fimc->m2m.refcnt++ == 0)
+ set_bit(ST_M2M_RUN, &fimc->state);
+ return 0;
+
+error_c:
+ fimc_ctrls_delete(ctx);
+error_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int fimc_m2m_release(struct file *file)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+
+ dbg("pid: %d, state: 0x%lx, refcnt= %d",
+ task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
+
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ fimc_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ if (--fimc->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_RUN, &fimc->state);
+ kfree(ctx);
+ return 0;
+}
+
+static unsigned int fimc_m2m_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+
+ return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+static const struct v4l2_file_operations fimc_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_m2m_open,
+ .release = fimc_m2m_release,
+ .poll = fimc_m2m_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = fimc_m2m_mmap,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fimc_device_run,
+ .job_abort = fimc_job_abort,
+};
+
+int fimc_register_m2m_device(struct fimc_dev *fimc,
+ struct v4l2_device *v4l2_dev)
+{
+ struct video_device *vfd;
+ struct platform_device *pdev;
+ int ret = 0;
+
+ if (!fimc)
+ return -ENODEV;
+
+ pdev = fimc->pdev;
+ fimc->v4l2_dev = v4l2_dev;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ vfd->fops = &fimc_m2m_fops;
+ vfd->ioctl_ops = &fimc_m2m_ioctl_ops;
+ vfd->v4l2_dev = v4l2_dev;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ vfd->lock = &fimc->lock;
+ /*
+ * Locking in file operations other than ioctl should be done
+ * by the driver, not the V4L2 core.
+ * This driver needs auditing so that this flag can be removed.
+ */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
+
+ snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
+ video_set_drvdata(vfd, fimc);
+
+ fimc->m2m.vfd = vfd;
+ fimc->m2m.m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fimc->m2m.m2m_dev)) {
+ v4l2_err(v4l2_dev, "failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(fimc->m2m.m2m_dev);
+ goto err_init;
+ }
+
+ ret = media_entity_init(&vfd->entity, 0, NULL, 0);
+ if (ret)
+ goto err_me;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vd;
+
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+ return 0;
+
+err_vd:
+ media_entity_cleanup(&vfd->entity);
+err_me:
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+err_init:
+ video_device_release(fimc->m2m.vfd);
+ return ret;
+}
+
+void fimc_unregister_m2m_device(struct fimc_dev *fimc)
+{
+ if (!fimc)
+ return;
+
+ if (fimc->m2m.m2m_dev)
+ v4l2_m2m_release(fimc->m2m.m2m_dev);
+ if (fimc->m2m.vfd) {
+ media_entity_cleanup(&fimc->m2m.vfd->entity);
+ /* Can also be called if video device wasn't registered */
+ video_unregister_device(fimc->m2m.vfd);
+ }
+}
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 62ed37e40149..52cef4865423 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -25,6 +25,7 @@
#include <media/media-device.h>
#include "fimc-core.h"
+#include "fimc-lite.h"
#include "fimc-mdevice.h"
#include "mipi-csis.h"
@@ -37,22 +38,46 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
*
* Caller holds the graph mutex.
*/
-void fimc_pipeline_prepare(struct fimc_dev *fimc, struct media_entity *me)
+void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me)
{
- struct media_entity_graph graph;
+ struct media_pad *pad = &me->pads[0];
struct v4l2_subdev *sd;
+ int i;
- media_entity_graph_walk_start(&graph, me);
+ for (i = 0; i < IDX_MAX; i++)
+ p->subdevs[i] = NULL;
- while ((me = media_entity_graph_walk_next(&graph))) {
- if (media_entity_type(me) != MEDIA_ENT_T_V4L2_SUBDEV)
- continue;
- sd = media_entity_to_v4l2_subdev(me);
+ while (1) {
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ /* source pad */
+ pad = media_entity_remote_source(pad);
+ if (pad == NULL ||
+ media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ break;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
- if (sd->grp_id == SENSOR_GROUP_ID)
- fimc->pipeline.sensor = sd;
- else if (sd->grp_id == CSIS_GROUP_ID)
- fimc->pipeline.csis = sd;
+ switch (sd->grp_id) {
+ case SENSOR_GROUP_ID:
+ p->subdevs[IDX_SENSOR] = sd;
+ break;
+ case CSIS_GROUP_ID:
+ p->subdevs[IDX_CSIS] = sd;
+ break;
+ case FLITE_GROUP_ID:
+ p->subdevs[IDX_FLITE] = sd;
+ break;
+ case FIMC_GROUP_ID:
+ /* No need to control FIMC subdev through subdev ops */
+ break;
+ default:
+ pr_warn("%s: Unknown subdev grp_id: %#x\n",
+ __func__, sd->grp_id);
+ }
+ /* sink pad */
+ pad = &sd->entity.pads[0];
}
}
@@ -85,30 +110,27 @@ static int __subdev_set_power(struct v4l2_subdev *sd, int on)
/**
* fimc_pipeline_s_power - change power state of all pipeline subdevs
* @fimc: fimc device terminating the pipeline
- * @state: 1 to enable power or 0 for power down
+ * @state: true to power on, false to power off
*
- * Need to be called with the graph mutex held.
+ * Needs to be called with the graph mutex held.
*/
-int fimc_pipeline_s_power(struct fimc_dev *fimc, int state)
+int fimc_pipeline_s_power(struct fimc_pipeline *p, bool state)
{
- int ret = 0;
+ unsigned int i;
+ int ret;
- if (fimc->pipeline.sensor == NULL)
+ if (p->subdevs[IDX_SENSOR] == NULL)
return -ENXIO;
- if (state) {
- ret = __subdev_set_power(fimc->pipeline.csis, 1);
- if (ret && ret != -ENXIO)
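+ /* Walk the subdevs in reverse order when powering the pipeline on */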
+ for (i = 0; i < IDX_MAX; i++) {
+ unsigned int idx = state ? (IDX_MAX - 1) - i : i;
+
+ ret = __subdev_set_power(p->subdevs[idx], state);
+ if (ret < 0 && ret != -ENXIO)
return ret;
- return __subdev_set_power(fimc->pipeline.sensor, 1);
}
- ret = __subdev_set_power(fimc->pipeline.sensor, 0);
- if (ret)
- return ret;
- ret = __subdev_set_power(fimc->pipeline.csis, 0);
-
- return ret == -ENXIO ? 0 : ret;
+ return 0;
}
/**
@@ -119,32 +141,36 @@ int fimc_pipeline_s_power(struct fimc_dev *fimc, int state)
*
* This function must be called with the graph mutex held.
*/
-static int __fimc_pipeline_initialize(struct fimc_dev *fimc,
+static int __fimc_pipeline_initialize(struct fimc_pipeline *p,
struct media_entity *me, bool prep)
{
int ret;
if (prep)
- fimc_pipeline_prepare(fimc, me);
- if (fimc->pipeline.sensor == NULL)
+ fimc_pipeline_prepare(p, me);
+
+ if (p->subdevs[IDX_SENSOR] == NULL)
return -EINVAL;
- ret = fimc_md_set_camclk(fimc->pipeline.sensor, true);
+
+ ret = fimc_md_set_camclk(p->subdevs[IDX_SENSOR], true);
if (ret)
return ret;
- return fimc_pipeline_s_power(fimc, 1);
+
+ return fimc_pipeline_s_power(p, 1);
}
-int fimc_pipeline_initialize(struct fimc_dev *fimc, struct media_entity *me,
+int fimc_pipeline_initialize(struct fimc_pipeline *p, struct media_entity *me,
bool prep)
{
int ret;
mutex_lock(&me->parent->graph_mutex);
- ret = __fimc_pipeline_initialize(fimc, me, prep);
+ ret = __fimc_pipeline_initialize(p, me, prep);
mutex_unlock(&me->parent->graph_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(fimc_pipeline_initialize);
/**
* __fimc_pipeline_shutdown - disable the sensor clock and pipeline power
@@ -154,52 +180,59 @@ int fimc_pipeline_initialize(struct fimc_dev *fimc, struct media_entity *me,
* sensor clock.
* Called with the graph mutex held.
*/
-int __fimc_pipeline_shutdown(struct fimc_dev *fimc)
+int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
{
int ret = 0;
- if (fimc->pipeline.sensor) {
- ret = fimc_pipeline_s_power(fimc, 0);
- fimc_md_set_camclk(fimc->pipeline.sensor, false);
+ if (p->subdevs[IDX_SENSOR]) {
+ ret = fimc_pipeline_s_power(p, 0);
+ fimc_md_set_camclk(p->subdevs[IDX_SENSOR], false);
}
return ret == -ENXIO ? 0 : ret;
}
-int fimc_pipeline_shutdown(struct fimc_dev *fimc)
+int fimc_pipeline_shutdown(struct fimc_pipeline *p)
{
- struct media_entity *me = &fimc->vid_cap.vfd->entity;
+ struct media_entity *me;
int ret;
+ if (!p || !p->subdevs[IDX_SENSOR])
+ return -EINVAL;
+
+ me = &p->subdevs[IDX_SENSOR]->entity;
mutex_lock(&me->parent->graph_mutex);
- ret = __fimc_pipeline_shutdown(fimc);
+ ret = __fimc_pipeline_shutdown(p);
mutex_unlock(&me->parent->graph_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(fimc_pipeline_shutdown);
/**
* fimc_pipeline_s_stream - invoke s_stream on pipeline subdevs
- * @fimc: fimc device terminating the pipeline
+ * @pipeline: video pipeline structure
* @on: passed as the s_stream call argument
*/
-int fimc_pipeline_s_stream(struct fimc_dev *fimc, int on)
+int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
{
- struct fimc_pipeline *p = &fimc->pipeline;
- int ret = 0;
+ int i, ret;
- if (p->sensor == NULL)
+ if (p->subdevs[IDX_SENSOR] == NULL)
return -ENODEV;
- if ((on && p->csis) || !on)
- ret = v4l2_subdev_call(on ? p->csis : p->sensor,
- video, s_stream, on);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
- if ((!on && p->csis) || on)
- ret = v4l2_subdev_call(on ? p->sensor : p->csis,
- video, s_stream, on);
- return ret == -ENOIOCTLCMD ? 0 : ret;
+ for (i = 0; i < IDX_MAX; i++) {
+ unsigned int idx = on ? (IDX_MAX - 1) - i : i;
+
+ ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(fimc_pipeline_s_stream);
/*
* Sensor subdevice helper functions
@@ -214,14 +247,20 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
return NULL;
adapter = i2c_get_adapter(s_info->pdata->i2c_bus_num);
- if (!adapter)
- return NULL;
+ if (!adapter) {
+ v4l2_warn(&fmd->v4l2_dev,
+ "Failed to get I2C adapter %d, deferring probe\n",
+ s_info->pdata->i2c_bus_num);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
s_info->pdata->board_info, NULL);
if (IS_ERR_OR_NULL(sd)) {
i2c_put_adapter(adapter);
- v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
- return NULL;
+ v4l2_warn(&fmd->v4l2_dev,
+ "Failed to acquire subdev %s, deferring probe\n",
+ s_info->pdata->board_info->type);
+ return ERR_PTR(-EPROBE_DEFER);
}
v4l2_set_subdev_hostdata(sd, s_info);
sd->grp_id = SENSOR_GROUP_ID;
@@ -269,13 +308,22 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
fmd->num_sensors = num_clients;
for (i = 0; i < num_clients; i++) {
+ struct v4l2_subdev *sd;
+
fmd->sensor[i].pdata = &pdata->isp_info[i];
ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], true);
if (ret)
break;
- fmd->sensor[i].subdev =
- fimc_md_register_sensor(fmd, &fmd->sensor[i]);
+ sd = fimc_md_register_sensor(fmd, &fmd->sensor[i]);
ret = __fimc_md_set_camclk(fmd, &fmd->sensor[i], false);
+
+ if (!IS_ERR(sd)) {
+ fmd->sensor[i].subdev = sd;
+ } else {
+ fmd->sensor[i].subdev = NULL;
+ ret = PTR_ERR(sd);
+ break;
+ }
if (ret)
break;
}
@@ -289,21 +337,50 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
static int fimc_register_callback(struct device *dev, void *p)
{
struct fimc_dev *fimc = dev_get_drvdata(dev);
+ struct v4l2_subdev *sd = &fimc->vid_cap.subdev;
struct fimc_md *fmd = p;
- int ret;
+ int ret = 0;
if (!fimc || !fimc->pdev)
return 0;
+
if (fimc->pdev->id < 0 || fimc->pdev->id >= FIMC_MAX_DEVS)
return 0;
fmd->fimc[fimc->pdev->id] = fimc;
- ret = fimc_register_m2m_device(fimc, &fmd->v4l2_dev);
- if (ret)
- return ret;
- ret = fimc_register_capture_device(fimc, &fmd->v4l2_dev);
- if (!ret)
- fimc->vid_cap.user_subdev_api = fmd->user_subdev_api;
+ sd->grp_id = FIMC_GROUP_ID;
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (ret) {
+ v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
+ fimc->id, ret);
+ }
+
+ return ret;
+}
+
+static int fimc_lite_register_callback(struct device *dev, void *p)
+{
+ struct fimc_lite *fimc = dev_get_drvdata(dev);
+ struct v4l2_subdev *sd = &fimc->subdev;
+ struct fimc_md *fmd = p;
+ int ret;
+
+ if (fimc == NULL)
+ return 0;
+
+ if (fimc->index >= FIMC_LITE_MAX_DEVS)
+ return 0;
+
+ fmd->fimc_lite[fimc->index] = fimc;
+ sd->grp_id = FLITE_GROUP_ID;
+
+ ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
+ if (ret) {
+ v4l2_err(&fmd->v4l2_dev,
+ "Failed to register FIMC-LITE.%d (%d)\n",
+ fimc->index, ret);
+ }
return ret;
}
@@ -336,22 +413,56 @@ static int csis_register_callback(struct device *dev, void *p)
*/
static int fimc_md_register_platform_entities(struct fimc_md *fmd)
{
+ struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data;
struct device_driver *driver;
- int ret;
+ int ret, i;
driver = driver_find(FIMC_MODULE_NAME, &platform_bus_type);
- if (!driver)
- return -ENODEV;
+ if (!driver) {
+ v4l2_warn(&fmd->v4l2_dev,
+ "%s driver not found, deffering probe\n",
+ FIMC_MODULE_NAME);
+ return -EPROBE_DEFER;
+ }
+
ret = driver_for_each_device(driver, NULL, fmd,
fimc_register_callback);
if (ret)
return ret;
- driver = driver_find(CSIS_DRIVER_NAME, &platform_bus_type);
- if (driver)
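+ /* Pin the fimc-lite driver module while registering its devices */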
+ driver = driver_find(FIMC_LITE_DRV_NAME, &platform_bus_type);
+ if (driver && try_module_get(driver->owner)) {
ret = driver_for_each_device(driver, NULL, fmd,
- csis_register_callback);
- return ret;
+ fimc_lite_register_callback);
+ if (ret)
+ return ret;
+ module_put(driver->owner);
+ }
+ /*
+ * Check if there is any sensor on the MIPI-CSI2 bus and,
+ * if not, skip loading the s5p-csis module.
+ */
+ if (pdata == NULL)
+ return 0;
+ for (i = 0; i < pdata->num_clients; i++) {
+ if (pdata->isp_info[i].bus_type == FIMC_MIPI_CSI2) {
+ ret = 1;
+ break;
+ }
+ }
+ if (!ret)
+ return 0;
+
+ driver = driver_find(CSIS_DRIVER_NAME, &platform_bus_type);
+ if (!driver || !try_module_get(driver->owner)) {
+ v4l2_warn(&fmd->v4l2_dev,
+ "%s driver not found, deffering probe\n",
+ CSIS_DRIVER_NAME);
+ return -EPROBE_DEFER;
+ }
+
+ return driver_for_each_device(driver, NULL, fmd,
+ csis_register_callback);
}
static void fimc_md_unregister_entities(struct fimc_md *fmd)
@@ -361,14 +472,20 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (fmd->fimc[i] == NULL)
continue;
- fimc_unregister_m2m_device(fmd->fimc[i]);
- fimc_unregister_capture_device(fmd->fimc[i]);
+ v4l2_device_unregister_subdev(&fmd->fimc[i]->vid_cap.subdev);
fmd->fimc[i] = NULL;
}
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ if (fmd->fimc_lite[i] == NULL)
+ continue;
+ v4l2_device_unregister_subdev(&fmd->fimc_lite[i]->subdev);
+ fmd->fimc_lite[i] = NULL;
+ }
for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
if (fmd->csis[i].sd == NULL)
continue;
v4l2_device_unregister_subdev(fmd->csis[i].sd);
+ module_put(fmd->csis[i].sd->owner);
fmd->csis[i].sd = NULL;
}
for (i = 0; i < fmd->num_sensors; i++) {
@@ -379,66 +496,37 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
}
}
-static int fimc_md_register_video_nodes(struct fimc_md *fmd)
-{
- struct video_device *vdev;
- int i, ret = 0;
-
- for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
- if (!fmd->fimc[i])
- continue;
-
- vdev = fmd->fimc[i]->m2m.vfd;
- if (vdev) {
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
- if (ret)
- break;
- v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
- vdev->name, video_device_node_name(vdev));
- }
-
- vdev = fmd->fimc[i]->vid_cap.vfd;
- if (vdev == NULL)
- continue;
- ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
- v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
- vdev->name, video_device_node_name(vdev));
- }
-
- return ret;
-}
-
/**
* __fimc_md_create_fimc_links - create links to all FIMC entities
* @fmd: fimc media device
* @source: the source entity to create links to all fimc entities from
* @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
* @pad: the source entity pad index
- * @fimc_id: index of the fimc device for which link should be enabled
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
*/
-static int __fimc_md_create_fimc_links(struct fimc_md *fmd,
- struct media_entity *source,
- struct v4l2_subdev *sensor,
- int pad, int fimc_id)
+static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
+ struct media_entity *source,
+ struct v4l2_subdev *sensor,
+ int pad, int link_mask)
{
struct fimc_sensor_info *s_info;
struct media_entity *sink;
- unsigned int flags;
+ unsigned int flags = 0;
int ret, i;
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (!fmd->fimc[i])
- break;
+ continue;
/*
* Some FIMC variants are not fitted with camera capture
* interface. Skip creating a link from sensor for those.
*/
- if (sensor->grp_id == SENSOR_GROUP_ID &&
- !fmd->fimc[i]->variant->has_cam_if)
+ if (!fmd->fimc[i]->variant->has_cam_if)
continue;
- flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
- sink = &fmd->fimc[i]->vid_cap.subdev->entity;
+ flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
+
+ sink = &fmd->fimc[i]->vid_cap.subdev.entity;
ret = media_entity_create_link(source, pad, sink,
FIMC_SD_PAD_SINK, flags);
if (ret)
@@ -453,7 +541,7 @@ static int __fimc_md_create_fimc_links(struct fimc_md *fmd,
v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
source->name, flags ? '=' : '-', sink->name);
- if (flags == 0)
+ if (flags == 0 || sensor == NULL)
continue;
s_info = v4l2_get_subdev_hostdata(sensor);
if (!WARN_ON(s_info == NULL)) {
@@ -463,9 +551,58 @@ static int __fimc_md_create_fimc_links(struct fimc_md *fmd,
spin_unlock_irqrestore(&fmd->slock, irq_flags);
}
}
+
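+ /* Create a sink link from the source pad to each registered FIMC-LITE */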
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ if (!fmd->fimc_lite[i])
+ continue;
+
+ if (link_mask & (1 << (i + FIMC_MAX_DEVS)))
+ flags = MEDIA_LNK_FL_ENABLED;
+ else
+ flags = 0;
+
+ sink = &fmd->fimc_lite[i]->subdev.entity;
+ ret = media_entity_create_link(source, pad, sink,
+ FLITE_SD_PAD_SINK, flags);
+ if (ret)
+ return ret;
+
+ /* Notify FIMC-LITE subdev entity */
+ ret = media_entity_call(sink, link_setup, &sink->pads[0],
+ &source->pads[pad], flags);
+ if (ret)
+ break;
+
+ v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]",
+ source->name, flags ? '=' : '-', sink->name);
+ }
return 0;
}
+/* Create links from FIMC-LITE source pads to other entities */
+static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
+{
+ struct media_entity *source, *sink;
+ unsigned int flags = MEDIA_LNK_FL_ENABLED;
+ int i, ret = 0;
+
+ for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
+ struct fimc_lite *fimc = fmd->fimc_lite[i];
+ if (fimc == NULL)
+ continue;
+ source = &fimc->subdev.entity;
+ sink = &fimc->vfd->entity;
+ /* FIMC-LITE's subdev and video node */
+ ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
+ sink, 0, flags);
+ if (ret)
+ break;
+ /* TODO: create links to other entities */
+ }
+
+ return ret;
+}
+
/**
* fimc_md_create_links - create default links between registered entities
*
@@ -484,9 +621,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
struct s5p_fimc_isp_info *pdata;
struct fimc_sensor_info *s_info;
struct media_entity *source, *sink;
- int i, pad, fimc_id = 0;
- int ret = 0;
- u32 flags;
+ int i, pad, fimc_id = 0, ret = 0;
+ u32 flags, link_mask = 0;
for (i = 0; i < fmd->num_sensors; i++) {
if (fmd->sensor[i].subdev == NULL)
@@ -522,8 +658,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]",
sensor->entity.name, csis->entity.name);
- source = &csis->entity;
- pad = CSIS_PAD_SOURCE;
+ source = NULL;
break;
case FIMC_ITU_601...FIMC_ITU_656:
@@ -539,15 +674,28 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (source == NULL)
continue;
- ret = __fimc_md_create_fimc_links(fmd, source, sensor, pad,
- fimc_id++);
+ link_mask = 1 << fimc_id++;
+ ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
+ pad, link_mask);
}
+
+ for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
+ if (fmd->csis[i].sd == NULL)
+ continue;
+ source = &fmd->csis[i].sd->entity;
+ pad = CSIS_PAD_SOURCE;
+
+ link_mask = 1 << fimc_id++;
+ ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
+ pad, link_mask);
+ }
+
/* Create immutable links between each FIMC's subdev and video node */
flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (!fmd->fimc[i])
continue;
- source = &fmd->fimc[i]->vid_cap.subdev->entity;
+ source = &fmd->fimc[i]->vid_cap.subdev.entity;
sink = &fmd->fimc[i]->vid_cap.vfd->entity;
ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE,
sink, 0, flags);
@@ -555,7 +703,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
break;
}
- return ret;
+ return __fimc_md_create_flite_source_links(fmd);
}
/*
@@ -593,8 +741,8 @@ static void fimc_md_put_clocks(struct fimc_md *fmd)
}
static int __fimc_md_set_camclk(struct fimc_md *fmd,
- struct fimc_sensor_info *s_info,
- bool on)
+ struct fimc_sensor_info *s_info,
+ bool on)
{
struct s5p_fimc_isp_info *pdata = s_info->pdata;
struct fimc_camclk_info *camclk;
@@ -603,12 +751,10 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
return -EINVAL;
- if (s_info->clk_on == on)
- return 0;
camclk = &fmd->camclk[pdata->clk_id];
- dbg("camclk %d, f: %lu, clk: %p, on: %d",
- pdata->clk_id, pdata->clk_frequency, camclk, on);
+ dbg("camclk %d, f: %lu, use_count: %d, on: %d",
+ pdata->clk_id, pdata->clk_frequency, camclk->use_count, on);
if (on) {
if (camclk->use_count > 0 &&
@@ -619,11 +765,9 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
clk_set_rate(camclk->clock, pdata->clk_frequency);
camclk->frequency = pdata->clk_frequency;
ret = clk_enable(camclk->clock);
+ dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+ clk_get_rate(camclk->clock));
}
- s_info->clk_on = 1;
- dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
- clk_get_rate(camclk->clock));
-
return ret;
}
@@ -632,7 +776,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
if (--camclk->use_count == 0) {
clk_disable(camclk->clock);
- s_info->clk_on = 0;
dbg("Disabled camclk %d", pdata->clk_id);
}
return ret;
@@ -648,8 +791,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
* devices to which sensors can be attached, either directly or through
* the MIPI CSI receiver. The clock is allowed here to be used by
* multiple sensors concurrently if they use same frequency.
- * The per sensor subdev clk_on attribute helps to synchronize accesses
- * to the sclk_cam clocks from the video and media device nodes.
* This function should only be called when the graph mutex is held.
*/
int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
@@ -663,24 +804,40 @@ int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
static int fimc_md_link_notify(struct media_pad *source,
struct media_pad *sink, u32 flags)
{
+ struct fimc_lite *fimc_lite = NULL;
+ struct fimc_dev *fimc = NULL;
+ struct fimc_pipeline *pipeline;
struct v4l2_subdev *sd;
- struct fimc_dev *fimc;
int ret = 0;
if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
return 0;
sd = media_entity_to_v4l2_subdev(sink->entity);
- fimc = v4l2_get_subdevdata(sd);
- if (!(flags & MEDIA_LNK_FL_ENABLED)) {
- ret = __fimc_pipeline_shutdown(fimc);
- fimc->pipeline.sensor = NULL;
- fimc->pipeline.csis = NULL;
+ switch (sd->grp_id) {
+ case FLITE_GROUP_ID:
+ fimc_lite = v4l2_get_subdevdata(sd);
+ pipeline = &fimc_lite->pipeline;
+ break;
+ case FIMC_GROUP_ID:
+ fimc = v4l2_get_subdevdata(sd);
+ pipeline = &fimc->pipeline;
+ break;
+ default:
+ return 0;
+ }
- mutex_lock(&fimc->lock);
- fimc_ctrls_delete(fimc->vid_cap.ctx);
- mutex_unlock(&fimc->lock);
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ ret = __fimc_pipeline_shutdown(pipeline);
+ pipeline->subdevs[IDX_SENSOR] = NULL;
+ pipeline->subdevs[IDX_CSIS] = NULL;
+
+ if (fimc) {
+ mutex_lock(&fimc->lock);
+ fimc_ctrls_delete(fimc->vid_cap.ctx);
+ mutex_unlock(&fimc->lock);
+ }
return ret;
}
/*
@@ -688,14 +845,23 @@ static int fimc_md_link_notify(struct media_pad *source,
* pipeline is already in use, i.e. its video node is opened.
* Recreate the controls destroyed during the link deactivation.
*/
- mutex_lock(&fimc->lock);
- if (fimc->vid_cap.refcnt > 0) {
- ret = __fimc_pipeline_initialize(fimc, source->entity, true);
+ if (fimc) {
+ mutex_lock(&fimc->lock);
+ if (fimc->vid_cap.refcnt > 0) {
+ ret = __fimc_pipeline_initialize(pipeline,
+ source->entity, true);
if (!ret)
ret = fimc_capture_ctrls_create(fimc);
+ }
+ mutex_unlock(&fimc->lock);
+ } else {
+ mutex_lock(&fimc_lite->lock);
+ if (fimc_lite->ref_count > 0) {
+ ret = __fimc_pipeline_initialize(pipeline,
+ source->entity, true);
+ }
+ mutex_unlock(&fimc_lite->lock);
}
- mutex_unlock(&fimc->lock);
-
return ret ? -EPIPE : ret;
}
@@ -744,7 +910,7 @@ static ssize_t fimc_md_sysfs_store(struct device *dev,
static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
fimc_md_sysfs_show, fimc_md_sysfs_store);
-static int __devinit fimc_md_probe(struct platform_device *pdev)
+static int fimc_md_probe(struct platform_device *pdev)
{
struct v4l2_device *v4l2_dev;
struct fimc_md *fmd;
@@ -776,42 +942,48 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
ret = media_device_register(&fmd->media_dev);
if (ret < 0) {
v4l2_err(v4l2_dev, "Failed to register media device: %d\n", ret);
- goto err2;
+ goto err_md;
}
ret = fimc_md_get_clocks(fmd);
if (ret)
- goto err3;
+ goto err_clk;
fmd->user_subdev_api = false;
+
+ /* Protect the media graph while we're registering entities */
+ mutex_lock(&fmd->media_dev.graph_mutex);
+
ret = fimc_md_register_platform_entities(fmd);
if (ret)
- goto err3;
+ goto err_unlock;
if (pdev->dev.platform_data) {
ret = fimc_md_register_sensor_entities(fmd);
if (ret)
- goto err3;
+ goto err_unlock;
}
ret = fimc_md_create_links(fmd);
if (ret)
- goto err3;
+ goto err_unlock;
ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
if (ret)
- goto err3;
- ret = fimc_md_register_video_nodes(fmd);
- if (ret)
- goto err3;
+ goto err_unlock;
ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
- if (!ret) {
- platform_set_drvdata(pdev, fmd);
- return 0;
- }
-err3:
+ if (ret)
+ goto err_unlock;
+
+ platform_set_drvdata(pdev, fmd);
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&fmd->media_dev.graph_mutex);
+err_clk:
media_device_unregister(&fmd->media_dev);
fimc_md_put_clocks(fmd);
fimc_md_unregister_entities(fmd);
-err2:
+err_md:
v4l2_device_unregister(&fmd->v4l2_dev);
return ret;
}
@@ -841,10 +1013,12 @@ static struct platform_driver fimc_md_driver = {
int __init fimc_md_init(void)
{
int ret;
+
request_module("s5p-csis");
ret = fimc_register_driver();
if (ret)
return ret;
+
return platform_driver_register(&fimc_md_driver);
}
void __exit fimc_md_exit(void)
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.h b/drivers/media/video/s5p-fimc/fimc-mdevice.h
index da3780823e7d..1f5dbaff5442 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.h
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,12 +18,15 @@
#include <media/v4l2-subdev.h>
#include "fimc-core.h"
+#include "fimc-lite.h"
#include "mipi-csis.h"
-/* Group IDs of sensor, MIPI CSIS and the writeback subdevs. */
+/* Group IDs of sensor, MIPI-CSIS, FIMC-LITE and the writeback subdevs. */
#define SENSOR_GROUP_ID (1 << 8)
#define CSIS_GROUP_ID (1 << 9)
#define WRITEBACK_GROUP_ID (1 << 10)
+#define FIMC_GROUP_ID (1 << 11)
+#define FLITE_GROUP_ID (1 << 12)
#define FIMC_MAX_SENSORS 8
#define FIMC_MAX_CAMCLKS 2
@@ -44,7 +47,6 @@ struct fimc_camclk_info {
* @pdata: sensor's attributes passed as media device's platform data
* @subdev: image sensor v4l2 subdev
* @host: fimc device the sensor is currently linked to
- * @clk_on: sclk_cam clock's state associated with this subdev
*
* This data structure applies to image sensor and the writeback subdevs.
*/
@@ -52,7 +54,6 @@ struct fimc_sensor_info {
struct s5p_fimc_isp_info *pdata;
struct v4l2_subdev *subdev;
struct fimc_dev *host;
- bool clk_on;
};
/**
@@ -73,6 +74,7 @@ struct fimc_md {
struct fimc_sensor_info sensor[FIMC_MAX_SENSORS];
int num_sensors;
struct fimc_camclk_info camclk[FIMC_MAX_CAMCLKS];
+ struct fimc_lite *fimc_lite[FIMC_LITE_MAX_DEVS];
struct fimc_dev *fimc[FIMC_MAX_DEVS];
struct media_device media_dev;
struct v4l2_device v4l2_dev;
@@ -108,11 +110,11 @@ static inline void fimc_md_graph_unlock(struct fimc_dev *fimc)
}
int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on);
-void fimc_pipeline_prepare(struct fimc_dev *fimc, struct media_entity *me);
-int fimc_pipeline_initialize(struct fimc_dev *fimc, struct media_entity *me,
+void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me);
+int fimc_pipeline_initialize(struct fimc_pipeline *p, struct media_entity *me,
bool resume);
-int fimc_pipeline_shutdown(struct fimc_dev *fimc);
-int fimc_pipeline_s_power(struct fimc_dev *fimc, int state);
-int fimc_pipeline_s_stream(struct fimc_dev *fimc, int state);
+int fimc_pipeline_shutdown(struct fimc_pipeline *p);
+int fimc_pipeline_s_power(struct fimc_pipeline *p, bool state);
+int fimc_pipeline_s_stream(struct fimc_pipeline *p, bool state);
#endif
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 15466d0529c1..1fc4ce8446f5 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -1,9 +1,8 @@
/*
* Register interface file for Samsung Camera Interface (FIMC) driver
*
- * Copyright (c) 2010 Samsung Electronics
- *
- * Sylwester Nawrocki, s.nawrocki@samsung.com
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,9 +11,9 @@
#include <linux/io.h>
#include <linux/delay.h>
-#include <mach/map.h>
#include <media/s5p_fimc.h>
+#include "fimc-reg.h"
#include "fimc-core.h"
@@ -22,19 +21,19 @@ void fimc_hw_reset(struct fimc_dev *dev)
{
u32 cfg;
- cfg = readl(dev->regs + S5P_CISRCFMT);
- cfg |= S5P_CISRCFMT_ITU601_8BIT;
- writel(cfg, dev->regs + S5P_CISRCFMT);
+ cfg = readl(dev->regs + FIMC_REG_CISRCFMT);
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
+ writel(cfg, dev->regs + FIMC_REG_CISRCFMT);
/* Software reset. */
- cfg = readl(dev->regs + S5P_CIGCTRL);
- cfg |= (S5P_CIGCTRL_SWRST | S5P_CIGCTRL_IRQ_LEVEL);
- writel(cfg, dev->regs + S5P_CIGCTRL);
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg |= (FIMC_REG_CIGCTRL_SWRST | FIMC_REG_CIGCTRL_IRQ_LEVEL);
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
udelay(10);
- cfg = readl(dev->regs + S5P_CIGCTRL);
- cfg &= ~S5P_CIGCTRL_SWRST;
- writel(cfg, dev->regs + S5P_CIGCTRL);
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg &= ~FIMC_REG_CIGCTRL_SWRST;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
if (dev->variant->out_buf_count > 4)
fimc_hw_set_dma_seq(dev, 0xF);
@@ -42,32 +41,32 @@ void fimc_hw_reset(struct fimc_dev *dev)
static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
{
- u32 flip = S5P_MSCTRL_FLIP_NORMAL;
+ u32 flip = FIMC_REG_MSCTRL_FLIP_NORMAL;
if (ctx->hflip)
- flip = S5P_MSCTRL_FLIP_X_MIRROR;
+ flip = FIMC_REG_MSCTRL_FLIP_X_MIRROR;
if (ctx->vflip)
- flip = S5P_MSCTRL_FLIP_Y_MIRROR;
+ flip = FIMC_REG_MSCTRL_FLIP_Y_MIRROR;
if (ctx->rotation <= 90)
return flip;
- return (flip ^ S5P_MSCTRL_FLIP_180) & S5P_MSCTRL_FLIP_180;
+ return (flip ^ FIMC_REG_MSCTRL_FLIP_180) & FIMC_REG_MSCTRL_FLIP_180;
}
static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx)
{
- u32 flip = S5P_CITRGFMT_FLIP_NORMAL;
+ u32 flip = FIMC_REG_CITRGFMT_FLIP_NORMAL;
if (ctx->hflip)
- flip |= S5P_CITRGFMT_FLIP_X_MIRROR;
+ flip |= FIMC_REG_CITRGFMT_FLIP_X_MIRROR;
if (ctx->vflip)
- flip |= S5P_CITRGFMT_FLIP_Y_MIRROR;
+ flip |= FIMC_REG_CITRGFMT_FLIP_Y_MIRROR;
if (ctx->rotation <= 90)
return flip;
- return (flip ^ S5P_CITRGFMT_FLIP_180) & S5P_CITRGFMT_FLIP_180;
+ return (flip ^ FIMC_REG_CITRGFMT_FLIP_180) & FIMC_REG_CITRGFMT_FLIP_180;
}
void fimc_hw_set_rotation(struct fimc_ctx *ctx)
@@ -75,9 +74,9 @@ void fimc_hw_set_rotation(struct fimc_ctx *ctx)
u32 cfg, flip;
struct fimc_dev *dev = ctx->fimc_dev;
- cfg = readl(dev->regs + S5P_CITRGFMT);
- cfg &= ~(S5P_CITRGFMT_INROT90 | S5P_CITRGFMT_OUTROT90 |
- S5P_CITRGFMT_FLIP_180);
+ cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+ cfg &= ~(FIMC_REG_CITRGFMT_INROT90 | FIMC_REG_CITRGFMT_OUTROT90 |
+ FIMC_REG_CITRGFMT_FLIP_180);
/*
* The input and output rotator cannot work simultaneously.
@@ -85,21 +84,21 @@ void fimc_hw_set_rotation(struct fimc_ctx *ctx)
* in direct fifo output mode.
*/
if (ctx->rotation == 90 || ctx->rotation == 270) {
- if (ctx->out_path == FIMC_LCDFIFO)
- cfg |= S5P_CITRGFMT_INROT90;
+ if (ctx->out_path == FIMC_IO_LCDFIFO)
+ cfg |= FIMC_REG_CITRGFMT_INROT90;
else
- cfg |= S5P_CITRGFMT_OUTROT90;
+ cfg |= FIMC_REG_CITRGFMT_OUTROT90;
}
- if (ctx->out_path == FIMC_DMA) {
+ if (ctx->out_path == FIMC_IO_DMA) {
cfg |= fimc_hw_get_target_flip(ctx);
- writel(cfg, dev->regs + S5P_CITRGFMT);
+ writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
} else {
/* LCD FIFO path */
- flip = readl(dev->regs + S5P_MSCTRL);
- flip &= ~S5P_MSCTRL_FLIP_MASK;
+ flip = readl(dev->regs + FIMC_REG_MSCTRL);
+ flip &= ~FIMC_REG_MSCTRL_FLIP_MASK;
flip |= fimc_hw_get_in_flip(ctx);
- writel(flip, dev->regs + S5P_MSCTRL);
+ writel(flip, dev->regs + FIMC_REG_MSCTRL);
}
}
@@ -110,43 +109,40 @@ void fimc_hw_set_target_format(struct fimc_ctx *ctx)
struct fimc_frame *frame = &ctx->d_frame;
dbg("w= %d, h= %d color: %d", frame->width,
- frame->height, frame->fmt->color);
+ frame->height, frame->fmt->color);
- cfg = readl(dev->regs + S5P_CITRGFMT);
- cfg &= ~(S5P_CITRGFMT_FMT_MASK | S5P_CITRGFMT_HSIZE_MASK |
- S5P_CITRGFMT_VSIZE_MASK);
+ cfg = readl(dev->regs + FIMC_REG_CITRGFMT);
+ cfg &= ~(FIMC_REG_CITRGFMT_FMT_MASK | FIMC_REG_CITRGFMT_HSIZE_MASK |
+ FIMC_REG_CITRGFMT_VSIZE_MASK);
switch (frame->fmt->color) {
- case S5P_FIMC_RGB444...S5P_FIMC_RGB888:
- cfg |= S5P_CITRGFMT_RGB;
+ case FIMC_FMT_RGB444...FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_CITRGFMT_RGB;
break;
- case S5P_FIMC_YCBCR420:
- cfg |= S5P_CITRGFMT_YCBCR420;
+ case FIMC_FMT_YCBCR420:
+ cfg |= FIMC_REG_CITRGFMT_YCBCR420;
break;
- case S5P_FIMC_YCBYCR422...S5P_FIMC_CRYCBY422:
+ case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
if (frame->fmt->colplanes == 1)
- cfg |= S5P_CITRGFMT_YCBCR422_1P;
+ cfg |= FIMC_REG_CITRGFMT_YCBCR422_1P;
else
- cfg |= S5P_CITRGFMT_YCBCR422;
+ cfg |= FIMC_REG_CITRGFMT_YCBCR422;
break;
default:
break;
}
- if (ctx->rotation == 90 || ctx->rotation == 270) {
- cfg |= S5P_CITRGFMT_HSIZE(frame->height);
- cfg |= S5P_CITRGFMT_VSIZE(frame->width);
- } else {
-
- cfg |= S5P_CITRGFMT_HSIZE(frame->width);
- cfg |= S5P_CITRGFMT_VSIZE(frame->height);
- }
+ if (ctx->rotation == 90 || ctx->rotation == 270)
+ cfg |= (frame->height << 16) | frame->width;
+ else
+ cfg |= (frame->width << 16) | frame->height;
- writel(cfg, dev->regs + S5P_CITRGFMT);
+ writel(cfg, dev->regs + FIMC_REG_CITRGFMT);
- cfg = readl(dev->regs + S5P_CITAREA) & ~S5P_CITAREA_MASK;
+ cfg = readl(dev->regs + FIMC_REG_CITAREA);
+ cfg &= ~FIMC_REG_CITAREA_MASK;
cfg |= (frame->width * frame->height);
- writel(cfg, dev->regs + S5P_CITAREA);
+ writel(cfg, dev->regs + FIMC_REG_CITAREA);
}
static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
@@ -155,87 +151,82 @@ static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx)
struct fimc_frame *frame = &ctx->d_frame;
u32 cfg;
- cfg = S5P_ORIG_SIZE_HOR(frame->f_width);
- cfg |= S5P_ORIG_SIZE_VER(frame->f_height);
- writel(cfg, dev->regs + S5P_ORGOSIZE);
+ cfg = (frame->f_height << 16) | frame->f_width;
+ writel(cfg, dev->regs + FIMC_REG_ORGOSIZE);
/* Select color space conversion equation (HD/SD size).*/
- cfg = readl(dev->regs + S5P_CIGCTRL);
+ cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
if (frame->f_width >= 1280) /* HD */
- cfg |= S5P_CIGCTRL_CSC_ITU601_709;
+ cfg |= FIMC_REG_CIGCTRL_CSC_ITU601_709;
else /* SD */
- cfg &= ~S5P_CIGCTRL_CSC_ITU601_709;
- writel(cfg, dev->regs + S5P_CIGCTRL);
+ cfg &= ~FIMC_REG_CIGCTRL_CSC_ITU601_709;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
}
void fimc_hw_set_out_dma(struct fimc_ctx *ctx)
{
- u32 cfg;
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_frame *frame = &ctx->d_frame;
struct fimc_dma_offset *offset = &frame->dma_offset;
struct fimc_fmt *fmt = frame->fmt;
+ u32 cfg;
/* Set the input dma offsets. */
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->y_h);
- cfg |= S5P_CIO_OFFS_VER(offset->y_v);
- writel(cfg, dev->regs + S5P_CIOYOFF);
+ cfg = (offset->y_v << 16) | offset->y_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOYOFF);
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->cb_h);
- cfg |= S5P_CIO_OFFS_VER(offset->cb_v);
- writel(cfg, dev->regs + S5P_CIOCBOFF);
+ cfg = (offset->cb_v << 16) | offset->cb_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOCBOFF);
- cfg = 0;
- cfg |= S5P_CIO_OFFS_HOR(offset->cr_h);
- cfg |= S5P_CIO_OFFS_VER(offset->cr_v);
- writel(cfg, dev->regs + S5P_CIOCROFF);
+ cfg = (offset->cr_v << 16) | offset->cr_h;
+ writel(cfg, dev->regs + FIMC_REG_CIOCROFF);
fimc_hw_set_out_dma_size(ctx);
/* Configure chroma components order. */
- cfg = readl(dev->regs + S5P_CIOCTRL);
+ cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
- cfg &= ~(S5P_CIOCTRL_ORDER2P_MASK | S5P_CIOCTRL_ORDER422_MASK |
- S5P_CIOCTRL_YCBCR_PLANE_MASK | S5P_CIOCTRL_RGB16FMT_MASK);
+ cfg &= ~(FIMC_REG_CIOCTRL_ORDER2P_MASK |
+ FIMC_REG_CIOCTRL_ORDER422_MASK |
+ FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK |
+ FIMC_REG_CIOCTRL_RGB16FMT_MASK);
if (fmt->colplanes == 1)
cfg |= ctx->out_order_1p;
else if (fmt->colplanes == 2)
- cfg |= ctx->out_order_2p | S5P_CIOCTRL_YCBCR_2PLANE;
+ cfg |= ctx->out_order_2p | FIMC_REG_CIOCTRL_YCBCR_2PLANE;
else if (fmt->colplanes == 3)
- cfg |= S5P_CIOCTRL_YCBCR_3PLANE;
+ cfg |= FIMC_REG_CIOCTRL_YCBCR_3PLANE;
- if (fmt->color == S5P_FIMC_RGB565)
- cfg |= S5P_CIOCTRL_RGB565;
- else if (fmt->color == S5P_FIMC_RGB555)
- cfg |= S5P_CIOCTRL_ARGB1555;
- else if (fmt->color == S5P_FIMC_RGB444)
- cfg |= S5P_CIOCTRL_ARGB4444;
+ if (fmt->color == FIMC_FMT_RGB565)
+ cfg |= FIMC_REG_CIOCTRL_RGB565;
+ else if (fmt->color == FIMC_FMT_RGB555)
+ cfg |= FIMC_REG_CIOCTRL_ARGB1555;
+ else if (fmt->color == FIMC_FMT_RGB444)
+ cfg |= FIMC_REG_CIOCTRL_ARGB4444;
- writel(cfg, dev->regs + S5P_CIOCTRL);
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
}
static void fimc_hw_en_autoload(struct fimc_dev *dev, int enable)
{
- u32 cfg = readl(dev->regs + S5P_ORGISIZE);
+ u32 cfg = readl(dev->regs + FIMC_REG_ORGISIZE);
if (enable)
- cfg |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+ cfg |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
else
- cfg &= ~S5P_CIREAL_ISIZE_AUTOLOAD_EN;
- writel(cfg, dev->regs + S5P_ORGISIZE);
+ cfg &= ~FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
+ writel(cfg, dev->regs + FIMC_REG_ORGISIZE);
}
void fimc_hw_en_lastirq(struct fimc_dev *dev, int enable)
{
- u32 cfg = readl(dev->regs + S5P_CIOCTRL);
+ u32 cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
if (enable)
- cfg |= S5P_CIOCTRL_LASTIRQ_ENABLE;
+ cfg |= FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
else
- cfg &= ~S5P_CIOCTRL_LASTIRQ_ENABLE;
- writel(cfg, dev->regs + S5P_CIOCTRL);
+ cfg &= ~FIMC_REG_CIOCTRL_LASTIRQ_ENABLE;
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
}
void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
@@ -245,15 +236,13 @@ void fimc_hw_set_prescaler(struct fimc_ctx *ctx)
u32 cfg, shfactor;
shfactor = 10 - (sc->hfactor + sc->vfactor);
+ cfg = shfactor << 28;
- cfg = S5P_CISCPRERATIO_SHFACTOR(shfactor);
- cfg |= S5P_CISCPRERATIO_HOR(sc->pre_hratio);
- cfg |= S5P_CISCPRERATIO_VER(sc->pre_vratio);
- writel(cfg, dev->regs + S5P_CISCPRERATIO);
+ cfg |= (sc->pre_hratio << 16) | sc->pre_vratio;
+ writel(cfg, dev->regs + FIMC_REG_CISCPRERATIO);
- cfg = S5P_CISCPREDST_WIDTH(sc->pre_dst_width);
- cfg |= S5P_CISCPREDST_HEIGHT(sc->pre_dst_height);
- writel(cfg, dev->regs + S5P_CISCPREDST);
+ cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
+ writel(cfg, dev->regs + FIMC_REG_CISCPREDST);
}
static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
@@ -263,93 +252,95 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
struct fimc_frame *src_frame = &ctx->s_frame;
struct fimc_frame *dst_frame = &ctx->d_frame;
- u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
- cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
- S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
- S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
- S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
- S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
+ cfg &= ~(FIMC_REG_CISCCTRL_CSCR2Y_WIDE | FIMC_REG_CISCCTRL_CSCY2R_WIDE |
+ FIMC_REG_CISCCTRL_SCALEUP_H | FIMC_REG_CISCCTRL_SCALEUP_V |
+ FIMC_REG_CISCCTRL_SCALERBYPASS | FIMC_REG_CISCCTRL_ONE2ONE |
+ FIMC_REG_CISCCTRL_INRGB_FMT_MASK | FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK |
+ FIMC_REG_CISCCTRL_INTERLACE | FIMC_REG_CISCCTRL_RGB_EXT);
if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
- cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
+ cfg |= (FIMC_REG_CISCCTRL_CSCR2Y_WIDE |
+ FIMC_REG_CISCCTRL_CSCY2R_WIDE);
if (!sc->enabled)
- cfg |= S5P_CISCCTRL_SCALERBYPASS;
+ cfg |= FIMC_REG_CISCCTRL_SCALERBYPASS;
if (sc->scaleup_h)
- cfg |= S5P_CISCCTRL_SCALEUP_H;
+ cfg |= FIMC_REG_CISCCTRL_SCALEUP_H;
if (sc->scaleup_v)
- cfg |= S5P_CISCCTRL_SCALEUP_V;
+ cfg |= FIMC_REG_CISCCTRL_SCALEUP_V;
if (sc->copy_mode)
- cfg |= S5P_CISCCTRL_ONE2ONE;
+ cfg |= FIMC_REG_CISCCTRL_ONE2ONE;
- if (ctx->in_path == FIMC_DMA) {
+ if (ctx->in_path == FIMC_IO_DMA) {
switch (src_frame->fmt->color) {
- case S5P_FIMC_RGB565:
- cfg |= S5P_CISCCTRL_INRGB_FMT_RGB565;
+ case FIMC_FMT_RGB565:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB565;
break;
- case S5P_FIMC_RGB666:
- cfg |= S5P_CISCCTRL_INRGB_FMT_RGB666;
+ case FIMC_FMT_RGB666:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB666;
break;
- case S5P_FIMC_RGB888:
- cfg |= S5P_CISCCTRL_INRGB_FMT_RGB888;
+ case FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_CISCCTRL_INRGB_FMT_RGB888;
break;
}
}
- if (ctx->out_path == FIMC_DMA) {
+ if (ctx->out_path == FIMC_IO_DMA) {
u32 color = dst_frame->fmt->color;
- if (color >= S5P_FIMC_RGB444 && color <= S5P_FIMC_RGB565)
- cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB565;
- else if (color == S5P_FIMC_RGB666)
- cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB666;
- else if (color == S5P_FIMC_RGB888)
- cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB888;
+ if (color >= FIMC_FMT_RGB444 && color <= FIMC_FMT_RGB565)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565;
+ else if (color == FIMC_FMT_RGB666)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666;
+ else if (color == FIMC_FMT_RGB888)
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
} else {
- cfg |= S5P_CISCCTRL_OUTRGB_FMT_RGB888;
+ cfg |= FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888;
if (ctx->flags & FIMC_SCAN_MODE_INTERLACED)
- cfg |= S5P_CISCCTRL_INTERLACE;
+ cfg |= FIMC_REG_CISCCTRL_INTERLACE;
}
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
}
void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- struct samsung_fimc_variant *variant = dev->variant;
+ struct fimc_variant *variant = dev->variant;
struct fimc_scaler *sc = &ctx->scaler;
u32 cfg;
dbg("main_hratio= 0x%X main_vratio= 0x%X",
- sc->main_hratio, sc->main_vratio);
+ sc->main_hratio, sc->main_vratio);
fimc_hw_set_scaler(ctx);
- cfg = readl(dev->regs + S5P_CISCCTRL);
- cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
+ cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ cfg &= ~(FIMC_REG_CISCCTRL_MHRATIO_MASK |
+ FIMC_REG_CISCCTRL_MVRATIO_MASK);
if (variant->has_mainscaler_ext) {
- cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
- cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ cfg |= FIMC_REG_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
+ cfg |= FIMC_REG_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
- cfg = readl(dev->regs + S5P_CIEXTEN);
+ cfg = readl(dev->regs + FIMC_REG_CIEXTEN);
- cfg &= ~(S5P_CIEXTEN_MVRATIO_EXT_MASK |
- S5P_CIEXTEN_MHRATIO_EXT_MASK);
- cfg |= S5P_CIEXTEN_MHRATIO_EXT(sc->main_hratio);
- cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
- writel(cfg, dev->regs + S5P_CIEXTEN);
+ cfg &= ~(FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK |
+ FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK);
+ cfg |= FIMC_REG_CIEXTEN_MHRATIO_EXT(sc->main_hratio);
+ cfg |= FIMC_REG_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CIEXTEN);
} else {
- cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
- cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ cfg |= FIMC_REG_CISCCTRL_MHRATIO(sc->main_hratio);
+ cfg |= FIMC_REG_CISCCTRL_MVRATIO(sc->main_vratio);
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
}
}
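
On variants with has_mainscaler_ext the main scaler ratio no longer fits the 9-bit MHRATIO/MVRATIO fields of CISCCTRL, so the code above splits it: the bits above the low six go to CISCCTRL through FIMC_REG_CISCCTRL_M[HV]RATIO_EXT() and the low six bits go to CIEXTEN through FIMC_REG_CIEXTEN_M[HV]RATIO_EXT() (both defined in the new fimc-reg.h below). A standalone user-space sketch of that split, not part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the FIMC_REG_CISCCTRL_MHRATIO_EXT() and
 * FIMC_REG_CIEXTEN_MHRATIO_EXT() macros from fimc-reg.h: everything above
 * the low six bits of the ratio lands in CISCCTRL, the low six bits in
 * CIEXTEN.
 */
#define CISCCTRL_MHRATIO_EXT(x)	(((x) >> 6) << 16)
#define CIEXTEN_MHRATIO_EXT(x)	(((x) & 0x3f) << 10)

int main(void)
{
	uint32_t main_hratio = 0x1a5;	/* example ratio value */

	printf("CISCCTRL bits: 0x%08x\n",
	       (unsigned)CISCCTRL_MHRATIO_EXT(main_hratio));
	printf("CIEXTEN bits:  0x%08x\n",
	       (unsigned)CIEXTEN_MHRATIO_EXT(main_hratio));
	return 0;
}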
@@ -357,40 +348,41 @@ void fimc_hw_en_capture(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- u32 cfg = readl(dev->regs + S5P_CIIMGCPT);
+ u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
- if (ctx->out_path == FIMC_DMA) {
+ if (ctx->out_path == FIMC_IO_DMA) {
/* one shot mode */
- cfg |= S5P_CIIMGCPT_CPT_FREN_ENABLE | S5P_CIIMGCPT_IMGCPTEN;
+ cfg |= FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
+ FIMC_REG_CIIMGCPT_IMGCPTEN;
} else {
/* Continuous frame capture mode (freerun). */
- cfg &= ~(S5P_CIIMGCPT_CPT_FREN_ENABLE |
- S5P_CIIMGCPT_CPT_FRMOD_CNT);
- cfg |= S5P_CIIMGCPT_IMGCPTEN;
+ cfg &= ~(FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE |
+ FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT);
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
}
if (ctx->scaler.enabled)
- cfg |= S5P_CIIMGCPT_IMGCPTEN_SC;
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN_SC;
- writel(cfg | S5P_CIIMGCPT_IMGCPTEN, dev->regs + S5P_CIIMGCPT);
+ cfg |= FIMC_REG_CIIMGCPT_IMGCPTEN;
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
}
-void fimc_hw_set_effect(struct fimc_ctx *ctx, bool active)
+void fimc_hw_set_effect(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
struct fimc_effect *effect = &ctx->effect;
u32 cfg = 0;
- if (active) {
- cfg |= S5P_CIIMGEFF_IE_SC_AFTER | S5P_CIIMGEFF_IE_ENABLE;
+ if (effect->type != FIMC_REG_CIIMGEFF_FIN_BYPASS) {
+ cfg |= FIMC_REG_CIIMGEFF_IE_SC_AFTER |
+ FIMC_REG_CIIMGEFF_IE_ENABLE;
cfg |= effect->type;
- if (effect->type == S5P_FIMC_EFFECT_ARBITRARY) {
- cfg |= S5P_CIIMGEFF_PAT_CB(effect->pat_cb);
- cfg |= S5P_CIIMGEFF_PAT_CR(effect->pat_cr);
- }
+ if (effect->type == FIMC_REG_CIIMGEFF_FIN_ARBITRARY)
+ cfg |= (effect->pat_cb << 13) | effect->pat_cr;
}
- writel(cfg, dev->regs + S5P_CIIMGEFF);
+ writel(cfg, dev->regs + FIMC_REG_CIIMGEFF);
}
void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
@@ -402,10 +394,10 @@ void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx)
if (!(frame->fmt->flags & FMT_HAS_ALPHA))
return;
- cfg = readl(dev->regs + S5P_CIOCTRL);
- cfg &= ~S5P_CIOCTRL_ALPHA_OUT_MASK;
+ cfg = readl(dev->regs + FIMC_REG_CIOCTRL);
+ cfg &= ~FIMC_REG_CIOCTRL_ALPHA_OUT_MASK;
cfg |= (frame->alpha << 4);
- writel(cfg, dev->regs + S5P_CIOCTRL);
+ writel(cfg, dev->regs + FIMC_REG_CIOCTRL);
}
static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
@@ -415,16 +407,14 @@ static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx)
u32 cfg_o = 0;
u32 cfg_r = 0;
- if (FIMC_LCDFIFO == ctx->out_path)
- cfg_r |= S5P_CIREAL_ISIZE_AUTOLOAD_EN;
+ if (FIMC_IO_LCDFIFO == ctx->out_path)
+ cfg_r |= FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN;
- cfg_o |= S5P_ORIG_SIZE_HOR(frame->f_width);
- cfg_o |= S5P_ORIG_SIZE_VER(frame->f_height);
- cfg_r |= S5P_CIREAL_ISIZE_WIDTH(frame->width);
- cfg_r |= S5P_CIREAL_ISIZE_HEIGHT(frame->height);
+ cfg_o |= (frame->f_height << 16) | frame->f_width;
+ cfg_r |= (frame->height << 16) | frame->width;
- writel(cfg_o, dev->regs + S5P_ORGISIZE);
- writel(cfg_r, dev->regs + S5P_CIREAL_ISIZE);
+ writel(cfg_o, dev->regs + FIMC_REG_ORGISIZE);
+ writel(cfg_r, dev->regs + FIMC_REG_CIREAL_ISIZE);
}
void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
@@ -435,80 +425,77 @@ void fimc_hw_set_in_dma(struct fimc_ctx *ctx)
u32 cfg;
/* Set the pixel offsets. */
- cfg = S5P_CIO_OFFS_HOR(offset->y_h);
- cfg |= S5P_CIO_OFFS_VER(offset->y_v);
- writel(cfg, dev->regs + S5P_CIIYOFF);
+ cfg = (offset->y_v << 16) | offset->y_h;
+ writel(cfg, dev->regs + FIMC_REG_CIIYOFF);
- cfg = S5P_CIO_OFFS_HOR(offset->cb_h);
- cfg |= S5P_CIO_OFFS_VER(offset->cb_v);
- writel(cfg, dev->regs + S5P_CIICBOFF);
+ cfg = (offset->cb_v << 16) | offset->cb_h;
+ writel(cfg, dev->regs + FIMC_REG_CIICBOFF);
- cfg = S5P_CIO_OFFS_HOR(offset->cr_h);
- cfg |= S5P_CIO_OFFS_VER(offset->cr_v);
- writel(cfg, dev->regs + S5P_CIICROFF);
+ cfg = (offset->cr_v << 16) | offset->cr_h;
+ writel(cfg, dev->regs + FIMC_REG_CIICROFF);
/* Input original and real size. */
fimc_hw_set_in_dma_size(ctx);
/* Use DMA autoload only in FIFO mode. */
- fimc_hw_en_autoload(dev, ctx->out_path == FIMC_LCDFIFO);
+ fimc_hw_en_autoload(dev, ctx->out_path == FIMC_IO_LCDFIFO);
/* Set the input DMA to process single frame only. */
- cfg = readl(dev->regs + S5P_MSCTRL);
- cfg &= ~(S5P_MSCTRL_INFORMAT_MASK
- | S5P_MSCTRL_IN_BURST_COUNT_MASK
- | S5P_MSCTRL_INPUT_MASK
- | S5P_MSCTRL_C_INT_IN_MASK
- | S5P_MSCTRL_2P_IN_ORDER_MASK);
+ cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ cfg &= ~(FIMC_REG_MSCTRL_INFORMAT_MASK
+ | FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK
+ | FIMC_REG_MSCTRL_INPUT_MASK
+ | FIMC_REG_MSCTRL_C_INT_IN_MASK
+ | FIMC_REG_MSCTRL_2P_IN_ORDER_MASK);
- cfg |= (S5P_MSCTRL_IN_BURST_COUNT(4)
- | S5P_MSCTRL_INPUT_MEMORY
- | S5P_MSCTRL_FIFO_CTRL_FULL);
+ cfg |= (FIMC_REG_MSCTRL_IN_BURST_COUNT(4)
+ | FIMC_REG_MSCTRL_INPUT_MEMORY
+ | FIMC_REG_MSCTRL_FIFO_CTRL_FULL);
switch (frame->fmt->color) {
- case S5P_FIMC_RGB565...S5P_FIMC_RGB888:
- cfg |= S5P_MSCTRL_INFORMAT_RGB;
+ case FIMC_FMT_RGB565...FIMC_FMT_RGB888:
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_RGB;
break;
- case S5P_FIMC_YCBCR420:
- cfg |= S5P_MSCTRL_INFORMAT_YCBCR420;
+ case FIMC_FMT_YCBCR420:
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR420;
if (frame->fmt->colplanes == 2)
- cfg |= ctx->in_order_2p | S5P_MSCTRL_C_INT_IN_2PLANE;
+ cfg |= ctx->in_order_2p | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
else
- cfg |= S5P_MSCTRL_C_INT_IN_3PLANE;
+ cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
break;
- case S5P_FIMC_YCBYCR422...S5P_FIMC_CRYCBY422:
+ case FIMC_FMT_YCBYCR422...FIMC_FMT_CRYCBY422:
if (frame->fmt->colplanes == 1) {
cfg |= ctx->in_order_1p
- | S5P_MSCTRL_INFORMAT_YCBCR422_1P;
+ | FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P;
} else {
- cfg |= S5P_MSCTRL_INFORMAT_YCBCR422;
+ cfg |= FIMC_REG_MSCTRL_INFORMAT_YCBCR422;
if (frame->fmt->colplanes == 2)
cfg |= ctx->in_order_2p
- | S5P_MSCTRL_C_INT_IN_2PLANE;
+ | FIMC_REG_MSCTRL_C_INT_IN_2PLANE;
else
- cfg |= S5P_MSCTRL_C_INT_IN_3PLANE;
+ cfg |= FIMC_REG_MSCTRL_C_INT_IN_3PLANE;
}
break;
default:
break;
}
- writel(cfg, dev->regs + S5P_MSCTRL);
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
/* Input/output DMA linear/tiled mode. */
- cfg = readl(dev->regs + S5P_CIDMAPARAM);
- cfg &= ~S5P_CIDMAPARAM_TILE_MASK;
+ cfg = readl(dev->regs + FIMC_REG_CIDMAPARAM);
+ cfg &= ~FIMC_REG_CIDMAPARAM_TILE_MASK;
if (tiled_fmt(ctx->s_frame.fmt))
- cfg |= S5P_CIDMAPARAM_R_64X32;
+ cfg |= FIMC_REG_CIDMAPARAM_R_64X32;
if (tiled_fmt(ctx->d_frame.fmt))
- cfg |= S5P_CIDMAPARAM_W_64X32;
+ cfg |= FIMC_REG_CIDMAPARAM_W_64X32;
- writel(cfg, dev->regs + S5P_CIDMAPARAM);
+ writel(cfg, dev->regs + FIMC_REG_CIDMAPARAM);
}
@@ -516,40 +503,40 @@ void fimc_hw_set_input_path(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- u32 cfg = readl(dev->regs + S5P_MSCTRL);
- cfg &= ~S5P_MSCTRL_INPUT_MASK;
+ u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ cfg &= ~FIMC_REG_MSCTRL_INPUT_MASK;
- if (ctx->in_path == FIMC_DMA)
- cfg |= S5P_MSCTRL_INPUT_MEMORY;
+ if (ctx->in_path == FIMC_IO_DMA)
+ cfg |= FIMC_REG_MSCTRL_INPUT_MEMORY;
else
- cfg |= S5P_MSCTRL_INPUT_EXTCAM;
+ cfg |= FIMC_REG_MSCTRL_INPUT_EXTCAM;
- writel(cfg, dev->regs + S5P_MSCTRL);
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
}
void fimc_hw_set_output_path(struct fimc_ctx *ctx)
{
struct fimc_dev *dev = ctx->fimc_dev;
- u32 cfg = readl(dev->regs + S5P_CISCCTRL);
- cfg &= ~S5P_CISCCTRL_LCDPATHEN_FIFO;
- if (ctx->out_path == FIMC_LCDFIFO)
- cfg |= S5P_CISCCTRL_LCDPATHEN_FIFO;
- writel(cfg, dev->regs + S5P_CISCCTRL);
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ cfg &= ~FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+ if (ctx->out_path == FIMC_IO_LCDFIFO)
+ cfg |= FIMC_REG_CISCCTRL_LCDPATHEN_FIFO;
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
}
void fimc_hw_set_input_addr(struct fimc_dev *dev, struct fimc_addr *paddr)
{
- u32 cfg = readl(dev->regs + S5P_CIREAL_ISIZE);
- cfg |= S5P_CIREAL_ISIZE_ADDR_CH_DIS;
- writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
+ u32 cfg = readl(dev->regs + FIMC_REG_CIREAL_ISIZE);
+ cfg |= FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
- writel(paddr->y, dev->regs + S5P_CIIYSA(0));
- writel(paddr->cb, dev->regs + S5P_CIICBSA(0));
- writel(paddr->cr, dev->regs + S5P_CIICRSA(0));
+ writel(paddr->y, dev->regs + FIMC_REG_CIIYSA(0));
+ writel(paddr->cb, dev->regs + FIMC_REG_CIICBSA(0));
+ writel(paddr->cr, dev->regs + FIMC_REG_CIICRSA(0));
- cfg &= ~S5P_CIREAL_ISIZE_ADDR_CH_DIS;
- writel(cfg, dev->regs + S5P_CIREAL_ISIZE);
+ cfg &= ~FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS;
+ writel(cfg, dev->regs + FIMC_REG_CIREAL_ISIZE);
}
void fimc_hw_set_output_addr(struct fimc_dev *dev,
@@ -557,9 +544,9 @@ void fimc_hw_set_output_addr(struct fimc_dev *dev,
{
int i = (index == -1) ? 0 : index;
do {
- writel(paddr->y, dev->regs + S5P_CIOYSA(i));
- writel(paddr->cb, dev->regs + S5P_CIOCBSA(i));
- writel(paddr->cr, dev->regs + S5P_CIOCRSA(i));
+ writel(paddr->y, dev->regs + FIMC_REG_CIOYSA(i));
+ writel(paddr->cb, dev->regs + FIMC_REG_CIOCBSA(i));
+ writel(paddr->cr, dev->regs + FIMC_REG_CIOCRSA(i));
dbg("dst_buf[%d]: 0x%X, cb: 0x%X, cr: 0x%X",
i, paddr->y, paddr->cb, paddr->cr);
} while (index == -1 && ++i < FIMC_MAX_OUT_BUFS);
@@ -568,32 +555,45 @@ void fimc_hw_set_output_addr(struct fimc_dev *dev,
int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
struct s5p_fimc_isp_info *cam)
{
- u32 cfg = readl(fimc->regs + S5P_CIGCTRL);
+ u32 cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
- cfg &= ~(S5P_CIGCTRL_INVPOLPCLK | S5P_CIGCTRL_INVPOLVSYNC |
- S5P_CIGCTRL_INVPOLHREF | S5P_CIGCTRL_INVPOLHSYNC |
- S5P_CIGCTRL_INVPOLFIELD);
+ cfg &= ~(FIMC_REG_CIGCTRL_INVPOLPCLK | FIMC_REG_CIGCTRL_INVPOLVSYNC |
+ FIMC_REG_CIGCTRL_INVPOLHREF | FIMC_REG_CIGCTRL_INVPOLHSYNC |
+ FIMC_REG_CIGCTRL_INVPOLFIELD);
if (cam->flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
- cfg |= S5P_CIGCTRL_INVPOLPCLK;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLPCLK;
if (cam->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- cfg |= S5P_CIGCTRL_INVPOLVSYNC;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLVSYNC;
if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- cfg |= S5P_CIGCTRL_INVPOLHREF;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLHREF;
if (cam->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- cfg |= S5P_CIGCTRL_INVPOLHSYNC;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLHSYNC;
if (cam->flags & V4L2_MBUS_FIELD_EVEN_LOW)
- cfg |= S5P_CIGCTRL_INVPOLFIELD;
+ cfg |= FIMC_REG_CIGCTRL_INVPOLFIELD;
- writel(cfg, fimc->regs + S5P_CIGCTRL);
+ writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
return 0;
}
+struct mbus_pixfmt_desc {
+ u32 pixelcode;
+ u32 cisrcfmt;
+ u16 bus_width;
+};
+
+static const struct mbus_pixfmt_desc pix_desc[] = {
+ { V4L2_MBUS_FMT_YUYV8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCBYCR, 8 },
+ { V4L2_MBUS_FMT_YVYU8_2X8, FIMC_REG_CISRCFMT_ORDER422_YCRYCB, 8 },
+ { V4L2_MBUS_FMT_VYUY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CRYCBY, 8 },
+ { V4L2_MBUS_FMT_UYVY8_2X8, FIMC_REG_CISRCFMT_ORDER422_CBYCRY, 8 },
+};
+
int fimc_hw_set_camera_source(struct fimc_dev *fimc,
struct s5p_fimc_isp_info *cam)
{
@@ -602,18 +602,6 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
u32 bus_width;
int i;
- static const struct {
- u32 pixelcode;
- u32 cisrcfmt;
- u16 bus_width;
- } pix_desc[] = {
- { V4L2_MBUS_FMT_YUYV8_2X8, S5P_CISRCFMT_ORDER422_YCBYCR, 8 },
- { V4L2_MBUS_FMT_YVYU8_2X8, S5P_CISRCFMT_ORDER422_YCRYCB, 8 },
- { V4L2_MBUS_FMT_VYUY8_2X8, S5P_CISRCFMT_ORDER422_CRYCBY, 8 },
- { V4L2_MBUS_FMT_UYVY8_2X8, S5P_CISRCFMT_ORDER422_CBYCRY, 8 },
- /* TODO: Add pixel codes for 16-bit bus width */
- };
-
if (cam->bus_type == FIMC_ITU_601 || cam->bus_type == FIMC_ITU_656) {
for (i = 0; i < ARRAY_SIZE(pix_desc); i++) {
if (fimc->vid_cap.mf.code == pix_desc[i].pixelcode) {
@@ -632,41 +620,37 @@ int fimc_hw_set_camera_source(struct fimc_dev *fimc,
if (cam->bus_type == FIMC_ITU_601) {
if (bus_width == 8)
- cfg |= S5P_CISRCFMT_ITU601_8BIT;
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
else if (bus_width == 16)
- cfg |= S5P_CISRCFMT_ITU601_16BIT;
+ cfg |= FIMC_REG_CISRCFMT_ITU601_16BIT;
} /* else defaults to ITU-R BT.656 8-bit */
} else if (cam->bus_type == FIMC_MIPI_CSI2) {
if (fimc_fmt_is_jpeg(f->fmt->color))
- cfg |= S5P_CISRCFMT_ITU601_8BIT;
+ cfg |= FIMC_REG_CISRCFMT_ITU601_8BIT;
}
- cfg |= S5P_CISRCFMT_HSIZE(f->o_width) | S5P_CISRCFMT_VSIZE(f->o_height);
- writel(cfg, fimc->regs + S5P_CISRCFMT);
+ cfg |= (f->o_width << 16) | f->o_height;
+ writel(cfg, fimc->regs + FIMC_REG_CISRCFMT);
return 0;
}
-
-int fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f)
{
u32 hoff2, voff2;
- u32 cfg = readl(fimc->regs + S5P_CIWDOFST);
+ u32 cfg = readl(fimc->regs + FIMC_REG_CIWDOFST);
- cfg &= ~(S5P_CIWDOFST_HOROFF_MASK | S5P_CIWDOFST_VEROFF_MASK);
- cfg |= S5P_CIWDOFST_OFF_EN |
- S5P_CIWDOFST_HOROFF(f->offs_h) |
- S5P_CIWDOFST_VEROFF(f->offs_v);
+ cfg &= ~(FIMC_REG_CIWDOFST_HOROFF_MASK | FIMC_REG_CIWDOFST_VEROFF_MASK);
+ cfg |= FIMC_REG_CIWDOFST_OFF_EN |
+ (f->offs_h << 16) | f->offs_v;
- writel(cfg, fimc->regs + S5P_CIWDOFST);
+ writel(cfg, fimc->regs + FIMC_REG_CIWDOFST);
/* See CIWDOFSTn register description in the datasheet for details. */
hoff2 = f->o_width - f->width - f->offs_h;
voff2 = f->o_height - f->height - f->offs_v;
- cfg = S5P_CIWDOFST2_HOROFF(hoff2) | S5P_CIWDOFST2_VEROFF(voff2);
-
- writel(cfg, fimc->regs + S5P_CIWDOFST2);
- return 0;
+ cfg = (hoff2 << 16) | voff2;
+ writel(cfg, fimc->regs + FIMC_REG_CIWDOFST2);
}
int fimc_hw_set_camera_type(struct fimc_dev *fimc,
@@ -674,28 +658,29 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
{
u32 cfg, tmp;
struct fimc_vid_cap *vid_cap = &fimc->vid_cap;
+ u32 csis_data_alignment = 32;
- cfg = readl(fimc->regs + S5P_CIGCTRL);
+ cfg = readl(fimc->regs + FIMC_REG_CIGCTRL);
/* Select ITU B interface, disable Writeback path and test pattern. */
- cfg &= ~(S5P_CIGCTRL_TESTPAT_MASK | S5P_CIGCTRL_SELCAM_ITU_A |
- S5P_CIGCTRL_SELCAM_MIPI | S5P_CIGCTRL_CAMIF_SELWB |
- S5P_CIGCTRL_SELCAM_MIPI_A | S5P_CIGCTRL_CAM_JPEG);
+ cfg &= ~(FIMC_REG_CIGCTRL_TESTPAT_MASK | FIMC_REG_CIGCTRL_SELCAM_ITU_A |
+ FIMC_REG_CIGCTRL_SELCAM_MIPI | FIMC_REG_CIGCTRL_CAMIF_SELWB |
+ FIMC_REG_CIGCTRL_SELCAM_MIPI_A | FIMC_REG_CIGCTRL_CAM_JPEG);
if (cam->bus_type == FIMC_MIPI_CSI2) {
- cfg |= S5P_CIGCTRL_SELCAM_MIPI;
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI;
if (cam->mux_id == 0)
- cfg |= S5P_CIGCTRL_SELCAM_MIPI_A;
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI_A;
/* TODO: add remaining supported formats. */
switch (vid_cap->mf.code) {
case V4L2_MBUS_FMT_VYUY8_2X8:
- tmp = S5P_CSIIMGFMT_YCBCR422_8BIT;
+ tmp = FIMC_REG_CSIIMGFMT_YCBCR422_8BIT;
break;
case V4L2_MBUS_FMT_JPEG_1X8:
- tmp = S5P_CSIIMGFMT_USER(1);
- cfg |= S5P_CIGCTRL_CAM_JPEG;
+ tmp = FIMC_REG_CSIIMGFMT_USER(1);
+ cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
break;
default:
v4l2_err(fimc->vid_cap.vfd,
@@ -703,21 +688,86 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
vid_cap->mf.code);
return -EINVAL;
}
- tmp |= (cam->csi_data_align == 32) << 8;
+ tmp |= (csis_data_alignment == 32) << 8;
- writel(tmp, fimc->regs + S5P_CSIIMGFMT);
+ writel(tmp, fimc->regs + FIMC_REG_CSIIMGFMT);
} else if (cam->bus_type == FIMC_ITU_601 ||
cam->bus_type == FIMC_ITU_656) {
if (cam->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
- cfg |= S5P_CIGCTRL_SELCAM_ITU_A;
+ cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
} else if (cam->bus_type == FIMC_LCD_WB) {
- cfg |= S5P_CIGCTRL_CAMIF_SELWB;
+ cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
} else {
err("invalid camera bus type selected\n");
return -EINVAL;
}
- writel(cfg, fimc->regs + S5P_CIGCTRL);
+ writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
return 0;
}
+
+void fimc_hw_clear_irq(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIGCTRL);
+ cfg |= FIMC_REG_CIGCTRL_IRQ_CLR;
+ writel(cfg, dev->regs + FIMC_REG_CIGCTRL);
+}
+
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CISCCTRL);
+ if (on)
+ cfg |= FIMC_REG_CISCCTRL_SCALERSTART;
+ else
+ cfg &= ~FIMC_REG_CISCCTRL_SCALERSTART;
+ writel(cfg, dev->regs + FIMC_REG_CISCCTRL);
+}
+
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_MSCTRL);
+ if (on)
+ cfg |= FIMC_REG_MSCTRL_ENVID;
+ else
+ cfg &= ~FIMC_REG_MSCTRL_ENVID;
+ writel(cfg, dev->regs + FIMC_REG_MSCTRL);
+}
+
+void fimc_hw_dis_capture(struct fimc_dev *dev)
+{
+ u32 cfg = readl(dev->regs + FIMC_REG_CIIMGCPT);
+ cfg &= ~(FIMC_REG_CIIMGCPT_IMGCPTEN | FIMC_REG_CIIMGCPT_IMGCPTEN_SC);
+ writel(cfg, dev->regs + FIMC_REG_CIIMGCPT);
+}
+
+/* Return an index to the buffer actually being written. */
+u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
+{
+ u32 reg;
+
+ if (dev->variant->has_cistatus2) {
+ reg = readl(dev->regs + FIMC_REG_CISTATUS2) & 0x3F;
+ return reg > 0 ? --reg : reg;
+ }
+
+ reg = readl(dev->regs + FIMC_REG_CISTATUS);
+
+ return (reg & FIMC_REG_CISTATUS_FRAMECNT_MASK) >>
+ FIMC_REG_CISTATUS_FRAMECNT_SHIFT;
+}
+
+/* Locking: the caller holds fimc->slock */
+void fimc_activate_capture(struct fimc_ctx *ctx)
+{
+ fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled);
+ fimc_hw_en_capture(ctx);
+}
+
+void fimc_deactivate_capture(struct fimc_dev *fimc)
+{
+ fimc_hw_en_lastirq(fimc, true);
+ fimc_hw_dis_capture(fimc);
+ fimc_hw_enable_scaler(fimc, false);
+ fimc_hw_en_lastirq(fimc, false);
+}
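
On variants with has_cistatus2, fimc_hw_get_frame_index() above reads a 6-bit index from CISTATUS2 and decrements it when non-zero, which appears to map the hardware's 1-based buffer index to the driver's 0-based numbering. A standalone sketch of that mapping, not part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the CISTATUS2 branch of fimc_hw_get_frame_index(): the raw
 * 6-bit field is decremented when non-zero (0 is returned unchanged).
 */
static uint32_t frame_index_from_cistatus2(uint32_t cistatus2)
{
	uint32_t reg = cistatus2 & 0x3F;

	return reg > 0 ? reg - 1 : reg;
}

int main(void)
{
	printf("%u %u %u\n",
	       (unsigned)frame_index_from_cistatus2(0),	/* 0 */
	       (unsigned)frame_index_from_cistatus2(1),	/* 0 */
	       (unsigned)frame_index_from_cistatus2(5));	/* 4 */
	return 0;
}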
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.h b/drivers/media/video/s5p-fimc/fimc-reg.h
new file mode 100644
index 000000000000..579ac8ac03de
--- /dev/null
+++ b/drivers/media/video/s5p-fimc/fimc-reg.h
@@ -0,0 +1,326 @@
+/*
+ * Samsung camera host interface (FIMC) registers definition
+ *
+ * Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FIMC_REG_H_
+#define FIMC_REG_H_
+
+#include "fimc-core.h"
+
+/* Input source format */
+#define FIMC_REG_CISRCFMT 0x00
+#define FIMC_REG_CISRCFMT_ITU601_8BIT (1 << 31)
+#define FIMC_REG_CISRCFMT_ITU601_16BIT (1 << 29)
+#define FIMC_REG_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define FIMC_REG_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+
+/* Window offset */
+#define FIMC_REG_CIWDOFST 0x04
+#define FIMC_REG_CIWDOFST_OFF_EN (1 << 31)
+#define FIMC_REG_CIWDOFST_CLROVFIY (1 << 30)
+#define FIMC_REG_CIWDOFST_CLROVRLB (1 << 29)
+#define FIMC_REG_CIWDOFST_HOROFF_MASK (0x7ff << 16)
+#define FIMC_REG_CIWDOFST_CLROVFICB (1 << 15)
+#define FIMC_REG_CIWDOFST_CLROVFICR (1 << 14)
+#define FIMC_REG_CIWDOFST_VEROFF_MASK (0xfff << 0)
+
+/* Global control */
+#define FIMC_REG_CIGCTRL 0x08
+#define FIMC_REG_CIGCTRL_SWRST (1 << 31)
+#define FIMC_REG_CIGCTRL_CAMRST_A (1 << 30)
+#define FIMC_REG_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define FIMC_REG_CIGCTRL_TESTPAT_NORMAL (0 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_VER_INC (3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_MASK (3 << 27)
+#define FIMC_REG_CIGCTRL_TESTPAT_SHIFT 27
+#define FIMC_REG_CIGCTRL_INVPOLPCLK (1 << 26)
+#define FIMC_REG_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define FIMC_REG_CIGCTRL_INVPOLHREF (1 << 24)
+#define FIMC_REG_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define FIMC_REG_CIGCTRL_HREF_MASK (1 << 21)
+#define FIMC_REG_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define FIMC_REG_CIGCTRL_IRQ_CLR (1 << 19)
+#define FIMC_REG_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define FIMC_REG_CIGCTRL_SHDW_DISABLE (1 << 12)
+#define FIMC_REG_CIGCTRL_CAM_JPEG (1 << 8)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define FIMC_REG_CIGCTRL_CAMIF_SELWB (1 << 6)
+/* 0 - ITU601; 1 - ITU709 */
+#define FIMC_REG_CIGCTRL_CSC_ITU601_709 (1 << 5)
+#define FIMC_REG_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define FIMC_REG_CIGCTRL_SELCAM_MIPI (1 << 3)
+#define FIMC_REG_CIGCTRL_INVPOLFIELD (1 << 1)
+#define FIMC_REG_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset 2 */
+#define FIMC_REG_CIWDOFST2 0x14
+#define FIMC_REG_CIWDOFST2_HOROFF_MASK (0xfff << 16)
+#define FIMC_REG_CIWDOFST2_VEROFF_MASK (0xfff << 0)
+
+/* Output DMA Y/Cb/Cr plane start addresses */
+#define FIMC_REG_CIOYSA(n) (0x18 + (n) * 4)
+#define FIMC_REG_CIOCBSA(n) (0x28 + (n) * 4)
+#define FIMC_REG_CIOCRSA(n) (0x38 + (n) * 4)
+
+/* Target image format */
+#define FIMC_REG_CITRGFMT 0x48
+#define FIMC_REG_CITRGFMT_INROT90 (1 << 31)
+#define FIMC_REG_CITRGFMT_YCBCR420 (0 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422 (1 << 29)
+#define FIMC_REG_CITRGFMT_YCBCR422_1P (2 << 29)
+#define FIMC_REG_CITRGFMT_RGB (3 << 29)
+#define FIMC_REG_CITRGFMT_FMT_MASK (3 << 29)
+#define FIMC_REG_CITRGFMT_HSIZE_MASK (0xfff << 16)
+#define FIMC_REG_CITRGFMT_FLIP_SHIFT 14
+#define FIMC_REG_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_180 (3 << 14)
+#define FIMC_REG_CITRGFMT_FLIP_MASK (3 << 14)
+#define FIMC_REG_CITRGFMT_OUTROT90 (1 << 13)
+#define FIMC_REG_CITRGFMT_VSIZE_MASK (0xfff << 0)
+
+/* Output DMA control */
+#define FIMC_REG_CIOCTRL 0x4c
+#define FIMC_REG_CIOCTRL_ORDER422_MASK (3 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CRYCBY (0 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_CBYCRY (1 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCRYCB (2 << 0)
+#define FIMC_REG_CIOCTRL_ORDER422_YCBYCR (3 << 0)
+#define FIMC_REG_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define FIMC_REG_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define FIMC_REG_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define FIMC_REG_CIOCTRL_ALPHA_OUT_MASK (0xff << 4)
+#define FIMC_REG_CIOCTRL_RGB16FMT_MASK (3 << 16)
+#define FIMC_REG_CIOCTRL_RGB565 (0 << 16)
+#define FIMC_REG_CIOCTRL_ARGB1555 (1 << 16)
+#define FIMC_REG_CIOCTRL_ARGB4444 (2 << 16)
+#define FIMC_REG_CIOCTRL_ORDER2P_SHIFT 24
+#define FIMC_REG_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define FIMC_REG_CIOCTRL_ORDER422_2P_LSB_CRCB (0 << 24)
+
+/* Pre-scaler control 1 */
+#define FIMC_REG_CISCPRERATIO 0x50
+
+#define FIMC_REG_CISCPREDST 0x54
+
+/* Main scaler control */
+#define FIMC_REG_CISCCTRL 0x58
+#define FIMC_REG_CISCCTRL_SCALERBYPASS (1 << 31)
+#define FIMC_REG_CISCCTRL_SCALEUP_H (1 << 30)
+#define FIMC_REG_CISCCTRL_SCALEUP_V (1 << 29)
+#define FIMC_REG_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define FIMC_REG_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define FIMC_REG_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define FIMC_REG_CISCCTRL_INTERLACE (1 << 25)
+#define FIMC_REG_CISCCTRL_SCALERSTART (1 << 15)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define FIMC_REG_CISCCTRL_INRGB_FMT_MASK (3 << 13)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define FIMC_REG_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
+#define FIMC_REG_CISCCTRL_RGB_EXT (1 << 10)
+#define FIMC_REG_CISCCTRL_ONE2ONE (1 << 9)
+#define FIMC_REG_CISCCTRL_MHRATIO(x) ((x) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO(x) ((x) << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_MASK (0x1ff << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_MASK (0x1ff << 0)
+#define FIMC_REG_CISCCTRL_MHRATIO_EXT(x) (((x) >> 6) << 16)
+#define FIMC_REG_CISCCTRL_MVRATIO_EXT(x) (((x) >> 6) << 0)
+
+/* Target area */
+#define FIMC_REG_CITAREA 0x5c
+#define FIMC_REG_CITAREA_MASK 0x0fffffff
+
+/* General status */
+#define FIMC_REG_CISTATUS 0x64
+#define FIMC_REG_CISTATUS_OVFIY (1 << 31)
+#define FIMC_REG_CISTATUS_OVFICB (1 << 30)
+#define FIMC_REG_CISTATUS_OVFICR (1 << 29)
+#define FIMC_REG_CISTATUS_VSYNC (1 << 28)
+#define FIMC_REG_CISTATUS_FRAMECNT_MASK (3 << 26)
+#define FIMC_REG_CISTATUS_FRAMECNT_SHIFT 26
+#define FIMC_REG_CISTATUS_WINOFF_EN (1 << 25)
+#define FIMC_REG_CISTATUS_IMGCPT_EN (1 << 22)
+#define FIMC_REG_CISTATUS_IMGCPT_SCEN (1 << 21)
+#define FIMC_REG_CISTATUS_VSYNC_A (1 << 20)
+#define FIMC_REG_CISTATUS_VSYNC_B (1 << 19)
+#define FIMC_REG_CISTATUS_OVRLB (1 << 18)
+#define FIMC_REG_CISTATUS_FRAME_END (1 << 17)
+#define FIMC_REG_CISTATUS_LASTCAPT_END (1 << 16)
+#define FIMC_REG_CISTATUS_VVALID_A (1 << 15)
+#define FIMC_REG_CISTATUS_VVALID_B (1 << 14)
+
+/* Indexes to the last and the currently processed buffer. */
+#define FIMC_REG_CISTATUS2 0x68
+
+/* Image capture control */
+#define FIMC_REG_CIIMGCPT 0xc0
+#define FIMC_REG_CIIMGCPT_IMGCPTEN (1 << 31)
+#define FIMC_REG_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define FIMC_REG_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define FIMC_REG_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Frame capture sequence */
+#define FIMC_REG_CICPTSEQ 0xc4
+
+/* Image effect */
+#define FIMC_REG_CIIMGEFF 0xd0
+#define FIMC_REG_CIIMGEFF_IE_ENABLE (1 << 30)
+#define FIMC_REG_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define FIMC_REG_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define FIMC_REG_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define FIMC_REG_CIIMGEFF_FIN_MASK (7 << 26)
+#define FIMC_REG_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | 0xff)
+
+/* Input DMA Y/Cb/Cr plane start address 0/1 */
+#define FIMC_REG_CIIYSA(n) (0xd4 + (n) * 0x70)
+#define FIMC_REG_CIICBSA(n) (0xd8 + (n) * 0x70)
+#define FIMC_REG_CIICRSA(n) (0xdc + (n) * 0x70)
+
+/* Real input DMA image size */
+#define FIMC_REG_CIREAL_ISIZE 0xf8
+#define FIMC_REG_CIREAL_ISIZE_AUTOLOAD_EN (1 << 31)
+#define FIMC_REG_CIREAL_ISIZE_ADDR_CH_DIS (1 << 30)
+
+/* Input DMA control */
+#define FIMC_REG_MSCTRL 0xfc
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT_MASK (0xf << 24)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_MASK (3 << 16)
+#define FIMC_REG_MSCTRL_2P_IN_ORDER_SHIFT 16
+#define FIMC_REG_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define FIMC_REG_MSCTRL_C_INT_IN_MASK (1 << 15)
+#define FIMC_REG_MSCTRL_FLIP_SHIFT 13
+#define FIMC_REG_MSCTRL_FLIP_MASK (3 << 13)
+#define FIMC_REG_MSCTRL_FLIP_NORMAL (0 << 13)
+#define FIMC_REG_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define FIMC_REG_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define FIMC_REG_MSCTRL_FLIP_180 (3 << 13)
+#define FIMC_REG_MSCTRL_FIFO_CTRL_FULL (1 << 12)
+#define FIMC_REG_MSCTRL_ORDER422_SHIFT 4
+#define FIMC_REG_MSCTRL_ORDER422_YCBYCR (0 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_CBYCRY (1 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_YCRYCB (2 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_CRYCBY (3 << 4)
+#define FIMC_REG_MSCTRL_ORDER422_MASK (3 << 4)
+#define FIMC_REG_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MEMORY (1 << 3)
+#define FIMC_REG_MSCTRL_INPUT_MASK (1 << 3)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_YCBCR422_1P (2 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_RGB (3 << 1)
+#define FIMC_REG_MSCTRL_INFORMAT_MASK (3 << 1)
+#define FIMC_REG_MSCTRL_ENVID (1 << 0)
+#define FIMC_REG_MSCTRL_IN_BURST_COUNT(x) ((x) << 24)
+
+/* Output DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIOYOFF 0x168
+#define FIMC_REG_CIOCBOFF 0x16c
+#define FIMC_REG_CIOCROFF 0x170
+
+/* Input DMA Y/Cb/Cr offset */
+#define FIMC_REG_CIIYOFF 0x174
+#define FIMC_REG_CIICBOFF 0x178
+#define FIMC_REG_CIICROFF 0x17c
+
+/* Input DMA original image size */
+#define FIMC_REG_ORGISIZE 0x180
+
+/* Output DMA original image size */
+#define FIMC_REG_ORGOSIZE 0x184
+
+/* Real output DMA image size (extension register) */
+#define FIMC_REG_CIEXTEN 0x188
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT(x) (((x) & 0x3f) << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT(x) ((x) & 0x3f)
+#define FIMC_REG_CIEXTEN_MHRATIO_EXT_MASK (0x3f << 10)
+#define FIMC_REG_CIEXTEN_MVRATIO_EXT_MASK 0x3f
+
+#define FIMC_REG_CIDMAPARAM 0x18c
+#define FIMC_REG_CIDMAPARAM_R_LINEAR (0 << 29)
+#define FIMC_REG_CIDMAPARAM_R_64X32 (3 << 29)
+#define FIMC_REG_CIDMAPARAM_W_LINEAR (0 << 13)
+#define FIMC_REG_CIDMAPARAM_W_64X32 (3 << 13)
+#define FIMC_REG_CIDMAPARAM_TILE_MASK ((3 << 29) | (3 << 13))
+
+/* MIPI CSI image format */
+#define FIMC_REG_CSIIMGFMT 0x194
+#define FIMC_REG_CSIIMGFMT_YCBCR422_8BIT 0x1e
+#define FIMC_REG_CSIIMGFMT_RAW8 0x2a
+#define FIMC_REG_CSIIMGFMT_RAW10 0x2b
+#define FIMC_REG_CSIIMGFMT_RAW12 0x2c
+/* User defined formats. x = 0...16. */
+#define FIMC_REG_CSIIMGFMT_USER(x) (0x30 + x - 1)
+
+/* Output frame buffer sequence mask */
+#define FIMC_REG_CIFCNTSEQ 0x1fc
+
+/*
+ * Function declarations
+ */
+void fimc_hw_reset(struct fimc_dev *fimc);
+void fimc_hw_set_rotation(struct fimc_ctx *ctx);
+void fimc_hw_set_target_format(struct fimc_ctx *ctx);
+void fimc_hw_set_out_dma(struct fimc_ctx *ctx);
+void fimc_hw_en_lastirq(struct fimc_dev *fimc, int enable);
+void fimc_hw_en_irq(struct fimc_dev *fimc, int enable);
+void fimc_hw_set_prescaler(struct fimc_ctx *ctx);
+void fimc_hw_set_mainscaler(struct fimc_ctx *ctx);
+void fimc_hw_en_capture(struct fimc_ctx *ctx);
+void fimc_hw_set_effect(struct fimc_ctx *ctx);
+void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx);
+void fimc_hw_set_in_dma(struct fimc_ctx *ctx);
+void fimc_hw_set_input_path(struct fimc_ctx *ctx);
+void fimc_hw_set_output_path(struct fimc_ctx *ctx);
+void fimc_hw_set_input_addr(struct fimc_dev *fimc, struct fimc_addr *paddr);
+void fimc_hw_set_output_addr(struct fimc_dev *fimc, struct fimc_addr *paddr,
+ int index);
+int fimc_hw_set_camera_source(struct fimc_dev *fimc,
+ struct s5p_fimc_isp_info *cam);
+void fimc_hw_set_camera_offset(struct fimc_dev *fimc, struct fimc_frame *f);
+int fimc_hw_set_camera_polarity(struct fimc_dev *fimc,
+ struct s5p_fimc_isp_info *cam);
+int fimc_hw_set_camera_type(struct fimc_dev *fimc,
+ struct s5p_fimc_isp_info *cam);
+void fimc_hw_clear_irq(struct fimc_dev *dev);
+void fimc_hw_enable_scaler(struct fimc_dev *dev, bool on);
+void fimc_hw_activate_input_dma(struct fimc_dev *dev, bool on);
+void fimc_hw_dis_capture(struct fimc_dev *dev);
+u32 fimc_hw_get_frame_index(struct fimc_dev *dev);
+void fimc_activate_capture(struct fimc_ctx *ctx);
+void fimc_deactivate_capture(struct fimc_dev *fimc);
+
+/**
+ * fimc_hw_set_dma_seq - configure output DMA buffer sequence
+ * @mask: bitmask for the DMA output buffer registers; a cleared bit skips a buffer
+ * This function masks the output DMA ring buffers; it selects which of the
+ * 32 available output buffer address registers will be used by the DMA
+ * engine.
+ */
+static inline void fimc_hw_set_dma_seq(struct fimc_dev *dev, u32 mask)
+{
+ writel(mask, dev->regs + FIMC_REG_CIFCNTSEQ);
+}
+
+#endif /* FIMC_REG_H_ */
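
Throughout this patch the removed S5P_*_HSIZE()/S5P_*_VSIZE() and S5P_CIO_OFFS_HOR()/S5P_CIO_OFFS_VER() helpers are replaced with open-coded packing of two fields into one register word, one field in bits 31..16 and the other in bits 15..0, in whichever order the given register expects. A standalone sketch of that convention, not part of this patch; pack_h16_l16() is a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the packing used by the patch: the first
 * argument lands in bits 31..16, the second in bits 15..0.  The ORGOSIZE
 * write in fimc_hw_set_out_dma_size() corresponds to
 * pack_h16_l16(frame->f_height, frame->f_width).
 */
static uint32_t pack_h16_l16(uint32_t hi, uint32_t lo)
{
	return (hi << 16) | (lo & 0xffff);
}

int main(void)
{
	uint32_t f_height = 480, f_width = 640;

	printf("ORGOSIZE = 0x%08x\n", (unsigned)pack_h16_l16(f_height, f_width));
	return 0;
}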
diff --git a/drivers/media/video/s5p-fimc/mipi-csis.c b/drivers/media/video/s5p-fimc/mipi-csis.c
index f44f690397f7..2f73d9e3d0b7 100644
--- a/drivers/media/video/s5p-fimc/mipi-csis.c
+++ b/drivers/media/video/s5p-fimc/mipi-csis.c
@@ -127,20 +127,24 @@ struct csis_state {
* multiple of 2^pix_width_alignment
* @code: corresponding media bus code
* @fmt_reg: S5PCSIS_CONFIG register value
+ * @data_alignment: MIPI-CSI data alignment in bits
*/
struct csis_pix_format {
unsigned int pix_width_alignment;
enum v4l2_mbus_pixelcode code;
u32 fmt_reg;
+ u8 data_alignment;
};
static const struct csis_pix_format s5pcsis_formats[] = {
{
.code = V4L2_MBUS_FMT_VYUY8_2X8,
.fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 32,
}, {
.code = V4L2_MBUS_FMT_JPEG_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
+ .data_alignment = 32,
},
};
@@ -239,7 +243,7 @@ static void s5pcsis_set_params(struct csis_state *state)
s5pcsis_set_hsync_settle(state, pdata->hs_settle);
val = s5pcsis_read(state, S5PCSIS_CTRL);
- if (pdata->alignment == 32)
+ if (state->csis_fmt->data_alignment == 32)
val |= S5PCSIS_CTRL_ALIGN_32BIT;
else /* 24-bits */
val &= ~S5PCSIS_CTRL_ALIGN_32BIT;
@@ -711,19 +715,8 @@ static struct platform_driver s5pcsis_driver = {
},
};
-static int __init s5pcsis_init(void)
-{
- return platform_driver_probe(&s5pcsis_driver, s5pcsis_probe);
-}
-
-static void __exit s5pcsis_exit(void)
-{
- platform_driver_unregister(&s5pcsis_driver);
-}
-
-module_init(s5pcsis_init);
-module_exit(s5pcsis_exit);
+module_platform_driver(s5pcsis_driver);
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
-MODULE_DESCRIPTION("S5P/EXYNOS4 MIPI CSI receiver driver");
+MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC MIPI-CSI2 receiver driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/s5p-fimc/regs-fimc.h b/drivers/media/video/s5p-fimc/regs-fimc.h
deleted file mode 100644
index c7a5bc51d571..000000000000
--- a/drivers/media/video/s5p-fimc/regs-fimc.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Register definition file for Samsung Camera Interface (FIMC) driver
- *
- * Copyright (c) 2010 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef REGS_FIMC_H_
-#define REGS_FIMC_H_
-
-/* Input source format */
-#define S5P_CISRCFMT 0x00
-#define S5P_CISRCFMT_ITU601_8BIT (1 << 31)
-#define S5P_CISRCFMT_ITU601_16BIT (1 << 29)
-#define S5P_CISRCFMT_ORDER422_YCBYCR (0 << 14)
-#define S5P_CISRCFMT_ORDER422_YCRYCB (1 << 14)
-#define S5P_CISRCFMT_ORDER422_CBYCRY (2 << 14)
-#define S5P_CISRCFMT_ORDER422_CRYCBY (3 << 14)
-#define S5P_CISRCFMT_HSIZE(x) ((x) << 16)
-#define S5P_CISRCFMT_VSIZE(x) ((x) << 0)
-
-/* Window offset */
-#define S5P_CIWDOFST 0x04
-#define S5P_CIWDOFST_OFF_EN (1 << 31)
-#define S5P_CIWDOFST_CLROVFIY (1 << 30)
-#define S5P_CIWDOFST_CLROVRLB (1 << 29)
-#define S5P_CIWDOFST_HOROFF_MASK (0x7ff << 16)
-#define S5P_CIWDOFST_CLROVFICB (1 << 15)
-#define S5P_CIWDOFST_CLROVFICR (1 << 14)
-#define S5P_CIWDOFST_HOROFF(x) ((x) << 16)
-#define S5P_CIWDOFST_VEROFF(x) ((x) << 0)
-#define S5P_CIWDOFST_VEROFF_MASK (0xfff << 0)
-
-/* Global control */
-#define S5P_CIGCTRL 0x08
-#define S5P_CIGCTRL_SWRST (1 << 31)
-#define S5P_CIGCTRL_CAMRST_A (1 << 30)
-#define S5P_CIGCTRL_SELCAM_ITU_A (1 << 29)
-#define S5P_CIGCTRL_TESTPAT_NORMAL (0 << 27)
-#define S5P_CIGCTRL_TESTPAT_COLOR_BAR (1 << 27)
-#define S5P_CIGCTRL_TESTPAT_HOR_INC (2 << 27)
-#define S5P_CIGCTRL_TESTPAT_VER_INC (3 << 27)
-#define S5P_CIGCTRL_TESTPAT_MASK (3 << 27)
-#define S5P_CIGCTRL_TESTPAT_SHIFT (27)
-#define S5P_CIGCTRL_INVPOLPCLK (1 << 26)
-#define S5P_CIGCTRL_INVPOLVSYNC (1 << 25)
-#define S5P_CIGCTRL_INVPOLHREF (1 << 24)
-#define S5P_CIGCTRL_IRQ_OVFEN (1 << 22)
-#define S5P_CIGCTRL_HREF_MASK (1 << 21)
-#define S5P_CIGCTRL_IRQ_LEVEL (1 << 20)
-#define S5P_CIGCTRL_IRQ_CLR (1 << 19)
-#define S5P_CIGCTRL_IRQ_ENABLE (1 << 16)
-#define S5P_CIGCTRL_SHDW_DISABLE (1 << 12)
-#define S5P_CIGCTRL_CAM_JPEG (1 << 8)
-#define S5P_CIGCTRL_SELCAM_MIPI_A (1 << 7)
-#define S5P_CIGCTRL_CAMIF_SELWB (1 << 6)
-/* 0 - ITU601; 1 - ITU709 */
-#define S5P_CIGCTRL_CSC_ITU601_709 (1 << 5)
-#define S5P_CIGCTRL_INVPOLHSYNC (1 << 4)
-#define S5P_CIGCTRL_SELCAM_MIPI (1 << 3)
-#define S5P_CIGCTRL_INVPOLFIELD (1 << 1)
-#define S5P_CIGCTRL_INTERLACE (1 << 0)
-
-/* Window offset 2 */
-#define S5P_CIWDOFST2 0x14
-#define S5P_CIWDOFST2_HOROFF_MASK (0xfff << 16)
-#define S5P_CIWDOFST2_VEROFF_MASK (0xfff << 0)
-#define S5P_CIWDOFST2_HOROFF(x) ((x) << 16)
-#define S5P_CIWDOFST2_VEROFF(x) ((x) << 0)
-
-/* Output DMA Y/Cb/Cr plane start addresses */
-#define S5P_CIOYSA(n) (0x18 + (n) * 4)
-#define S5P_CIOCBSA(n) (0x28 + (n) * 4)
-#define S5P_CIOCRSA(n) (0x38 + (n) * 4)
-
-/* Target image format */
-#define S5P_CITRGFMT 0x48
-#define S5P_CITRGFMT_INROT90 (1 << 31)
-#define S5P_CITRGFMT_YCBCR420 (0 << 29)
-#define S5P_CITRGFMT_YCBCR422 (1 << 29)
-#define S5P_CITRGFMT_YCBCR422_1P (2 << 29)
-#define S5P_CITRGFMT_RGB (3 << 29)
-#define S5P_CITRGFMT_FMT_MASK (3 << 29)
-#define S5P_CITRGFMT_HSIZE_MASK (0xfff << 16)
-#define S5P_CITRGFMT_FLIP_SHIFT (14)
-#define S5P_CITRGFMT_FLIP_NORMAL (0 << 14)
-#define S5P_CITRGFMT_FLIP_X_MIRROR (1 << 14)
-#define S5P_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
-#define S5P_CITRGFMT_FLIP_180 (3 << 14)
-#define S5P_CITRGFMT_FLIP_MASK (3 << 14)
-#define S5P_CITRGFMT_OUTROT90 (1 << 13)
-#define S5P_CITRGFMT_VSIZE_MASK (0xfff << 0)
-#define S5P_CITRGFMT_HSIZE(x) ((x) << 16)
-#define S5P_CITRGFMT_VSIZE(x) ((x) << 0)
-
-/* Output DMA control */
-#define S5P_CIOCTRL 0x4c
-#define S5P_CIOCTRL_ORDER422_MASK (3 << 0)
-#define S5P_CIOCTRL_ORDER422_CRYCBY (0 << 0)
-#define S5P_CIOCTRL_ORDER422_CBYCRY (1 << 0)
-#define S5P_CIOCTRL_ORDER422_YCRYCB (2 << 0)
-#define S5P_CIOCTRL_ORDER422_YCBYCR (3 << 0)
-#define S5P_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
-#define S5P_CIOCTRL_YCBCR_3PLANE (0 << 3)
-#define S5P_CIOCTRL_YCBCR_2PLANE (1 << 3)
-#define S5P_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
-#define S5P_CIOCTRL_ALPHA_OUT_MASK (0xff << 4)
-#define S5P_CIOCTRL_RGB16FMT_MASK (3 << 16)
-#define S5P_CIOCTRL_RGB565 (0 << 16)
-#define S5P_CIOCTRL_ARGB1555 (1 << 16)
-#define S5P_CIOCTRL_ARGB4444 (2 << 16)
-#define S5P_CIOCTRL_ORDER2P_SHIFT (24)
-#define S5P_CIOCTRL_ORDER2P_MASK (3 << 24)
-#define S5P_CIOCTRL_ORDER422_2P_LSB_CRCB (0 << 24)
-
-/* Pre-scaler control 1 */
-#define S5P_CISCPRERATIO 0x50
-#define S5P_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
-#define S5P_CISCPRERATIO_HOR(x) ((x) << 16)
-#define S5P_CISCPRERATIO_VER(x) ((x) << 0)
-
-#define S5P_CISCPREDST 0x54
-#define S5P_CISCPREDST_WIDTH(x) ((x) << 16)
-#define S5P_CISCPREDST_HEIGHT(x) ((x) << 0)
-
-/* Main scaler control */
-#define S5P_CISCCTRL 0x58
-#define S5P_CISCCTRL_SCALERBYPASS (1 << 31)
-#define S5P_CISCCTRL_SCALEUP_H (1 << 30)
-#define S5P_CISCCTRL_SCALEUP_V (1 << 29)
-#define S5P_CISCCTRL_CSCR2Y_WIDE (1 << 28)
-#define S5P_CISCCTRL_CSCY2R_WIDE (1 << 27)
-#define S5P_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
-#define S5P_CISCCTRL_INTERLACE (1 << 25)
-#define S5P_CISCCTRL_SCALERSTART (1 << 15)
-#define S5P_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
-#define S5P_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
-#define S5P_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
-#define S5P_CISCCTRL_INRGB_FMT_MASK (3 << 13)
-#define S5P_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
-#define S5P_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
-#define S5P_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
-#define S5P_CISCCTRL_OUTRGB_FMT_MASK (3 << 11)
-#define S5P_CISCCTRL_RGB_EXT (1 << 10)
-#define S5P_CISCCTRL_ONE2ONE (1 << 9)
-#define S5P_CISCCTRL_MHRATIO(x) ((x) << 16)
-#define S5P_CISCCTRL_MVRATIO(x) ((x) << 0)
-#define S5P_CISCCTRL_MHRATIO_MASK (0x1ff << 16)
-#define S5P_CISCCTRL_MVRATIO_MASK (0x1ff << 0)
-#define S5P_CISCCTRL_MHRATIO_EXT(x) (((x) >> 6) << 16)
-#define S5P_CISCCTRL_MVRATIO_EXT(x) (((x) >> 6) << 0)
-
-/* Target area */
-#define S5P_CITAREA 0x5c
-#define S5P_CITAREA_MASK 0x0fffffff
-
-/* General status */
-#define S5P_CISTATUS 0x64
-#define S5P_CISTATUS_OVFIY (1 << 31)
-#define S5P_CISTATUS_OVFICB (1 << 30)
-#define S5P_CISTATUS_OVFICR (1 << 29)
-#define S5P_CISTATUS_VSYNC (1 << 28)
-#define S5P_CISTATUS_FRAMECNT_MASK (3 << 26)
-#define S5P_CISTATUS_FRAMECNT_SHIFT 26
-#define S5P_CISTATUS_WINOFF_EN (1 << 25)
-#define S5P_CISTATUS_IMGCPT_EN (1 << 22)
-#define S5P_CISTATUS_IMGCPT_SCEN (1 << 21)
-#define S5P_CISTATUS_VSYNC_A (1 << 20)
-#define S5P_CISTATUS_VSYNC_B (1 << 19)
-#define S5P_CISTATUS_OVRLB (1 << 18)
-#define S5P_CISTATUS_FRAME_END (1 << 17)
-#define S5P_CISTATUS_LASTCAPT_END (1 << 16)
-#define S5P_CISTATUS_VVALID_A (1 << 15)
-#define S5P_CISTATUS_VVALID_B (1 << 14)
-
-/* Indexes to the last and the currently processed buffer. */
-#define S5P_CISTATUS2 0x68
-
-/* Image capture control */
-#define S5P_CIIMGCPT 0xc0
-#define S5P_CIIMGCPT_IMGCPTEN (1 << 31)
-#define S5P_CIIMGCPT_IMGCPTEN_SC (1 << 30)
-#define S5P_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
-#define S5P_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
-
-/* Frame capture sequence */
-#define S5P_CICPTSEQ 0xc4
-
-/* Image effect */
-#define S5P_CIIMGEFF 0xd0
-#define S5P_CIIMGEFF_IE_ENABLE (1 << 30)
-#define S5P_CIIMGEFF_IE_SC_BEFORE (0 << 29)
-#define S5P_CIIMGEFF_IE_SC_AFTER (1 << 29)
-#define S5P_CIIMGEFF_FIN_BYPASS (0 << 26)
-#define S5P_CIIMGEFF_FIN_ARBITRARY (1 << 26)
-#define S5P_CIIMGEFF_FIN_NEGATIVE (2 << 26)
-#define S5P_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
-#define S5P_CIIMGEFF_FIN_EMBOSSING (4 << 26)
-#define S5P_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
-#define S5P_CIIMGEFF_FIN_MASK (7 << 26)
-#define S5P_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
-#define S5P_CIIMGEFF_PAT_CB(x) ((x) << 13)
-#define S5P_CIIMGEFF_PAT_CR(x) ((x) << 0)
-
-/* Input DMA Y/Cb/Cr plane start address 0/1 */
-#define S5P_CIIYSA(n) (0xd4 + (n) * 0x70)
-#define S5P_CIICBSA(n) (0xd8 + (n) * 0x70)
-#define S5P_CIICRSA(n) (0xdc + (n) * 0x70)
-
-/* Real input DMA image size */
-#define S5P_CIREAL_ISIZE 0xf8
-#define S5P_CIREAL_ISIZE_AUTOLOAD_EN (1 << 31)
-#define S5P_CIREAL_ISIZE_ADDR_CH_DIS (1 << 30)
-#define S5P_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
-#define S5P_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
-
-
-/* Input DMA control */
-#define S5P_MSCTRL 0xfc
-#define S5P_MSCTRL_IN_BURST_COUNT_MASK (0xF << 24)
-#define S5P_MSCTRL_2P_IN_ORDER_MASK (3 << 16)
-#define S5P_MSCTRL_2P_IN_ORDER_SHIFT 16
-#define S5P_MSCTRL_C_INT_IN_3PLANE (0 << 15)
-#define S5P_MSCTRL_C_INT_IN_2PLANE (1 << 15)
-#define S5P_MSCTRL_C_INT_IN_MASK (1 << 15)
-#define S5P_MSCTRL_FLIP_SHIFT 13
-#define S5P_MSCTRL_FLIP_MASK (3 << 13)
-#define S5P_MSCTRL_FLIP_NORMAL (0 << 13)
-#define S5P_MSCTRL_FLIP_X_MIRROR (1 << 13)
-#define S5P_MSCTRL_FLIP_Y_MIRROR (2 << 13)
-#define S5P_MSCTRL_FLIP_180 (3 << 13)
-#define S5P_MSCTRL_FIFO_CTRL_FULL (1 << 12)
-#define S5P_MSCTRL_ORDER422_SHIFT 4
-#define S5P_MSCTRL_ORDER422_YCBYCR (0 << 4)
-#define S5P_MSCTRL_ORDER422_CBYCRY (1 << 4)
-#define S5P_MSCTRL_ORDER422_YCRYCB (2 << 4)
-#define S5P_MSCTRL_ORDER422_CRYCBY (3 << 4)
-#define S5P_MSCTRL_ORDER422_MASK (3 << 4)
-#define S5P_MSCTRL_INPUT_EXTCAM (0 << 3)
-#define S5P_MSCTRL_INPUT_MEMORY (1 << 3)
-#define S5P_MSCTRL_INPUT_MASK (1 << 3)
-#define S5P_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
-#define S5P_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
-#define S5P_MSCTRL_INFORMAT_YCBCR422_1P (2 << 1)
-#define S5P_MSCTRL_INFORMAT_RGB (3 << 1)
-#define S5P_MSCTRL_INFORMAT_MASK (3 << 1)
-#define S5P_MSCTRL_ENVID (1 << 0)
-#define S5P_MSCTRL_IN_BURST_COUNT(x) ((x) << 24)
-
-/* Output DMA Y/Cb/Cr offset */
-#define S5P_CIOYOFF 0x168
-#define S5P_CIOCBOFF 0x16c
-#define S5P_CIOCROFF 0x170
-
-/* Input DMA Y/Cb/Cr offset */
-#define S5P_CIIYOFF 0x174
-#define S5P_CIICBOFF 0x178
-#define S5P_CIICROFF 0x17c
-
-#define S5P_CIO_OFFS_VER(x) ((x) << 16)
-#define S5P_CIO_OFFS_HOR(x) ((x) << 0)
-
-/* Input DMA original image size */
-#define S5P_ORGISIZE 0x180
-
-/* Output DMA original image size */
-#define S5P_ORGOSIZE 0x184
-
-#define S5P_ORIG_SIZE_VER(x) ((x) << 16)
-#define S5P_ORIG_SIZE_HOR(x) ((x) << 0)
-
-/* Real output DMA image size (extension register) */
-#define S5P_CIEXTEN 0x188
-#define S5P_CIEXTEN_MHRATIO_EXT(x) (((x) & 0x3f) << 10)
-#define S5P_CIEXTEN_MVRATIO_EXT(x) ((x) & 0x3f)
-#define S5P_CIEXTEN_MHRATIO_EXT_MASK (0x3f << 10)
-#define S5P_CIEXTEN_MVRATIO_EXT_MASK 0x3f
-
-#define S5P_CIDMAPARAM 0x18c
-#define S5P_CIDMAPARAM_R_LINEAR (0 << 29)
-#define S5P_CIDMAPARAM_R_64X32 (3 << 29)
-#define S5P_CIDMAPARAM_W_LINEAR (0 << 13)
-#define S5P_CIDMAPARAM_W_64X32 (3 << 13)
-#define S5P_CIDMAPARAM_TILE_MASK ((3 << 29) | (3 << 13))
-
-/* MIPI CSI image format */
-#define S5P_CSIIMGFMT 0x194
-#define S5P_CSIIMGFMT_YCBCR422_8BIT 0x1e
-#define S5P_CSIIMGFMT_RAW8 0x2a
-#define S5P_CSIIMGFMT_RAW10 0x2b
-#define S5P_CSIIMGFMT_RAW12 0x2c
-/* User defined formats. x = 0...16. */
-#define S5P_CSIIMGFMT_USER(x) (0x30 + x - 1)
-
-/* Output frame buffer sequence mask */
-#define S5P_CIFCNTSEQ 0x1FC
-
-#endif /* REGS_FIMC_H_ */
diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c
index 789de74014e5..7c98ee7377ee 100644
--- a/drivers/media/video/s5p-g2d/g2d.c
+++ b/drivers/media/video/s5p-g2d/g2d.c
@@ -65,7 +65,7 @@ static struct g2d_fmt formats[] = {
};
#define NUM_FORMATS ARRAY_SIZE(formats)
-struct g2d_frame def_frame = {
+static struct g2d_frame def_frame = {
.width = DEFAULT_WIDTH,
.height = DEFAULT_HEIGHT,
.c_width = DEFAULT_WIDTH,
@@ -77,7 +77,7 @@ struct g2d_frame def_frame = {
.bottom = DEFAULT_HEIGHT,
};
-struct g2d_fmt *find_fmt(struct v4l2_format *f)
+static struct g2d_fmt *find_fmt(struct v4l2_format *f)
{
unsigned int i;
for (i = 0; i < NUM_FORMATS; i++) {
@@ -202,7 +202,7 @@ static const struct v4l2_ctrl_ops g2d_ctrl_ops = {
.s_ctrl = g2d_s_ctrl,
};
-int g2d_setup_ctrls(struct g2d_ctx *ctx)
+static int g2d_setup_ctrls(struct g2d_ctx *ctx)
{
struct g2d_dev *dev = ctx->dev;
@@ -546,11 +546,11 @@ static void job_abort(void *prv)
struct g2d_dev *dev = ctx->dev;
int ret;
- if (dev->curr == 0) /* No job currently running */
+ if (dev->curr == NULL) /* No job currently running */
return;
ret = wait_event_timeout(dev->irq_queue,
- dev->curr == 0,
+ dev->curr == NULL,
msecs_to_jiffies(G2D_TIMEOUT));
}
@@ -599,19 +599,19 @@ static irqreturn_t g2d_isr(int irq, void *prv)
g2d_clear_int(dev);
clk_disable(dev->gate);
- BUG_ON(ctx == 0);
+ BUG_ON(ctx == NULL);
src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- BUG_ON(src == 0);
- BUG_ON(dst == 0);
+ BUG_ON(src == NULL);
+ BUG_ON(dst == NULL);
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
- dev->curr = 0;
+ dev->curr = NULL;
wake_up(&dev->irq_queue);
return IRQ_HANDLED;
}
@@ -674,42 +674,27 @@ static int g2d_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
+
spin_lock_init(&dev->ctrl_lock);
mutex_init(&dev->mutex);
atomic_set(&dev->num_inst, 0);
init_waitqueue_head(&dev->irq_queue);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to find registers\n");
- ret = -ENOENT;
- goto free_dev;
- }
- dev->res_regs = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
-
- if (!dev->res_regs) {
- dev_err(&pdev->dev, "failed to obtain register region\n");
- ret = -ENOENT;
- goto free_dev;
- }
-
- dev->regs = ioremap(res->start, resource_size(res));
- if (!dev->regs) {
- dev_err(&pdev->dev, "failed to map registers\n");
- ret = -ENOENT;
- goto rel_res_regs;
+ dev->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (dev->regs == NULL) {
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -ENOENT;
}
dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
if (IS_ERR_OR_NULL(dev->clk)) {
dev_err(&pdev->dev, "failed to get g2d clock\n");
- ret = -ENXIO;
- goto unmap_regs;
+ return -ENXIO;
}
ret = clk_prepare(dev->clk);
@@ -740,7 +725,8 @@ static int g2d_probe(struct platform_device *pdev)
dev->irq = res->start;
- ret = request_irq(dev->irq, g2d_isr, 0, pdev->name, dev);
+ ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr,
+ 0, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "failed to install IRQ\n");
goto put_clk_gate;
@@ -749,7 +735,7 @@ static int g2d_probe(struct platform_device *pdev)
dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(dev->alloc_ctx)) {
ret = PTR_ERR(dev->alloc_ctx);
- goto rel_irq;
+ goto unprep_clk_gate;
}
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
@@ -762,6 +748,10 @@ static int g2d_probe(struct platform_device *pdev)
goto unreg_v4l2_dev;
}
*vfd = g2d_videodev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->lock = &dev->mutex;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
@@ -793,8 +783,6 @@ unreg_v4l2_dev:
v4l2_device_unregister(&dev->v4l2_dev);
alloc_ctx_cleanup:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
-rel_irq:
- free_irq(dev->irq, dev);
unprep_clk_gate:
clk_unprepare(dev->gate);
put_clk_gate:
@@ -803,12 +791,7 @@ unprep_clk:
clk_unprepare(dev->clk);
put_clk:
clk_put(dev->clk);
-unmap_regs:
- iounmap(dev->regs);
-rel_res_regs:
- release_resource(dev->res_regs);
-free_dev:
- kfree(dev);
+
return ret;
}
@@ -821,14 +804,10 @@ static int g2d_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
- free_irq(dev->irq, dev);
clk_unprepare(dev->gate);
clk_put(dev->gate);
clk_unprepare(dev->clk);
clk_put(dev->clk);
- iounmap(dev->regs);
- release_resource(dev->res_regs);
- kfree(dev);
return 0;
}
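
Editor's note: the g2d, jpeg and mfc conversions in this patch all follow the same managed-resource pattern, which is why the rel_irq/unmap_regs/free_dev error labels disappear — the allocation, the register mapping and the IRQ are tied to the struct device and released automatically on probe failure or unbind. Below is a minimal sketch of that pattern, not taken from any of these drivers; struct foo_dev, foo_isr and the "foo" names are illustrative, and devm_request_and_ioremap() is the helper of this kernel generation (later kernels replace it with devm_ioremap_resource()).

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct foo_dev {
	void __iomem *regs;
	int irq;
};

static irqreturn_t foo_isr(int irq, void *priv)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *foo;
	struct resource *res;
	int ret;

	/* freed automatically when probe fails or the device is unbound */
	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	foo->regs = devm_request_and_ioremap(&pdev->dev, res);
	if (!foo->regs)
		return -ENOENT;		/* nothing to unwind by hand */

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	foo->irq = ret;

	ret = devm_request_irq(&pdev->dev, foo->irq, foo_isr, 0,
			       dev_name(&pdev->dev), foo);
	if (ret)
		return ret;		/* mapping and memory released for us */

	platform_set_drvdata(pdev, foo);
	return 0;
}

The matching remove() then only undoes what devm does not cover (clocks, video device registration), which is exactly what the shortened g2d_remove() body above is left with.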
diff --git a/drivers/media/video/s5p-g2d/g2d.h b/drivers/media/video/s5p-g2d/g2d.h
index 1b82065aeaef..6b765b0216c5 100644
--- a/drivers/media/video/s5p-g2d/g2d.h
+++ b/drivers/media/video/s5p-g2d/g2d.h
@@ -23,7 +23,6 @@ struct g2d_dev {
spinlock_t ctrl_lock;
atomic_t num_inst;
struct vb2_alloc_ctx *alloc_ctx;
- struct resource *res_regs;
void __iomem *regs;
struct clk *clk;
struct clk *gate;
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c
index 5a49c307f9c1..28b5225d94f5 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -813,7 +813,7 @@ static int s5p_jpeg_streamoff(struct file *file, void *priv,
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
-int s5p_jpeg_g_selection(struct file *file, void *priv,
+static int s5p_jpeg_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
@@ -1290,7 +1290,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
int ret;
/* JPEG IP abstraction struct */
- jpeg = kzalloc(sizeof(struct s5p_jpeg), GFP_KERNEL);
+ jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
if (!jpeg)
return -ENOMEM;
@@ -1300,43 +1300,25 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
/* memory-mapped registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot find IO resource\n");
- ret = -ENOENT;
- goto jpeg_alloc_rollback;
- }
-
- jpeg->ioarea = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!jpeg->ioarea) {
- dev_err(&pdev->dev, "cannot request IO\n");
- ret = -ENXIO;
- goto jpeg_alloc_rollback;
- }
- jpeg->regs = ioremap(res->start, resource_size(res));
- if (!jpeg->regs) {
- dev_err(&pdev->dev, "cannot map IO\n");
- ret = -ENXIO;
- goto mem_region_rollback;
+ jpeg->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (jpeg->regs == NULL) {
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -ENOENT;
}
- dev_dbg(&pdev->dev, "registers %p (%p, %p)\n",
- jpeg->regs, jpeg->ioarea, res);
-
/* interrupt service routine registration */
jpeg->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
- goto ioremap_rollback;
+ return ret;
}
- ret = request_irq(jpeg->irq, s5p_jpeg_irq, 0,
- dev_name(&pdev->dev), jpeg);
-
+ ret = devm_request_irq(&pdev->dev, jpeg->irq, s5p_jpeg_irq, 0,
+ dev_name(&pdev->dev), jpeg);
if (ret) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
- goto ioremap_rollback;
+ return ret;
}
/* clocks */
@@ -1344,7 +1326,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
if (IS_ERR(jpeg->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(jpeg->clk);
- goto request_irq_rollback;
+ return ret;
}
dev_dbg(&pdev->dev, "clock source %p\n", jpeg->clk);
clk_enable(jpeg->clk);
@@ -1386,6 +1368,10 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_encoder->release = video_device_release;
jpeg->vfd_encoder->lock = &jpeg->lock;
jpeg->vfd_encoder->v4l2_dev = &jpeg->v4l2_dev;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &jpeg->vfd_encoder->flags);
ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -1413,6 +1399,10 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->vfd_decoder->release = video_device_release;
jpeg->vfd_decoder->lock = &jpeg->lock;
jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
+ /* Locking in file operations other than ioctl should be done by the driver,
+ not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &jpeg->vfd_decoder->flags);
ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
if (ret) {
@@ -1456,18 +1446,6 @@ clk_get_rollback:
clk_disable(jpeg->clk);
clk_put(jpeg->clk);
-request_irq_rollback:
- free_irq(jpeg->irq, jpeg);
-
-ioremap_rollback:
- iounmap(jpeg->regs);
-
-mem_region_rollback:
- release_resource(jpeg->ioarea);
- release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea));
-
-jpeg_alloc_rollback:
- kfree(jpeg);
return ret;
}
@@ -1488,14 +1466,6 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
clk_disable(jpeg->clk);
clk_put(jpeg->clk);
- free_irq(jpeg->irq, jpeg);
-
- iounmap(jpeg->regs);
-
- release_resource(jpeg->ioarea);
- release_mem_region(jpeg->ioarea->start, resource_size(jpeg->ioarea));
- kfree(jpeg);
-
return 0;
}
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.h b/drivers/media/video/s5p-jpeg/jpeg-core.h
index 38d7367f7a6d..9d0cd2b76f61 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.h
@@ -54,7 +54,6 @@
* @vfd_encoder: video device node for encoder mem2mem mode
* @vfd_decoder: video device node for decoder mem2mem mode
* @m2m_dev: v4l2 mem2mem device data
- * @ioarea: JPEG IP memory region
* @regs: JPEG IP registers mapping
* @irq: JPEG IP irq
* @clk: JPEG IP clock
@@ -70,7 +69,6 @@ struct s5p_jpeg {
struct video_device *vfd_decoder;
struct v4l2_m2m_dev *m2m_dev;
- struct resource *ioarea;
void __iomem *regs;
unsigned int irq;
struct clk *clk;
diff --git a/drivers/media/video/s5p-mfc/regs-mfc.h b/drivers/media/video/s5p-mfc/regs-mfc.h
index 053a8a872fd7..a19bece41ba9 100644
--- a/drivers/media/video/s5p-mfc/regs-mfc.h
+++ b/drivers/media/video/s5p-mfc/regs-mfc.h
@@ -164,10 +164,15 @@
decoded pic */
#define S5P_FIMV_SI_DISPLAY_Y_ADR 0x2010 /* luma addr of displayed pic */
#define S5P_FIMV_SI_DISPLAY_C_ADR 0x2014 /* chroma addrof displayed pic */
+
#define S5P_FIMV_SI_CONSUMED_BYTES 0x2018 /* Consumed number of bytes to
decode a frame */
#define S5P_FIMV_SI_DISPLAY_STATUS 0x201c /* status of decoded picture */
+#define S5P_FIMV_SI_DECODE_Y_ADR 0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR 0x2028 /* chroma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS 0x202c /* status of decoded picture */
+
#define S5P_FIMV_SI_CH0_SB_ST_ADR 0x2044 /* start addr of stream buf */
#define S5P_FIMV_SI_CH0_SB_FRM_SIZE 0x2048 /* size of stream buf */
#define S5P_FIMV_SI_CH0_DESC_ADR 0x204c /* addr of descriptor buf */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
index 83fe461af263..9bb68e7b5ae8 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -70,7 +70,7 @@ static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
wake_up(&dev->queue);
}
-void s5p_mfc_watchdog(unsigned long arg)
+static void s5p_mfc_watchdog(unsigned long arg)
{
struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
@@ -373,7 +373,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
/* If no context is available then all necessary
* processing has been done. */
- if (ctx == 0)
+ if (ctx == NULL)
return;
dev = ctx->dev;
@@ -429,7 +429,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev;
unsigned int guard_width, guard_height;
- if (ctx == 0)
+ if (ctx == NULL)
return;
dev = ctx->dev;
if (ctx->c_ops->post_seq_start) {
@@ -496,7 +496,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev;
unsigned long flags;
- if (ctx == 0)
+ if (ctx == NULL)
return;
dev = ctx->dev;
s5p_mfc_clear_int_flags(dev);
@@ -772,7 +772,7 @@ err_queue_init:
err_init_hw:
s5p_mfc_release_firmware(dev);
err_alloc_fw:
- dev->ctx[ctx->num] = 0;
+ dev->ctx[ctx->num] = NULL;
del_timer_sync(&dev->watchdog_timer);
s5p_mfc_clock_off();
err_pwr_enable:
@@ -849,7 +849,7 @@ static int s5p_mfc_release(struct file *file)
}
mfc_debug(2, "Shutting down clock\n");
s5p_mfc_clock_off();
- dev->ctx[ctx->num] = 0;
+ dev->ctx[ctx->num] = NULL;
s5p_mfc_dec_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
@@ -948,7 +948,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
int ret;
pr_debug("%s++\n", __func__);
- dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof *dev, GFP_KERNEL);
if (!dev) {
dev_err(&pdev->dev, "Not enough memory for MFC device\n");
return -ENOMEM;
@@ -959,49 +959,35 @@ static int s5p_mfc_probe(struct platform_device *pdev)
dev->plat_dev = pdev;
if (!dev->plat_dev) {
dev_err(&pdev->dev, "No platform data specified\n");
- ret = -ENODEV;
- goto err_dev;
+ return -ENODEV;
}
ret = s5p_mfc_init_pm(dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get mfc clock source\n");
- goto err_clk;
+ return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get memory region resource\n");
- ret = -ENOENT;
- goto err_res;
- }
- dev->mfc_mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (dev->mfc_mem == NULL) {
- dev_err(&pdev->dev, "failed to get memory region\n");
- ret = -ENOENT;
- goto err_mem_reg;
- }
- dev->regs_base = ioremap(dev->mfc_mem->start, resource_size(dev->mfc_mem));
+ dev->regs_base = devm_request_and_ioremap(&pdev->dev, res);
if (dev->regs_base == NULL) {
- dev_err(&pdev->dev, "failed to ioremap address region\n");
- ret = -ENOENT;
- goto err_ioremap;
+ dev_err(&pdev->dev, "Failed to obtain io memory\n");
+ return -ENOENT;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(&pdev->dev, "failed to get irq resource\n");
ret = -ENOENT;
- goto err_get_res;
+ goto err_res;
}
dev->irq = res->start;
- ret = request_irq(dev->irq, s5p_mfc_irq, IRQF_DISABLED, pdev->name,
- dev);
+ ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
+ IRQF_DISABLED, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
- goto err_req_irq;
+ goto err_res;
}
dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
@@ -1009,20 +995,20 @@ static int s5p_mfc_probe(struct platform_device *pdev)
if (!dev->mem_dev_l) {
mfc_err("Mem child (L) device get failed\n");
ret = -ENODEV;
- goto err_find_child;
+ goto err_res;
}
dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
match_child);
if (!dev->mem_dev_r) {
mfc_err("Mem child (R) device get failed\n");
ret = -ENODEV;
- goto err_find_child;
+ goto err_res;
}
dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
ret = PTR_ERR(dev->alloc_ctx[0]);
- goto err_mem_init_ctx_0;
+ goto err_res;
}
dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
@@ -1048,6 +1034,10 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
vfd->release = video_device_release,
vfd->lock = &dev->mfc_mutex;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->v4l2_dev = &dev->v4l2_dev;
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
dev->vfd_dec = vfd;
@@ -1072,6 +1062,8 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
vfd->release = video_device_release,
vfd->lock = &dev->mfc_mutex;
+ /* As for the decoder node above: have the V4L2 core lock all fops until this driver is audited and the flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
vfd->v4l2_dev = &dev->v4l2_dev;
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
dev->vfd_enc = vfd;
@@ -1110,22 +1102,9 @@ err_v4l2_dev_reg:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
err_mem_init_ctx_1:
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
-err_mem_init_ctx_0:
-err_find_child:
- free_irq(dev->irq, dev);
-err_req_irq:
-err_get_res:
- iounmap(dev->regs_base);
- dev->regs_base = NULL;
-err_ioremap:
- release_resource(dev->mfc_mem);
- kfree(dev->mfc_mem);
-err_mem_reg:
err_res:
s5p_mfc_final_pm(dev);
-err_clk:
-err_dev:
- kfree(dev);
+
pr_debug("%s-- with error\n", __func__);
return ret;
@@ -1148,15 +1127,7 @@ static int __devexit s5p_mfc_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
- free_irq(dev->irq, dev);
- iounmap(dev->regs_base);
- if (dev->mfc_mem) {
- release_resource(dev->mfc_mem);
- kfree(dev->mfc_mem);
- dev->mfc_mem = NULL;
- }
s5p_mfc_final_pm(dev);
- kfree(dev);
return 0;
}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_common.h b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
index 91146fa622e4..bd5706a6bad1 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
@@ -185,7 +185,6 @@ struct s5p_mfc_pm {
* @mem_dev_r: child device of the right memory bank (1)
* @regs_base: base address of the MFC hw registers
* @irq: irq resource
- * @mfc_mem: MFC registers memory resource
* @dec_ctrl_handler: control framework handler for decoding
* @enc_ctrl_handler: control framework handler for encoding
* @pm: power management control
@@ -221,7 +220,6 @@ struct s5p_mfc_dev {
struct device *mem_dev_r;
void __iomem *regs_base;
int irq;
- struct resource *mfc_mem;
struct v4l2_ctrl_handler dec_ctrl_handler;
struct v4l2_ctrl_handler enc_ctrl_handler;
struct s5p_mfc_pm pm;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
index f2481a85e0a2..08a5cfeaa59e 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
@@ -52,7 +52,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
if (IS_ERR(s5p_mfc_bitproc_buf)) {
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
mfc_err("Allocating bitprocessor buffer failed\n");
release_firmware(fw_blob);
return -ENOMEM;
@@ -63,7 +63,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
release_firmware(fw_blob);
return -EIO;
}
@@ -72,7 +72,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("Bitprocessor memory remap failed\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
release_firmware(fw_blob);
return -EIO;
}
@@ -82,7 +82,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
if (IS_ERR(b_base)) {
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
mfc_err("Allocating bank2 base failed\n");
release_firmware(fw_blob);
return -ENOMEM;
@@ -94,7 +94,7 @@ int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
release_firmware(fw_blob);
return -EIO;
}
@@ -126,7 +126,7 @@ int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
release_firmware(fw_blob);
return -ENOMEM;
}
- if (s5p_mfc_bitproc_buf == 0 || s5p_mfc_bitproc_phys == 0) {
+ if (s5p_mfc_bitproc_buf == NULL || s5p_mfc_bitproc_phys == 0) {
mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
release_firmware(fw_blob);
return -EINVAL;
@@ -146,9 +146,9 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
if (!s5p_mfc_bitproc_buf)
return -EINVAL;
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
- s5p_mfc_bitproc_virt = 0;
+ s5p_mfc_bitproc_virt = NULL;
s5p_mfc_bitproc_phys = 0;
- s5p_mfc_bitproc_buf = 0;
+ s5p_mfc_bitproc_buf = NULL;
return 0;
}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index c25ec022d267..feea867f318c 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -627,13 +627,13 @@ static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
- ctx->loop_filter_mpeg4 = ctrl->val;
+ ctx->display_delay = ctrl->val;
break;
case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
ctx->display_delay_enable = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
- ctx->display_delay = ctrl->val;
+ ctx->loop_filter_mpeg4 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
ctx->slice_interface = ctrl->val;
@@ -996,6 +996,7 @@ int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
for (i = 0; i < NUM_CTRLS; i++) {
if (IS_MFC51_PRIV(controls[i].id)) {
+ memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
cfg.ops = &s5p_mfc_dec_ctrl_ops;
cfg.id = controls[i].id;
cfg.min = controls[i].minimum;
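
Editor's note: the memset() added above matters because the same on-stack v4l2_ctrl_config is reused for every MFC5.1 private control in this loop; without zeroing, fields assigned for one control (step, flags, menu masks) silently carry over into the next v4l2_ctrl_new_custom() call. A reduced sketch of the safe pattern — "my_controls", "my_ctrl_ops" and "hdl" are placeholders, not names from the driver:

struct v4l2_ctrl_config cfg;
unsigned int i;

for (i = 0; i < ARRAY_SIZE(my_controls); i++) {
	memset(&cfg, 0, sizeof(cfg));		/* start each control from a clean slate */
	cfg.ops  = &my_ctrl_ops;
	cfg.id   = my_controls[i].id;
	cfg.name = my_controls[i].name;
	cfg.type = my_controls[i].type;
	cfg.min  = my_controls[i].minimum;
	cfg.max  = my_controls[i].maximum;
	cfg.step = my_controls[i].step;
	cfg.def  = my_controls[i].default_value;
	v4l2_ctrl_new_custom(hdl, &cfg, NULL);
}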
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index dff9dc798795..158b78989b89 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -243,12 +243,6 @@ static struct mfc_control controls[] = {
.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
- .menu_skip_mask = ~(
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
- ),
},
{
.id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
@@ -494,7 +488,7 @@ static struct mfc_control controls[] = {
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
.maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
- .default_value = 0,
+ .default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
.menu_skip_mask = 0,
},
{
@@ -534,7 +528,7 @@ static struct mfc_control controls[] = {
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
.maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
- .default_value = 0,
+ .default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
.menu_skip_mask = 0,
},
{
@@ -907,6 +901,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mfc_err("failed to try output format\n");
return -EINVAL;
}
+ v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+ &pix_fmt_mp->height, 4, 1080, 1, 0);
} else {
mfc_err("invalid buf type\n");
return -EINVAL;
@@ -1436,7 +1432,8 @@ static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
.s_ctrl = s5p_mfc_enc_s_ctrl,
};
-int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
@@ -1452,7 +1449,8 @@ int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
return 0;
}
-int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
@@ -1775,6 +1773,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
}
for (i = 0; i < NUM_CTRLS; i++) {
if (IS_MFC51_PRIV(controls[i].id)) {
+ memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
cfg.ops = &s5p_mfc_enc_ctrl_ops;
cfg.id = controls[i].id;
cfg.min = controls[i].minimum;
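
Editor's note: two encoder changes above deserve a remark — the menu controls now use their enum constants as defaults instead of a bare 0, and vidioc_try_fmt() clamps the requested raw-input size with v4l_bound_align_image() rather than rejecting it. The alignment arguments of that helper are log2 values, so the call added by the patch behaves as annotated below (same call, annotation only):

/* 8..1920 wide, 4..1080 high; walign/halign of 1 round width and height to
 * multiples of 2; the trailing salign of 0 adds no width/height coupling.
 * Oversized requests are clamped, odd sizes are adjusted to an even value. */
v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
		      &pix_fmt_mp->height, 4, 1080, 1, 0);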
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
index e08b21c50ebf..e6217cbfa4a3 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
@@ -43,7 +43,7 @@ int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
ctx->desc_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
if (IS_ERR_VALUE((int)ctx->desc_buf)) {
- ctx->desc_buf = 0;
+ ctx->desc_buf = NULL;
mfc_err("Allocating DESC buffer failed\n");
return -ENOMEM;
}
@@ -54,7 +54,7 @@ int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
if (desc_virt == NULL) {
vb2_dma_contig_memops.put(ctx->desc_buf);
ctx->desc_phys = 0;
- ctx->desc_buf = 0;
+ ctx->desc_buf = NULL;
mfc_err("Remapping DESC buffer failed\n");
return -ENOMEM;
}
@@ -69,7 +69,7 @@ void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
if (ctx->desc_phys) {
vb2_dma_contig_memops.put(ctx->desc_buf);
ctx->desc_phys = 0;
- ctx->desc_buf = 0;
+ ctx->desc_buf = NULL;
}
}
@@ -186,7 +186,7 @@ int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
ctx->bank1_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
if (IS_ERR(ctx->bank1_buf)) {
- ctx->bank1_buf = 0;
+ ctx->bank1_buf = NULL;
printk(KERN_ERR
"Buf alloc for decoding failed (port A)\n");
return -ENOMEM;
@@ -200,7 +200,7 @@ int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
ctx->bank2_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
if (IS_ERR(ctx->bank2_buf)) {
- ctx->bank2_buf = 0;
+ ctx->bank2_buf = NULL;
mfc_err("Buf alloc for decoding failed (port B)\n");
return -ENOMEM;
}
@@ -216,13 +216,13 @@ void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
{
if (ctx->bank1_buf) {
vb2_dma_contig_memops.put(ctx->bank1_buf);
- ctx->bank1_buf = 0;
+ ctx->bank1_buf = NULL;
ctx->bank1_phys = 0;
ctx->bank1_size = 0;
}
if (ctx->bank2_buf) {
vb2_dma_contig_memops.put(ctx->bank2_buf);
- ctx->bank2_buf = 0;
+ ctx->bank2_buf = NULL;
ctx->bank2_phys = 0;
ctx->bank2_size = 0;
}
@@ -244,7 +244,7 @@ int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
if (IS_ERR(ctx->ctx_buf)) {
mfc_err("Allocating context buffer failed\n");
ctx->ctx_phys = 0;
- ctx->ctx_buf = 0;
+ ctx->ctx_buf = NULL;
return -ENOMEM;
}
ctx->ctx_phys = s5p_mfc_mem_cookie(
@@ -256,7 +256,7 @@ int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
mfc_err("Remapping instance buffer failed\n");
vb2_dma_contig_memops.put(ctx->ctx_buf);
ctx->ctx_phys = 0;
- ctx->ctx_buf = 0;
+ ctx->ctx_buf = NULL;
return -ENOMEM;
}
/* Zero content of the allocated memory */
@@ -265,7 +265,7 @@ int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
if (s5p_mfc_init_shm(ctx) < 0) {
vb2_dma_contig_memops.put(ctx->ctx_buf);
ctx->ctx_phys = 0;
- ctx->ctx_buf = 0;
+ ctx->ctx_buf = NULL;
return -ENOMEM;
}
return 0;
@@ -277,12 +277,12 @@ void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
if (ctx->ctx_buf) {
vb2_dma_contig_memops.put(ctx->ctx_buf);
ctx->ctx_phys = 0;
- ctx->ctx_buf = 0;
+ ctx->ctx_buf = NULL;
}
if (ctx->shm_alloc) {
vb2_dma_contig_memops.put(ctx->shm_alloc);
- ctx->shm_alloc = 0;
- ctx->shm = 0;
+ ctx->shm_alloc = NULL;
+ ctx->shm = NULL;
}
}
@@ -296,7 +296,7 @@ void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
}
/* Set registers for shared buffer */
-void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
+static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
index db83836e6a9f..5932d1c782c5 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
@@ -57,10 +57,12 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
S5P_FIMV_SI_DISPLAY_Y_ADR) << \
MFC_OFFSET_SHIFT)
#define s5p_mfc_get_dec_y_adr() (readl(dev->regs_base + \
- S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+ S5P_FIMV_SI_DECODE_Y_ADR) << \
MFC_OFFSET_SHIFT)
#define s5p_mfc_get_dspl_status() readl(dev->regs_base + \
S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_dec_status() readl(dev->regs_base + \
+ S5P_FIMV_SI_DECODE_STATUS)
#define s5p_mfc_get_frame_type() (readl(dev->regs_base + \
S5P_FIMV_DECODE_FRAME_TYPE) \
& S5P_FIMV_DECODE_FRAME_MASK)
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
index 764eac6bcc4c..cf962a466276 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
@@ -13,8 +13,7 @@
#ifndef S5P_MFC_SHM_H_
#define S5P_MFC_SHM_H_
-enum MFC_SHM_OFS
-{
+enum MFC_SHM_OFS {
EXTENEDED_DECODE_STATUS = 0x00, /* D */
SET_FRAME_TAG = 0x04, /* D */
GET_FRAME_TAG_TOP = 0x08, /* D */
diff --git a/drivers/media/video/s5p-tv/hdmi_drv.c b/drivers/media/video/s5p-tv/hdmi_drv.c
index 4865d25a0e57..20cb6eef2979 100644
--- a/drivers/media/video/s5p-tv/hdmi_drv.c
+++ b/drivers/media/video/s5p-tv/hdmi_drv.c
@@ -42,7 +42,23 @@ MODULE_DESCRIPTION("Samsung HDMI");
MODULE_LICENSE("GPL");
/* default preset configured on probe */
-#define HDMI_DEFAULT_PRESET V4L2_DV_1080P60
+#define HDMI_DEFAULT_PRESET V4L2_DV_480P59_94
+
+struct hdmi_pulse {
+ u32 beg;
+ u32 end;
+};
+
+struct hdmi_timings {
+ struct hdmi_pulse hact;
+ u32 hsyn_pol; /* 0 - high, 1 - low */
+ struct hdmi_pulse hsyn;
+ u32 interlaced;
+ struct hdmi_pulse vact[2];
+ u32 vsyn_pol; /* 0 - high, 1 - low */
+ u32 vsyn_off;
+ struct hdmi_pulse vsyn[2];
+};
struct hdmi_resources {
struct clk *hdmi;
@@ -70,64 +86,15 @@ struct hdmi_device {
/** subdev of MHL interface */
struct v4l2_subdev *mhl_sd;
/** configuration of current graphic mode */
- const struct hdmi_preset_conf *cur_conf;
+ const struct hdmi_timings *cur_conf;
+ /** flag indicating that timings are dirty */
+ int cur_conf_dirty;
/** current preset */
u32 cur_preset;
/** other resources */
struct hdmi_resources res;
};
-struct hdmi_tg_regs {
- u8 cmd;
- u8 h_fsz_l;
- u8 h_fsz_h;
- u8 hact_st_l;
- u8 hact_st_h;
- u8 hact_sz_l;
- u8 hact_sz_h;
- u8 v_fsz_l;
- u8 v_fsz_h;
- u8 vsync_l;
- u8 vsync_h;
- u8 vsync2_l;
- u8 vsync2_h;
- u8 vact_st_l;
- u8 vact_st_h;
- u8 vact_sz_l;
- u8 vact_sz_h;
- u8 field_chg_l;
- u8 field_chg_h;
- u8 vact_st2_l;
- u8 vact_st2_h;
- u8 vsync_top_hdmi_l;
- u8 vsync_top_hdmi_h;
- u8 vsync_bot_hdmi_l;
- u8 vsync_bot_hdmi_h;
- u8 field_top_hdmi_l;
- u8 field_top_hdmi_h;
- u8 field_bot_hdmi_l;
- u8 field_bot_hdmi_h;
-};
-
-struct hdmi_core_regs {
- u8 h_blank[2];
- u8 v_blank[3];
- u8 h_v_line[3];
- u8 vsync_pol[1];
- u8 int_pro_mode[1];
- u8 v_blank_f[3];
- u8 h_sync_gen[3];
- u8 v_sync_gen1[3];
- u8 v_sync_gen2[3];
- u8 v_sync_gen3[3];
-};
-
-struct hdmi_preset_conf {
- struct hdmi_core_regs core;
- struct hdmi_tg_regs tg;
- struct v4l2_mbus_framefmt mbus_fmt;
-};
-
static struct platform_device_id hdmi_driver_types[] = {
{
.name = "s5pv210-hdmi",
@@ -165,6 +132,21 @@ void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
writeb(value, hdev->regs + reg_id);
}
+static inline
+void hdmi_writebn(struct hdmi_device *hdev, u32 reg_id, int n, u32 value)
+{
+ switch (n) {
+ default:
+ writeb(value >> 24, hdev->regs + reg_id + 12);
+ case 3:
+ writeb(value >> 16, hdev->regs + reg_id + 8);
+ case 2:
+ writeb(value >> 8, hdev->regs + reg_id + 4);
+ case 1:
+ writeb(value >> 0, hdev->regs + reg_id + 0);
+ }
+}
+
static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
{
return readl(hdev->regs + reg_id);
@@ -211,77 +193,72 @@ static void hdmi_reg_init(struct hdmi_device *hdev)
}
static void hdmi_timing_apply(struct hdmi_device *hdev,
- const struct hdmi_preset_conf *conf)
+ const struct hdmi_timings *t)
{
- const struct hdmi_core_regs *core = &conf->core;
- const struct hdmi_tg_regs *tg = &conf->tg;
-
/* setting core registers */
- hdmi_writeb(hdev, HDMI_H_BLANK_0, core->h_blank[0]);
- hdmi_writeb(hdev, HDMI_H_BLANK_1, core->h_blank[1]);
- hdmi_writeb(hdev, HDMI_V_BLANK_0, core->v_blank[0]);
- hdmi_writeb(hdev, HDMI_V_BLANK_1, core->v_blank[1]);
- hdmi_writeb(hdev, HDMI_V_BLANK_2, core->v_blank[2]);
- hdmi_writeb(hdev, HDMI_H_V_LINE_0, core->h_v_line[0]);
- hdmi_writeb(hdev, HDMI_H_V_LINE_1, core->h_v_line[1]);
- hdmi_writeb(hdev, HDMI_H_V_LINE_2, core->h_v_line[2]);
- hdmi_writeb(hdev, HDMI_VSYNC_POL, core->vsync_pol[0]);
- hdmi_writeb(hdev, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
- hdmi_writeb(hdev, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
- hdmi_writeb(hdev, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
- hdmi_writeb(hdev, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
- hdmi_writeb(hdev, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
- hdmi_writeb(hdev, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
- hdmi_writeb(hdev, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
- hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ hdmi_writebn(hdev, HDMI_H_BLANK_0, 2, t->hact.beg);
+ hdmi_writebn(hdev, HDMI_H_SYNC_GEN_0, 3,
+ (t->hsyn_pol << 20) | (t->hsyn.end << 10) | t->hsyn.beg);
+ hdmi_writeb(hdev, HDMI_VSYNC_POL, t->vsyn_pol);
+ hdmi_writebn(hdev, HDMI_V_BLANK_0, 3,
+ (t->vact[0].beg << 11) | t->vact[0].end);
+ hdmi_writebn(hdev, HDMI_V_SYNC_GEN_1_0, 3,
+ (t->vsyn[0].beg << 12) | t->vsyn[0].end);
+ if (t->interlaced) {
+ u32 vsyn_trans = t->hsyn.beg + t->vsyn_off;
+
+ hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 1);
+ hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
+ (t->hact.end << 12) | t->vact[1].end);
+ hdmi_writebn(hdev, HDMI_V_BLANK_F_0, 3,
+ (t->vact[1].end << 11) | t->vact[1].beg);
+ hdmi_writebn(hdev, HDMI_V_SYNC_GEN_2_0, 3,
+ (t->vsyn[1].beg << 12) | t->vsyn[1].end);
+ hdmi_writebn(hdev, HDMI_V_SYNC_GEN_3_0, 3,
+ (vsyn_trans << 12) | vsyn_trans);
+ } else {
+ hdmi_writeb(hdev, HDMI_INT_PRO_MODE, 0);
+ hdmi_writebn(hdev, HDMI_H_V_LINE_0, 3,
+ (t->hact.end << 12) | t->vact[0].end);
+ }
+
/* Timing generator registers */
- hdmi_writeb(hdev, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
- hdmi_writeb(hdev, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
- hdmi_writeb(hdev, HDMI_TG_HACT_ST_L, tg->hact_st_l);
- hdmi_writeb(hdev, HDMI_TG_HACT_ST_H, tg->hact_st_h);
- hdmi_writeb(hdev, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
- hdmi_writeb(hdev, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
- hdmi_writeb(hdev, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
- hdmi_writeb(hdev, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_L, tg->vsync_l);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_H, tg->vsync_h);
- hdmi_writeb(hdev, HDMI_TG_VSYNC2_L, tg->vsync2_l);
- hdmi_writeb(hdev, HDMI_TG_VSYNC2_H, tg->vsync2_h);
- hdmi_writeb(hdev, HDMI_TG_VACT_ST_L, tg->vact_st_l);
- hdmi_writeb(hdev, HDMI_TG_VACT_ST_H, tg->vact_st_h);
- hdmi_writeb(hdev, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
- hdmi_writeb(hdev, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
- hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
- hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
- hdmi_writeb(hdev, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
- hdmi_writeb(hdev, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
- hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
- hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
- hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
- hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
- hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+ hdmi_writebn(hdev, HDMI_TG_H_FSZ_L, 2, t->hact.end);
+ hdmi_writebn(hdev, HDMI_TG_HACT_ST_L, 2, t->hact.beg);
+ hdmi_writebn(hdev, HDMI_TG_HACT_SZ_L, 2, t->hact.end - t->hact.beg);
+ hdmi_writebn(hdev, HDMI_TG_VSYNC_L, 2, t->vsyn[0].beg);
+ hdmi_writebn(hdev, HDMI_TG_VACT_ST_L, 2, t->vact[0].beg);
+ hdmi_writebn(hdev, HDMI_TG_VACT_SZ_L, 2,
+ t->vact[0].end - t->vact[0].beg);
+ hdmi_writebn(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, 2, t->vsyn[0].beg);
+ hdmi_writebn(hdev, HDMI_TG_FIELD_TOP_HDMI_L, 2, t->vsyn[0].beg);
+ if (t->interlaced) {
+ hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_FIELD_EN);
+ hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[1].end);
+ hdmi_writebn(hdev, HDMI_TG_VSYNC2_L, 2, t->vsyn[1].beg);
+ hdmi_writebn(hdev, HDMI_TG_FIELD_CHG_L, 2, t->vact[0].end);
+ hdmi_writebn(hdev, HDMI_TG_VACT_ST2_L, 2, t->vact[1].beg);
+ hdmi_writebn(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, 2, t->vsyn[1].beg);
+ hdmi_writebn(hdev, HDMI_TG_FIELD_BOT_HDMI_L, 2, t->vsyn[1].beg);
+ } else {
+ hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_FIELD_EN);
+ hdmi_writebn(hdev, HDMI_TG_V_FSZ_L, 2, t->vact[0].end);
+ }
}
static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
{
struct device *dev = hdmi_dev->dev;
- const struct hdmi_preset_conf *conf = hdmi_dev->cur_conf;
+ const struct hdmi_timings *conf = hdmi_dev->cur_conf;
struct v4l2_dv_preset preset;
int ret;
dev_dbg(dev, "%s\n", __func__);
+ /* skip if conf is already synchronized with HW */
+ if (!hdmi_dev->cur_conf_dirty)
+ return 0;
+
/* reset hdmiphy */
hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
mdelay(10);
@@ -307,6 +284,8 @@ static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
/* setting core registers */
hdmi_timing_apply(hdmi_dev, conf);
+ hdmi_dev->cur_conf_dirty = 0;
+
return 0;
}
@@ -398,156 +377,126 @@ static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
#undef DUMPREG
}
-static const struct hdmi_preset_conf hdmi_conf_480p = {
- .core = {
- .h_blank = {0x8a, 0x00},
- .v_blank = {0x0d, 0x6a, 0x01},
- .h_v_line = {0x0d, 0xa2, 0x35},
- .vsync_pol = {0x01},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00},
- .h_sync_gen = {0x0e, 0x30, 0x11},
- .v_sync_gen1 = {0x0f, 0x90, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x5a, 0x03, /* h_fsz */
- 0x8a, 0x00, 0xd0, 0x02, /* hact */
- 0x0d, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0xe0, 0x01, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
- .mbus_fmt = {
- .width = 720,
- .height = 480,
- .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
+static const struct hdmi_timings hdmi_timings_480p = {
+ .hact = { .beg = 138, .end = 858 },
+ .hsyn_pol = 1,
+ .hsyn = { .beg = 16, .end = 16 + 62 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 42 + 3, .end = 522 + 3 },
+ .vsyn_pol = 1,
+ .vsyn[0] = { .beg = 6 + 3, .end = 12 + 3},
};
-static const struct hdmi_preset_conf hdmi_conf_720p60 = {
- .core = {
- .h_blank = {0x72, 0x01},
- .v_blank = {0xee, 0xf2, 0x00},
- .h_v_line = {0xee, 0x22, 0x67},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x6c, 0x50, 0x02},
- .v_sync_gen1 = {0x0a, 0x50, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x72, 0x06, /* h_fsz */
- 0x72, 0x01, 0x00, 0x05, /* hact */
- 0xee, 0x02, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x1e, 0x00, 0xd0, 0x02, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
- .mbus_fmt = {
- .width = 1280,
- .height = 720,
- .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
+static const struct hdmi_timings hdmi_timings_576p50 = {
+ .hact = { .beg = 144, .end = 864 },
+ .hsyn_pol = 1,
+ .hsyn = { .beg = 12, .end = 12 + 64 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 44 + 5, .end = 620 + 5 },
+ .vsyn_pol = 1,
+ .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
};
-static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
- .core = {
- .h_blank = {0xd0, 0x02},
- .v_blank = {0x65, 0x6c, 0x01},
- .h_v_line = {0x65, 0x04, 0xa5},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x0e, 0xea, 0x08},
- .v_sync_gen1 = {0x09, 0x40, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x49, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
- .mbus_fmt = {
- .width = 1920,
- .height = 1080,
- .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
+static const struct hdmi_timings hdmi_timings_720p60 = {
+ .hact = { .beg = 370, .end = 1650 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 110, .end = 110 + 40 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
};
-static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
- .core = {
- .h_blank = {0x18, 0x01},
- .v_blank = {0x65, 0x6c, 0x01},
- .h_v_line = {0x65, 0x84, 0x89},
- .vsync_pol = {0x00},
- .int_pro_mode = {0x00},
- .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
- .h_sync_gen = {0x56, 0x08, 0x02},
- .v_sync_gen1 = {0x09, 0x40, 0x00},
- /* other don't care */
- },
- .tg = {
- 0x00, /* cmd */
- 0x98, 0x08, /* h_fsz */
- 0x18, 0x01, 0x80, 0x07, /* hact */
- 0x65, 0x04, /* v_fsz */
- 0x01, 0x00, 0x33, 0x02, /* vsync */
- 0x2d, 0x00, 0x38, 0x04, /* vact */
- 0x33, 0x02, /* field_chg */
- 0x48, 0x02, /* vact_st2 */
- 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
- 0x01, 0x00, 0x33, 0x02, /* field top/bot */
- },
- .mbus_fmt = {
- .width = 1920,
- .height = 1080,
- .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
- .field = V4L2_FIELD_NONE,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
+static const struct hdmi_timings hdmi_timings_720p50 = {
+ .hact = { .beg = 700, .end = 1980 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 440, .end = 440 + 40 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 25 + 5, .end = 745 + 5 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 5, .end = 5 + 5},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p24 = {
+ .hact = { .beg = 830, .end = 2750 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 638, .end = 638 + 44 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p60 = {
+ .hact = { .beg = 280, .end = 2200 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 88, .end = 88 + 44 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
+};
+
+static const struct hdmi_timings hdmi_timings_1080i60 = {
+ .hact = { .beg = 280, .end = 2200 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 88, .end = 88 + 44 },
+ .interlaced = 1,
+ .vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
+ .vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
+ .vsyn_pol = 0,
+ .vsyn_off = 1100,
+ .vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
+ .vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
+};
+
+static const struct hdmi_timings hdmi_timings_1080i50 = {
+ .hact = { .beg = 720, .end = 2640 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 528, .end = 528 + 44 },
+ .interlaced = 1,
+ .vact[0] = { .beg = 20 + 2, .end = 560 + 2 },
+ .vact[1] = { .beg = 583 + 2, .end = 1123 + 2 },
+ .vsyn_pol = 0,
+ .vsyn_off = 1320,
+ .vsyn[0] = { .beg = 0 + 2, .end = 5 + 2},
+ .vsyn[1] = { .beg = 562 + 2, .end = 567 + 2},
+};
+
+static const struct hdmi_timings hdmi_timings_1080p50 = {
+ .hact = { .beg = 720, .end = 2640 },
+ .hsyn_pol = 0,
+ .hsyn = { .beg = 528, .end = 528 + 44 },
+ .interlaced = 0,
+ .vact[0] = { .beg = 41 + 4, .end = 1121 + 4 },
+ .vsyn_pol = 0,
+ .vsyn[0] = { .beg = 0 + 4, .end = 5 + 4},
};
static const struct {
u32 preset;
- const struct hdmi_preset_conf *conf;
-} hdmi_conf[] = {
- { V4L2_DV_480P59_94, &hdmi_conf_480p },
- { V4L2_DV_720P59_94, &hdmi_conf_720p60 },
- { V4L2_DV_1080P50, &hdmi_conf_1080p50 },
- { V4L2_DV_1080P30, &hdmi_conf_1080p60 },
- { V4L2_DV_1080P60, &hdmi_conf_1080p60 },
+ const struct hdmi_timings *timings;
+} hdmi_timings[] = {
+ { V4L2_DV_480P59_94, &hdmi_timings_480p },
+ { V4L2_DV_576P50, &hdmi_timings_576p50 },
+ { V4L2_DV_720P50, &hdmi_timings_720p50 },
+ { V4L2_DV_720P59_94, &hdmi_timings_720p60 },
+ { V4L2_DV_720P60, &hdmi_timings_720p60 },
+ { V4L2_DV_1080P24, &hdmi_timings_1080p24 },
+ { V4L2_DV_1080P30, &hdmi_timings_1080p60 },
+ { V4L2_DV_1080P50, &hdmi_timings_1080p50 },
+ { V4L2_DV_1080I50, &hdmi_timings_1080i50 },
+ { V4L2_DV_1080I60, &hdmi_timings_1080i60 },
+ { V4L2_DV_1080P60, &hdmi_timings_1080p60 },
};
-static const struct hdmi_preset_conf *hdmi_preset2conf(u32 preset)
+static const struct hdmi_timings *hdmi_preset2timings(u32 preset)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdmi_conf); ++i)
- if (hdmi_conf[i].preset == preset)
- return hdmi_conf[i].conf;
+ for (i = 0; i < ARRAY_SIZE(hdmi_timings); ++i)
+ if (hdmi_timings[i].preset == preset)
+ return hdmi_timings[i].timings;
return NULL;
}
@@ -559,6 +508,10 @@ static int hdmi_streamon(struct hdmi_device *hdev)
dev_dbg(dev, "%s\n", __func__);
+ ret = hdmi_conf_apply(hdev);
+ if (ret)
+ return ret;
+
ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
if (ret)
return ret;
@@ -671,14 +624,15 @@ static int hdmi_s_dv_preset(struct v4l2_subdev *sd,
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
struct device *dev = hdev->dev;
- const struct hdmi_preset_conf *conf;
+ const struct hdmi_timings *conf;
- conf = hdmi_preset2conf(preset->preset);
+ conf = hdmi_preset2timings(preset->preset);
if (conf == NULL) {
dev_err(dev, "preset (%u) not supported\n", preset->preset);
return -EINVAL;
}
hdev->cur_conf = conf;
+ hdev->cur_conf_dirty = 1;
hdev->cur_preset = preset->preset;
return 0;
}
@@ -695,21 +649,32 @@ static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt)
{
struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
- struct device *dev = hdev->dev;
+ const struct hdmi_timings *t = hdev->cur_conf;
- dev_dbg(dev, "%s\n", __func__);
+ dev_dbg(hdev->dev, "%s\n", __func__);
if (!hdev->cur_conf)
return -EINVAL;
- *fmt = hdev->cur_conf->mbus_fmt;
+ memset(fmt, 0, sizeof *fmt);
+ fmt->width = t->hact.end - t->hact.beg;
+ fmt->height = t->vact[0].end - t->vact[0].beg;
+ fmt->code = V4L2_MBUS_FMT_FIXED; /* means RGB888 */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ if (t->interlaced) {
+ fmt->field = V4L2_FIELD_INTERLACED;
+ fmt->height *= 2;
+ } else {
+ fmt->field = V4L2_FIELD_NONE;
+ }
return 0;
}
static int hdmi_enum_dv_presets(struct v4l2_subdev *sd,
struct v4l2_dv_enum_preset *preset)
{
- if (preset->index >= ARRAY_SIZE(hdmi_conf))
+ if (preset->index >= ARRAY_SIZE(hdmi_timings))
return -EINVAL;
- return v4l_fill_dv_preset_info(hdmi_conf[preset->index].preset, preset);
+ return v4l_fill_dv_preset_info(hdmi_timings[preset->index].preset,
+ preset);
}
static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
@@ -737,6 +702,8 @@ static int hdmi_runtime_suspend(struct device *dev)
dev_dbg(dev, "%s\n", __func__);
v4l2_subdev_call(hdev->mhl_sd, core, s_power, 0);
hdmi_resource_poweroff(&hdev->res);
+ /* flag that device context is lost */
+ hdev->cur_conf_dirty = 1;
return 0;
}
@@ -750,10 +717,6 @@ static int hdmi_runtime_resume(struct device *dev)
hdmi_resource_poweron(&hdev->res);
- ret = hdmi_conf_apply(hdev);
- if (ret)
- goto fail;
-
/* starting MHL */
ret = v4l2_subdev_call(hdev->mhl_sd, core, s_power, 1);
if (hdev->mhl_sd && ret)
@@ -993,7 +956,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
strlcpy(sd->name, "s5p-hdmi", sizeof sd->name);
hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
/* FIXME: missing fail preset is not supported */
- hdmi_dev->cur_conf = hdmi_preset2conf(hdmi_dev->cur_preset);
+ hdmi_dev->cur_conf = hdmi_preset2timings(hdmi_dev->cur_preset);
+ hdmi_dev->cur_conf_dirty = 1;
/* storing subdev for call that have only access to struct device */
dev_set_drvdata(dev, sd);
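
Editor's note: the hdmi_drv.c rework above replaces the per-preset register byte tables with compact struct hdmi_timings entries plus the hdmi_writebn() helper. Each HDMI timing register holds one byte of a wider value at consecutive +4 offsets, and the intentionally fall-through switch writes the requested number of bytes from the highest down to byte 0. The fragment below shows what a single hdmi_writebn() call replaces; the value is the 480p total line length from hdmi_timings_480p and the register names are the ones the driver already uses:

u32 h_fsz = 858;	/* total pixels per line for the 480p timing above */

/* old style: the value hand-split into byte-wide registers spaced by 4 */
hdmi_writeb(hdev, HDMI_TG_H_FSZ_L, h_fsz & 0xff);
hdmi_writeb(hdev, HDMI_TG_H_FSZ_H, (h_fsz >> 8) & 0xff);

/* new style: one call covering n = 2 consecutive byte registers */
hdmi_writebn(hdev, HDMI_TG_H_FSZ_L, 2, h_fsz);

The new cur_conf_dirty flag complements this: timings are no longer programmed in runtime resume but lazily in hdmi_streamon(), and suspend marks the configuration dirty again because the powered-down IP loses its register state.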
diff --git a/drivers/media/video/s5p-tv/hdmiphy_drv.c b/drivers/media/video/s5p-tv/hdmiphy_drv.c
index 0afef77747e5..f67b38631801 100644
--- a/drivers/media/video/s5p-tv/hdmiphy_drv.c
+++ b/drivers/media/video/s5p-tv/hdmiphy_drv.c
@@ -26,53 +26,188 @@ MODULE_DESCRIPTION("Samsung HDMI Physical interface driver");
MODULE_LICENSE("GPL");
struct hdmiphy_conf {
- u32 preset;
+ unsigned long pixclk;
const u8 *data;
};
-static const u8 hdmiphy_conf27[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
- 0x6B, 0x10, 0x02, 0x51, 0xDf, 0xF2, 0x54, 0x87,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xe3, 0x26, 0x00, 0x00, 0x00, 0x00,
+struct hdmiphy_ctx {
+ struct v4l2_subdev sd;
+ const struct hdmiphy_conf *conf_tab;
};
-static const u8 hdmiphy_conf74_175[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
- 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+static const struct hdmiphy_conf hdmiphy_conf_s5pv210[] = {
+ { .pixclk = 27000000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 27027000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x52, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 74176000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+ 0x6D, 0x10, 0x01, 0x52, 0xEF, 0xF3, 0x54, 0xB9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 74250000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+ 0x6A, 0x10, 0x01, 0x52, 0xFF, 0xF1, 0x54, 0xBA,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
+ },
+ { /* end marker */ }
};
-static const u8 hdmiphy_conf74_25[32] = {
- 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
- 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
- 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xe0,
- 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+static const struct hdmiphy_conf hdmiphy_conf_exynos4210[] = {
+ { .pixclk = 27000000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 27027000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE2, 0x26, 0x00, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 74176000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xA5, 0x26, 0x01, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 74250000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+ 0x6A, 0x10, 0x01, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xA4, 0x26, 0x01, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 148352000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xEF, 0x5B,
+ 0x6D, 0x18, 0x00, 0x51, 0xEF, 0xF3, 0x54, 0xB9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x11, 0x40, 0xA5, 0x26, 0x02, 0x00, 0x00, 0x00, }
+ },
+ { .pixclk = 148500000, .data = (u8 [32]) {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xF8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xFF, 0xF1, 0x54, 0xBA,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x11, 0x40, 0xA4, 0x26, 0x02, 0x00, 0x00, 0x00, }
+ },
+ { /* end marker */ }
};
-static const u8 hdmiphy_conf148_5[32] = {
- 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
- 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
- 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
- 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+static const struct hdmiphy_conf hdmiphy_conf_exynos4212[] = {
+ { .pixclk = 27000000, .data = (u8 [32]) {
+ 0x01, 0x11, 0x2D, 0x75, 0x00, 0x01, 0x00, 0x08,
+ 0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
+ 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 27027000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x2D, 0x72, 0x00, 0x64, 0x12, 0x08,
+ 0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x71,
+ 0x54, 0xE2, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 74176000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x3E, 0x35, 0x00, 0x5B, 0xDE, 0x08,
+ 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
+ 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 74250000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x3E, 0x35, 0x00, 0x40, 0xF0, 0x08,
+ 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0x52,
+ 0x54, 0xA4, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 148500000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x3E, 0x15, 0x00, 0x40, 0xF0, 0x08,
+ 0x82, 0x20, 0x73, 0xD9, 0x45, 0xA0, 0x34, 0xC0,
+ 0x0B, 0x80, 0x12, 0x87, 0x08, 0x24, 0x24, 0xA4,
+ 0x54, 0x4A, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { /* end marker */ }
};
-static const struct hdmiphy_conf hdmiphy_conf[] = {
- { V4L2_DV_480P59_94, hdmiphy_conf27 },
- { V4L2_DV_1080P30, hdmiphy_conf74_175 },
- { V4L2_DV_720P59_94, hdmiphy_conf74_175 },
- { V4L2_DV_720P60, hdmiphy_conf74_25 },
- { V4L2_DV_1080P50, hdmiphy_conf148_5 },
- { V4L2_DV_1080P60, hdmiphy_conf148_5 },
+static const struct hdmiphy_conf hdmiphy_conf_exynos4412[] = {
+ { .pixclk = 27000000, .data = (u8 [32]) {
+ 0x01, 0x11, 0x2D, 0x75, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x00, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 27027000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x2D, 0x72, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0x20, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 74176000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0x08,
+ 0x81, 0x20, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 74250000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08,
+ 0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { .pixclk = 148500000, .data = (u8 [32]) {
+ 0x01, 0x91, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08,
+ 0x81, 0x20, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4B, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }
+ },
+ { /* end marker */ }
};
-const u8 *hdmiphy_preset2conf(u32 preset)
+static inline struct hdmiphy_ctx *sd_to_ctx(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct hdmiphy_ctx, sd);
+}
+
+static unsigned long hdmiphy_preset_to_pixclk(u32 preset)
+{
+ static const unsigned long pixclk[] = {
+ [V4L2_DV_480P59_94] = 27000000,
+ [V4L2_DV_576P50] = 27000000,
+ [V4L2_DV_720P59_94] = 74176000,
+ [V4L2_DV_720P50] = 74250000,
+ [V4L2_DV_720P60] = 74250000,
+ [V4L2_DV_1080P24] = 74250000,
+ [V4L2_DV_1080P30] = 74250000,
+ [V4L2_DV_1080I50] = 74250000,
+ [V4L2_DV_1080I60] = 74250000,
+ [V4L2_DV_1080P50] = 148500000,
+ [V4L2_DV_1080P60] = 148500000,
+ };
+ if (preset < ARRAY_SIZE(pixclk))
+ return pixclk[preset];
+ else
+ return 0;
+}
+
+static const u8 *hdmiphy_find_conf(u32 preset, const struct hdmiphy_conf *conf)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(hdmiphy_conf); ++i)
- if (hdmiphy_conf[i].preset == preset)
- return hdmiphy_conf[i].data;
+ unsigned long pixclk;
+
+ pixclk = hdmiphy_preset_to_pixclk(preset);
+ if (!pixclk)
+ return NULL;
+
+ for (; conf->pixclk; ++conf)
+ if (conf->pixclk == pixclk)
+ return conf->data;
return NULL;
}
@@ -88,11 +223,12 @@ static int hdmiphy_s_dv_preset(struct v4l2_subdev *sd,
const u8 *data;
u8 buffer[32];
int ret;
+ struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct device *dev = &client->dev;
dev_info(dev, "s_dv_preset(preset = %d)\n", preset->preset);
- data = hdmiphy_preset2conf(preset->preset);
+ data = hdmiphy_find_conf(preset->preset, ctx->conf_tab);
if (!data) {
dev_err(dev, "format not supported\n");
return -EINVAL;
@@ -146,21 +282,36 @@ static const struct v4l2_subdev_ops hdmiphy_ops = {
static int __devinit hdmiphy_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- static struct v4l2_subdev sd;
+ struct hdmiphy_ctx *ctx;
+
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->conf_tab = (struct hdmiphy_conf *)id->driver_data;
+ v4l2_i2c_subdev_init(&ctx->sd, client, &hdmiphy_ops);
- v4l2_i2c_subdev_init(&sd, client, &hdmiphy_ops);
dev_info(&client->dev, "probe successful\n");
return 0;
}
static int __devexit hdmiphy_remove(struct i2c_client *client)
{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hdmiphy_ctx *ctx = sd_to_ctx(sd);
+
+ kfree(ctx);
dev_info(&client->dev, "remove successful\n");
+
return 0;
}
static const struct i2c_device_id hdmiphy_id[] = {
- { "hdmiphy", 0 },
+ { "hdmiphy", (unsigned long)hdmiphy_conf_exynos4210 },
+ { "hdmiphy-s5pv210", (unsigned long)hdmiphy_conf_s5pv210 },
+ { "hdmiphy-exynos4210", (unsigned long)hdmiphy_conf_exynos4210 },
+ { "hdmiphy-exynos4212", (unsigned long)hdmiphy_conf_exynos4212 },
+ { "hdmiphy-exynos4412", (unsigned long)hdmiphy_conf_exynos4412 },
{ },
};
MODULE_DEVICE_TABLE(i2c, hdmiphy_id);
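The per-variant PHY configuration is selected through i2c_device_id::driver_data: each entry above stores a pointer to its pixclk-terminated table, and hdmiphy_probe() copies it into the context. A minimal sketch of how a preset is then resolved, using the helpers introduced in this patch (the wrapper function below is hypothetical, for illustration only):

	/* Illustration only: resolve the 1080p60 PHY settings. */
	static const u8 *hdmiphy_conf_for_1080p60(struct hdmiphy_ctx *ctx)
	{
		/* V4L2_DV_1080P60 maps to 148500000 Hz in
		 * hdmiphy_preset_to_pixclk(); hdmiphy_find_conf() then walks
		 * ctx->conf_tab until conf->pixclk matches or the zero-filled
		 * end marker terminates the scan. */
		return hdmiphy_find_conf(V4L2_DV_1080P60, ctx->conf_tab);
	}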
diff --git a/drivers/media/video/s5p-tv/mixer.h b/drivers/media/video/s5p-tv/mixer.h
index 1597078c4a50..ddb422e23550 100644
--- a/drivers/media/video/s5p-tv/mixer.h
+++ b/drivers/media/video/s5p-tv/mixer.h
@@ -226,6 +226,7 @@ struct mxr_resources {
/* event flags used */
enum mxr_devide_flags {
MXR_EVENT_VSYNC = 0,
+ MXR_EVENT_TOP = 1,
};
/** drivers instance */
@@ -293,7 +294,7 @@ int __devinit mxr_acquire_video(struct mxr_device *mdev,
struct mxr_output_conf *output_cont, int output_count);
/** releasing common video resources */
-void __devexit mxr_release_video(struct mxr_device *mdev);
+void mxr_release_video(struct mxr_device *mdev);
struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
diff --git a/drivers/media/video/s5p-tv/mixer_drv.c b/drivers/media/video/s5p-tv/mixer_drv.c
index a2c0c25ad130..edca06592883 100644
--- a/drivers/media/video/s5p-tv/mixer_drv.c
+++ b/drivers/media/video/s5p-tv/mixer_drv.c
@@ -461,7 +461,7 @@ static struct platform_driver mxr_driver __refdata = {
static int __init mxr_init(void)
{
int i, ret;
- static const char banner[] __initdata = KERN_INFO
+ static const char banner[] __initconst = KERN_INFO
"Samsung TV Mixer driver, "
"(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
printk(banner);
diff --git a/drivers/media/video/s5p-tv/mixer_reg.c b/drivers/media/video/s5p-tv/mixer_reg.c
index 4800a3cbb297..3b1670a045f4 100644
--- a/drivers/media/video/s5p-tv/mixer_reg.c
+++ b/drivers/media/video/s5p-tv/mixer_reg.c
@@ -296,21 +296,25 @@ irqreturn_t mxr_irq_handler(int irq, void *dev_data)
/* wake up process waiting for VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
+ /* toggle TOP field event if working in interlaced mode */
+ if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
+ change_bit(MXR_EVENT_TOP, &mdev->event_flags);
wake_up(&mdev->event_queue);
- }
-
- /* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
/* vsync interrupt use different bit for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
+ val &= ~MXR_INT_STATUS_VSYNC;
val |= MXR_INT_CLEAR_VSYNC;
}
+
+ /* clear interrupts */
mxr_write(mdev, MXR_INT_STATUS, val);
spin_unlock(&mdev->reg_slock);
/* leave on non-vsync event */
if (~val & MXR_INT_CLEAR_VSYNC)
return IRQ_HANDLED;
+ /* skip layer update on bottom field */
+ if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
+ return IRQ_HANDLED;
for (i = 0; i < MXR_MAX_LAYERS; ++i)
mxr_irq_layer_handle(mdev->layer[i]);
return IRQ_HANDLED;
@@ -333,6 +337,7 @@ void mxr_reg_streamon(struct mxr_device *mdev)
/* start MIXER */
mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+ set_bit(MXR_EVENT_TOP, &mdev->event_flags);
spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
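In interlaced modes the mixer raises VSYNC once per field; the handler above toggles MXR_EVENT_TOP on every such interrupt and only refreshes layer addresses on the top field, while mxr_reg_streamon() primes the bit so the first field after start counts as top. A condensed, illustrative sketch of that flow (not actual driver code):

	/* per-VSYNC, assuming 'top' mirrors MXR_EVENT_TOP */
	if (interlaced)
		top = !top;		/* change_bit(MXR_EVENT_TOP, ...) */
	clear_vsync_status();
	if (!top)
		return IRQ_HANDLED;	/* skip layer update on bottom field */
	for (i = 0; i < MXR_MAX_LAYERS; ++i)
		mxr_irq_layer_handle(mdev->layer[i]);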
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index f7ca5cc143c6..33fde2a763ec 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -140,7 +140,7 @@ fail:
return ret;
}
-void __devexit mxr_release_video(struct mxr_device *mdev)
+void mxr_release_video(struct mxr_device *mdev)
{
int i;
@@ -853,8 +853,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
*nplanes = fmt->num_subframes;
for (i = 0; i < fmt->num_subframes; ++i) {
alloc_ctxs[i] = layer->mdev->alloc_ctx;
- sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
- mxr_dbg(mdev, "size[%d] = %08lx\n", i, sizes[i]);
+ sizes[i] = planes[i].sizeimage;
+ mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
}
if (*nbuffers == 0)
@@ -1069,6 +1069,10 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);
video_set_drvdata(&layer->vfd, layer);
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &layer->vfd.flags);
layer->vfd.lock = &layer->mutex;
layer->vfd.v4l2_dev = &mdev->v4l2_dev;
diff --git a/drivers/media/video/s5p-tv/regs-hdmi.h b/drivers/media/video/s5p-tv/regs-hdmi.h
index 33247d13e4c0..a889d1f57f28 100644
--- a/drivers/media/video/s5p-tv/regs-hdmi.h
+++ b/drivers/media/video/s5p-tv/regs-hdmi.h
@@ -140,6 +140,7 @@
#define HDMI_MODE_MASK (3 << 0)
/* HDMI_TG_CMD */
+#define HDMI_TG_FIELD_EN (1 << 1)
#define HDMI_TG_EN (1 << 0)
#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 53aae5968ffb..bc08f1dbc293 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5080,6 +5080,36 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0200000,
},
},
+ [SAA7134_BOARD_ASUSTeK_PS3_100] = {
+ .name = "Asus My Cinema PS3-100",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tuner_config = 2,
+ .gpiomask = 1 << 21,
+ .mpeg = SAA7134_MPEG_DVB,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 1,
+ .amux = TV,
+ .tv = 1,
+ }, {
+ .name = name_comp,
+ .vmux = 0,
+ .amux = LINE2,
+ }, {
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE2,
+ } },
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ .gpio = 0x0200000,
+ },
+ },
[SAA7134_BOARD_REAL_ANGEL_220] = {
.name = "Zogis Real Angel 220",
.audio_clock = 0x00187de7,
@@ -6877,6 +6907,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.driver_data = SAA7134_BOARD_ASUSTeK_TIGER_3IN1,
}, {
.vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x1043,
+ .subdevice = 0x48cd,
+ .driver_data = SAA7134_BOARD_ASUSTeK_PS3_100,
+ }, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
.subvendor = 0x17de,
.subdevice = 0x7128,
@@ -7347,6 +7383,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_KWORLD_TERMINATOR:
case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
case SAA7134_BOARD_FLYDVBT_LR301:
+ case SAA7134_BOARD_ASUSTeK_PS3_100:
case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
case SAA7134_BOARD_ASUSTeK_P7131_ANALOG:
@@ -7811,6 +7848,14 @@ int saa7134_board_init2(struct saa7134_dev *dev)
i2c_transfer(&dev->i2c_adap, &msg, 1);
break;
}
+ case SAA7134_BOARD_ASUSTeK_PS3_100:
+ {
+ u8 data[] = { 0x3c, 0x33, 0x60};
+ struct i2c_msg msg = {.addr = 0x0b, .flags = 0, .buf = data,
+ .len = sizeof(data)};
+ i2c_transfer(&dev->i2c_adap, &msg, 1);
+ break;
+ }
case SAA7134_BOARD_FLYDVB_TRIO:
{
u8 temp = 0;
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index aaa5c97a7216..5dfd826d734e 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -881,6 +881,20 @@ static struct tda1004x_config asus_tiger_3in1_config = {
.request_firmware = philips_tda1004x_request_firmware
};
+static struct tda1004x_config asus_ps3_100_config = {
+ .demod_address = 0x0b,
+ .invert = 1,
+ .invert_oclk = 0,
+ .xtal_freq = TDA10046_XTAL_16M,
+ .agc_config = TDA10046_AGC_TDA827X,
+ .gpio_config = TDA10046_GP11_I,
+ .if_freq = TDA10046_FREQ_045,
+ .i2c_gate = 0x4b,
+ .tuner_address = 0x61,
+ .antenna_switch = 1,
+ .request_firmware = philips_tda1004x_request_firmware
+};
+
/* ------------------------------------------------------------------
* special case: this card uses saa713x GPIO22 for the mode switch
*/
@@ -1647,6 +1661,31 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, 0, 0) == NULL) {
wprintk("%s: Asus Tiger 3in1, no lnbp21"
" found!\n", __func__);
+ goto dettach_frontend;
+ }
+ }
+ }
+ break;
+ case SAA7134_BOARD_ASUSTeK_PS3_100:
+ if (!use_frontend) { /* terrestrial */
+ if (configure_tda827x_fe(dev, &asus_ps3_100_config,
+ &tda827x_cfg_2) < 0)
+ goto dettach_frontend;
+ } else { /* satellite */
+ fe0->dvb.frontend = dvb_attach(tda10086_attach,
+ &flydvbs, &dev->i2c_adap);
+ if (fe0->dvb.frontend) {
+ if (dvb_attach(tda826x_attach,
+ fe0->dvb.frontend, 0x60,
+ &dev->i2c_adap, 0) == NULL) {
+ wprintk("%s: Asus My Cinema PS3-100, no "
+ "tda826x found!\n", __func__);
+ goto dettach_frontend;
+ }
+ if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, 0, 0) == NULL) {
+ wprintk("%s: Asus My Cinema PS3-100, no lnbp21"
+ " found!\n", __func__);
goto dettach_frontend;
}
}
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 48d2878699b7..05c6e217d8a7 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -753,6 +753,13 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keycode = 0xffff;
raw_decode = true;
break;
+ case SAA7134_BOARD_ASUSTeK_PS3_100:
+ ir_codes = RC_MAP_ASUS_PS3_100;
+ mask_keydown = 0x0040000;
+ mask_keyup = 0x0040000;
+ mask_keycode = 0xffff;
+ raw_decode = true;
+ break;
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
ir_codes = RC_MAP_ENCORE_ENLTV;
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 417034eb6ad2..6de10b1e7251 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -2036,7 +2036,7 @@ static int saa7134_s_tuner(struct file *file, void *priv,
mode = dev->thread.mode;
if (UNSET == mode) {
rx = saa7134_tvaudio_getstereo(dev);
- mode = saa7134_tvaudio_rx2mode(t->rxsubchans);
+ mode = saa7134_tvaudio_rx2mode(rx);
}
if (mode != t->audmode)
dev->thread.mode = t->audmode;
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index f625060e6a0f..89c8333736a2 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -332,6 +332,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_BEHOLD_503FM 187
#define SAA7134_BOARD_SENSORAY811_911 188
#define SAA7134_BOARD_KWORLD_PC150U 189
+#define SAA7134_BOARD_ASUSTeK_PS3_100 190
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
index 273cf807401c..d8e6c8f14079 100644
--- a/drivers/media/video/saa7164/saa7164-vbi.c
+++ b/drivers/media/video/saa7164/saa7164-vbi.c
@@ -952,7 +952,7 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_vbi_stop_port(port);
- if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ if (result != SAA_OK) {
printk(KERN_ERR "%s() pause/forced stop transition "
"failed, res = 0x%x\n", __func__, result);
}
@@ -971,7 +971,7 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
/* Stop the hardware, regardless */
result = saa7164_vbi_acquire_port(port);
result = saa7164_vbi_stop_port(port);
- if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
+ if (result != SAA_OK) {
printk(KERN_ERR "%s() run/forced stop transition "
"failed, res = 0x%x\n", __func__, result);
}
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 742b34103b5d..8d120e3baf70 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -611,11 +611,6 @@ extern unsigned int saa_debug;
printk(KERN_WARNING "%s: " fmt, dev->name, ## arg);\
} while (0)
-#define log_err(fmt, arg...)\
- do { \
- printk(KERN_ERROR "%s: " fmt, dev->name, ## arg);\
- } while (0)
-
#define saa7164_readl(reg) readl(dev->lmmio + ((reg) >> 2))
#define saa7164_writel(reg, value) writel((value), dev->lmmio + ((reg) >> 2))
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 424dfacd263a..0baaf94db7e0 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -210,27 +210,33 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- int bytes_per_line;
- unsigned int height;
if (fmt) {
const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
fmt->fmt.pix.pixelformat);
+ unsigned int bytes_per_line;
+ int ret;
+
if (!xlate)
return -EINVAL;
- bytes_per_line = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
- xlate->host_fmt);
- height = fmt->fmt.pix.height;
+
+ ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+ fmt->fmt.pix.height);
+ if (ret < 0)
+ return ret;
+
+ sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
} else {
/* Called from VIDIOC_REQBUFS or in compatibility mode */
- bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- height = icd->user_height;
+ sizes[0] = icd->sizeimage;
}
- if (bytes_per_line < 0)
- return bytes_per_line;
-
- sizes[0] = bytes_per_line * height;
alloc_ctxs[0] = pcdev->alloc_ctx;
@@ -336,21 +342,15 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
ceu_write(pcdev, top1, phys_addr_top);
if (V4L2_FIELD_NONE != pcdev->field) {
- if (planar)
- phys_addr_bottom = phys_addr_top + icd->user_width;
- else
- phys_addr_bottom = phys_addr_top +
- soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
+ phys_addr_bottom = phys_addr_top + icd->bytesperline;
ceu_write(pcdev, bottom1, phys_addr_bottom);
}
if (planar) {
- phys_addr_top += icd->user_width *
- icd->user_height;
+ phys_addr_top += icd->bytesperline * icd->user_height;
ceu_write(pcdev, top2, phys_addr_top);
if (V4L2_FIELD_NONE != pcdev->field) {
- phys_addr_bottom = phys_addr_top + icd->user_width;
+ phys_addr_bottom = phys_addr_top + icd->bytesperline;
ceu_write(pcdev, bottom2, phys_addr_bottom);
}
}
@@ -377,13 +377,8 @@ static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
unsigned long size;
- int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
- icd->current_fmt->host_fmt);
- if (bytes_per_line < 0)
- goto error;
-
- size = icd->user_height * bytes_per_line;
+ size = icd->sizeimage;
if (vb2_plane_size(vb, 0) < size) {
dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
@@ -682,10 +677,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
in_width *= 2;
left_offset *= 2;
}
- cdwdr_width = width;
} else {
- int bytes_per_line = soc_mbus_bytes_per_line(width,
- icd->current_fmt->host_fmt);
unsigned int w_factor;
switch (icd->current_fmt->host_fmt->packing) {
@@ -698,13 +690,10 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
in_width = cam->width * w_factor;
left_offset *= w_factor;
-
- if (bytes_per_line < 0)
- cdwdr_width = width;
- else
- cdwdr_width = bytes_per_line;
}
+ cdwdr_width = icd->bytesperline;
+
height = icd->user_height;
in_height = cam->height;
if (V4L2_FIELD_NONE != pcdev->field) {
@@ -881,11 +870,13 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
- value |= pcdev->is_16bit ? 1 << 12 : 0;
- /* CSI2 mode */
- if (pcdev->pdata->csi2)
+ if (pcdev->pdata->csi2) /* CSI2 mode */
value |= 3 << 12;
+ else if (pcdev->is_16bit)
+ value |= 1 << 12;
+ else if (pcdev->pdata->flags & SH_CEU_FLAG_LOWER_8BIT)
+ value |= 2 << 12;
ceu_write(pcdev, CAMCR, value);
@@ -964,24 +955,28 @@ static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
}, {
.fourcc = V4L2_PIX_FMT_NV21,
.name = "NV21",
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.name = "NV16",
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.name = "NV61",
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
},
};
@@ -1845,6 +1840,8 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
return 0;
}
+#define CEU_CHDW_MAX 8188U /* Maximum line stride */
+
static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
@@ -1863,8 +1860,12 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(icd->parent, "Format %x not found\n", pixfmt);
- return -EINVAL;
+ xlate = icd->current_fmt;
+ dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
+ pixfmt, xlate->host_fmt->fourcc);
+ pixfmt = xlate->host_fmt->fourcc;
+ pix->pixelformat = pixfmt;
+ pix->colorspace = icd->colorspace;
}
/* FIXME: calculate using depth and bus width */
@@ -1923,10 +1924,20 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
pix->width = width;
if (mf.height > height)
pix->height = height;
+
+ pix->bytesperline = max(pix->bytesperline, pix->width);
+ pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
+ pix->bytesperline &= ~3;
+ break;
+
+ default:
+ /* Configurable stride isn't supported in pass-through mode. */
+ pix->bytesperline = 0;
}
pix->width &= ~3;
pix->height &= ~3;
+ pix->sizeimage = 0;
dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
__func__, ret, pix->pixelformat, pix->width, pix->height);
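With SOCAM_HOST_CAP_STRIDE advertised in the probe hunk below, a user-supplied bytesperline is honoured as long as it is at least the image width, does not exceed the CEU's 8188-byte stride register limit, and stays 4-byte aligned, which is what the max/min/mask sequence above implements. A small sketch of the same clamp, with a hypothetical helper name:

	/* Illustration only; mirrors the bytesperline handling above. */
	static u32 ceu_clamp_stride(u32 requested, u32 line_width)
	{
		u32 stride = max(requested, line_width);

		stride = min(stride, 8188U);	/* CEU_CHDW_MAX */
		return stride & ~3;		/* keep 4-byte alignment */
	}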
@@ -2145,6 +2156,7 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->ici.nr = pdev->id;
pcdev->ici.drv_name = dev_name(&pdev->dev);
pcdev->ici.ops = &sh_mobile_ceu_host_ops;
+ pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(pcdev->alloc_ctx)) {
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index 9644bd861abc..8fd1874382c6 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -1390,6 +1390,10 @@ static int __devinit sh_vou_probe(struct platform_device *pdev)
vdev->v4l2_dev = &vou_dev->v4l2_dev;
vdev->release = video_device_release;
vdev->lock = &vou_dev->fop_lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags);
vou_dev->vdev = vdev;
video_set_drvdata(vdev, vou_dev);
diff --git a/drivers/media/video/smiapp-pll.c b/drivers/media/video/smiapp-pll.c
new file mode 100644
index 000000000000..a2e41a21dc65
--- /dev/null
+++ b/drivers/media/video/smiapp-pll.c
@@ -0,0 +1,418 @@
+/*
+ * drivers/media/video/smiapp-pll.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/gcd.h>
+#include <linux/lcm.h>
+#include <linux/module.h>
+
+#include "smiapp-pll.h"
+
+/* Return an even number or one. */
+static inline uint32_t clk_div_even(uint32_t a)
+{
+ return max_t(uint32_t, 1, a & ~1);
+}
+
+/* Return an even number or one, rounding up. */
+static inline uint32_t clk_div_even_up(uint32_t a)
+{
+ if (a == 1)
+ return 1;
+ return (a + 1) & ~1;
+}
+
+static inline uint32_t is_one_or_even(uint32_t a)
+{
+ if (a == 1)
+ return 1;
+ if (a & 1)
+ return 0;
+
+ return 1;
+}
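The two helpers above differ only in the rounding direction when the argument is odd; both keep 1 as a legal divider. For illustration:

	/* clk_div_even(5)    == 4	clk_div_even(1)    == 1
	 * clk_div_even_up(5) == 6	clk_div_even_up(1) == 1 */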
+
+static int bounds_check(struct device *dev, uint32_t val,
+ uint32_t min, uint32_t max, char *str)
+{
+ if (val >= min && val <= max)
+ return 0;
+
+ dev_warn(dev, "%s out of bounds: %d (%d--%d)\n", str, val, min, max);
+
+ return -EINVAL;
+}
+
+static void print_pll(struct device *dev, struct smiapp_pll *pll)
+{
+ dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div);
+ dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier);
+ if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
+ dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
+ dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
+ }
+ dev_dbg(dev, "vt_sys_clk_div \t%d\n", pll->vt_sys_clk_div);
+ dev_dbg(dev, "vt_pix_clk_div \t%d\n", pll->vt_pix_clk_div);
+
+ dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
+ dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
+ dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
+ if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
+ dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
+ pll->op_sys_clk_freq_hz);
+ dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
+ pll->op_pix_clk_freq_hz);
+ }
+ dev_dbg(dev, "vt_sys_clk_freq_hz \t%d\n", pll->vt_sys_clk_freq_hz);
+ dev_dbg(dev, "vt_pix_clk_freq_hz \t%d\n", pll->vt_pix_clk_freq_hz);
+}
+
+int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll)
+{
+ uint32_t sys_div;
+ uint32_t best_pix_div = INT_MAX >> 1;
+ uint32_t vt_op_binning_div;
+ uint32_t lane_op_clock_ratio;
+ uint32_t mul, div;
+ uint32_t more_mul_min, more_mul_max;
+ uint32_t more_mul_factor;
+ uint32_t min_vt_div, max_vt_div, vt_div;
+ uint32_t min_sys_div, max_sys_div;
+ unsigned int i;
+ int rval;
+
+ if (pll->flags & SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE)
+ lane_op_clock_ratio = pll->lanes;
+ else
+ lane_op_clock_ratio = 1;
+ dev_dbg(dev, "lane_op_clock_ratio: %d\n", lane_op_clock_ratio);
+
+ dev_dbg(dev, "binning: %dx%d\n", pll->binning_horizontal,
+ pll->binning_vertical);
+
+ /* CSI transfers 2 bits per clock per lane; thus times 2 */
+ pll->pll_op_clk_freq_hz = pll->link_freq * 2
+ * (pll->lanes / lane_op_clock_ratio);
+
+ /* Figure out limits for pre-pll divider based on extclk */
+ dev_dbg(dev, "min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+ limits->max_pre_pll_clk_div =
+ min_t(uint16_t, limits->max_pre_pll_clk_div,
+ clk_div_even(pll->ext_clk_freq_hz /
+ limits->min_pll_ip_freq_hz));
+ limits->min_pre_pll_clk_div =
+ max_t(uint16_t, limits->min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(pll->ext_clk_freq_hz,
+ limits->max_pll_ip_freq_hz)));
+ dev_dbg(dev, "pre-pll check: min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+
+ i = gcd(pll->pll_op_clk_freq_hz, pll->ext_clk_freq_hz);
+ mul = div_u64(pll->pll_op_clk_freq_hz, i);
+ div = pll->ext_clk_freq_hz / i;
+ dev_dbg(dev, "mul %d / div %d\n", mul, div);
+
+ limits->min_pre_pll_clk_div =
+ max_t(uint16_t, limits->min_pre_pll_clk_div,
+ clk_div_even_up(
+ DIV_ROUND_UP(mul * pll->ext_clk_freq_hz,
+ limits->max_pll_op_freq_hz)));
+ dev_dbg(dev, "pll_op check: min / max pre_pll_clk_div: %d / %d\n",
+ limits->min_pre_pll_clk_div, limits->max_pre_pll_clk_div);
+
+ if (limits->min_pre_pll_clk_div > limits->max_pre_pll_clk_div) {
+ dev_err(dev, "unable to compute pre_pll divisor\n");
+ return -EINVAL;
+ }
+
+ pll->pre_pll_clk_div = limits->min_pre_pll_clk_div;
+
+ /*
+ * Get pre_pll_clk_div so that our pll_op_clk_freq_hz won't be
+ * too high.
+ */
+ dev_dbg(dev, "pre_pll_clk_div %d\n", pll->pre_pll_clk_div);
+
+ /* Don't go above max pll multiplier. */
+ more_mul_max = limits->max_pll_multiplier / mul;
+ dev_dbg(dev, "more_mul_max: max_pll_multiplier check: %d\n",
+ more_mul_max);
+ /* Don't go above max pll op frequency. */
+ more_mul_max =
+ min_t(int,
+ more_mul_max,
+ limits->max_pll_op_freq_hz
+ / (pll->ext_clk_freq_hz / pll->pre_pll_clk_div * mul));
+ dev_dbg(dev, "more_mul_max: max_pll_op_freq_hz check: %d\n",
+ more_mul_max);
+ /* Don't go above the division capability of op sys clock divider. */
+ more_mul_max = min(more_mul_max,
+ limits->max_op_sys_clk_div * pll->pre_pll_clk_div
+ / div);
+ dev_dbg(dev, "more_mul_max: max_op_sys_clk_div check: %d\n",
+ more_mul_max);
+ /* Ensure we won't go above min_pll_multiplier. */
+ more_mul_max = min(more_mul_max,
+ DIV_ROUND_UP(limits->max_pll_multiplier, mul));
+ dev_dbg(dev, "more_mul_max: min_pll_multiplier check: %d\n",
+ more_mul_max);
+
+ /* Ensure we won't go below min_pll_op_freq_hz. */
+ more_mul_min = DIV_ROUND_UP(limits->min_pll_op_freq_hz,
+ pll->ext_clk_freq_hz / pll->pre_pll_clk_div
+ * mul);
+ dev_dbg(dev, "more_mul_min: min_pll_op_freq_hz check: %d\n",
+ more_mul_min);
+ /* Ensure we won't go below min_pll_multiplier. */
+ more_mul_min = max(more_mul_min,
+ DIV_ROUND_UP(limits->min_pll_multiplier, mul));
+ dev_dbg(dev, "more_mul_min: min_pll_multiplier check: %d\n",
+ more_mul_min);
+
+ if (more_mul_min > more_mul_max) {
+ dev_warn(dev,
+ "unable to compute more_mul_min and more_mul_max");
+ return -EINVAL;
+ }
+
+ more_mul_factor = lcm(div, pll->pre_pll_clk_div) / div;
+ dev_dbg(dev, "more_mul_factor: %d\n", more_mul_factor);
+ more_mul_factor = lcm(more_mul_factor, limits->min_op_sys_clk_div);
+ dev_dbg(dev, "more_mul_factor: min_op_sys_clk_div: %d\n",
+ more_mul_factor);
+ i = roundup(more_mul_min, more_mul_factor);
+ if (!is_one_or_even(i))
+ i <<= 1;
+
+ dev_dbg(dev, "final more_mul: %d\n", i);
+ if (i > more_mul_max) {
+ dev_warn(dev, "final more_mul is bad, max %d", more_mul_max);
+ return -EINVAL;
+ }
+
+ pll->pll_multiplier = mul * i;
+ pll->op_sys_clk_div = div * i / pll->pre_pll_clk_div;
+ dev_dbg(dev, "op_sys_clk_div: %d\n", pll->op_sys_clk_div);
+
+ pll->pll_ip_clk_freq_hz = pll->ext_clk_freq_hz
+ / pll->pre_pll_clk_div;
+
+ pll->pll_op_clk_freq_hz = pll->pll_ip_clk_freq_hz
+ * pll->pll_multiplier;
+
+ /* Derive pll_op_clk_freq_hz. */
+ pll->op_sys_clk_freq_hz =
+ pll->pll_op_clk_freq_hz / pll->op_sys_clk_div;
+
+ pll->op_pix_clk_div = pll->bits_per_pixel;
+ dev_dbg(dev, "op_pix_clk_div: %d\n", pll->op_pix_clk_div);
+
+ pll->op_pix_clk_freq_hz =
+ pll->op_sys_clk_freq_hz / pll->op_pix_clk_div;
+
+ /*
+ * Some sensors perform analogue binning and some do this
+ * digitally. The ones doing it digitally can roughly be
+ * identified using this check. Digital binning should run at a
+ * higher clock rate, so a smaller divisor is used on the video
+ * timing side.
+ */
+ if (limits->min_line_length_pck_bin > limits->min_line_length_pck
+ / pll->binning_horizontal)
+ vt_op_binning_div = pll->binning_horizontal;
+ else
+ vt_op_binning_div = 1;
+ dev_dbg(dev, "vt_op_binning_div: %d\n", vt_op_binning_div);
+
+ /*
+ * Profile 2 supports vt_pix_clk_div E [4, 10]
+ *
+ * Horizontal binning can be used as a base for difference in
+ * divisors. One must make sure that horizontal blanking is
+ * enough to accommodate the CSI-2 sync codes.
+ *
+ * Take scaling factor into account as well.
+ *
+ * Find absolute limits for the factor of vt divider.
+ */
+ dev_dbg(dev, "scale_m: %d\n", pll->scale_m);
+ min_vt_div = DIV_ROUND_UP(pll->op_pix_clk_div * pll->op_sys_clk_div
+ * pll->scale_n,
+ lane_op_clock_ratio * vt_op_binning_div
+ * pll->scale_m);
+
+ /* Find smallest and biggest allowed vt divisor. */
+ dev_dbg(dev, "min_vt_div: %d\n", min_vt_div);
+ min_vt_div = max(min_vt_div,
+ DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
+ limits->max_vt_pix_clk_freq_hz));
+ dev_dbg(dev, "min_vt_div: max_vt_pix_clk_freq_hz: %d\n",
+ min_vt_div);
+ min_vt_div = max_t(uint32_t, min_vt_div,
+ limits->min_vt_pix_clk_div
+ * limits->min_vt_sys_clk_div);
+ dev_dbg(dev, "min_vt_div: min_vt_clk_div: %d\n", min_vt_div);
+
+ max_vt_div = limits->max_vt_sys_clk_div * limits->max_vt_pix_clk_div;
+ dev_dbg(dev, "max_vt_div: %d\n", max_vt_div);
+ max_vt_div = min(max_vt_div,
+ DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
+ limits->min_vt_pix_clk_freq_hz));
+ dev_dbg(dev, "max_vt_div: min_vt_pix_clk_freq_hz: %d\n",
+ max_vt_div);
+
+ /*
+ * Find limits for sys_clk_div. Not all values are possible
+ * with all values of pix_clk_div.
+ */
+ min_sys_div = limits->min_vt_sys_clk_div;
+ dev_dbg(dev, "min_sys_div: %d\n", min_sys_div);
+ min_sys_div = max(min_sys_div,
+ DIV_ROUND_UP(min_vt_div,
+ limits->max_vt_pix_clk_div));
+ dev_dbg(dev, "min_sys_div: max_vt_pix_clk_div: %d\n", min_sys_div);
+ min_sys_div = max(min_sys_div,
+ pll->pll_op_clk_freq_hz
+ / limits->max_vt_sys_clk_freq_hz);
+ dev_dbg(dev, "min_sys_div: max_pll_op_clk_freq_hz: %d\n", min_sys_div);
+ min_sys_div = clk_div_even_up(min_sys_div);
+ dev_dbg(dev, "min_sys_div: one or even: %d\n", min_sys_div);
+
+ max_sys_div = limits->max_vt_sys_clk_div;
+ dev_dbg(dev, "max_sys_div: %d\n", max_sys_div);
+ max_sys_div = min(max_sys_div,
+ DIV_ROUND_UP(max_vt_div,
+ limits->min_vt_pix_clk_div));
+ dev_dbg(dev, "max_sys_div: min_vt_pix_clk_div: %d\n", max_sys_div);
+ max_sys_div = min(max_sys_div,
+ DIV_ROUND_UP(pll->pll_op_clk_freq_hz,
+ limits->min_vt_pix_clk_freq_hz));
+ dev_dbg(dev, "max_sys_div: min_vt_pix_clk_freq_hz: %d\n", max_sys_div);
+
+ /*
+ * Find pix_div such that a legal pix_div * sys_div results
+ * in a value that is not smaller than the desired vt
+ * divisor.
+ */
+ for (vt_div = min_vt_div; vt_div <= max_vt_div;
+ vt_div += 2 - (vt_div & 1)) {
+ for (sys_div = min_sys_div;
+ sys_div <= max_sys_div;
+ sys_div += 2 - (sys_div & 1)) {
+ int pix_div = DIV_ROUND_UP(vt_div, sys_div);
+
+ if (pix_div < limits->min_vt_pix_clk_div
+ || pix_div > limits->max_vt_pix_clk_div) {
+ dev_dbg(dev,
+ "pix_div %d too small or too big (%d--%d)\n",
+ pix_div,
+ limits->min_vt_pix_clk_div,
+ limits->max_vt_pix_clk_div);
+ continue;
+ }
+
+ /* Check if this one is better. */
+ if (pix_div * sys_div
+ <= roundup(min_vt_div, best_pix_div))
+ best_pix_div = pix_div;
+ }
+ if (best_pix_div < INT_MAX >> 1)
+ break;
+ }
+
+ pll->vt_sys_clk_div = DIV_ROUND_UP(min_vt_div, best_pix_div);
+ pll->vt_pix_clk_div = best_pix_div;
+
+ pll->vt_sys_clk_freq_hz =
+ pll->pll_op_clk_freq_hz / pll->vt_sys_clk_div;
+ pll->vt_pix_clk_freq_hz =
+ pll->vt_sys_clk_freq_hz / pll->vt_pix_clk_div;
+
+ pll->pixel_rate_csi =
+ pll->op_pix_clk_freq_hz * lane_op_clock_ratio;
+
+ print_pll(dev, pll);
+
+ rval = bounds_check(dev, pll->pre_pll_clk_div,
+ limits->min_pre_pll_clk_div,
+ limits->max_pre_pll_clk_div, "pre_pll_clk_div");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->pll_ip_clk_freq_hz,
+ limits->min_pll_ip_freq_hz, limits->max_pll_ip_freq_hz,
+ "pll_ip_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->pll_multiplier,
+ limits->min_pll_multiplier, limits->max_pll_multiplier,
+ "pll_multiplier");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->pll_op_clk_freq_hz,
+ limits->min_pll_op_freq_hz, limits->max_pll_op_freq_hz,
+ "pll_op_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->op_sys_clk_div,
+ limits->min_op_sys_clk_div, limits->max_op_sys_clk_div,
+ "op_sys_clk_div");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->op_pix_clk_div,
+ limits->min_op_pix_clk_div, limits->max_op_pix_clk_div,
+ "op_pix_clk_div");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->op_sys_clk_freq_hz,
+ limits->min_op_sys_clk_freq_hz,
+ limits->max_op_sys_clk_freq_hz,
+ "op_sys_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->op_pix_clk_freq_hz,
+ limits->min_op_pix_clk_freq_hz,
+ limits->max_op_pix_clk_freq_hz,
+ "op_pix_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->vt_sys_clk_freq_hz,
+ limits->min_vt_sys_clk_freq_hz,
+ limits->max_vt_sys_clk_freq_hz,
+ "vt_sys_clk_freq_hz");
+ if (!rval)
+ rval = bounds_check(
+ dev, pll->vt_pix_clk_freq_hz,
+ limits->min_vt_pix_clk_freq_hz,
+ limits->max_vt_pix_clk_freq_hz,
+ "vt_pix_clk_freq_hz");
+
+ return rval;
+}
+EXPORT_SYMBOL_GPL(smiapp_pll_calculate);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_DESCRIPTION("Generic SMIA/SMIA++ PLL calculator");
+MODULE_LICENSE("GPL");
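A minimal caller sketch for the exported function, assuming the limits have been filled from the sensor's reported limit registers in the same way smiapp_pll_update() in smiapp-core.c does; the numeric values below are examples, not taken from this patch:

	struct smiapp_pll_limits lim;	/* populate from SMIAPP_LIMIT_* values */
	struct smiapp_pll pll = {
		.lanes			= 2,
		.binning_horizontal	= 1,
		.binning_vertical	= 1,
		.scale_m		= 16,
		.scale_n		= 16,
		.bits_per_pixel		= 10,
		.link_freq		= 480000000,
		.ext_clk_freq_hz	= 9600000,
	};

	if (!smiapp_pll_calculate(dev, &lim, &pll)) {
		/* pll.vt_pix_clk_freq_hz drives the pixel-array timing,
		 * pll.pixel_rate_csi the CSI-2 output rate; the *_div
		 * fields are ready to be written to the PLL registers. */
	}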
diff --git a/drivers/media/video/smiapp-pll.h b/drivers/media/video/smiapp-pll.h
new file mode 100644
index 000000000000..9eab63f23afb
--- /dev/null
+++ b/drivers/media/video/smiapp-pll.h
@@ -0,0 +1,103 @@
+/*
+ * drivers/media/video/smiapp-pll.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef SMIAPP_PLL_H
+#define SMIAPP_PLL_H
+
+#include <linux/device.h>
+
+struct smiapp_pll {
+ uint8_t lanes;
+ uint8_t binning_horizontal;
+ uint8_t binning_vertical;
+ uint8_t scale_m;
+ uint8_t scale_n;
+ uint8_t bits_per_pixel;
+ uint16_t flags;
+ uint32_t link_freq;
+
+ uint16_t pre_pll_clk_div;
+ uint16_t pll_multiplier;
+ uint16_t op_sys_clk_div;
+ uint16_t op_pix_clk_div;
+ uint16_t vt_sys_clk_div;
+ uint16_t vt_pix_clk_div;
+
+ uint32_t ext_clk_freq_hz;
+ uint32_t pll_ip_clk_freq_hz;
+ uint32_t pll_op_clk_freq_hz;
+ uint32_t op_sys_clk_freq_hz;
+ uint32_t op_pix_clk_freq_hz;
+ uint32_t vt_sys_clk_freq_hz;
+ uint32_t vt_pix_clk_freq_hz;
+
+ uint32_t pixel_rate_csi;
+};
+
+struct smiapp_pll_limits {
+ /* Strict PLL limits */
+ uint32_t min_ext_clk_freq_hz;
+ uint32_t max_ext_clk_freq_hz;
+ uint16_t min_pre_pll_clk_div;
+ uint16_t max_pre_pll_clk_div;
+ uint32_t min_pll_ip_freq_hz;
+ uint32_t max_pll_ip_freq_hz;
+ uint16_t min_pll_multiplier;
+ uint16_t max_pll_multiplier;
+ uint32_t min_pll_op_freq_hz;
+ uint32_t max_pll_op_freq_hz;
+
+ uint16_t min_vt_sys_clk_div;
+ uint16_t max_vt_sys_clk_div;
+ uint32_t min_vt_sys_clk_freq_hz;
+ uint32_t max_vt_sys_clk_freq_hz;
+ uint16_t min_vt_pix_clk_div;
+ uint16_t max_vt_pix_clk_div;
+ uint32_t min_vt_pix_clk_freq_hz;
+ uint32_t max_vt_pix_clk_freq_hz;
+
+ uint16_t min_op_sys_clk_div;
+ uint16_t max_op_sys_clk_div;
+ uint32_t min_op_sys_clk_freq_hz;
+ uint32_t max_op_sys_clk_freq_hz;
+ uint16_t min_op_pix_clk_div;
+ uint16_t max_op_pix_clk_div;
+ uint32_t min_op_pix_clk_freq_hz;
+ uint32_t max_op_pix_clk_freq_hz;
+
+ /* Other relevant limits */
+ uint32_t min_line_length_pck_bin;
+ uint32_t min_line_length_pck;
+};
+
+/* op pix clock is for all lanes in total normally */
+#define SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
+#define SMIAPP_PLL_FLAG_NO_OP_CLOCKS (1 << 1)
+
+struct device;
+
+int smiapp_pll_calculate(struct device *dev, struct smiapp_pll_limits *limits,
+ struct smiapp_pll *pll);
+
+#endif /* SMIAPP_PLL_H */
diff --git a/drivers/media/video/smiapp/Kconfig b/drivers/media/video/smiapp/Kconfig
new file mode 100644
index 000000000000..fb99ff18be07
--- /dev/null
+++ b/drivers/media/video/smiapp/Kconfig
@@ -0,0 +1,6 @@
+config VIDEO_SMIAPP
+ tristate "SMIA++/SMIA sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
+ select VIDEO_SMIAPP_PLL
+ ---help---
+ This is a generic driver for SMIA++/SMIA camera modules.
diff --git a/drivers/media/video/smiapp/Makefile b/drivers/media/video/smiapp/Makefile
new file mode 100644
index 000000000000..36b0cfa2c541
--- /dev/null
+++ b/drivers/media/video/smiapp/Makefile
@@ -0,0 +1,5 @@
+smiapp-objs += smiapp-core.o smiapp-regs.o \
+ smiapp-quirk.o smiapp-limits.o
+obj-$(CONFIG_VIDEO_SMIAPP) += smiapp.o
+
+ccflags-y += -Idrivers/media/video
diff --git a/drivers/media/video/smiapp/smiapp-core.c b/drivers/media/video/smiapp/smiapp-core.c
new file mode 100644
index 000000000000..9cf5bda35fbe
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-core.c
@@ -0,0 +1,2896 @@
+/*
+ * drivers/media/video/smiapp/smiapp-core.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2010--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * Based on smiapp driver by Vimarsh Zutshi
+ * Based on jt8ev1.c by Vimarsh Zutshi
+ * Based on smia-sensor.c by Tuukka Toivonen <tuukkat76@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-device.h>
+
+#include "smiapp.h"
+
+#define SMIAPP_ALIGN_DIM(dim, flags) \
+ ((flags) & V4L2_SUBDEV_SEL_FLAG_SIZE_GE \
+ ? ALIGN((dim), 2) \
+ : (dim) & ~1)
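SMIAPP_ALIGN_DIM() rounds a selection dimension to an even value: up when the caller asked for a size greater than or equal to the request (V4L2_SUBDEV_SEL_FLAG_SIZE_GE), down otherwise. For illustration:

	/* SMIAPP_ALIGN_DIM(641, V4L2_SUBDEV_SEL_FLAG_SIZE_GE) == 642
	 * SMIAPP_ALIGN_DIM(641, 0)                            == 640 */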
+
+/*
+ * smiapp_module_idents - supported camera modules
+ */
+static const struct smiapp_module_ident smiapp_module_idents[] = {
+ SMIAPP_IDENT_L(0x01, 0x022b, -1, "vs6555"),
+ SMIAPP_IDENT_L(0x01, 0x022e, -1, "vw6558"),
+ SMIAPP_IDENT_L(0x07, 0x7698, -1, "ovm7698"),
+ SMIAPP_IDENT_L(0x0b, 0x4242, -1, "smiapp-003"),
+ SMIAPP_IDENT_L(0x0c, 0x208a, -1, "tcm8330md"),
+ SMIAPP_IDENT_LQ(0x0c, 0x2134, -1, "tcm8500md", &smiapp_tcm8500md_quirk),
+ SMIAPP_IDENT_L(0x0c, 0x213e, -1, "et8en2"),
+ SMIAPP_IDENT_L(0x0c, 0x2184, -1, "tcm8580md"),
+ SMIAPP_IDENT_LQ(0x0c, 0x560f, -1, "jt8ew9", &smiapp_jt8ew9_quirk),
+ SMIAPP_IDENT_LQ(0x10, 0x4141, -1, "jt8ev1", &smiapp_jt8ev1_quirk),
+ SMIAPP_IDENT_LQ(0x10, 0x4241, -1, "imx125es", &smiapp_imx125es_quirk),
+};
+
+/*
+ *
+ * Dynamic Capability Identification
+ *
+ */
+
+static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ u32 fmt_model_type, fmt_model_subtype, ncol_desc, nrow_desc;
+ unsigned int i;
+ int rval;
+ int line_count = 0;
+ int embedded_start = -1, embedded_end = -1;
+ int image_start = 0;
+
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE,
+ &fmt_model_type);
+ if (rval)
+ return rval;
+
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_FRAME_FORMAT_MODEL_SUBTYPE,
+ &fmt_model_subtype);
+ if (rval)
+ return rval;
+
+ ncol_desc = (fmt_model_subtype
+ & SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NCOLS_MASK)
+ >> SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NCOLS_SHIFT;
+ nrow_desc = fmt_model_subtype
+ & SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NROWS_MASK;
+
+ dev_dbg(&client->dev, "format_model_type %s\n",
+ fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE
+ ? "2 byte" :
+ fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE
+ ? "4 byte" : "is simply bad");
+
+ for (i = 0; i < ncol_desc + nrow_desc; i++) {
+ u32 desc;
+ u32 pixelcode;
+ u32 pixels;
+ char *which;
+ char *what;
+
+ if (fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE) {
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(i),
+ &desc);
+ if (rval)
+ return rval;
+
+ pixelcode =
+ (desc
+ & SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_MASK)
+ >> SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_SHIFT;
+ pixels = desc & SMIAPP_FRAME_FORMAT_DESC_2_PIXELS_MASK;
+ } else if (fmt_model_type
+ == SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE) {
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(i),
+ &desc);
+ if (rval)
+ return rval;
+
+ pixelcode =
+ (desc
+ & SMIAPP_FRAME_FORMAT_DESC_4_PIXELCODE_MASK)
+ >> SMIAPP_FRAME_FORMAT_DESC_4_PIXELCODE_SHIFT;
+ pixels = desc & SMIAPP_FRAME_FORMAT_DESC_4_PIXELS_MASK;
+ } else {
+ dev_dbg(&client->dev,
+ "invalid frame format model type %d\n",
+ fmt_model_type);
+ return -EINVAL;
+ }
+
+ if (i < ncol_desc)
+ which = "columns";
+ else
+ which = "rows";
+
+ switch (pixelcode) {
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED:
+ what = "embedded";
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_DUMMY:
+ what = "dummy";
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_BLACK:
+ what = "black";
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_DARK:
+ what = "dark";
+ break;
+ case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE:
+ what = "visible";
+ break;
+ default:
+ what = "invalid";
+ dev_dbg(&client->dev, "pixelcode %d\n", pixelcode);
+ break;
+ }
+
+ dev_dbg(&client->dev, "%s pixels: %d %s\n",
+ what, pixels, which);
+
+ if (i < ncol_desc)
+ continue;
+
+ /* Handle row descriptors */
+ if (pixelcode
+ == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED) {
+ embedded_start = line_count;
+ } else {
+ if (pixelcode == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE
+ || pixels >= sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES] / 2)
+ image_start = line_count;
+ if (embedded_start != -1 && embedded_end == -1)
+ embedded_end = line_count;
+ }
+ line_count += pixels;
+ }
+
+ if (embedded_start == -1 || embedded_end == -1) {
+ embedded_start = 0;
+ embedded_end = 0;
+ }
+
+ dev_dbg(&client->dev, "embedded data from lines %d to %d\n",
+ embedded_start, embedded_end);
+ dev_dbg(&client->dev, "image data starts at line %d\n", image_start);
+
+ return 0;
+}
+
+static int smiapp_pll_configure(struct smiapp_sensor *sensor)
+{
+ struct smiapp_pll *pll = &sensor->pll;
+ int rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_VT_PIX_CLK_DIV, pll->vt_pix_clk_div);
+ if (rval < 0)
+ return rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_VT_SYS_CLK_DIV, pll->vt_sys_clk_div);
+ if (rval < 0)
+ return rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_PRE_PLL_CLK_DIV, pll->pre_pll_clk_div);
+ if (rval < 0)
+ return rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_PLL_MULTIPLIER, pll->pll_multiplier);
+ if (rval < 0)
+ return rval;
+
+ /* Lane op clock ratio does not apply here. */
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS,
+ DIV_ROUND_UP(pll->op_sys_clk_freq_hz, 1000000 / 256 / 256));
+ if (rval < 0 || sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+ return rval;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_OP_PIX_CLK_DIV, pll->op_pix_clk_div);
+ if (rval < 0)
+ return rval;
+
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_OP_SYS_CLK_DIV, pll->op_sys_clk_div);
+}
+
+static int smiapp_pll_update(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct smiapp_pll_limits lim = {
+ .min_pre_pll_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_PRE_PLL_CLK_DIV],
+ .max_pre_pll_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_PRE_PLL_CLK_DIV],
+ .min_pll_ip_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_IP_FREQ_HZ],
+ .max_pll_ip_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_IP_FREQ_HZ],
+ .min_pll_multiplier = sensor->limits[SMIAPP_LIMIT_MIN_PLL_MULTIPLIER],
+ .max_pll_multiplier = sensor->limits[SMIAPP_LIMIT_MAX_PLL_MULTIPLIER],
+ .min_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ],
+ .max_pll_op_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ],
+
+ .min_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV],
+ .max_op_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV],
+ .min_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV],
+ .max_op_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV],
+ .min_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ],
+ .max_op_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ],
+ .min_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ],
+ .max_op_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ],
+
+ .min_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV],
+ .max_vt_sys_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV],
+ .min_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV],
+ .max_vt_pix_clk_div = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV],
+ .min_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ],
+ .max_vt_sys_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ],
+ .min_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ],
+ .max_vt_pix_clk_freq_hz = sensor->limits[SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ],
+
+ .min_line_length_pck_bin = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN],
+ .min_line_length_pck = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK],
+ };
+ struct smiapp_pll *pll = &sensor->pll;
+ int rval;
+
+ memset(&sensor->pll, 0, sizeof(sensor->pll));
+
+ pll->lanes = sensor->platform_data->lanes;
+ pll->ext_clk_freq_hz = sensor->platform_data->ext_clk;
+
+ if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0) {
+ /*
+ * Fill in operational clock divisor limits from the
+ * video timing ones. On profile 0 sensors the
+ * requirements regarding them are essentially the
+ * same as on VT ones.
+ */
+ lim.min_op_sys_clk_div = lim.min_vt_sys_clk_div;
+ lim.max_op_sys_clk_div = lim.max_vt_sys_clk_div;
+ lim.min_op_pix_clk_div = lim.min_vt_pix_clk_div;
+ lim.max_op_pix_clk_div = lim.max_vt_pix_clk_div;
+ lim.min_op_sys_clk_freq_hz = lim.min_vt_sys_clk_freq_hz;
+ lim.max_op_sys_clk_freq_hz = lim.max_vt_sys_clk_freq_hz;
+ lim.min_op_pix_clk_freq_hz = lim.min_vt_pix_clk_freq_hz;
+ lim.max_op_pix_clk_freq_hz = lim.max_vt_pix_clk_freq_hz;
+ /* Profile 0 sensors have no separate OP clock branch. */
+ pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+ }
+
+ if (smiapp_needs_quirk(sensor,
+ SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
+ pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
+
+ pll->binning_horizontal = sensor->binning_horizontal;
+ pll->binning_vertical = sensor->binning_vertical;
+ pll->link_freq =
+ sensor->link_freq->qmenu_int[sensor->link_freq->val];
+ pll->scale_m = sensor->scale_m;
+ pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ pll->bits_per_pixel = sensor->csi_format->compressed;
+
+ rval = smiapp_pll_calculate(&client->dev, &lim, pll);
+ if (rval < 0)
+ return rval;
+
+ sensor->pixel_rate_parray->cur.val64 = pll->vt_pix_clk_freq_hz;
+ sensor->pixel_rate_csi->cur.val64 = pll->pixel_rate_csi;
+
+ return 0;
+}
+
+
+/*
+ *
+ * V4L2 Controls handling
+ *
+ */
+
+static void __smiapp_update_exposure_limits(struct smiapp_sensor *sensor)
+{
+ struct v4l2_ctrl *ctrl = sensor->exposure;
+ int max;
+
+ max = sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+ + sensor->vblank->val
+ - sensor->limits[SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MAX_MARGIN];
+
+ ctrl->maximum = max;
+ if (ctrl->default_value > max)
+ ctrl->default_value = max;
+ if (ctrl->val > max)
+ ctrl->val = max;
+ if (ctrl->cur.val > max)
+ ctrl->cur.val = max;
+}
+
+/*
+ * Order matters.
+ *
+ * 1. Bits-per-pixel, descending.
+ * 2. Bits-per-pixel compressed, descending.
+ * 3. Pixel order, same as in pixel_order_str. Formats for all four pixel
+ * orders must be defined.
+ */
+static const struct smiapp_csi_data_format smiapp_csi_data_formats[] = {
+ { V4L2_MBUS_FMT_SGRBG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GRBG, },
+ { V4L2_MBUS_FMT_SRGGB12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_RGGB, },
+ { V4L2_MBUS_FMT_SBGGR12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_BGGR, },
+ { V4L2_MBUS_FMT_SGBRG12_1X12, 12, 12, SMIAPP_PIXEL_ORDER_GBRG, },
+ { V4L2_MBUS_FMT_SGRBG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GRBG, },
+ { V4L2_MBUS_FMT_SRGGB10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_RGGB, },
+ { V4L2_MBUS_FMT_SBGGR10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_BGGR, },
+ { V4L2_MBUS_FMT_SGBRG10_1X10, 10, 10, SMIAPP_PIXEL_ORDER_GBRG, },
+ { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GRBG, },
+ { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_RGGB, },
+ { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_BGGR, },
+ { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, 10, 8, SMIAPP_PIXEL_ORDER_GBRG, },
+ { V4L2_MBUS_FMT_SGRBG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GRBG, },
+ { V4L2_MBUS_FMT_SRGGB8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_RGGB, },
+ { V4L2_MBUS_FMT_SBGGR8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_BGGR, },
+ { V4L2_MBUS_FMT_SGBRG8_1X8, 8, 8, SMIAPP_PIXEL_ORDER_GBRG, },
+};
+
+const char *pixel_order_str[] = { "GRBG", "RGGB", "BGGR", "GBRG" };
+
+#define to_csi_format_idx(fmt) (((unsigned long)(fmt) \
+ - (unsigned long)smiapp_csi_data_formats) \
+ / sizeof(*smiapp_csi_data_formats))
+
+static u32 smiapp_pixel_order(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int flip = 0;
+
+ if (sensor->hflip) {
+ if (sensor->hflip->val)
+ flip |= SMIAPP_IMAGE_ORIENTATION_HFLIP;
+
+ if (sensor->vflip->val)
+ flip |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
+ }
+
+ flip ^= sensor->hvflip_inv_mask;
+
+ dev_dbg(&client->dev, "flip %d\n", flip);
+ return sensor->default_pixel_order ^ flip;
+}
+
+static void smiapp_update_mbus_formats(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int csi_format_idx =
+ to_csi_format_idx(sensor->csi_format) & ~3;
+ unsigned int internal_csi_format_idx =
+ to_csi_format_idx(sensor->internal_csi_format) & ~3;
+ unsigned int pixel_order = smiapp_pixel_order(sensor);
+
+ sensor->mbus_frame_fmts =
+ sensor->default_mbus_frame_fmts << pixel_order;
+ sensor->csi_format =
+ &smiapp_csi_data_formats[csi_format_idx + pixel_order];
+ sensor->internal_csi_format =
+ &smiapp_csi_data_formats[internal_csi_format_idx
+ + pixel_order];
+
+ BUG_ON(max(internal_csi_format_idx, csi_format_idx) + pixel_order
+ >= ARRAY_SIZE(smiapp_csi_data_formats));
+ BUG_ON(min(internal_csi_format_idx, csi_format_idx) < 0);
+
+ dev_dbg(&client->dev, "new pixel order %s\n",
+ pixel_order_str[pixel_order]);
+}
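smiapp_csi_data_formats[] above is laid out in runs of four entries, one per Bayer order in pixel_order_str[], so masking an index with ~3 finds the start of its bits-per-pixel group and adding the flip-adjusted pixel order selects the matching variant. For illustration, assuming HFLIP occupies bit 0 of the orientation value as in the SMIA image orientation register:

	/* Default order GRBG (index 0) with HFLIP set gives pixel order
	 * 0 ^ 1 = 1, i.e. the RGGB entry of the same group is picked. */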
+
+static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct smiapp_sensor *sensor =
+ container_of(ctrl->handler, struct smiapp_subdev, ctrl_handler)
+ ->sensor;
+ u32 orient = 0;
+ int exposure;
+ int rval;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ return smiapp_write(
+ sensor,
+ SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL, ctrl->val);
+
+ case V4L2_CID_EXPOSURE:
+ return smiapp_write(
+ sensor,
+ SMIAPP_REG_U16_COARSE_INTEGRATION_TIME, ctrl->val);
+
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ if (sensor->streaming)
+ return -EBUSY;
+
+ if (sensor->hflip->val)
+ orient |= SMIAPP_IMAGE_ORIENTATION_HFLIP;
+
+ if (sensor->vflip->val)
+ orient |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
+
+ orient ^= sensor->hvflip_inv_mask;
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_IMAGE_ORIENTATION,
+ orient);
+ if (rval < 0)
+ return rval;
+
+ smiapp_update_mbus_formats(sensor);
+
+ return 0;
+
+ case V4L2_CID_VBLANK:
+ exposure = sensor->exposure->val;
+
+ __smiapp_update_exposure_limits(sensor);
+
+ if (exposure > sensor->exposure->maximum) {
+ sensor->exposure->val =
+ sensor->exposure->maximum;
+ rval = smiapp_set_ctrl(
+ sensor->exposure);
+ if (rval < 0)
+ return rval;
+ }
+
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_FRAME_LENGTH_LINES,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+ + ctrl->val);
+
+ case V4L2_CID_HBLANK:
+ return smiapp_write(
+ sensor, SMIAPP_REG_U16_LINE_LENGTH_PCK,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
+ + ctrl->val);
+
+ case V4L2_CID_LINK_FREQ:
+ if (sensor->streaming)
+ return -EBUSY;
+
+ return smiapp_pll_update(sensor);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops smiapp_ctrl_ops = {
+ .s_ctrl = smiapp_set_ctrl,
+};
+
+static int smiapp_init_controls(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int max;
+ int rval;
+
+ rval = v4l2_ctrl_handler_init(&sensor->pixel_array->ctrl_handler, 7);
+ if (rval)
+ return rval;
+ sensor->pixel_array->ctrl_handler.lock = &sensor->mutex;
+
+ sensor->analog_gain = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN,
+ sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN],
+ sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MAX],
+ max(sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_STEP], 1U),
+ sensor->limits[SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN]);
+
+ /* Exposure limits will be updated soon, use just something here. */
+ sensor->exposure = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 0, 1, 0);
+
+ sensor->hflip = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sensor->vflip = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ sensor->vblank = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_VBLANK, 0, 1, 1, 0);
+
+ if (sensor->vblank)
+ sensor->vblank->flags |= V4L2_CTRL_FLAG_UPDATE;
+
+ sensor->hblank = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_HBLANK, 0, 1, 1, 0);
+
+ if (sensor->hblank)
+ sensor->hblank->flags |= V4L2_CTRL_FLAG_UPDATE;
+
+ sensor->pixel_rate_parray = v4l2_ctrl_new_std(
+ &sensor->pixel_array->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0, 0, 1, 0);
+
+ if (sensor->pixel_array->ctrl_handler.error) {
+ dev_err(&client->dev,
+ "pixel array controls initialization failed (%d)\n",
+ sensor->pixel_array->ctrl_handler.error);
+ rval = sensor->pixel_array->ctrl_handler.error;
+ goto error;
+ }
+
+ sensor->pixel_array->sd.ctrl_handler =
+ &sensor->pixel_array->ctrl_handler;
+
+ v4l2_ctrl_cluster(2, &sensor->hflip);
+
+ rval = v4l2_ctrl_handler_init(&sensor->src->ctrl_handler, 0);
+ if (rval)
+ goto error;
+ sensor->src->ctrl_handler.lock = &sensor->mutex;
+
+ for (max = 0; sensor->platform_data->op_sys_clock[max + 1]; max++);
+
+ sensor->link_freq = v4l2_ctrl_new_int_menu(
+ &sensor->src->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_LINK_FREQ, max, 0,
+ sensor->platform_data->op_sys_clock);
+
+ sensor->pixel_rate_csi = v4l2_ctrl_new_std(
+ &sensor->src->ctrl_handler, &smiapp_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0, 0, 1, 0);
+
+ if (sensor->src->ctrl_handler.error) {
+ dev_err(&client->dev,
+ "src controls initialization failed (%d)\n",
+ sensor->src->ctrl_handler.error);
+ rval = sensor->src->ctrl_handler.error;
+ goto error;
+ }
+
+ sensor->src->sd.ctrl_handler =
+ &sensor->src->ctrl_handler;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&sensor->pixel_array->ctrl_handler);
+ v4l2_ctrl_handler_free(&sensor->src->ctrl_handler);
+
+ return rval;
+}
+
+static void smiapp_free_controls(struct smiapp_sensor *sensor)
+{
+ unsigned int i;
+
+ for (i = 0; i < sensor->ssds_used; i++)
+ v4l2_ctrl_handler_free(&sensor->ssds[i].ctrl_handler);
+}
+
+static int smiapp_get_limits(struct smiapp_sensor *sensor, int const *limit,
+ unsigned int n)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int i;
+ u32 val;
+ int rval;
+
+ for (i = 0; i < n; i++) {
+ rval = smiapp_read(
+ sensor, smiapp_reg_limits[limit[i]].addr, &val);
+ if (rval)
+ return rval;
+ sensor->limits[limit[i]] = val;
+ dev_dbg(&client->dev, "0x%8.8x \"%s\" = %d, 0x%x\n",
+ smiapp_reg_limits[limit[i]].addr,
+ smiapp_reg_limits[limit[i]].what, val, val);
+ }
+
+ return 0;
+}
+
+static int smiapp_get_all_limits(struct smiapp_sensor *sensor)
+{
+ unsigned int i;
+ int rval;
+
+ for (i = 0; i < SMIAPP_LIMIT_LAST; i++) {
+ rval = smiapp_get_limits(sensor, &i, 1);
+ if (rval < 0)
+ return rval;
+ }
+
+ if (sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] == 0)
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_SCALER_N_MIN, 16);
+
+ return 0;
+}
+
+static int smiapp_get_limits_binning(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ static u32 const limits[] = {
+ SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN,
+ SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN,
+ SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN,
+ SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN,
+ SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN,
+ SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN_BIN,
+ SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN,
+ };
+ static u32 const limits_replace[] = {
+ SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES,
+ SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES,
+ SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK,
+ SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK,
+ SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK,
+ SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN,
+ SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN,
+ };
+ unsigned int i;
+ int rval;
+
+ if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY] ==
+ SMIAPP_BINNING_CAPABILITY_NO) {
+ for (i = 0; i < ARRAY_SIZE(limits); i++)
+ sensor->limits[limits[i]] =
+ sensor->limits[limits_replace[i]];
+
+ return 0;
+ }
+
+ rval = smiapp_get_limits(sensor, limits, ARRAY_SIZE(limits));
+ if (rval < 0)
+ return rval;
+
+ /*
+ * Sanity check whether the binning limits are valid. If not,
+ * use the non-binning ones.
+ */
+ if (sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN]
+ && sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN]
+ && sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN])
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(limits); i++) {
+ dev_dbg(&client->dev,
+ "replace limit 0x%8.8x \"%s\" = %d, 0x%x\n",
+ smiapp_reg_limits[limits[i]].addr,
+ smiapp_reg_limits[limits[i]].what,
+ sensor->limits[limits_replace[i]],
+ sensor->limits[limits_replace[i]]);
+ sensor->limits[limits[i]] =
+ sensor->limits[limits_replace[i]];
+ }
+
+ return 0;
+}
+
+static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int type, n;
+ unsigned int i, pixel_order;
+ int rval;
+
+ rval = smiapp_read(
+ sensor, SMIAPP_REG_U8_DATA_FORMAT_MODEL_TYPE, &type);
+ if (rval)
+ return rval;
+
+ dev_dbg(&client->dev, "data_format_model_type %d\n", type);
+
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_PIXEL_ORDER,
+ &pixel_order);
+ if (rval)
+ return rval;
+
+ if (pixel_order >= ARRAY_SIZE(pixel_order_str)) {
+ dev_dbg(&client->dev, "bad pixel order %d\n", pixel_order);
+ return -EINVAL;
+ }
+
+ dev_dbg(&client->dev, "pixel order %d (%s)\n", pixel_order,
+ pixel_order_str[pixel_order]);
+
+ switch (type) {
+ case SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL:
+ n = SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL_N;
+ break;
+ case SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED:
+ n = SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED_N;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sensor->default_pixel_order = pixel_order;
+ sensor->mbus_frame_fmts = 0;
+
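+ /*
+ * Each data format descriptor contains the uncompressed bit depth in
+ * its high byte and the compressed bit depth in its low byte; match
+ * them against the known CSI data formats below.
+ */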
+ for (i = 0; i < n; i++) {
+ unsigned int fmt, j;
+
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U16_DATA_FORMAT_DESCRIPTOR(i), &fmt);
+ if (rval)
+ return rval;
+
+ dev_dbg(&client->dev, "bpp %d, compressed %d\n",
+ fmt >> 8, (u8)fmt);
+
+ for (j = 0; j < ARRAY_SIZE(smiapp_csi_data_formats); j++) {
+ const struct smiapp_csi_data_format *f =
+ &smiapp_csi_data_formats[j];
+
+ if (f->pixel_order != SMIAPP_PIXEL_ORDER_GRBG)
+ continue;
+
+ if (f->width != fmt >> 8 || f->compressed != (u8)fmt)
+ continue;
+
+ dev_dbg(&client->dev, "jolly good! %d\n", j);
+
+ sensor->default_mbus_frame_fmts |= 1 << j;
+ if (!sensor->csi_format) {
+ sensor->csi_format = f;
+ sensor->internal_csi_format = f;
+ }
+ }
+ }
+
+ if (!sensor->csi_format) {
+ dev_err(&client->dev, "no supported mbus code found\n");
+ return -EINVAL;
+ }
+
+ smiapp_update_mbus_formats(sensor);
+
+ return 0;
+}
+
+static void smiapp_update_blanking(struct smiapp_sensor *sensor)
+{
+ struct v4l2_ctrl *vblank = sensor->vblank;
+ struct v4l2_ctrl *hblank = sensor->hblank;
+
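+ /*
+ * The frame length in lines is the active height plus vertical
+ * blanking, so the vblank minimum is the larger of the explicit
+ * blanking limit and the minimum frame length minus the current
+ * pixel array crop height; the hblank limits are derived the same
+ * way from the line length limits.
+ */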
+ vblank->minimum =
+ max_t(int,
+ sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
+ sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
+ vblank->maximum =
+ sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
+
+ vblank->val = clamp_t(int, vblank->val,
+ vblank->minimum, vblank->maximum);
+ vblank->default_value = vblank->minimum;
+ vblank->val = vblank->val;
+ vblank->cur.val = vblank->val;
+
+ hblank->minimum =
+ max_t(int,
+ sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width,
+ sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]);
+ hblank->maximum =
+ sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN] -
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
+
+ hblank->val = clamp_t(int, hblank->val,
+ hblank->minimum, hblank->maximum);
+ hblank->default_value = hblank->minimum;
+ hblank->val = hblank->val;
+ hblank->cur.val = hblank->val;
+
+ __smiapp_update_exposure_limits(sensor);
+}
+
+static int smiapp_update_mode(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int binning_mode;
+ int rval;
+
+ dev_dbg(&client->dev, "frame size: %dx%d\n",
+ sensor->src->crop[SMIAPP_PAD_SRC].width,
+ sensor->src->crop[SMIAPP_PAD_SRC].height);
+ dev_dbg(&client->dev, "csi format width: %d\n",
+ sensor->csi_format->width);
+
+ /* Binning has to be set up here; it affects limits */
+ if (sensor->binning_horizontal == 1 &&
+ sensor->binning_vertical == 1) {
+ binning_mode = 0;
+ } else {
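+ /*
+ * The binning type register encodes the horizontal factor in the
+ * high nibble and the vertical factor in the low nibble.
+ */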
+ u8 binning_type =
+ (sensor->binning_horizontal << 4)
+ | sensor->binning_vertical;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
+ if (rval < 0)
+ return rval;
+
+ binning_mode = 1;
+ }
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
+ if (rval < 0)
+ return rval;
+
+ /* Get updated limits due to binning */
+ rval = smiapp_get_limits_binning(sensor);
+ if (rval < 0)
+ return rval;
+
+ rval = smiapp_pll_update(sensor);
+ if (rval < 0)
+ return rval;
+
+ /* Output from pixel array, including blanking */
+ smiapp_update_blanking(sensor);
+
+ dev_dbg(&client->dev, "vblank\t\t%d\n", sensor->vblank->val);
+ dev_dbg(&client->dev, "hblank\t\t%d\n", sensor->hblank->val);
+
+ dev_dbg(&client->dev, "real timeperframe\t100/%d\n",
+ sensor->pll.vt_pix_clk_freq_hz /
+ ((sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width
+ + sensor->hblank->val) *
+ (sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height
+ + sensor->vblank->val) / 100));
+
+ return 0;
+}
+
+/*
+ *
+ * SMIA++ NVM handling
+ *
+ */
+static int smiapp_read_nvm(struct smiapp_sensor *sensor,
+ unsigned char *nvm)
+{
+ u32 i, s, p, np, v;
+ int rval = 0, rval2;
+
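+ /*
+ * Read the NVM one page at a time: select a page, enable the data
+ * transfer interface in read mode, poll the status register until
+ * the data is ready and then copy the page from the data registers.
+ */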
+ np = sensor->nvm_size / SMIAPP_NVM_PAGE_SIZE;
+ for (p = 0; p < np; p++) {
+ rval = smiapp_write(
+ sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
+ if (rval)
+ goto out;
+
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
+ SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN |
+ SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN);
+ if (rval)
+ goto out;
+
+ for (i = 1000; i > 0; i--) {
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
+
+ if (rval)
+ goto out;
+
+ if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
+ break;
+ }
+
+ if (!i) {
+ rval = -ETIMEDOUT;
+ goto out;
+ }
+
+ for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
+ rval = smiapp_read(
+ sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
+ &v);
+ if (rval)
+ goto out;
+
+ *nvm++ = v;
+ }
+ }
+
+out:
+ rval2 = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL, 0);
+ if (rval < 0)
+ return rval;
+ else
+ return rval2;
+}
+
+/*
+ *
+ * SMIA++ CCI address control
+ *
+ */
+static int smiapp_change_cci_addr(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+ u32 val;
+
+ client->addr = sensor->platform_data->i2c_addr_dfl;
+
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_CCI_ADDRESS_CONTROL,
+ sensor->platform_data->i2c_addr_alt << 1);
+ if (rval)
+ return rval;
+
+ client->addr = sensor->platform_data->i2c_addr_alt;
+
+ /* verify addr change went ok */
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_CCI_ADDRESS_CONTROL, &val);
+ if (rval)
+ return rval;
+
+ if (val != sensor->platform_data->i2c_addr_alt << 1)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ *
+ * SMIA++ Mode Control
+ *
+ */
+static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
+{
+ struct smiapp_flash_strobe_parms *strobe_setup;
+ unsigned int ext_freq = sensor->platform_data->ext_clk;
+ u32 tmp;
+ u32 strobe_adjustment;
+ u32 strobe_width_high_rs;
+ int rval;
+
+ strobe_setup = sensor->platform_data->strobe_setup;
+
+ /*
+ * How to calculate registers related to strobe length. Please
+ * do not change, or if you do at least know what you're
+ * doing. :-)
+ *
+ * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com> 2010-10-25
+ *
+ * flash_strobe_length [us] / 10^6 = (tFlash_strobe_width_ctrl
+ * / EXTCLK freq [Hz]) * flash_strobe_adjustment
+ *
+ * tFlash_strobe_width_ctrl is a natural number in [1, 0xffff]
+ * flash_strobe_adjustment is a natural number in [1, 0xff]
+ *
+ * The formula above is written as below to keep it on one
+ * line:
+ *
+ * l / 10^6 = w / e * a
+ *
+ * Let's mark w * a by x:
+ *
+ * x = w * a
+ *
+ * Thus, we get:
+ *
+ * x = l * e / 10^6
+ *
+ * The strobe width must be at least as long as requested,
+ * thus rounding upwards is needed.
+ *
+ * x = (l * e + 10^6 - 1) / 10^6
+ * -----------------------------
+ *
+ * Maximum possible accuracy is wanted at all times. Thus keep
+ * a as small as possible.
+ *
+ * Calculate a, assuming maximum w, with rounding upwards:
+ *
+ * a = (x + (2^16 - 1) - 1) / (2^16 - 1)
+ * -------------------------------------
+ *
+ * Thus, we also get w, with that a, with rounding upwards:
+ *
+ * w = (x + a - 1) / a
+ * -------------------
+ *
+ * To get limits:
+ *
+ * x E [1, (2^16 - 1) * (2^8 - 1)]
+ *
+ * Substituting maximum x to the original formula (with rounding),
+ * the maximum l is thus
+ *
+ * (2^16 - 1) * (2^8 - 1) * 10^6 = l * e + 10^6 - 1
+ *
+ * l = (10^6 * (2^16 - 1) * (2^8 - 1) - 10^6 + 1) / e
+ * --------------------------------------------------
+ *
+ * flash_strobe_length must be clamped between 1 and
+ * (10^6 * (2^16 - 1) * (2^8 - 1) - 10^6 + 1) / EXTCLK freq.
+ *
+ * Then,
+ *
+ * flash_strobe_adjustment = ((flash_strobe_length *
+ * EXTCLK freq + 10^6 - 1) / 10^6 + (2^16 - 1) - 1) / (2^16 - 1)
+ *
+ * tFlash_strobe_width_ctrl = ((flash_strobe_length *
+ * EXTCLK freq + 10^6 - 1) / 10^6 +
+ * flash_strobe_adjustment - 1) / flash_strobe_adjustment
+ */
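+ /*
+ * A worked example with illustrative values (the 9.6 MHz EXTCLK and
+ * the requested strobe widths below are assumptions, not
+ * requirements):
+ *
+ * l = 100 us: x = (100 * 9600000 + 10^6 - 1) / 10^6 = 960,
+ * a = (960 + (2^16 - 1) - 1) / (2^16 - 1) = 1, w = 960.
+ *
+ * l = 1 s: x = 9600000, a = 147, w = 65307, which still fits in the
+ * 16-bit strobe width register and gives a strobe of ~1.0000134 s,
+ * rounded up from the requested length as required.
+ */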
+ tmp = div_u64(1000000ULL * ((1 << 16) - 1) * ((1 << 8) - 1) -
+ 1000000 + 1, ext_freq);
+ strobe_setup->strobe_width_high_us =
+ clamp_t(u32, strobe_setup->strobe_width_high_us, 1, tmp);
+
+ tmp = div_u64(((u64)strobe_setup->strobe_width_high_us * (u64)ext_freq +
+ 1000000 - 1), 1000000ULL);
+ strobe_adjustment = (tmp + (1 << 16) - 1 - 1) / ((1 << 16) - 1);
+ strobe_width_high_rs = (tmp + strobe_adjustment - 1) /
+ strobe_adjustment;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_FLASH_MODE_RS,
+ strobe_setup->mode);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_FLASH_STROBE_ADJUSTMENT,
+ strobe_adjustment);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_RS_CTRL,
+ strobe_width_high_rs);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_TFLASH_STROBE_DELAY_RS_CTRL,
+ strobe_setup->strobe_delay);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_FLASH_STROBE_START_POINT,
+ strobe_setup->stobe_start_point);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_FLASH_TRIGGER_RS,
+ strobe_setup->trigger);
+
+out:
+ sensor->platform_data->strobe_setup->trigger = 0;
+
+ return rval;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+static int smiapp_power_on(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int sleep;
+ int rval;
+
+ rval = regulator_enable(sensor->vana);
+ if (rval) {
+ dev_err(&client->dev, "failed to enable vana regulator\n");
+ return rval;
+ }
+ usleep_range(1000, 1000);
+
+ if (sensor->platform_data->set_xclk)
+ rval = sensor->platform_data->set_xclk(
+ &sensor->src->sd, sensor->platform_data->ext_clk);
+ else
+ rval = clk_enable(sensor->ext_clk);
+ if (rval < 0) {
+ dev_dbg(&client->dev, "failed to set xclk\n");
+ goto out_xclk_fail;
+ }
+ usleep_range(1000, 1000);
+
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_set_value(sensor->platform_data->xshutdown, 1);
+
+ sleep = SMIAPP_RESET_DELAY(sensor->platform_data->ext_clk);
+ usleep_range(sleep, sleep);
+
+ /*
+ * Failures to respond to the address change command have been noticed.
+ * Those failures seem to be caused by the sensor requiring a longer
+ * boot time than advertised. An additional 10ms delay seems to work
+ * around the issue, but the SMIA++ I2C write retry hack makes the delay
+ * unnecessary. The failures need to be investigated to find a proper
+ * fix, and a delay will likely need to be added here if the I2C write
+ * retry hack is reverted before the root cause of the boot time issue
+ * is found.
+ */
+
+ if (sensor->platform_data->i2c_addr_alt) {
+ rval = smiapp_change_cci_addr(sensor);
+ if (rval) {
+ dev_err(&client->dev, "cci address change error\n");
+ goto out_cci_addr_fail;
+ }
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_SOFTWARE_RESET,
+ SMIAPP_SOFTWARE_RESET);
+ if (rval < 0) {
+ dev_err(&client->dev, "software reset failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ if (sensor->platform_data->i2c_addr_alt) {
+ rval = smiapp_change_cci_addr(sensor);
+ if (rval) {
+ dev_err(&client->dev, "cci address change error\n");
+ goto out_cci_addr_fail;
+ }
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_COMPRESSION_MODE,
+ SMIAPP_COMPRESSION_MODE_SIMPLE_PREDICTOR);
+ if (rval) {
+ dev_err(&client->dev, "compression mode set failed\n");
+ goto out_cci_addr_fail;
+ }
+
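+ /*
+ * The EXTCLK frequency register takes the frequency in MHz as an
+ * 8.8 fixed point value (i.e. multiplied by 256); dividing the Hz
+ * value by (1000000 / 256) approximates that conversion with
+ * integer arithmetic.
+ */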
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_EXTCLK_FREQUENCY_MHZ,
+ sensor->platform_data->ext_clk / (1000000 / (1 << 8)));
+ if (rval) {
+ dev_err(&client->dev, "extclk frequency set failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_CSI_LANE_MODE,
+ sensor->platform_data->lanes - 1);
+ if (rval) {
+ dev_err(&client->dev, "csi lane mode set failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_FAST_STANDBY_CTRL,
+ SMIAPP_FAST_STANDBY_CTRL_IMMEDIATE);
+ if (rval) {
+ dev_err(&client->dev, "fast standby set failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_CSI_SIGNALLING_MODE,
+ sensor->platform_data->csi_signalling_mode);
+ if (rval) {
+ dev_err(&client->dev, "csi signalling mode set failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ /* DPHY control done by sensor based on requested link rate */
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_DPHY_CTRL,
+ SMIAPP_DPHY_CTRL_UI);
+ if (rval < 0)
+ goto out_cci_addr_fail;
+
+ rval = smiapp_call_quirk(sensor, post_poweron);
+ if (rval) {
+ dev_err(&client->dev, "post_poweron quirks failed\n");
+ goto out_cci_addr_fail;
+ }
+
+ /* Are we still initialising...? If yes, return here. */
+ if (!sensor->pixel_array)
+ return 0;
+
+ rval = v4l2_ctrl_handler_setup(
+ &sensor->pixel_array->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
+
+ rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
+
+ mutex_lock(&sensor->mutex);
+ rval = smiapp_update_mode(sensor);
+ mutex_unlock(&sensor->mutex);
+ if (rval < 0)
+ goto out_cci_addr_fail;
+
+ return 0;
+
+out_cci_addr_fail:
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_set_value(sensor->platform_data->xshutdown, 0);
+ if (sensor->platform_data->set_xclk)
+ sensor->platform_data->set_xclk(&sensor->src->sd, 0);
+ else
+ clk_disable(sensor->ext_clk);
+
+out_xclk_fail:
+ regulator_disable(sensor->vana);
+ return rval;
+}
+
+static void smiapp_power_off(struct smiapp_sensor *sensor)
+{
+ /*
+ * Currently power/clock to the lens are enabled/disabled separately
+ * but they are essentially the same signals. So if the sensor is
+ * powered off while the lens is powered on, the sensor does not
+ * really see a power off and the next CCI address change will
+ * fail. So do a soft reset explicitly here.
+ */
+ if (sensor->platform_data->i2c_addr_alt)
+ smiapp_write(sensor,
+ SMIAPP_REG_U8_SOFTWARE_RESET,
+ SMIAPP_SOFTWARE_RESET);
+
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_set_value(sensor->platform_data->xshutdown, 0);
+ if (sensor->platform_data->set_xclk)
+ sensor->platform_data->set_xclk(&sensor->src->sd, 0);
+ else
+ clk_disable(sensor->ext_clk);
+ usleep_range(5000, 5000);
+ regulator_disable(sensor->vana);
+ sensor->streaming = 0;
+}
+
+static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int ret = 0;
+
+ mutex_lock(&sensor->power_mutex);
+
+ /*
+ * If the power count is modified from 0 to != 0 or from != 0
+ * to 0, update the power state.
+ */
+ if (!sensor->power_count == !on)
+ goto out;
+
+ if (on) {
+ /* Power on and perform initialisation. */
+ ret = smiapp_power_on(sensor);
+ if (ret < 0)
+ goto out;
+ } else {
+ smiapp_power_off(sensor);
+ }
+
+ /* Update the power count. */
+ sensor->power_count += on ? 1 : -1;
+ WARN_ON(sensor->power_count < 0);
+
+out:
+ mutex_unlock(&sensor->power_mutex);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video stream management
+ */
+
+static int smiapp_start_streaming(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ mutex_lock(&sensor->mutex);
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_CSI_DATA_FORMAT,
+ (sensor->csi_format->width << 8) |
+ sensor->csi_format->compressed);
+ if (rval)
+ goto out;
+
+ rval = smiapp_pll_configure(sensor);
+ if (rval)
+ goto out;
+
+ /* Analog crop start coordinates */
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_X_ADDR_START,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].left);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_Y_ADDR_START,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].top);
+ if (rval < 0)
+ goto out;
+
+ /* Analog crop end coordinates */
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_X_ADDR_END,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].left
+ + sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width - 1);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_Y_ADDR_END,
+ sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].top
+ + sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height - 1);
+ if (rval < 0)
+ goto out;
+
+ /*
+ * Output from pixel array, including blanking, is set using
+ * controls below. No need to set here.
+ */
+
+ /* Digital crop */
+ if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_DIGITAL_CROP_X_OFFSET,
+ sensor->scaler->crop[SMIAPP_PAD_SINK].left);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_DIGITAL_CROP_Y_OFFSET,
+ sensor->scaler->crop[SMIAPP_PAD_SINK].top);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_WIDTH,
+ sensor->scaler->crop[SMIAPP_PAD_SINK].width);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_HEIGHT,
+ sensor->scaler->crop[SMIAPP_PAD_SINK].height);
+ if (rval < 0)
+ goto out;
+ }
+
+ /* Scaling */
+ if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ != SMIAPP_SCALING_CAPABILITY_NONE) {
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_SCALING_MODE,
+ sensor->scaling_mode);
+ if (rval < 0)
+ goto out;
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_SCALE_M,
+ sensor->scale_m);
+ if (rval < 0)
+ goto out;
+ }
+
+ /* Output size from sensor */
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_X_OUTPUT_SIZE,
+ sensor->src->crop[SMIAPP_PAD_SRC].width);
+ if (rval < 0)
+ goto out;
+ rval = smiapp_write(sensor, SMIAPP_REG_U16_Y_OUTPUT_SIZE,
+ sensor->src->crop[SMIAPP_PAD_SRC].height);
+ if (rval < 0)
+ goto out;
+
+ if ((sensor->flash_capability &
+ (SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE |
+ SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE)) &&
+ sensor->platform_data->strobe_setup != NULL &&
+ sensor->platform_data->strobe_setup->trigger != 0) {
+ rval = smiapp_setup_flash_strobe(sensor);
+ if (rval)
+ goto out;
+ }
+
+ rval = smiapp_call_quirk(sensor, pre_streamon);
+ if (rval) {
+ dev_err(&client->dev, "pre_streamon quirks failed\n");
+ goto out;
+ }
+
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_MODE_SELECT,
+ SMIAPP_MODE_SELECT_STREAMING);
+
+out:
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
+}
+
+static int smiapp_stop_streaming(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ mutex_lock(&sensor->mutex);
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_MODE_SELECT,
+ SMIAPP_MODE_SELECT_SOFTWARE_STANDBY);
+ if (rval)
+ goto out;
+
+ rval = smiapp_call_quirk(sensor, post_streamoff);
+ if (rval)
+ dev_err(&client->dev, "post_streamoff quirks failed\n");
+
+out:
+ mutex_unlock(&sensor->mutex);
+ return rval;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev video operations
+ */
+
+static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int rval;
+
+ if (sensor->streaming == enable)
+ return 0;
+
+ if (enable) {
+ sensor->streaming = 1;
+ rval = smiapp_start_streaming(sensor);
+ if (rval < 0)
+ sensor->streaming = 0;
+ } else {
+ rval = smiapp_stop_streaming(sensor);
+ sensor->streaming = 0;
+ }
+
+ return rval;
+}
+
+static int smiapp_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int i;
+ int idx = -1;
+ int rval = -EINVAL;
+
+ mutex_lock(&sensor->mutex);
+
+ dev_err(&client->dev, "subdev %s, pad %d, index %d\n",
+ subdev->name, code->pad, code->index);
+
+ if (subdev != &sensor->src->sd || code->pad != SMIAPP_PAD_SRC) {
+ if (code->index)
+ goto out;
+
+ code->code = sensor->internal_csi_format->code;
+ rval = 0;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
+ if (sensor->mbus_frame_fmts & (1 << i))
+ idx++;
+
+ if (idx == code->index) {
+ code->code = smiapp_csi_data_formats[i].code;
+ dev_err(&client->dev, "found index %d, i %d, code %x\n",
+ code->index, i, code->code);
+ rval = 0;
+ break;
+ }
+ }
+
+out:
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
+}
+
+static u32 __smiapp_get_mbus_code(struct v4l2_subdev *subdev,
+ unsigned int pad)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+
+ if (subdev == &sensor->src->sd && pad == SMIAPP_PAD_SRC)
+ return sensor->csi_format->code;
+ else
+ return sensor->internal_csi_format->code;
+}
+
+static int __smiapp_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ fmt->format = *v4l2_subdev_get_try_format(fh, fmt->pad);
+ } else {
+ struct v4l2_rect *r;
+
+ if (fmt->pad == ssd->source_pad)
+ r = &ssd->crop[ssd->source_pad];
+ else
+ r = &ssd->sink_fmt;
+
+ fmt->format.code = __smiapp_get_mbus_code(subdev, fmt->pad);
+ fmt->format.width = r->width;
+ fmt->format.height = r->height;
+ }
+
+ return 0;
+}
+
+static int smiapp_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int rval;
+
+ mutex_lock(&sensor->mutex);
+ rval = __smiapp_get_format(subdev, fh, fmt);
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
+}
+
+static void smiapp_get_crop_compose(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_rect **crops,
+ struct v4l2_rect **comps, int which)
+{
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ unsigned int i;
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (crops)
+ for (i = 0; i < subdev->entity.num_pads; i++)
+ crops[i] = &ssd->crop[i];
+ if (comps)
+ *comps = &ssd->compose;
+ } else {
+ if (crops) {
+ for (i = 0; i < subdev->entity.num_pads; i++) {
+ crops[i] = v4l2_subdev_get_try_crop(fh, i);
+ BUG_ON(!crops[i]);
+ }
+ }
+ if (comps) {
+ *comps = v4l2_subdev_get_try_compose(fh,
+ SMIAPP_PAD_SINK);
+ BUG_ON(!*comps);
+ }
+ }
+}
+
+/* Only changes on the sink pad require propagation (to compose and source crop). */
+static void smiapp_propagate(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh, int which,
+ int target)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *comp, *crops[SMIAPP_PADS];
+
+ smiapp_get_crop_compose(subdev, fh, crops, &comp, which);
+
+ switch (target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ comp->width = crops[SMIAPP_PAD_SINK]->width;
+ comp->height = crops[SMIAPP_PAD_SINK]->height;
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (ssd == sensor->scaler) {
+ sensor->scale_m =
+ sensor->limits[
+ SMIAPP_LIMIT_SCALER_N_MIN];
+ sensor->scaling_mode =
+ SMIAPP_SCALING_MODE_NONE;
+ } else if (ssd == sensor->binner) {
+ sensor->binning_horizontal = 1;
+ sensor->binning_vertical = 1;
+ }
+ }
+ /* Fall through */
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ *crops[SMIAPP_PAD_SRC] = *comp;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static const struct smiapp_csi_data_format
+*smiapp_validate_csi_data_format(struct smiapp_sensor *sensor, u32 code)
+{
+ const struct smiapp_csi_data_format *csi_format = sensor->csi_format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
+ if (sensor->mbus_frame_fmts & (1 << i)
+ && smiapp_csi_data_formats[i].code == code)
+ return &smiapp_csi_data_formats[i];
+ }
+
+ return csi_format;
+}
+
+static int smiapp_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *crops[SMIAPP_PADS];
+
+ mutex_lock(&sensor->mutex);
+
+ /*
+ * Media bus code is changeable on src subdev's source pad. On
+ * other source pads we just get format here.
+ */
+ if (fmt->pad == ssd->source_pad) {
+ u32 code = fmt->format.code;
+ int rval = __smiapp_get_format(subdev, fh, fmt);
+
+ if (!rval && subdev == &sensor->src->sd) {
+ const struct smiapp_csi_data_format *csi_format =
+ smiapp_validate_csi_data_format(sensor, code);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ sensor->csi_format = csi_format;
+ fmt->format.code = csi_format->code;
+ }
+
+ mutex_unlock(&sensor->mutex);
+ return rval;
+ }
+
+ /* Sink pad. Width and height are changeable here. */
+ fmt->format.code = __smiapp_get_mbus_code(subdev, fmt->pad);
+ fmt->format.width &= ~1;
+ fmt->format.height &= ~1;
+
+ fmt->format.width =
+ clamp(fmt->format.width,
+ sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
+ sensor->limits[SMIAPP_LIMIT_MAX_X_OUTPUT_SIZE]);
+ fmt->format.height =
+ clamp(fmt->format.height,
+ sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE],
+ sensor->limits[SMIAPP_LIMIT_MAX_Y_OUTPUT_SIZE]);
+
+ smiapp_get_crop_compose(subdev, fh, crops, NULL, fmt->which);
+
+ crops[ssd->sink_pad]->left = 0;
+ crops[ssd->sink_pad]->top = 0;
+ crops[ssd->sink_pad]->width = fmt->format.width;
+ crops[ssd->sink_pad]->height = fmt->format.height;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ ssd->sink_fmt = *crops[ssd->sink_pad];
+ smiapp_propagate(subdev, fh, fmt->which,
+ V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL);
+
+ mutex_unlock(&sensor->mutex);
+
+ return 0;
+}
+
+/*
+ * Calculate goodness of scaled image size compared to expected image
+ * size and flags provided.
+ */
+#define SCALING_GOODNESS 100000
+#define SCALING_GOODNESS_EXTREME 100000000
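+/*
+ * A candidate that violates one of the V4L2_SUBDEV_SEL_FLAG_SIZE_* constraints
+ * is penalised by SCALING_GOODNESS per violated dimension; a width below the
+ * sensor's minimum output size is penalised by SCALING_GOODNESS_EXTREME.
+ */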
+static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
+ int h, int ask_h, u32 flags)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int val = 0;
+
+ w &= ~1;
+ ask_w &= ~1;
+ h &= ~1;
+ ask_h &= ~1;
+
+ if (flags & V4L2_SUBDEV_SEL_FLAG_SIZE_GE) {
+ if (w < ask_w)
+ val -= SCALING_GOODNESS;
+ if (h < ask_h)
+ val -= SCALING_GOODNESS;
+ }
+
+ if (flags & V4L2_SUBDEV_SEL_FLAG_SIZE_LE) {
+ if (w > ask_w)
+ val -= SCALING_GOODNESS;
+ if (h > ask_h)
+ val -= SCALING_GOODNESS;
+ }
+
+ val -= abs(w - ask_w);
+ val -= abs(h - ask_h);
+
+ if (w < sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE])
+ val -= SCALING_GOODNESS_EXTREME;
+
+ dev_dbg(&client->dev, "w %d ask_w %d h %d ask_h %d goodness %d\n",
+ w, ask_h, h, ask_h, val);
+
+ return val;
+}
+
+static void smiapp_set_compose_binner(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel,
+ struct v4l2_rect **crops,
+ struct v4l2_rect *comp)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int i;
+ unsigned int binh = 1, binv = 1;
+ unsigned int best = scaling_goodness(
+ subdev,
+ crops[SMIAPP_PAD_SINK]->width, sel->r.width,
+ crops[SMIAPP_PAD_SINK]->height, sel->r.height, sel->flags);
+
+ for (i = 0; i < sensor->nbinning_subtypes; i++) {
+ int this = scaling_goodness(
+ subdev,
+ crops[SMIAPP_PAD_SINK]->width
+ / sensor->binning_subtypes[i].horizontal,
+ sel->r.width,
+ crops[SMIAPP_PAD_SINK]->height
+ / sensor->binning_subtypes[i].vertical,
+ sel->r.height, sel->flags);
+
+ if (this > best) {
+ binh = sensor->binning_subtypes[i].horizontal;
+ binv = sensor->binning_subtypes[i].vertical;
+ best = this;
+ }
+ }
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sensor->binning_vertical = binv;
+ sensor->binning_horizontal = binh;
+ }
+
+ sel->r.width = (crops[SMIAPP_PAD_SINK]->width / binh) & ~1;
+ sel->r.height = (crops[SMIAPP_PAD_SINK]->height / binv) & ~1;
+}
+
+/*
+ * Calculate best scaling ratio and mode for given output resolution.
+ *
+ * Try all of these: horizontal ratio, vertical ratio and smallest
+ * size possible (horizontally).
+ *
+ * Also try whether horizontal scaler or full scaler gives a better
+ * result.
+ */
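+/*
+ * A worked example with illustrative values: with scaler_n_min = 16, a sink
+ * crop of 2560x1920 and a requested compose size of 1280x960, both the
+ * horizontal and the vertical ratio evaluate to 32. Assuming the scaler
+ * limits allow it and the sensor supports scaling in both directions,
+ * scale factor 32 with SMIAPP_SCALING_MODE_BOTH then yields exactly
+ * 1280x960, which scores better than horizontal-only scaling (1280x1920).
+ */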
+static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel,
+ struct v4l2_rect **crops,
+ struct v4l2_rect *comp)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ u32 min, max, a, b, max_m;
+ u32 scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+ int mode = SMIAPP_SCALING_MODE_HORIZONTAL;
+ u32 try[4];
+ u32 ntry = 0;
+ unsigned int i;
+ int best = INT_MIN;
+
+ sel->r.width = min_t(unsigned int, sel->r.width,
+ crops[SMIAPP_PAD_SINK]->width);
+ sel->r.height = min_t(unsigned int, sel->r.height,
+ crops[SMIAPP_PAD_SINK]->height);
+
+ a = crops[SMIAPP_PAD_SINK]->width
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] / sel->r.width;
+ b = crops[SMIAPP_PAD_SINK]->height
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN] / sel->r.height;
+ max_m = crops[SMIAPP_PAD_SINK]->width
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]
+ / sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE];
+
+ a = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
+ max(a, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
+ b = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
+ max(b, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
+ max_m = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
+ max(max_m, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
+
+ dev_dbg(&client->dev, "scaling: a %d b %d max_m %d\n", a, b, max_m);
+
+ min = min(max_m, min(a, b));
+ max = min(max_m, max(a, b));
+
+ try[ntry] = min;
+ ntry++;
+ if (min != max) {
+ try[ntry] = max;
+ ntry++;
+ }
+ if (max != max_m) {
+ try[ntry] = min + 1;
+ ntry++;
+ if (min != max) {
+ try[ntry] = max + 1;
+ ntry++;
+ }
+ }
+
+ for (i = 0; i < ntry; i++) {
+ int this = scaling_goodness(
+ subdev,
+ crops[SMIAPP_PAD_SINK]->width
+ / try[i]
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ sel->r.width,
+ crops[SMIAPP_PAD_SINK]->height,
+ sel->r.height,
+ sel->flags);
+
+ dev_dbg(&client->dev, "trying factor %d (%d)\n", try[i], i);
+
+ if (this > best) {
+ scale_m = try[i];
+ mode = SMIAPP_SCALING_MODE_HORIZONTAL;
+ best = this;
+ }
+
+ if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
+ continue;
+
+ this = scaling_goodness(
+ subdev, crops[SMIAPP_PAD_SINK]->width
+ / try[i]
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ sel->r.width,
+ crops[SMIAPP_PAD_SINK]->height
+ / try[i]
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN],
+ sel->r.height,
+ sel->flags);
+
+ if (this > best) {
+ scale_m = try[i];
+ mode = SMIAPP_SCALING_MODE_BOTH;
+ best = this;
+ }
+ }
+
+ sel->r.width =
+ (crops[SMIAPP_PAD_SINK]->width
+ / scale_m
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]) & ~1;
+ if (mode == SMIAPP_SCALING_MODE_BOTH)
+ sel->r.height =
+ (crops[SMIAPP_PAD_SINK]->height
+ / scale_m
+ * sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN])
+ & ~1;
+ else
+ sel->r.height = crops[SMIAPP_PAD_SINK]->height;
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sensor->scale_m = scale_m;
+ sensor->scaling_mode = mode;
+ }
+}
+
+/* We're only called on source pads. This function sets scaling. */
+static int smiapp_set_compose(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *comp, *crops[SMIAPP_PADS];
+
+ smiapp_get_crop_compose(subdev, fh, crops, &comp, sel->which);
+
+ sel->r.top = 0;
+ sel->r.left = 0;
+
+ if (ssd == sensor->binner)
+ smiapp_set_compose_binner(subdev, fh, sel, crops, comp);
+ else
+ smiapp_set_compose_scaler(subdev, fh, sel, crops, comp);
+
+ *comp = sel->r;
+ smiapp_propagate(subdev, fh, sel->which,
+ V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return smiapp_update_mode(sensor);
+
+ return 0;
+}
+
+static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+
+ /* Crop is supported on three pads only; compose on the binner and scaler sinks. */
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ if (ssd == sensor->pixel_array
+ && sel->pad == SMIAPP_PA_PAD_SRC)
+ return 0;
+ if (ssd == sensor->src
+ && sel->pad == SMIAPP_PAD_SRC)
+ return 0;
+ if (ssd == sensor->scaler
+ && sel->pad == SMIAPP_PAD_SINK
+ && sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP)
+ return 0;
+ return -EINVAL;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ if (sel->pad == ssd->source_pad)
+ return -EINVAL;
+ if (ssd == sensor->binner)
+ return 0;
+ if (ssd == sensor->scaler
+ && sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ != SMIAPP_SCALING_CAPABILITY_NONE)
+ return 0;
+ /* Fall through */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smiapp_set_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *src_size, *crops[SMIAPP_PADS];
+ struct v4l2_rect _r;
+
+ smiapp_get_crop_compose(subdev, fh, crops, NULL, sel->which);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ if (sel->pad == ssd->sink_pad)
+ src_size = &ssd->sink_fmt;
+ else
+ src_size = &ssd->compose;
+ } else {
+ if (sel->pad == ssd->sink_pad) {
+ _r.left = 0;
+ _r.top = 0;
+ _r.width = v4l2_subdev_get_try_format(fh, sel->pad)
+ ->width;
+ _r.height = v4l2_subdev_get_try_format(fh, sel->pad)
+ ->height;
+ src_size = &_r;
+ } else {
+ src_size =
+ v4l2_subdev_get_try_compose(
+ fh, ssd->sink_pad);
+ }
+ }
+
+ if (ssd == sensor->src && sel->pad == SMIAPP_PAD_SRC) {
+ sel->r.left = 0;
+ sel->r.top = 0;
+ }
+
+ sel->r.width = min(sel->r.width, src_size->width);
+ sel->r.height = min(sel->r.height, src_size->height);
+
+ sel->r.left = min(sel->r.left, src_size->width - sel->r.width);
+ sel->r.top = min(sel->r.top, src_size->height - sel->r.height);
+
+ *crops[sel->pad] = sel->r;
+
+ if (ssd != sensor->pixel_array && sel->pad == SMIAPP_PAD_SINK)
+ smiapp_propagate(subdev, fh, sel->which,
+ V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL);
+
+ return 0;
+}
+
+static int __smiapp_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+ struct v4l2_rect *comp, *crops[SMIAPP_PADS];
+ struct v4l2_rect sink_fmt;
+ int ret;
+
+ ret = __smiapp_sel_supported(subdev, sel);
+ if (ret)
+ return ret;
+
+ smiapp_get_crop_compose(subdev, fh, crops, &comp, sel->which);
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ sink_fmt = ssd->sink_fmt;
+ } else {
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_get_try_format(fh, ssd->sink_pad);
+
+ sink_fmt.left = 0;
+ sink_fmt.top = 0;
+ sink_fmt.width = fmt->width;
+ sink_fmt.height = fmt->height;
+ }
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ if (ssd == sensor->pixel_array) {
+ sel->r.width =
+ sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+ sel->r.height =
+ sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+ } else if (sel->pad == ssd->sink_pad) {
+ sel->r = sink_fmt;
+ } else {
+ sel->r = *comp;
+ }
+ break;
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r = *crops[sel->pad];
+ break;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ sel->r = *comp;
+ break;
+ }
+
+ return 0;
+}
+
+static int smiapp_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int rval;
+
+ mutex_lock(&sensor->mutex);
+ rval = __smiapp_get_selection(subdev, fh, sel);
+ mutex_unlock(&sensor->mutex);
+
+ return rval;
+}
+
+static int smiapp_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_selection *sel)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int ret;
+
+ ret = __smiapp_sel_supported(subdev, sel);
+ if (ret)
+ return ret;
+
+ mutex_lock(&sensor->mutex);
+
+ sel->r.left = max(0, sel->r.left & ~1);
+ sel->r.top = max(0, sel->r.top & ~1);
+ sel->r.width = max(0, SMIAPP_ALIGN_DIM(sel->r.width, sel->flags));
+ sel->r.height = max(0, SMIAPP_ALIGN_DIM(sel->r.height, sel->flags));
+
+ sel->r.width = max_t(unsigned int,
+ sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE],
+ sel->r.width);
+ sel->r.height = max_t(unsigned int,
+ sensor->limits[SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE],
+ sel->r.height);
+
+ switch (sel->target) {
+ case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ ret = smiapp_set_crop(subdev, fh, sel);
+ break;
+ case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ ret = smiapp_set_compose(subdev, fh, sel);
+ break;
+ default:
+ BUG();
+ }
+
+ mutex_unlock(&sensor->mutex);
+ return ret;
+}
+
+static int smiapp_get_skip_frames(struct v4l2_subdev *subdev, u32 *frames)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+
+ *frames = sensor->frame_skip;
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * sysfs attributes
+ */
+
+static ssize_t
+smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev));
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int nbytes;
+
+ if (!sensor->dev_init_done)
+ return -EBUSY;
+
+ if (!sensor->nvm_size) {
+ /* NVM not read yet - read it now */
+ sensor->nvm_size = sensor->platform_data->nvm_size;
+ if (smiapp_set_power(subdev, 1) < 0)
+ return -ENODEV;
+ if (smiapp_read_nvm(sensor, sensor->nvm)) {
+ dev_err(&client->dev, "nvm read failed\n");
+ smiapp_set_power(subdev, 0);
+ return -ENODEV;
+ }
+ smiapp_set_power(subdev, 0);
+ }
+ /*
+ * NVM is still way below a PAGE_SIZE, so we can safely
+ * assume this for now.
+ */
+ nbytes = min_t(unsigned int, sensor->nvm_size, PAGE_SIZE);
+ memcpy(buf, sensor->nvm, nbytes);
+
+ return nbytes;
+}
+static DEVICE_ATTR(nvm, S_IRUGO, smiapp_sysfs_nvm_read, NULL);
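+/*
+ * The raw NVM contents are exposed read-only through the "nvm" attribute in
+ * the I2C device's sysfs directory (e.g. /sys/bus/i2c/devices/<bus>-<addr>/nvm).
+ */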
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+static int smiapp_identify_module(struct v4l2_subdev *subdev)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_module_info *minfo = &sensor->minfo;
+ unsigned int i;
+ int rval = 0;
+
+ minfo->name = SMIAPP_NAME;
+
+ /* Module info */
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U8_MANUFACTURER_ID,
+ &minfo->manufacturer_id);
+ if (!rval)
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U16_MODEL_ID,
+ &minfo->model_id);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_REVISION_NUMBER_MAJOR,
+ &minfo->revision_number_major);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_REVISION_NUMBER_MINOR,
+ &minfo->revision_number_minor);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_MODULE_DATE_YEAR,
+ &minfo->module_year);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_MODULE_DATE_MONTH,
+ &minfo->module_month);
+ if (!rval)
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U8_MODULE_DATE_DAY,
+ &minfo->module_day);
+
+ /* Sensor info */
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_SENSOR_MANUFACTURER_ID,
+ &minfo->sensor_manufacturer_id);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U16_SENSOR_MODEL_ID,
+ &minfo->sensor_model_id);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_SENSOR_REVISION_NUMBER,
+ &minfo->sensor_revision_number);
+ if (!rval)
+ rval = smiapp_read_8only(sensor,
+ SMIAPP_REG_U8_SENSOR_FIRMWARE_VERSION,
+ &minfo->sensor_firmware_version);
+
+ /* SMIA */
+ if (!rval)
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U8_SMIA_VERSION,
+ &minfo->smia_version);
+ if (!rval)
+ rval = smiapp_read_8only(sensor, SMIAPP_REG_U8_SMIAPP_VERSION,
+ &minfo->smiapp_version);
+
+ if (rval) {
+ dev_err(&client->dev, "sensor detection failed\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "module 0x%2.2x-0x%4.4x\n",
+ minfo->manufacturer_id, minfo->model_id);
+
+ dev_dbg(&client->dev,
+ "module revision 0x%2.2x-0x%2.2x date %2.2d-%2.2d-%2.2d\n",
+ minfo->revision_number_major, minfo->revision_number_minor,
+ minfo->module_year, minfo->module_month, minfo->module_day);
+
+ dev_dbg(&client->dev, "sensor 0x%2.2x-0x%4.4x\n",
+ minfo->sensor_manufacturer_id, minfo->sensor_model_id);
+
+ dev_dbg(&client->dev,
+ "sensor revision 0x%2.2x firmware version 0x%2.2x\n",
+ minfo->sensor_revision_number, minfo->sensor_firmware_version);
+
+ dev_dbg(&client->dev, "smia version %2.2d smiapp version %2.2d\n",
+ minfo->smia_version, minfo->smiapp_version);
+
+ /*
+ * Some modules have bad data in the lvalues below. Hope the
+ * rvalues have better stuff. The lvalues are module
+ * parameters whereas the rvalues are sensor parameters.
+ */
+ if (!minfo->manufacturer_id && !minfo->model_id) {
+ minfo->manufacturer_id = minfo->sensor_manufacturer_id;
+ minfo->model_id = minfo->sensor_model_id;
+ minfo->revision_number_major = minfo->sensor_revision_number;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(smiapp_module_idents); i++) {
+ if (smiapp_module_idents[i].manufacturer_id
+ != minfo->manufacturer_id)
+ continue;
+ if (smiapp_module_idents[i].model_id != minfo->model_id)
+ continue;
+ if (smiapp_module_idents[i].flags
+ & SMIAPP_MODULE_IDENT_FLAG_REV_LE) {
+ if (smiapp_module_idents[i].revision_number_major
+ < minfo->revision_number_major)
+ continue;
+ } else {
+ if (smiapp_module_idents[i].revision_number_major
+ != minfo->revision_number_major)
+ continue;
+ }
+
+ minfo->name = smiapp_module_idents[i].name;
+ minfo->quirk = smiapp_module_idents[i].quirk;
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(smiapp_module_idents))
+ dev_warn(&client->dev,
+ "no quirks for this module; let's hope it's fully compliant\n");
+
+ dev_dbg(&client->dev, "the sensor is called %s, ident %2.2x%4.4x%2.2x\n",
+ minfo->name, minfo->manufacturer_id, minfo->model_id,
+ minfo->revision_number_major);
+
+ strlcpy(subdev->name, sensor->minfo.name, sizeof(subdev->name));
+
+ return 0;
+}
+
+static const struct v4l2_subdev_ops smiapp_ops;
+static const struct v4l2_subdev_internal_ops smiapp_internal_ops;
+static const struct media_entity_operations smiapp_entity_ops;
+
+static int smiapp_registered(struct v4l2_subdev *subdev)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct smiapp_subdev *last = NULL;
+ u32 tmp;
+ unsigned int i;
+ int rval;
+
+ sensor->vana = regulator_get(&client->dev, "VANA");
+ if (IS_ERR(sensor->vana)) {
+ dev_err(&client->dev, "could not get regulator for vana\n");
+ return -ENODEV;
+ }
+
+ if (!sensor->platform_data->set_xclk) {
+ sensor->ext_clk = clk_get(&client->dev,
+ sensor->platform_data->ext_clk_name);
+ if (IS_ERR(sensor->ext_clk)) {
+ dev_err(&client->dev, "could not get clock %s\n",
+ sensor->platform_data->ext_clk_name);
+ rval = -ENODEV;
+ goto out_clk_get;
+ }
+
+ rval = clk_set_rate(sensor->ext_clk,
+ sensor->platform_data->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "unable to set clock %s freq to %u\n",
+ sensor->platform_data->ext_clk_name,
+ sensor->platform_data->ext_clk);
+ rval = -ENODEV;
+ goto out_clk_set_rate;
+ }
+ }
+
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN) {
+ if (gpio_request_one(sensor->platform_data->xshutdown, 0,
+ "SMIA++ xshutdown") != 0) {
+ dev_err(&client->dev,
+ "unable to acquire reset gpio %d\n",
+ sensor->platform_data->xshutdown);
+ rval = -ENODEV;
+ goto out_clk_set_rate;
+ }
+ }
+
+ rval = smiapp_power_on(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_smiapp_power_on;
+ }
+
+ rval = smiapp_identify_module(subdev);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ rval = smiapp_get_all_limits(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ /*
+ * Handle Sensor Module orientation on the board.
+ *
+ * The application of H-FLIP and V-FLIP on the sensor is modified by
+ * the sensor orientation on the board.
+ *
+ * For SMIAPP_BOARD_SENSOR_ORIENT_180 the default behaviour is to set
+ * both H-FLIP and V-FLIP for normal operation which also implies
+ * that a set/unset operation for user space HFLIP and VFLIP v4l2
+ * controls will need to be internally inverted.
+ *
+ * Rotation also changes the bayer pattern.
+ */
+ if (sensor->platform_data->module_board_orient ==
+ SMIAPP_MODULE_BOARD_ORIENT_180)
+ sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
+ SMIAPP_IMAGE_ORIENTATION_VFLIP;
+
+ rval = smiapp_get_mbus_formats(sensor);
+ if (rval) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+
+ if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
+ u32 val;
+
+ rval = smiapp_read(sensor,
+ SMIAPP_REG_U8_BINNING_SUBTYPES, &val);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+ sensor->nbinning_subtypes = min_t(u8, val,
+ SMIAPP_BINNING_SUBTYPES);
+
+ for (i = 0; i < sensor->nbinning_subtypes; i++) {
+ rval = smiapp_read(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE_n(i), &val);
+ if (rval < 0) {
+ rval = -ENODEV;
+ goto out_power_off;
+ }
+ sensor->binning_subtypes[i] =
+ *(struct smiapp_binning_subtype *)&val;
+
+ dev_dbg(&client->dev, "binning %xx%x\n",
+ sensor->binning_subtypes[i].horizontal,
+ sensor->binning_subtypes[i].vertical);
+ }
+ }
+ sensor->binning_horizontal = 1;
+ sensor->binning_vertical = 1;
+
+ /* SMIA++ NVM initialization - it will be read from the sensor
+ * when it is first requested by userspace.
+ */
+ if (sensor->minfo.smiapp_version && sensor->platform_data->nvm_size) {
+ sensor->nvm = kzalloc(sensor->platform_data->nvm_size,
+ GFP_KERNEL);
+ if (sensor->nvm == NULL) {
+ dev_err(&client->dev, "nvm buf allocation failed\n");
+ rval = -ENOMEM;
+ goto out_power_off;
+ }
+
+ if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
+ dev_err(&client->dev, "sysfs nvm entry failed\n");
+ rval = -EBUSY;
+ goto out_power_off;
+ }
+ }
+
+ rval = smiapp_call_quirk(sensor, limits);
+ if (rval) {
+ dev_err(&client->dev, "limits quirks failed\n");
+ goto out_nvm_release;
+ }
+
+ /* We consider this as profile 0 sensor if any of these are zero. */
+ if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
+ !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
+ } else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ != SMIAPP_SCALING_CAPABILITY_NONE) {
+ if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+ == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
+ else
+ sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
+ sensor->scaler = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ } else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+ == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
+ sensor->scaler = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ }
+ sensor->binner = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+ sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
+ sensor->ssds_used++;
+
+ sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+
+ for (i = 0; i < SMIAPP_SUBDEVS; i++) {
+ struct {
+ struct smiapp_subdev *ssd;
+ char *name;
+ } const __this[] = {
+ { sensor->scaler, "scaler", },
+ { sensor->binner, "binner", },
+ { sensor->pixel_array, "pixel array", },
+ }, *_this = &__this[i];
+ struct smiapp_subdev *this = _this->ssd;
+
+ if (!this)
+ continue;
+
+ if (this != sensor->src)
+ v4l2_subdev_init(&this->sd, &smiapp_ops);
+
+ this->sensor = sensor;
+
+ if (this == sensor->pixel_array) {
+ this->npads = 1;
+ } else {
+ this->npads = 2;
+ this->source_pad = 1;
+ }
+
+ snprintf(this->sd.name,
+ sizeof(this->sd.name), "%s %s",
+ sensor->minfo.name, _this->name);
+
+ this->sink_fmt.width =
+ sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+ this->sink_fmt.height =
+ sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+ this->compose.width = this->sink_fmt.width;
+ this->compose.height = this->sink_fmt.height;
+ this->crop[this->source_pad] = this->compose;
+ this->pads[this->source_pad].flags = MEDIA_PAD_FL_SOURCE;
+ if (this != sensor->pixel_array) {
+ this->crop[this->sink_pad] = this->compose;
+ this->pads[this->sink_pad].flags = MEDIA_PAD_FL_SINK;
+ }
+
+ this->sd.entity.ops = &smiapp_entity_ops;
+
+ if (last == NULL) {
+ last = this;
+ continue;
+ }
+
+ this->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ this->sd.internal_ops = &smiapp_internal_ops;
+ this->sd.owner = NULL;
+ v4l2_set_subdevdata(&this->sd, client);
+
+ rval = media_entity_init(&this->sd.entity,
+ this->npads, this->pads, 0);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_entity_init failed\n");
+ goto out_nvm_release;
+ }
+
+ rval = media_entity_create_link(&this->sd.entity,
+ this->source_pad,
+ &last->sd.entity,
+ last->sink_pad,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (rval) {
+ dev_err(&client->dev,
+ "media_entity_create_link failed\n");
+ goto out_nvm_release;
+ }
+
+ rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
+ &this->sd);
+ if (rval) {
+ dev_err(&client->dev,
+ "v4l2_device_register_subdev failed\n");
+ goto out_nvm_release;
+ }
+
+ last = this;
+ }
+
+ dev_dbg(&client->dev, "profile %d\n", sensor->minfo.smiapp_profile);
+
+ sensor->pixel_array->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+ /* final steps */
+ smiapp_read_frame_fmt(sensor);
+ rval = smiapp_init_controls(sensor);
+ if (rval < 0)
+ goto out_nvm_release;
+
+ rval = smiapp_update_mode(sensor);
+ if (rval) {
+ dev_err(&client->dev, "update mode failed\n");
+ goto out_nvm_release;
+ }
+
+ sensor->streaming = false;
+ sensor->dev_init_done = true;
+
+ /* check flash capability */
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_FLASH_MODE_CAPABILITY, &tmp);
+ if (rval)
+ goto out_nvm_release;
+ sensor->flash_capability = tmp;
+
+ smiapp_power_off(sensor);
+
+ return 0;
+
+out_nvm_release:
+ device_remove_file(&client->dev, &dev_attr_nvm);
+
+out_power_off:
+ kfree(sensor->nvm);
+ sensor->nvm = NULL;
+ smiapp_power_off(sensor);
+
+out_smiapp_power_on:
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_free(sensor->platform_data->xshutdown);
+
+out_clk_set_rate:
+ clk_put(sensor->ext_clk);
+ sensor->ext_clk = NULL;
+
+out_clk_get:
+ regulator_put(sensor->vana);
+ sensor->vana = NULL;
+ return rval;
+}
+
+static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
+ struct smiapp_sensor *sensor = ssd->sensor;
+ u32 mbus_code =
+ smiapp_csi_data_formats[smiapp_pixel_order(sensor)].code;
+ unsigned int i;
+
+ mutex_lock(&sensor->mutex);
+
+ for (i = 0; i < ssd->npads; i++) {
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(fh, i);
+ struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(fh, i);
+ struct v4l2_rect *try_comp;
+
+ try_fmt->width = sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+ try_fmt->height = sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+ try_fmt->code = mbus_code;
+
+ try_crop->top = 0;
+ try_crop->left = 0;
+ try_crop->width = try_fmt->width;
+ try_crop->height = try_fmt->height;
+
+ if (ssd != sensor->pixel_array)
+ continue;
+
+ try_comp = v4l2_subdev_get_try_compose(fh, i);
+ *try_comp = *try_crop;
+ }
+
+ mutex_unlock(&sensor->mutex);
+
+ return smiapp_set_power(sd, 1);
+}
+
+static int smiapp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return smiapp_set_power(sd, 0);
+}
+
+static const struct v4l2_subdev_video_ops smiapp_video_ops = {
+ .s_stream = smiapp_set_stream,
+};
+
+static const struct v4l2_subdev_core_ops smiapp_core_ops = {
+ .s_power = smiapp_set_power,
+};
+
+static const struct v4l2_subdev_pad_ops smiapp_pad_ops = {
+ .enum_mbus_code = smiapp_enum_mbus_code,
+ .get_fmt = smiapp_get_format,
+ .set_fmt = smiapp_set_format,
+ .get_selection = smiapp_get_selection,
+ .set_selection = smiapp_set_selection,
+};
+
+static const struct v4l2_subdev_sensor_ops smiapp_sensor_ops = {
+ .g_skip_frames = smiapp_get_skip_frames,
+};
+
+static const struct v4l2_subdev_ops smiapp_ops = {
+ .core = &smiapp_core_ops,
+ .video = &smiapp_video_ops,
+ .pad = &smiapp_pad_ops,
+ .sensor = &smiapp_sensor_ops,
+};
+
+static const struct media_entity_operations smiapp_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops smiapp_internal_src_ops = {
+ .registered = smiapp_registered,
+ .open = smiapp_open,
+ .close = smiapp_close,
+};
+
+static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
+ .open = smiapp_open,
+ .close = smiapp_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * I2C Driver
+ */
+
+#ifdef CONFIG_PM
+
+static int smiapp_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ bool streaming;
+
+ BUG_ON(mutex_is_locked(&sensor->mutex));
+
+ if (sensor->power_count == 0)
+ return 0;
+
+ if (sensor->streaming)
+ smiapp_stop_streaming(sensor);
+
+ streaming = sensor->streaming;
+
+ smiapp_power_off(sensor);
+
+ /* save state for resume */
+ sensor->streaming = streaming;
+
+ return 0;
+}
+
+static int smiapp_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ int rval;
+
+ if (sensor->power_count == 0)
+ return 0;
+
+ rval = smiapp_power_on(sensor);
+ if (rval)
+ return rval;
+
+ if (sensor->streaming)
+ rval = smiapp_start_streaming(sensor);
+
+ return rval;
+}
+
+#else
+
+#define smiapp_suspend NULL
+#define smiapp_resume NULL
+
+#endif /* CONFIG_PM */
+
+static int smiapp_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct smiapp_sensor *sensor;
+ int rval;
+
+ if (client->dev.platform_data == NULL)
+ return -ENODEV;
+
+ sensor = kzalloc(sizeof(*sensor), GFP_KERNEL);
+ if (sensor == NULL)
+ return -ENOMEM;
+
+ sensor->platform_data = client->dev.platform_data;
+ mutex_init(&sensor->mutex);
+ mutex_init(&sensor->power_mutex);
+ sensor->src = &sensor->ssds[sensor->ssds_used];
+
+ v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
+ sensor->src->sd.internal_ops = &smiapp_internal_src_ops;
+ sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->src->sensor = sensor;
+
+ sensor->src->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ rval = media_entity_init(&sensor->src->sd.entity, 2,
+ sensor->src->pads, 0);
+ if (rval < 0)
+ kfree(sensor);
+
+ return rval;
+}
+
+static int __exit smiapp_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ unsigned int i;
+
+ if (sensor->power_count) {
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_set_value(sensor->platform_data->xshutdown, 0);
+ if (sensor->platform_data->set_xclk)
+ sensor->platform_data->set_xclk(&sensor->src->sd, 0);
+ else
+ clk_disable(sensor->ext_clk);
+ sensor->power_count = 0;
+ }
+
+ if (sensor->nvm) {
+ device_remove_file(&client->dev, &dev_attr_nvm);
+ kfree(sensor->nvm);
+ }
+
+ for (i = 0; i < sensor->ssds_used; i++) {
+ media_entity_cleanup(&sensor->ssds[i].sd.entity);
+ v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
+ }
+ smiapp_free_controls(sensor);
+ if (sensor->platform_data->xshutdown != SMIAPP_NO_XSHUTDOWN)
+ gpio_free(sensor->platform_data->xshutdown);
+ if (sensor->ext_clk)
+ clk_put(sensor->ext_clk);
+ if (sensor->vana)
+ regulator_put(sensor->vana);
+
+ kfree(sensor);
+
+ return 0;
+}
+
+static const struct i2c_device_id smiapp_id_table[] = {
+ { SMIAPP_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, smiapp_id_table);
+
+static const struct dev_pm_ops smiapp_pm_ops = {
+ .suspend = smiapp_suspend,
+ .resume = smiapp_resume,
+};
+
+static struct i2c_driver smiapp_i2c_driver = {
+ .driver = {
+ .name = SMIAPP_NAME,
+ .pm = &smiapp_pm_ops,
+ },
+ .probe = smiapp_probe,
+ .remove = __exit_p(smiapp_remove),
+ .id_table = smiapp_id_table,
+};
+
+module_i2c_driver(smiapp_i2c_driver);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>");
+MODULE_DESCRIPTION("Generic SMIA/SMIA++ camera module driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/smiapp/smiapp-limits.c b/drivers/media/video/smiapp/smiapp-limits.c
new file mode 100644
index 000000000000..0800e095724e
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-limits.c
@@ -0,0 +1,132 @@
+/*
+ * drivers/media/video/smiapp/smiapp-limits.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "smiapp.h"
+
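+/*
+ * This table is indexed by the SMIAPP_LIMIT_* values in smiapp-limits.h;
+ * the numeric comments below mark every fifth index so the two files can
+ * be kept in sync when entries are added.
+ */
+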
+struct smiapp_reg_limits smiapp_reg_limits[] = {
+ { SMIAPP_REG_U16_ANALOGUE_GAIN_CAPABILITY, "analogue_gain_capability" }, /* 0 */
+ { SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MIN, "analogue_gain_code_min" },
+ { SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MAX, "analogue_gain_code_max" },
+ { SMIAPP_REG_U8_THS_ZERO_MIN, "ths_zero_min" },
+ { SMIAPP_REG_U8_TCLK_TRAIL_MIN, "tclk_trail_min" },
+ { SMIAPP_REG_U16_INTEGRATION_TIME_CAPABILITY, "integration_time_capability" }, /* 5 */
+ { SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MIN, "coarse_integration_time_min" },
+ { SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MAX_MARGIN, "coarse_integration_time_max_margin" },
+ { SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN, "fine_integration_time_min" },
+ { SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN, "fine_integration_time_max_margin" },
+ { SMIAPP_REG_U16_DIGITAL_GAIN_CAPABILITY, "digital_gain_capability" }, /* 10 */
+ { SMIAPP_REG_U16_DIGITAL_GAIN_MIN, "digital_gain_min" },
+ { SMIAPP_REG_U16_DIGITAL_GAIN_MAX, "digital_gain_max" },
+ { SMIAPP_REG_F32_MIN_EXT_CLK_FREQ_HZ, "min_ext_clk_freq_hz" },
+ { SMIAPP_REG_F32_MAX_EXT_CLK_FREQ_HZ, "max_ext_clk_freq_hz" },
+ { SMIAPP_REG_U16_MIN_PRE_PLL_CLK_DIV, "min_pre_pll_clk_div" }, /* 15 */
+ { SMIAPP_REG_U16_MAX_PRE_PLL_CLK_DIV, "max_pre_pll_clk_div" },
+ { SMIAPP_REG_F32_MIN_PLL_IP_FREQ_HZ, "min_pll_ip_freq_hz" },
+ { SMIAPP_REG_F32_MAX_PLL_IP_FREQ_HZ, "max_pll_ip_freq_hz" },
+ { SMIAPP_REG_U16_MIN_PLL_MULTIPLIER, "min_pll_multiplier" },
+ { SMIAPP_REG_U16_MAX_PLL_MULTIPLIER, "max_pll_multiplier" }, /* 20 */
+ { SMIAPP_REG_F32_MIN_PLL_OP_FREQ_HZ, "min_pll_op_freq_hz" },
+ { SMIAPP_REG_F32_MAX_PLL_OP_FREQ_HZ, "max_pll_op_freq_hz" },
+ { SMIAPP_REG_U16_MIN_VT_SYS_CLK_DIV, "min_vt_sys_clk_div" },
+ { SMIAPP_REG_U16_MAX_VT_SYS_CLK_DIV, "max_vt_sys_clk_div" },
+ { SMIAPP_REG_F32_MIN_VT_SYS_CLK_FREQ_HZ, "min_vt_sys_clk_freq_hz" }, /* 25 */
+ { SMIAPP_REG_F32_MAX_VT_SYS_CLK_FREQ_HZ, "max_vt_sys_clk_freq_hz" },
+ { SMIAPP_REG_F32_MIN_VT_PIX_CLK_FREQ_HZ, "min_vt_pix_clk_freq_hz" },
+ { SMIAPP_REG_F32_MAX_VT_PIX_CLK_FREQ_HZ, "max_vt_pix_clk_freq_hz" },
+ { SMIAPP_REG_U16_MIN_VT_PIX_CLK_DIV, "min_vt_pix_clk_div" },
+ { SMIAPP_REG_U16_MAX_VT_PIX_CLK_DIV, "max_vt_pix_clk_div" }, /* 30 */
+ { SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES, "min_frame_length_lines" },
+ { SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES, "max_frame_length_lines" },
+ { SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK, "min_line_length_pck" },
+ { SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK, "max_line_length_pck" },
+ { SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK, "min_line_blanking_pck" }, /* 35 */
+ { SMIAPP_REG_U16_MIN_FRAME_BLANKING_LINES, "min_frame_blanking_lines" },
+ { SMIAPP_REG_U8_MIN_LINE_LENGTH_PCK_STEP_SIZE, "min_line_length_pck_step_size" },
+ { SMIAPP_REG_U16_MIN_OP_SYS_CLK_DIV, "min_op_sys_clk_div" },
+ { SMIAPP_REG_U16_MAX_OP_SYS_CLK_DIV, "max_op_sys_clk_div" },
+ { SMIAPP_REG_F32_MIN_OP_SYS_CLK_FREQ_HZ, "min_op_sys_clk_freq_hz" }, /* 40 */
+ { SMIAPP_REG_F32_MAX_OP_SYS_CLK_FREQ_HZ, "max_op_sys_clk_freq_hz" },
+ { SMIAPP_REG_U16_MIN_OP_PIX_CLK_DIV, "min_op_pix_clk_div" },
+ { SMIAPP_REG_U16_MAX_OP_PIX_CLK_DIV, "max_op_pix_clk_div" },
+ { SMIAPP_REG_F32_MIN_OP_PIX_CLK_FREQ_HZ, "min_op_pix_clk_freq_hz" },
+ { SMIAPP_REG_F32_MAX_OP_PIX_CLK_FREQ_HZ, "max_op_pix_clk_freq_hz" }, /* 45 */
+ { SMIAPP_REG_U16_X_ADDR_MIN, "x_addr_min" },
+ { SMIAPP_REG_U16_Y_ADDR_MIN, "y_addr_min" },
+ { SMIAPP_REG_U16_X_ADDR_MAX, "x_addr_max" },
+ { SMIAPP_REG_U16_Y_ADDR_MAX, "y_addr_max" },
+ { SMIAPP_REG_U16_MIN_X_OUTPUT_SIZE, "min_x_output_size" }, /* 50 */
+ { SMIAPP_REG_U16_MIN_Y_OUTPUT_SIZE, "min_y_output_size" },
+ { SMIAPP_REG_U16_MAX_X_OUTPUT_SIZE, "max_x_output_size" },
+ { SMIAPP_REG_U16_MAX_Y_OUTPUT_SIZE, "max_y_output_size" },
+ { SMIAPP_REG_U16_MIN_EVEN_INC, "min_even_inc" },
+ { SMIAPP_REG_U16_MAX_EVEN_INC, "max_even_inc" }, /* 55 */
+ { SMIAPP_REG_U16_MIN_ODD_INC, "min_odd_inc" },
+ { SMIAPP_REG_U16_MAX_ODD_INC, "max_odd_inc" },
+ { SMIAPP_REG_U16_SCALING_CAPABILITY, "scaling_capability" },
+ { SMIAPP_REG_U16_SCALER_M_MIN, "scaler_m_min" },
+ { SMIAPP_REG_U16_SCALER_M_MAX, "scaler_m_max" }, /* 60 */
+ { SMIAPP_REG_U16_SCALER_N_MIN, "scaler_n_min" },
+ { SMIAPP_REG_U16_SCALER_N_MAX, "scaler_n_max" },
+ { SMIAPP_REG_U16_SPATIAL_SAMPLING_CAPABILITY, "spatial_sampling_capability" },
+ { SMIAPP_REG_U8_DIGITAL_CROP_CAPABILITY, "digital_crop_capability" },
+ { SMIAPP_REG_U16_COMPRESSION_CAPABILITY, "compression_capability" }, /* 65 */
+ { SMIAPP_REG_U8_FIFO_SUPPORT_CAPABILITY, "fifo_support_capability" },
+ { SMIAPP_REG_U8_DPHY_CTRL_CAPABILITY, "dphy_ctrl_capability" },
+ { SMIAPP_REG_U8_CSI_LANE_MODE_CAPABILITY, "csi_lane_mode_capability" },
+ { SMIAPP_REG_U8_CSI_SIGNALLING_MODE_CAPABILITY, "csi_signalling_mode_capability" },
+ { SMIAPP_REG_U8_FAST_STANDBY_CAPABILITY, "fast_standby_capability" }, /* 70 */
+ { SMIAPP_REG_U8_CCI_ADDRESS_CONTROL_CAPABILITY, "cci_address_control_capability" },
+ { SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_1_LANE_MODE_MBPS, "max_per_lane_bitrate_1_lane_mode_mbps" },
+ { SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_2_LANE_MODE_MBPS, "max_per_lane_bitrate_2_lane_mode_mbps" },
+ { SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_3_LANE_MODE_MBPS, "max_per_lane_bitrate_3_lane_mode_mbps" },
+ { SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_4_LANE_MODE_MBPS, "max_per_lane_bitrate_4_lane_mode_mbps" }, /* 75 */
+ { SMIAPP_REG_U8_TEMP_SENSOR_CAPABILITY, "temp_sensor_capability" },
+ { SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES_BIN, "min_frame_length_lines_bin" },
+ { SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES_BIN, "max_frame_length_lines_bin" },
+ { SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK_BIN, "min_line_length_pck_bin" },
+ { SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK_BIN, "max_line_length_pck_bin" }, /* 80 */
+ { SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK_BIN, "min_line_blanking_pck_bin" },
+ { SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN_BIN, "fine_integration_time_min_bin" },
+ { SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN, "fine_integration_time_max_margin_bin" },
+ { SMIAPP_REG_U8_BINNING_CAPABILITY, "binning_capability" },
+ { SMIAPP_REG_U8_BINNING_WEIGHTING_CAPABILITY, "binning_weighting_capability" }, /* 85 */
+ { SMIAPP_REG_U8_DATA_TRANSFER_IF_CAPABILITY, "data_transfer_if_capability" },
+ { SMIAPP_REG_U8_SHADING_CORRECTION_CAPABILITY, "shading_correction_capability" },
+ { SMIAPP_REG_U8_GREEN_IMBALANCE_CAPABILITY, "green_imbalance_capability" },
+ { SMIAPP_REG_U8_BLACK_LEVEL_CAPABILITY, "black_level_capability" },
+ { SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_CAPABILITY, "module_specific_correction_capability" }, /* 90 */
+ { SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY, "defect_correction_capability" },
+ { SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY_2, "defect_correction_capability_2" },
+ { SMIAPP_REG_U8_EDOF_CAPABILITY, "edof_capability" },
+ { SMIAPP_REG_U8_COLOUR_FEEDBACK_CAPABILITY, "colour_feedback_capability" },
+ { SMIAPP_REG_U8_ESTIMATION_MODE_CAPABILITY, "estimation_mode_capability" }, /* 95 */
+ { SMIAPP_REG_U8_ESTIMATION_ZONE_CAPABILITY, "estimation_zone_capability" },
+ { SMIAPP_REG_U16_CAPABILITY_TRDY_MIN, "capability_trdy_min" },
+ { SMIAPP_REG_U8_FLASH_MODE_CAPABILITY, "flash_mode_capability" },
+ { SMIAPP_REG_U8_ACTUATOR_CAPABILITY, "actuator_capability" },
+ { SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_1, "bracketing_lut_capability_1" }, /* 100 */
+ { SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_2, "bracketing_lut_capability_2" },
+ { SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_STEP, "analogue_gain_code_step" },
+ { 0, NULL },
+};
diff --git a/drivers/media/video/smiapp/smiapp-limits.h b/drivers/media/video/smiapp/smiapp-limits.h
new file mode 100644
index 000000000000..7f4836bb78db
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-limits.h
@@ -0,0 +1,128 @@
+/*
+ * drivers/media/video/smiapp/smiapp-limits.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
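+/*
+ * Indices into smiapp_reg_limits[] in smiapp-limits.c. SMIAPP_LIMIT_LAST
+ * is the number of entries, excluding the terminating { 0, NULL } entry,
+ * and must stay in sync with that table.
+ */
+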
+#define SMIAPP_LIMIT_ANALOGUE_GAIN_CAPABILITY 0
+#define SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN 1
+#define SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MAX 2
+#define SMIAPP_LIMIT_THS_ZERO_MIN 3
+#define SMIAPP_LIMIT_TCLK_TRAIL_MIN 4
+#define SMIAPP_LIMIT_INTEGRATION_TIME_CAPABILITY 5
+#define SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MIN 6
+#define SMIAPP_LIMIT_COARSE_INTEGRATION_TIME_MAX_MARGIN 7
+#define SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN 8
+#define SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN 9
+#define SMIAPP_LIMIT_DIGITAL_GAIN_CAPABILITY 10
+#define SMIAPP_LIMIT_DIGITAL_GAIN_MIN 11
+#define SMIAPP_LIMIT_DIGITAL_GAIN_MAX 12
+#define SMIAPP_LIMIT_MIN_EXT_CLK_FREQ_HZ 13
+#define SMIAPP_LIMIT_MAX_EXT_CLK_FREQ_HZ 14
+#define SMIAPP_LIMIT_MIN_PRE_PLL_CLK_DIV 15
+#define SMIAPP_LIMIT_MAX_PRE_PLL_CLK_DIV 16
+#define SMIAPP_LIMIT_MIN_PLL_IP_FREQ_HZ 17
+#define SMIAPP_LIMIT_MAX_PLL_IP_FREQ_HZ 18
+#define SMIAPP_LIMIT_MIN_PLL_MULTIPLIER 19
+#define SMIAPP_LIMIT_MAX_PLL_MULTIPLIER 20
+#define SMIAPP_LIMIT_MIN_PLL_OP_FREQ_HZ 21
+#define SMIAPP_LIMIT_MAX_PLL_OP_FREQ_HZ 22
+#define SMIAPP_LIMIT_MIN_VT_SYS_CLK_DIV 23
+#define SMIAPP_LIMIT_MAX_VT_SYS_CLK_DIV 24
+#define SMIAPP_LIMIT_MIN_VT_SYS_CLK_FREQ_HZ 25
+#define SMIAPP_LIMIT_MAX_VT_SYS_CLK_FREQ_HZ 26
+#define SMIAPP_LIMIT_MIN_VT_PIX_CLK_FREQ_HZ 27
+#define SMIAPP_LIMIT_MAX_VT_PIX_CLK_FREQ_HZ 28
+#define SMIAPP_LIMIT_MIN_VT_PIX_CLK_DIV 29
+#define SMIAPP_LIMIT_MAX_VT_PIX_CLK_DIV 30
+#define SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES 31
+#define SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES 32
+#define SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK 33
+#define SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK 34
+#define SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK 35
+#define SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES 36
+#define SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_STEP_SIZE 37
+#define SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV 38
+#define SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV 39
+#define SMIAPP_LIMIT_MIN_OP_SYS_CLK_FREQ_HZ 40
+#define SMIAPP_LIMIT_MAX_OP_SYS_CLK_FREQ_HZ 41
+#define SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV 42
+#define SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV 43
+#define SMIAPP_LIMIT_MIN_OP_PIX_CLK_FREQ_HZ 44
+#define SMIAPP_LIMIT_MAX_OP_PIX_CLK_FREQ_HZ 45
+#define SMIAPP_LIMIT_X_ADDR_MIN 46
+#define SMIAPP_LIMIT_Y_ADDR_MIN 47
+#define SMIAPP_LIMIT_X_ADDR_MAX 48
+#define SMIAPP_LIMIT_Y_ADDR_MAX 49
+#define SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE 50
+#define SMIAPP_LIMIT_MIN_Y_OUTPUT_SIZE 51
+#define SMIAPP_LIMIT_MAX_X_OUTPUT_SIZE 52
+#define SMIAPP_LIMIT_MAX_Y_OUTPUT_SIZE 53
+#define SMIAPP_LIMIT_MIN_EVEN_INC 54
+#define SMIAPP_LIMIT_MAX_EVEN_INC 55
+#define SMIAPP_LIMIT_MIN_ODD_INC 56
+#define SMIAPP_LIMIT_MAX_ODD_INC 57
+#define SMIAPP_LIMIT_SCALING_CAPABILITY 58
+#define SMIAPP_LIMIT_SCALER_M_MIN 59
+#define SMIAPP_LIMIT_SCALER_M_MAX 60
+#define SMIAPP_LIMIT_SCALER_N_MIN 61
+#define SMIAPP_LIMIT_SCALER_N_MAX 62
+#define SMIAPP_LIMIT_SPATIAL_SAMPLING_CAPABILITY 63
+#define SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY 64
+#define SMIAPP_LIMIT_COMPRESSION_CAPABILITY 65
+#define SMIAPP_LIMIT_FIFO_SUPPORT_CAPABILITY 66
+#define SMIAPP_LIMIT_DPHY_CTRL_CAPABILITY 67
+#define SMIAPP_LIMIT_CSI_LANE_MODE_CAPABILITY 68
+#define SMIAPP_LIMIT_CSI_SIGNALLING_MODE_CAPABILITY 69
+#define SMIAPP_LIMIT_FAST_STANDBY_CAPABILITY 70
+#define SMIAPP_LIMIT_CCI_ADDRESS_CONTROL_CAPABILITY 71
+#define SMIAPP_LIMIT_MAX_PER_LANE_BITRATE_1_LANE_MODE_MBPS 72
+#define SMIAPP_LIMIT_MAX_PER_LANE_BITRATE_2_LANE_MODE_MBPS 73
+#define SMIAPP_LIMIT_MAX_PER_LANE_BITRATE_3_LANE_MODE_MBPS 74
+#define SMIAPP_LIMIT_MAX_PER_LANE_BITRATE_4_LANE_MODE_MBPS 75
+#define SMIAPP_LIMIT_TEMP_SENSOR_CAPABILITY 76
+#define SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN 77
+#define SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN 78
+#define SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN 79
+#define SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN 80
+#define SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN 81
+#define SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN_BIN 82
+#define SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN 83
+#define SMIAPP_LIMIT_BINNING_CAPABILITY 84
+#define SMIAPP_LIMIT_BINNING_WEIGHTING_CAPABILITY 85
+#define SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY 86
+#define SMIAPP_LIMIT_SHADING_CORRECTION_CAPABILITY 87
+#define SMIAPP_LIMIT_GREEN_IMBALANCE_CAPABILITY 88
+#define SMIAPP_LIMIT_BLACK_LEVEL_CAPABILITY 89
+#define SMIAPP_LIMIT_MODULE_SPECIFIC_CORRECTION_CAPABILITY 90
+#define SMIAPP_LIMIT_DEFECT_CORRECTION_CAPABILITY 91
+#define SMIAPP_LIMIT_DEFECT_CORRECTION_CAPABILITY_2 92
+#define SMIAPP_LIMIT_EDOF_CAPABILITY 93
+#define SMIAPP_LIMIT_COLOUR_FEEDBACK_CAPABILITY 94
+#define SMIAPP_LIMIT_ESTIMATION_MODE_CAPABILITY 95
+#define SMIAPP_LIMIT_ESTIMATION_ZONE_CAPABILITY 96
+#define SMIAPP_LIMIT_CAPABILITY_TRDY_MIN 97
+#define SMIAPP_LIMIT_FLASH_MODE_CAPABILITY 98
+#define SMIAPP_LIMIT_ACTUATOR_CAPABILITY 99
+#define SMIAPP_LIMIT_BRACKETING_LUT_CAPABILITY_1 100
+#define SMIAPP_LIMIT_BRACKETING_LUT_CAPABILITY_2 101
+#define SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_STEP 102
+#define SMIAPP_LIMIT_LAST 103
diff --git a/drivers/media/video/smiapp/smiapp-quirk.c b/drivers/media/video/smiapp/smiapp-quirk.c
new file mode 100644
index 000000000000..55e87950dcea
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-quirk.c
@@ -0,0 +1,306 @@
+/*
+ * drivers/media/video/smiapp/smiapp-quirk.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+
+#include "smiapp.h"
+
+static int smiapp_write_8(struct smiapp_sensor *sensor, u16 reg, u8 val)
+{
+ return smiapp_write(sensor, (SMIA_REG_8BIT << 16) | reg, val);
+}
+
+static int smiapp_write_8s(struct smiapp_sensor *sensor,
+ struct smiapp_reg_8 *regs, int len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ for (; len > 0; len--, regs++) {
+ rval = smiapp_write_8(sensor, regs->reg, regs->val);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "error %d writing reg 0x%4.4x, val 0x%2.2x",
+ rval, regs->reg, regs->val);
+ return rval;
+ }
+ }
+
+ return 0;
+}
+
+void smiapp_replace_limit(struct smiapp_sensor *sensor,
+ u32 limit, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+
+ dev_dbg(&client->dev, "quirk: 0x%8.8x \"%s\" = %d, 0x%x\n",
+ smiapp_reg_limits[limit].addr,
+ smiapp_reg_limits[limit].what, val, val);
+ sensor->limits[limit] = val;
+}
+
+int smiapp_replace_limit_at(struct smiapp_sensor *sensor,
+ u32 reg, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int i;
+
+ for (i = 0; smiapp_reg_limits[i].addr; i++) {
+ if ((smiapp_reg_limits[i].addr & 0xffff) != reg)
+ continue;
+
+ smiapp_replace_limit(sensor, i, val);
+
+ return 0;
+ }
+
+ dev_dbg(&client->dev, "quirk: bad register 0x%4.4x\n", reg);
+
+ return -EINVAL;
+}
+
+bool smiapp_quirk_reg(struct smiapp_sensor *sensor,
+ u32 reg, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ const struct smia_reg *sreg;
+
+ if (!sensor->minfo.quirk)
+ return false;
+
+ sreg = sensor->minfo.quirk->regs;
+
+ if (!sreg)
+ return false;
+
+ while (sreg->type) {
+ u16 type = reg >> 16;
+ u16 reg16 = reg;
+
+ if (sreg->type != type || sreg->reg != reg16) {
+ sreg++;
+ continue;
+ }
+
+ switch ((u8)type) {
+ case SMIA_REG_8BIT:
+ dev_dbg(&client->dev, "quirk: 0x%8.8x: 0x%2.2x\n",
+ reg, sreg->val);
+ break;
+ case SMIA_REG_16BIT:
+ dev_dbg(&client->dev, "quirk: 0x%8.8x: 0x%4.4x\n",
+ reg, sreg->val);
+ break;
+ case SMIA_REG_32BIT:
+ dev_dbg(&client->dev, "quirk: 0x%8.8x: 0x%8.8x\n",
+ reg, sreg->val);
+ break;
+ }
+
+ *val = sreg->val;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int jt8ew9_limits(struct smiapp_sensor *sensor)
+{
+ if (sensor->minfo.revision_number_major < 0x03)
+ sensor->frame_skip = 1;
+
+	/*
+	 * Below a gain code of 24 the analogue gain has no effect at all,
+	 * but ~59 is needed for the full dynamic range.
+	 */
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MIN, 59);
+ smiapp_replace_limit(
+ sensor, SMIAPP_LIMIT_ANALOGUE_GAIN_CODE_MAX, 6000);
+
+ return 0;
+}
+
+static int jt8ew9_post_poweron(struct smiapp_sensor *sensor)
+{
+ struct smiapp_reg_8 regs[] = {
+ { 0x30a3, 0xd8 }, /* Output port control : LVDS ports only */
+ { 0x30ae, 0x00 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */
+ { 0x30af, 0xd0 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */
+ { 0x322d, 0x04 }, /* Adjusting Processing Image Size to Scaler Toshiba Recommendation Setting */
+ { 0x3255, 0x0f }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */
+ { 0x3256, 0x15 }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */
+ { 0x3258, 0x70 }, /* Analog Gain Control Toshiba Recommendation Setting */
+ { 0x3259, 0x70 }, /* Analog Gain Control Toshiba Recommendation Setting */
+ { 0x325f, 0x7c }, /* Analog Gain Control Toshiba Recommendation Setting */
+ { 0x3302, 0x06 }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */
+ { 0x3304, 0x00 }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */
+ { 0x3307, 0x22 }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */
+ { 0x3308, 0x8d }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */
+ { 0x331e, 0x0f }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3320, 0x30 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3321, 0x11 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3322, 0x98 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3323, 0x64 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x3325, 0x83 }, /* Read Out Timing Control Toshiba Recommendation Setting */
+ { 0x3330, 0x18 }, /* Read Out Timing Control Toshiba Recommendation Setting */
+ { 0x333c, 0x01 }, /* Read Out Timing Control Toshiba Recommendation Setting */
+ { 0x3345, 0x2f }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */
+ { 0x33de, 0x38 }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */
+ /* Taken from v03. No idea what the rest are. */
+ { 0x32e0, 0x05 },
+ { 0x32e1, 0x05 },
+ { 0x32e2, 0x04 },
+ { 0x32e5, 0x04 },
+ { 0x32e6, 0x04 },
+	};
+
+ return smiapp_write_8s(sensor, regs, ARRAY_SIZE(regs));
+}
+
+const struct smiapp_quirk smiapp_jt8ew9_quirk = {
+ .limits = jt8ew9_limits,
+ .post_poweron = jt8ew9_post_poweron,
+};
+
+static int imx125es_post_poweron(struct smiapp_sensor *sensor)
+{
+ /* Taken from v02. No idea what the other two are. */
+ struct smiapp_reg_8 regs[] = {
+ /*
+ * 0x3302: clk during frame blanking:
+ * 0x00 - HS mode, 0x01 - LP11
+ */
+ { 0x3302, 0x01 },
+ { 0x302d, 0x00 },
+ { 0x3b08, 0x8c },
+ };
+
+ return smiapp_write_8s(sensor, regs, ARRAY_SIZE(regs));
+}
+
+const struct smiapp_quirk smiapp_imx125es_quirk = {
+ .post_poweron = imx125es_post_poweron,
+};
+
+static int jt8ev1_limits(struct smiapp_sensor *sensor)
+{
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_X_ADDR_MAX, 4271);
+ smiapp_replace_limit(sensor,
+ SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN, 184);
+
+ return 0;
+}
+
+static int jt8ev1_post_poweron(struct smiapp_sensor *sensor)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ int rval;
+
+ struct smiapp_reg_8 regs[] = {
+ { 0x3031, 0xcd }, /* For digital binning (EQ_MONI) */
+ { 0x30a3, 0xd0 }, /* FLASH STROBE enable */
+ { 0x3237, 0x00 }, /* For control of pulse timing for ADC */
+ { 0x3238, 0x43 },
+ { 0x3301, 0x06 }, /* For analog bias for sensor */
+ { 0x3302, 0x06 },
+ { 0x3304, 0x00 },
+ { 0x3305, 0x88 },
+ { 0x332a, 0x14 },
+ { 0x332c, 0x6b },
+ { 0x3336, 0x01 },
+ { 0x333f, 0x1f },
+ { 0x3355, 0x00 },
+ { 0x3356, 0x20 },
+ { 0x33bf, 0x20 }, /* Adjust the FBC speed */
+ { 0x33c9, 0x20 },
+ { 0x33ce, 0x30 }, /* Adjust the parameter for logic function */
+ { 0x33cf, 0xec }, /* For Black sun */
+		{ 0x3328, 0x80 },	/* Ugh. No idea what this is. */
+ };
+
+ struct smiapp_reg_8 regs_96[] = {
+ { 0x30ae, 0x00 }, /* For control of ADC clock */
+ { 0x30af, 0xd0 },
+ { 0x30b0, 0x01 },
+ };
+
+ rval = smiapp_write_8s(sensor, regs, ARRAY_SIZE(regs));
+ if (rval < 0)
+ return rval;
+
+ switch (sensor->platform_data->ext_clk) {
+ case 9600000:
+ return smiapp_write_8s(sensor, regs_96,
+ ARRAY_SIZE(regs_96));
+ default:
+ dev_warn(&client->dev, "no MSRs for %d Hz ext_clk\n",
+ sensor->platform_data->ext_clk);
+ return 0;
+ }
+}
+
+static int jt8ev1_pre_streamon(struct smiapp_sensor *sensor)
+{
+ return smiapp_write_8(sensor, 0x3328, 0x00);
+}
+
+static int jt8ev1_post_streamoff(struct smiapp_sensor *sensor)
+{
+ int rval;
+
+ /* Workaround: allows fast standby to work properly */
+ rval = smiapp_write_8(sensor, 0x3205, 0x04);
+ if (rval < 0)
+ return rval;
+
+ /* Wait for 1 ms + one line => 2 ms is likely enough */
+ usleep_range(2000, 2000);
+
+ /* Restore it */
+ rval = smiapp_write_8(sensor, 0x3205, 0x00);
+ if (rval < 0)
+ return rval;
+
+ return smiapp_write_8(sensor, 0x3328, 0x80);
+}
+
+const struct smiapp_quirk smiapp_jt8ev1_quirk = {
+ .limits = jt8ev1_limits,
+ .post_poweron = jt8ev1_post_poweron,
+ .pre_streamon = jt8ev1_pre_streamon,
+ .post_streamoff = jt8ev1_post_streamoff,
+ .flags = SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE,
+};
+
+static int tcm8500md_limits(struct smiapp_sensor *sensor)
+{
+ smiapp_replace_limit(sensor, SMIAPP_LIMIT_MIN_PLL_IP_FREQ_HZ, 2700000);
+
+ return 0;
+}
+
+const struct smiapp_quirk smiapp_tcm8500md_quirk = {
+ .limits = tcm8500md_limits,
+};
diff --git a/drivers/media/video/smiapp/smiapp-quirk.h b/drivers/media/video/smiapp/smiapp-quirk.h
new file mode 100644
index 000000000000..f4dcaabaefe7
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-quirk.h
@@ -0,0 +1,83 @@
+/*
+ * drivers/media/video/smiapp/smiapp-quirk.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SMIAPP_QUIRK__
+#define __SMIAPP_QUIRK__
+
+struct smiapp_sensor;
+
+/**
+ * struct smiapp_quirk - quirks for sensors that deviate from the SMIA++
+ *			 standard
+ *
+ * @limits: Replace sensor->limits with values which can't be read from
+ *	    sensor registers. Called the first time the sensor is powered up.
+ * @post_poweron: Always called after the sensor has been fully powered on.
+ * @pre_streamon: Called just before streaming is enabled.
+ * @post_streamoff: Called right after streaming has been stopped.
+ * @regs: Zero-terminated list of register values that override what is read
+ *	  from the sensor; consulted by smiapp_quirk_reg().
+ * @flags: Quirk flags (SMIAPP_QUIRK_FLAG_*).
+ */
+struct smiapp_quirk {
+ int (*limits)(struct smiapp_sensor *sensor);
+ int (*post_poweron)(struct smiapp_sensor *sensor);
+ int (*pre_streamon)(struct smiapp_sensor *sensor);
+ int (*post_streamoff)(struct smiapp_sensor *sensor);
+ const struct smia_reg *regs;
+ unsigned long flags;
+};
+
+/*
+ * The op pix clock is normally the total over all lanes; this flag marks
+ * modules for which it is specified per lane instead.
+ */
+#define SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE (1 << 0)
+#define SMIAPP_QUIRK_FLAG_8BIT_READ_ONLY (1 << 1)
+
+struct smiapp_reg_8 {
+ u16 reg;
+ u8 val;
+};
+
+void smiapp_replace_limit(struct smiapp_sensor *sensor,
+ u32 limit, u32 val);
+bool smiapp_quirk_reg(struct smiapp_sensor *sensor,
+ u32 reg, u32 *val);
+
+#define SMIAPP_MK_QUIRK_REG(_reg, _val) \
+	{				\
+		.type = ((_reg) >> 16),	\
+		.reg = (u16)(_reg),	\
+		.val = (_val),		\
+	}
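+
+/*
+ * For illustration only: a quirk .regs table is built from entries such as
+ *	SMIAPP_MK_QUIRK_REG(SMIAPP_REG_U16_SCALING_CAPABILITY, 0),
+ * and ends with an all-zero entry; smiapp_quirk_reg() stops at .type == 0.
+ */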
+
+#define smiapp_call_quirk(_sensor, _quirk, ...) \
+ (_sensor->minfo.quirk && \
+ _sensor->minfo.quirk->_quirk ? \
+ _sensor->minfo.quirk->_quirk(_sensor, ##__VA_ARGS__) : 0)
+
+#define smiapp_needs_quirk(_sensor, _quirk) \
+ (_sensor->minfo.quirk ? \
+ _sensor->minfo.quirk->flags & _quirk : 0)
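+
+/*
+ * Typical use in the core (illustrative, not an exhaustive list):
+ *	smiapp_call_quirk(sensor, post_poweron);
+ *	if (smiapp_needs_quirk(sensor, SMIAPP_QUIRK_FLAG_OP_PIX_CLOCK_PER_LANE))
+ *		...;
+ * Both macros evaluate to 0 when no quirk is set for the module.
+ */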
+
+extern const struct smiapp_quirk smiapp_jt8ev1_quirk;
+extern const struct smiapp_quirk smiapp_imx125es_quirk;
+extern const struct smiapp_quirk smiapp_jt8ew9_quirk;
+extern const struct smiapp_quirk smiapp_tcm8500md_quirk;
+
+#endif /* __SMIAPP_QUIRK__ */
diff --git a/drivers/media/video/smiapp/smiapp-reg-defs.h b/drivers/media/video/smiapp/smiapp-reg-defs.h
new file mode 100644
index 000000000000..a089eb8161e1
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-reg-defs.h
@@ -0,0 +1,503 @@
+/*
+ * drivers/media/video/smiapp/smiapp-reg-defs.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#define SMIAPP_REG_MK_U8(r) ((SMIA_REG_8BIT << 16) | (r))
+#define SMIAPP_REG_MK_U16(r) ((SMIA_REG_16BIT << 16) | (r))
+#define SMIAPP_REG_MK_U32(r) ((SMIA_REG_32BIT << 16) | (r))
+
+#define SMIAPP_REG_MK_F32(r) (SMIA_REG_FLAG_FLOAT | (SMIA_REG_32BIT << 16) | (r))
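+
+/*
+ * The helpers above encode the register width (plus the float flag for F32
+ * registers) into the bits above the 16-bit register address, e.g.
+ * SMIAPP_REG_U16_MODEL_ID expands to (SMIA_REG_16BIT << 16) | 0x0000.
+ * Users such as smiapp_quirk_reg() recover the address by truncating to
+ * 16 bits and the width by shifting right by 16.
+ */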
+
+#define SMIAPP_REG_U16_MODEL_ID SMIAPP_REG_MK_U16(0x0000)
+#define SMIAPP_REG_U8_REVISION_NUMBER_MAJOR SMIAPP_REG_MK_U8(0x0002)
+#define SMIAPP_REG_U8_MANUFACTURER_ID SMIAPP_REG_MK_U8(0x0003)
+#define SMIAPP_REG_U8_SMIA_VERSION SMIAPP_REG_MK_U8(0x0004)
+#define SMIAPP_REG_U8_FRAME_COUNT SMIAPP_REG_MK_U8(0x0005)
+#define SMIAPP_REG_U8_PIXEL_ORDER SMIAPP_REG_MK_U8(0x0006)
+#define SMIAPP_REG_U16_DATA_PEDESTAL SMIAPP_REG_MK_U16(0x0008)
+#define SMIAPP_REG_U8_PIXEL_DEPTH SMIAPP_REG_MK_U8(0x000c)
+#define SMIAPP_REG_U8_REVISION_NUMBER_MINOR SMIAPP_REG_MK_U8(0x0010)
+#define SMIAPP_REG_U8_SMIAPP_VERSION SMIAPP_REG_MK_U8(0x0011)
+#define SMIAPP_REG_U8_MODULE_DATE_YEAR SMIAPP_REG_MK_U8(0x0012)
+#define SMIAPP_REG_U8_MODULE_DATE_MONTH SMIAPP_REG_MK_U8(0x0013)
+#define SMIAPP_REG_U8_MODULE_DATE_DAY SMIAPP_REG_MK_U8(0x0014)
+#define SMIAPP_REG_U8_MODULE_DATE_PHASE SMIAPP_REG_MK_U8(0x0015)
+#define SMIAPP_REG_U16_SENSOR_MODEL_ID SMIAPP_REG_MK_U16(0x0016)
+#define SMIAPP_REG_U8_SENSOR_REVISION_NUMBER SMIAPP_REG_MK_U8(0x0018)
+#define SMIAPP_REG_U8_SENSOR_MANUFACTURER_ID SMIAPP_REG_MK_U8(0x0019)
+#define SMIAPP_REG_U8_SENSOR_FIRMWARE_VERSION SMIAPP_REG_MK_U8(0x001a)
+#define SMIAPP_REG_U32_SERIAL_NUMBER SMIAPP_REG_MK_U32(0x001c)
+#define SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE SMIAPP_REG_MK_U8(0x0040)
+#define SMIAPP_REG_U8_FRAME_FORMAT_MODEL_SUBTYPE SMIAPP_REG_MK_U8(0x0041)
+#define SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(n) SMIAPP_REG_MK_U16(0x0042 + ((n) << 1)) /* 0 <= n <= 14 */
+#define SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(n) SMIAPP_REG_MK_U32(0x0060 + ((n) << 2)) /* 0 <= n <= 7 */
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CAPABILITY SMIAPP_REG_MK_U16(0x0080)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MIN SMIAPP_REG_MK_U16(0x0084)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_MAX SMIAPP_REG_MK_U16(0x0086)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_STEP SMIAPP_REG_MK_U16(0x0088)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_TYPE SMIAPP_REG_MK_U16(0x008a)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_M0 SMIAPP_REG_MK_U16(0x008c)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_C0 SMIAPP_REG_MK_U16(0x008e)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_M1 SMIAPP_REG_MK_U16(0x0090)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_C1 SMIAPP_REG_MK_U16(0x0092)
+#define SMIAPP_REG_U8_DATA_FORMAT_MODEL_TYPE SMIAPP_REG_MK_U8(0x00c0)
+#define SMIAPP_REG_U8_DATA_FORMAT_MODEL_SUBTYPE SMIAPP_REG_MK_U8(0x00c1)
+#define SMIAPP_REG_U16_DATA_FORMAT_DESCRIPTOR(n) SMIAPP_REG_MK_U16(0x00c2 + ((n) << 1))
+#define SMIAPP_REG_U8_MODE_SELECT SMIAPP_REG_MK_U8(0x0100)
+#define SMIAPP_REG_U8_IMAGE_ORIENTATION SMIAPP_REG_MK_U8(0x0101)
+#define SMIAPP_REG_U8_SOFTWARE_RESET SMIAPP_REG_MK_U8(0x0103)
+#define SMIAPP_REG_U8_GROUPED_PARAMETER_HOLD SMIAPP_REG_MK_U8(0x0104)
+#define SMIAPP_REG_U8_MASK_CORRUPTED_FRAMES SMIAPP_REG_MK_U8(0x0105)
+#define SMIAPP_REG_U8_FAST_STANDBY_CTRL SMIAPP_REG_MK_U8(0x0106)
+#define SMIAPP_REG_U8_CCI_ADDRESS_CONTROL SMIAPP_REG_MK_U8(0x0107)
+#define SMIAPP_REG_U8_2ND_CCI_IF_CONTROL SMIAPP_REG_MK_U8(0x0108)
+#define SMIAPP_REG_U8_2ND_CCI_ADDRESS_CONTROL SMIAPP_REG_MK_U8(0x0109)
+#define SMIAPP_REG_U8_CSI_CHANNEL_IDENTIFIER SMIAPP_REG_MK_U8(0x0110)
+#define SMIAPP_REG_U8_CSI_SIGNALLING_MODE SMIAPP_REG_MK_U8(0x0111)
+#define SMIAPP_REG_U16_CSI_DATA_FORMAT SMIAPP_REG_MK_U16(0x0112)
+#define SMIAPP_REG_U8_CSI_LANE_MODE SMIAPP_REG_MK_U8(0x0114)
+#define SMIAPP_REG_U8_CSI2_10_TO_8_DT SMIAPP_REG_MK_U8(0x0115)
+#define SMIAPP_REG_U8_CSI2_10_TO_7_DT SMIAPP_REG_MK_U8(0x0116)
+#define SMIAPP_REG_U8_CSI2_10_TO_6_DT SMIAPP_REG_MK_U8(0x0117)
+#define SMIAPP_REG_U8_CSI2_12_TO_8_DT SMIAPP_REG_MK_U8(0x0118)
+#define SMIAPP_REG_U8_CSI2_12_TO_7_DT SMIAPP_REG_MK_U8(0x0119)
+#define SMIAPP_REG_U8_CSI2_12_TO_6_DT SMIAPP_REG_MK_U8(0x011a)
+#define SMIAPP_REG_U8_CSI2_14_TO_10_DT SMIAPP_REG_MK_U8(0x011b)
+#define SMIAPP_REG_U8_CSI2_14_TO_8_DT SMIAPP_REG_MK_U8(0x011c)
+#define SMIAPP_REG_U8_CSI2_16_TO_10_DT SMIAPP_REG_MK_U8(0x011d)
+#define SMIAPP_REG_U8_CSI2_16_TO_8_DT SMIAPP_REG_MK_U8(0x011e)
+#define SMIAPP_REG_U8_GAIN_MODE SMIAPP_REG_MK_U8(0x0120)
+#define SMIAPP_REG_U16_VANA_VOLTAGE SMIAPP_REG_MK_U16(0x0130)
+#define SMIAPP_REG_U16_VDIG_VOLTAGE SMIAPP_REG_MK_U16(0x0132)
+#define SMIAPP_REG_U16_VIO_VOLTAGE SMIAPP_REG_MK_U16(0x0134)
+#define SMIAPP_REG_U16_EXTCLK_FREQUENCY_MHZ SMIAPP_REG_MK_U16(0x0136)
+#define SMIAPP_REG_U8_TEMP_SENSOR_CONTROL SMIAPP_REG_MK_U8(0x0138)
+#define SMIAPP_REG_U8_TEMP_SENSOR_MODE SMIAPP_REG_MK_U8(0x0139)
+#define SMIAPP_REG_U8_TEMP_SENSOR_OUTPUT SMIAPP_REG_MK_U8(0x013a)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME SMIAPP_REG_MK_U16(0x0200)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME SMIAPP_REG_MK_U16(0x0202)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GLOBAL SMIAPP_REG_MK_U16(0x0204)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GREENR SMIAPP_REG_MK_U16(0x0206)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_RED SMIAPP_REG_MK_U16(0x0208)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_BLUE SMIAPP_REG_MK_U16(0x020a)
+#define SMIAPP_REG_U16_ANALOGUE_GAIN_CODE_GREENB SMIAPP_REG_MK_U16(0x020c)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_GREENR SMIAPP_REG_MK_U16(0x020e)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_RED SMIAPP_REG_MK_U16(0x0210)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_BLUE SMIAPP_REG_MK_U16(0x0212)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_GREENB SMIAPP_REG_MK_U16(0x0214)
+#define SMIAPP_REG_U16_VT_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x0300)
+#define SMIAPP_REG_U16_VT_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x0302)
+#define SMIAPP_REG_U16_PRE_PLL_CLK_DIV SMIAPP_REG_MK_U16(0x0304)
+#define SMIAPP_REG_U16_PLL_MULTIPLIER SMIAPP_REG_MK_U16(0x0306)
+#define SMIAPP_REG_U16_OP_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x0308)
+#define SMIAPP_REG_U16_OP_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x030a)
+#define SMIAPP_REG_U16_FRAME_LENGTH_LINES SMIAPP_REG_MK_U16(0x0340)
+#define SMIAPP_REG_U16_LINE_LENGTH_PCK SMIAPP_REG_MK_U16(0x0342)
+#define SMIAPP_REG_U16_X_ADDR_START SMIAPP_REG_MK_U16(0x0344)
+#define SMIAPP_REG_U16_Y_ADDR_START SMIAPP_REG_MK_U16(0x0346)
+#define SMIAPP_REG_U16_X_ADDR_END SMIAPP_REG_MK_U16(0x0348)
+#define SMIAPP_REG_U16_Y_ADDR_END SMIAPP_REG_MK_U16(0x034a)
+#define SMIAPP_REG_U16_X_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x034c)
+#define SMIAPP_REG_U16_Y_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x034e)
+#define SMIAPP_REG_U16_X_EVEN_INC SMIAPP_REG_MK_U16(0x0380)
+#define SMIAPP_REG_U16_X_ODD_INC SMIAPP_REG_MK_U16(0x0382)
+#define SMIAPP_REG_U16_Y_EVEN_INC SMIAPP_REG_MK_U16(0x0384)
+#define SMIAPP_REG_U16_Y_ODD_INC SMIAPP_REG_MK_U16(0x0386)
+#define SMIAPP_REG_U16_SCALING_MODE SMIAPP_REG_MK_U16(0x0400)
+#define SMIAPP_REG_U16_SPATIAL_SAMPLING SMIAPP_REG_MK_U16(0x0402)
+#define SMIAPP_REG_U16_SCALE_M SMIAPP_REG_MK_U16(0x0404)
+#define SMIAPP_REG_U16_SCALE_N SMIAPP_REG_MK_U16(0x0406)
+#define SMIAPP_REG_U16_DIGITAL_CROP_X_OFFSET SMIAPP_REG_MK_U16(0x0408)
+#define SMIAPP_REG_U16_DIGITAL_CROP_Y_OFFSET SMIAPP_REG_MK_U16(0x040a)
+#define SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_WIDTH SMIAPP_REG_MK_U16(0x040c)
+#define SMIAPP_REG_U16_DIGITAL_CROP_IMAGE_HEIGHT SMIAPP_REG_MK_U16(0x040e)
+#define SMIAPP_REG_U16_COMPRESSION_MODE SMIAPP_REG_MK_U16(0x0500)
+#define SMIAPP_REG_U16_TEST_PATTERN_MODE SMIAPP_REG_MK_U16(0x0600)
+#define SMIAPP_REG_U16_TEST_DATA_RED SMIAPP_REG_MK_U16(0x0602)
+#define SMIAPP_REG_U16_TEST_DATA_GREENR SMIAPP_REG_MK_U16(0x0604)
+#define SMIAPP_REG_U16_TEST_DATA_BLUE SMIAPP_REG_MK_U16(0x0606)
+#define SMIAPP_REG_U16_TEST_DATA_GREENB SMIAPP_REG_MK_U16(0x0608)
+#define SMIAPP_REG_U16_HORIZONTAL_CURSOR_WIDTH SMIAPP_REG_MK_U16(0x060a)
+#define SMIAPP_REG_U16_HORIZONTAL_CURSOR_POSITION SMIAPP_REG_MK_U16(0x060c)
+#define SMIAPP_REG_U16_VERTICAL_CURSOR_WIDTH SMIAPP_REG_MK_U16(0x060e)
+#define SMIAPP_REG_U16_VERTICAL_CURSOR_POSITION SMIAPP_REG_MK_U16(0x0610)
+#define SMIAPP_REG_U16_FIFO_WATER_MARK_PIXELS SMIAPP_REG_MK_U16(0x0700)
+#define SMIAPP_REG_U8_TCLK_POST SMIAPP_REG_MK_U8(0x0800)
+#define SMIAPP_REG_U8_THS_PREPARE SMIAPP_REG_MK_U8(0x0801)
+#define SMIAPP_REG_U8_THS_ZERO_MIN SMIAPP_REG_MK_U8(0x0802)
+#define SMIAPP_REG_U8_THS_TRAIL SMIAPP_REG_MK_U8(0x0803)
+#define SMIAPP_REG_U8_TCLK_TRAIL_MIN SMIAPP_REG_MK_U8(0x0804)
+#define SMIAPP_REG_U8_TCLK_PREPARE SMIAPP_REG_MK_U8(0x0805)
+#define SMIAPP_REG_U8_TCLK_ZERO SMIAPP_REG_MK_U8(0x0806)
+#define SMIAPP_REG_U8_TLPX SMIAPP_REG_MK_U8(0x0807)
+#define SMIAPP_REG_U8_DPHY_CTRL SMIAPP_REG_MK_U8(0x0808)
+#define SMIAPP_REG_U32_REQUESTED_LINK_BIT_RATE_MBPS SMIAPP_REG_MK_U32(0x0820)
+#define SMIAPP_REG_U8_BINNING_MODE SMIAPP_REG_MK_U8(0x0900)
+#define SMIAPP_REG_U8_BINNING_TYPE SMIAPP_REG_MK_U8(0x0901)
+#define SMIAPP_REG_U8_BINNING_WEIGHTING SMIAPP_REG_MK_U8(0x0902)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL SMIAPP_REG_MK_U8(0x0a00)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS SMIAPP_REG_MK_U8(0x0a01)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT SMIAPP_REG_MK_U8(0x0a02)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 SMIAPP_REG_MK_U8(0x0a04)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_1 SMIAPP_REG_MK_U8(0x0a05)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_2 SMIAPP_REG_MK_U8(0x0a06)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_3 SMIAPP_REG_MK_U8(0x0a07)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_4 SMIAPP_REG_MK_U8(0x0a08)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_5 SMIAPP_REG_MK_U8(0x0a09)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_12 SMIAPP_REG_MK_U8(0x0a10)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_13 SMIAPP_REG_MK_U8(0x0a11)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_14 SMIAPP_REG_MK_U8(0x0a12)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_15 SMIAPP_REG_MK_U8(0x0a13)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_16 SMIAPP_REG_MK_U8(0x0a14)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_17 SMIAPP_REG_MK_U8(0x0a15)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_18 SMIAPP_REG_MK_U8(0x0a16)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_19 SMIAPP_REG_MK_U8(0x0a17)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_20 SMIAPP_REG_MK_U8(0x0a18)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_21 SMIAPP_REG_MK_U8(0x0a19)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_22 SMIAPP_REG_MK_U8(0x0a1a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_23 SMIAPP_REG_MK_U8(0x0a1b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_24 SMIAPP_REG_MK_U8(0x0a1c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_25 SMIAPP_REG_MK_U8(0x0a1d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_26 SMIAPP_REG_MK_U8(0x0a1e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_27 SMIAPP_REG_MK_U8(0x0a1f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_28 SMIAPP_REG_MK_U8(0x0a20)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_29 SMIAPP_REG_MK_U8(0x0a21)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_30 SMIAPP_REG_MK_U8(0x0a22)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_31 SMIAPP_REG_MK_U8(0x0a23)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_32 SMIAPP_REG_MK_U8(0x0a24)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_33 SMIAPP_REG_MK_U8(0x0a25)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_34 SMIAPP_REG_MK_U8(0x0a26)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_35 SMIAPP_REG_MK_U8(0x0a27)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_36 SMIAPP_REG_MK_U8(0x0a28)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_37 SMIAPP_REG_MK_U8(0x0a29)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_38 SMIAPP_REG_MK_U8(0x0a2a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_39 SMIAPP_REG_MK_U8(0x0a2b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_40 SMIAPP_REG_MK_U8(0x0a2c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_41 SMIAPP_REG_MK_U8(0x0a2d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_42 SMIAPP_REG_MK_U8(0x0a2e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_43 SMIAPP_REG_MK_U8(0x0a2f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_44 SMIAPP_REG_MK_U8(0x0a30)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_45 SMIAPP_REG_MK_U8(0x0a31)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_46 SMIAPP_REG_MK_U8(0x0a32)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_47 SMIAPP_REG_MK_U8(0x0a33)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_48 SMIAPP_REG_MK_U8(0x0a34)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_49 SMIAPP_REG_MK_U8(0x0a35)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_50 SMIAPP_REG_MK_U8(0x0a36)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_51 SMIAPP_REG_MK_U8(0x0a37)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_52 SMIAPP_REG_MK_U8(0x0a38)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_53 SMIAPP_REG_MK_U8(0x0a39)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_54 SMIAPP_REG_MK_U8(0x0a3a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_55 SMIAPP_REG_MK_U8(0x0a3b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_56 SMIAPP_REG_MK_U8(0x0a3c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_57 SMIAPP_REG_MK_U8(0x0a3d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_58 SMIAPP_REG_MK_U8(0x0a3e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_59 SMIAPP_REG_MK_U8(0x0a3f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_60 SMIAPP_REG_MK_U8(0x0a40)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_61 SMIAPP_REG_MK_U8(0x0a41)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_62 SMIAPP_REG_MK_U8(0x0a42)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_63 SMIAPP_REG_MK_U8(0x0a43)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_CTRL SMIAPP_REG_MK_U8(0x0a44)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_STATUS SMIAPP_REG_MK_U8(0x0a45)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_PAGE_SELECT SMIAPP_REG_MK_U8(0x0a46)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_0 SMIAPP_REG_MK_U8(0x0a48)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_1 SMIAPP_REG_MK_U8(0x0a49)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_2 SMIAPP_REG_MK_U8(0x0a4a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_3 SMIAPP_REG_MK_U8(0x0a4b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_4 SMIAPP_REG_MK_U8(0x0a4c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_5 SMIAPP_REG_MK_U8(0x0a4d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_6 SMIAPP_REG_MK_U8(0x0a4e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_7 SMIAPP_REG_MK_U8(0x0a4f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_8 SMIAPP_REG_MK_U8(0x0a50)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_9 SMIAPP_REG_MK_U8(0x0a51)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_10 SMIAPP_REG_MK_U8(0x0a52)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_11 SMIAPP_REG_MK_U8(0x0a53)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_12 SMIAPP_REG_MK_U8(0x0a54)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_13 SMIAPP_REG_MK_U8(0x0a55)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_14 SMIAPP_REG_MK_U8(0x0a56)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_15 SMIAPP_REG_MK_U8(0x0a57)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_16 SMIAPP_REG_MK_U8(0x0a58)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_17 SMIAPP_REG_MK_U8(0x0a59)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_18 SMIAPP_REG_MK_U8(0x0a5a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_19 SMIAPP_REG_MK_U8(0x0a5b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_20 SMIAPP_REG_MK_U8(0x0a5c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_21 SMIAPP_REG_MK_U8(0x0a5d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_22 SMIAPP_REG_MK_U8(0x0a5e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_23 SMIAPP_REG_MK_U8(0x0a5f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_24 SMIAPP_REG_MK_U8(0x0a60)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_25 SMIAPP_REG_MK_U8(0x0a61)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_26 SMIAPP_REG_MK_U8(0x0a62)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_27 SMIAPP_REG_MK_U8(0x0a63)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_28 SMIAPP_REG_MK_U8(0x0a64)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_29 SMIAPP_REG_MK_U8(0x0a65)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_30 SMIAPP_REG_MK_U8(0x0a66)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_31 SMIAPP_REG_MK_U8(0x0a67)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_32 SMIAPP_REG_MK_U8(0x0a68)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_33 SMIAPP_REG_MK_U8(0x0a69)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_34 SMIAPP_REG_MK_U8(0x0a6a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_35 SMIAPP_REG_MK_U8(0x0a6b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_36 SMIAPP_REG_MK_U8(0x0a6c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_37 SMIAPP_REG_MK_U8(0x0a6d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_38 SMIAPP_REG_MK_U8(0x0a6e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_39 SMIAPP_REG_MK_U8(0x0a6f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_40 SMIAPP_REG_MK_U8(0x0a70)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_41 SMIAPP_REG_MK_U8(0x0a71)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_42 SMIAPP_REG_MK_U8(0x0a72)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_43 SMIAPP_REG_MK_U8(0x0a73)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_44 SMIAPP_REG_MK_U8(0x0a74)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_45 SMIAPP_REG_MK_U8(0x0a75)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_46 SMIAPP_REG_MK_U8(0x0a76)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_47 SMIAPP_REG_MK_U8(0x0a77)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_48 SMIAPP_REG_MK_U8(0x0a78)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_49 SMIAPP_REG_MK_U8(0x0a79)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_50 SMIAPP_REG_MK_U8(0x0a7a)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_51 SMIAPP_REG_MK_U8(0x0a7b)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_52 SMIAPP_REG_MK_U8(0x0a7c)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_53 SMIAPP_REG_MK_U8(0x0a7d)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_54 SMIAPP_REG_MK_U8(0x0a7e)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_55 SMIAPP_REG_MK_U8(0x0a7f)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_56 SMIAPP_REG_MK_U8(0x0a80)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_57 SMIAPP_REG_MK_U8(0x0a81)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_58 SMIAPP_REG_MK_U8(0x0a82)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_59 SMIAPP_REG_MK_U8(0x0a83)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_60 SMIAPP_REG_MK_U8(0x0a84)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_61 SMIAPP_REG_MK_U8(0x0a85)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_62 SMIAPP_REG_MK_U8(0x0a86)
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_2_DATA_63 SMIAPP_REG_MK_U8(0x0a87)
+#define SMIAPP_REG_U8_SHADING_CORRECTION_ENABLE SMIAPP_REG_MK_U8(0x0b00)
+#define SMIAPP_REG_U8_LUMINANCE_CORRECTION_LEVEL SMIAPP_REG_MK_U8(0x0b01)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_FILTER_ENABLE SMIAPP_REG_MK_U8(0x0b02)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_FILTER_WEIGHT SMIAPP_REG_MK_U8(0x0b03)
+#define SMIAPP_REG_U8_BLACK_LEVEL_CORRECTION_ENABLE SMIAPP_REG_MK_U8(0x0b04)
+#define SMIAPP_REG_U8_MAPPED_COUPLET_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b05)
+#define SMIAPP_REG_U8_SINGLE_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b06)
+#define SMIAPP_REG_U8_SINGLE_DEFECT_CORRECT_WEIGHT SMIAPP_REG_MK_U8(0x0b07)
+#define SMIAPP_REG_U8_DYNAMIC_COUPLET_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b08)
+#define SMIAPP_REG_U8_DYNAMIC_COUPLET_CORRECT_WEIGHT SMIAPP_REG_MK_U8(0x0b09)
+#define SMIAPP_REG_U8_COMBINED_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b0a)
+#define SMIAPP_REG_U8_COMBINED_DEFECT_CORRECT_WEIGHT SMIAPP_REG_MK_U8(0x0b0b)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_ENABLE SMIAPP_REG_MK_U8(0x0b0c)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_WEIGHT SMIAPP_REG_MK_U8(0x0b0d)
+#define SMIAPP_REG_U8_MAPPED_LINE_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b0e)
+#define SMIAPP_REG_U8_MAPPED_LINE_DEFECT_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b0f)
+#define SMIAPP_REG_U8_MAPPED_COUPLET_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b10)
+#define SMIAPP_REG_U8_MAPPED_TRIPLET_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b11)
+#define SMIAPP_REG_U8_MAPPED_TRIPLET_DEFECT_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b12)
+#define SMIAPP_REG_U8_DYNAMIC_TRIPLET_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b13)
+#define SMIAPP_REG_U8_DYNAMIC_TRIPLET_DEFECT_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b14)
+#define SMIAPP_REG_U8_DYNAMIC_LINE_DEFECT_CORRECT_ENABLE SMIAPP_REG_MK_U8(0x0b15)
+#define SMIAPP_REG_U8_DYNAMIC_LINE_DEFECT_CORRECT_ADJUST SMIAPP_REG_MK_U8(0x0b16)
+#define SMIAPP_REG_U8_EDOF_MODE SMIAPP_REG_MK_U8(0x0b80)
+#define SMIAPP_REG_U8_SHARPNESS SMIAPP_REG_MK_U8(0x0b83)
+#define SMIAPP_REG_U8_DENOISING SMIAPP_REG_MK_U8(0x0b84)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC SMIAPP_REG_MK_U8(0x0b85)
+#define SMIAPP_REG_U16_DEPTH_OF_FIELD SMIAPP_REG_MK_U16(0x0b86)
+#define SMIAPP_REG_U16_FOCUS_DISTANCE SMIAPP_REG_MK_U16(0x0b88)
+#define SMIAPP_REG_U8_ESTIMATION_MODE_CTRL SMIAPP_REG_MK_U8(0x0b8a)
+#define SMIAPP_REG_U16_COLOUR_TEMPERATURE SMIAPP_REG_MK_U16(0x0b8c)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_GREENR SMIAPP_REG_MK_U16(0x0b8e)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_RED SMIAPP_REG_MK_U16(0x0b90)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_BLUE SMIAPP_REG_MK_U16(0x0b92)
+#define SMIAPP_REG_U16_ABSOLUTE_GAIN_GREENB SMIAPP_REG_MK_U16(0x0b94)
+#define SMIAPP_REG_U8_ESTIMATION_ZONE_MODE SMIAPP_REG_MK_U8(0x0bc0)
+#define SMIAPP_REG_U16_FIXED_ZONE_WEIGHTING SMIAPP_REG_MK_U16(0x0bc2)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_X_START SMIAPP_REG_MK_U16(0x0bc4)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_Y_START SMIAPP_REG_MK_U16(0x0bc6)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_WIDTH SMIAPP_REG_MK_U16(0x0bc8)
+#define SMIAPP_REG_U16_CUSTOM_ZONE_HEIGHT SMIAPP_REG_MK_U16(0x0bca)
+#define SMIAPP_REG_U8_GLOBAL_RESET_CTRL1 SMIAPP_REG_MK_U8(0x0c00)
+#define SMIAPP_REG_U8_GLOBAL_RESET_CTRL2 SMIAPP_REG_MK_U8(0x0c01)
+#define SMIAPP_REG_U8_GLOBAL_RESET_MODE_CONFIG_1 SMIAPP_REG_MK_U8(0x0c02)
+#define SMIAPP_REG_U8_GLOBAL_RESET_MODE_CONFIG_2 SMIAPP_REG_MK_U8(0x0c03)
+#define SMIAPP_REG_U16_TRDY_CTRL SMIAPP_REG_MK_U16(0x0c04)
+#define SMIAPP_REG_U16_TRDOUT_CTRL SMIAPP_REG_MK_U16(0x0c06)
+#define SMIAPP_REG_U16_TSHUTTER_STROBE_DELAY_CTRL SMIAPP_REG_MK_U16(0x0c08)
+#define SMIAPP_REG_U16_TSHUTTER_STROBE_WIDTH_CTRL SMIAPP_REG_MK_U16(0x0c0a)
+#define SMIAPP_REG_U16_TFLASH_STROBE_DELAY_CTRL SMIAPP_REG_MK_U16(0x0c0c)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_CTRL SMIAPP_REG_MK_U16(0x0c0e)
+#define SMIAPP_REG_U16_TGRST_INTERVAL_CTRL SMIAPP_REG_MK_U16(0x0c10)
+#define SMIAPP_REG_U8_FLASH_STROBE_ADJUSTMENT SMIAPP_REG_MK_U8(0x0c12)
+#define SMIAPP_REG_U16_FLASH_STROBE_START_POINT SMIAPP_REG_MK_U16(0x0c14)
+#define SMIAPP_REG_U16_TFLASH_STROBE_DELAY_RS_CTRL SMIAPP_REG_MK_U16(0x0c16)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_HIGH_RS_CTRL SMIAPP_REG_MK_U16(0x0c18)
+#define SMIAPP_REG_U8_FLASH_MODE_RS SMIAPP_REG_MK_U8(0x0c1a)
+#define SMIAPP_REG_U8_FLASH_TRIGGER_RS SMIAPP_REG_MK_U8(0x0c1b)
+#define SMIAPP_REG_U8_FLASH_STATUS SMIAPP_REG_MK_U8(0x0c1c)
+#define SMIAPP_REG_U8_SA_STROBE_MODE SMIAPP_REG_MK_U8(0x0c1d)
+#define SMIAPP_REG_U16_SA_STROBE_START_POINT SMIAPP_REG_MK_U16(0x0c1e)
+#define SMIAPP_REG_U16_TSA_STROBE_DELAY_CTRL SMIAPP_REG_MK_U16(0x0c20)
+#define SMIAPP_REG_U16_TSA_STROBE_WIDTH_CTRL SMIAPP_REG_MK_U16(0x0c22)
+#define SMIAPP_REG_U8_SA_STROBE_TRIGGER SMIAPP_REG_MK_U8(0x0c24)
+#define SMIAPP_REG_U8_SPECIAL_ACTUATOR_STATUS SMIAPP_REG_MK_U8(0x0c25)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH2_HIGH_RS_CTRL SMIAPP_REG_MK_U16(0x0c26)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_LOW_RS_CTRL SMIAPP_REG_MK_U16(0x0c28)
+#define SMIAPP_REG_U8_TFLASH_STROBE_COUNT_RS_CTRL SMIAPP_REG_MK_U8(0x0c2a)
+#define SMIAPP_REG_U8_TFLASH_STROBE_COUNT_CTRL SMIAPP_REG_MK_U8(0x0c2b)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH2_HIGH_CTRL SMIAPP_REG_MK_U16(0x0c2c)
+#define SMIAPP_REG_U16_TFLASH_STROBE_WIDTH_LOW_CTRL SMIAPP_REG_MK_U16(0x0c2e)
+#define SMIAPP_REG_U8_LOW_LEVEL_CTRL SMIAPP_REG_MK_U8(0x0c80)
+#define SMIAPP_REG_U16_MAIN_TRIGGER_REF_POINT SMIAPP_REG_MK_U16(0x0c82)
+#define SMIAPP_REG_U16_MAIN_TRIGGER_T3 SMIAPP_REG_MK_U16(0x0c84)
+#define SMIAPP_REG_U8_MAIN_TRIGGER_COUNT SMIAPP_REG_MK_U8(0x0c86)
+#define SMIAPP_REG_U16_PHASE1_TRIGGER_T3 SMIAPP_REG_MK_U16(0x0c88)
+#define SMIAPP_REG_U8_PHASE1_TRIGGER_COUNT SMIAPP_REG_MK_U8(0x0c8a)
+#define SMIAPP_REG_U16_PHASE2_TRIGGER_T3 SMIAPP_REG_MK_U16(0x0c8c)
+#define SMIAPP_REG_U8_PHASE2_TRIGGER_COUNT SMIAPP_REG_MK_U8(0x0c8e)
+#define SMIAPP_REG_U8_MECH_SHUTTER_CTRL SMIAPP_REG_MK_U8(0x0d00)
+#define SMIAPP_REG_U8_OPERATION_MODE SMIAPP_REG_MK_U8(0x0d01)
+#define SMIAPP_REG_U8_ACT_STATE1 SMIAPP_REG_MK_U8(0x0d02)
+#define SMIAPP_REG_U8_ACT_STATE2 SMIAPP_REG_MK_U8(0x0d03)
+#define SMIAPP_REG_U16_FOCUS_CHANGE SMIAPP_REG_MK_U16(0x0d80)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_CONTROL SMIAPP_REG_MK_U16(0x0d82)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_NUMBER_PHASE1 SMIAPP_REG_MK_U16(0x0d84)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_NUMBER_PHASE2 SMIAPP_REG_MK_U16(0x0d86)
+#define SMIAPP_REG_U8_STROBE_COUNT_PHASE1 SMIAPP_REG_MK_U8(0x0d88)
+#define SMIAPP_REG_U8_STROBE_COUNT_PHASE2 SMIAPP_REG_MK_U8(0x0d89)
+#define SMIAPP_REG_U8_POSITION SMIAPP_REG_MK_U8(0x0d8a)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CONTROL SMIAPP_REG_MK_U8(0x0e00)
+#define SMIAPP_REG_U8_BRACKETING_LUT_MODE SMIAPP_REG_MK_U8(0x0e01)
+#define SMIAPP_REG_U8_BRACKETING_LUT_ENTRY_CONTROL SMIAPP_REG_MK_U8(0x0e02)
+#define SMIAPP_REG_U8_LUT_PARAMETERS_START SMIAPP_REG_MK_U8(0x0e10)
+#define SMIAPP_REG_U8_LUT_PARAMETERS_END SMIAPP_REG_MK_U8(0x0eff)
+#define SMIAPP_REG_U16_INTEGRATION_TIME_CAPABILITY SMIAPP_REG_MK_U16(0x1000)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MIN SMIAPP_REG_MK_U16(0x1004)
+#define SMIAPP_REG_U16_COARSE_INTEGRATION_TIME_MAX_MARGIN SMIAPP_REG_MK_U16(0x1006)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN SMIAPP_REG_MK_U16(0x1008)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN SMIAPP_REG_MK_U16(0x100a)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_CAPABILITY SMIAPP_REG_MK_U16(0x1080)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_MIN SMIAPP_REG_MK_U16(0x1084)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_MAX SMIAPP_REG_MK_U16(0x1086)
+#define SMIAPP_REG_U16_DIGITAL_GAIN_STEP_SIZE SMIAPP_REG_MK_U16(0x1088)
+#define SMIAPP_REG_F32_MIN_EXT_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1100)
+#define SMIAPP_REG_F32_MAX_EXT_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1104)
+#define SMIAPP_REG_U16_MIN_PRE_PLL_CLK_DIV SMIAPP_REG_MK_U16(0x1108)
+#define SMIAPP_REG_U16_MAX_PRE_PLL_CLK_DIV SMIAPP_REG_MK_U16(0x110a)
+#define SMIAPP_REG_F32_MIN_PLL_IP_FREQ_HZ SMIAPP_REG_MK_F32(0x110c)
+#define SMIAPP_REG_F32_MAX_PLL_IP_FREQ_HZ SMIAPP_REG_MK_F32(0x1110)
+#define SMIAPP_REG_U16_MIN_PLL_MULTIPLIER SMIAPP_REG_MK_U16(0x1114)
+#define SMIAPP_REG_U16_MAX_PLL_MULTIPLIER SMIAPP_REG_MK_U16(0x1116)
+#define SMIAPP_REG_F32_MIN_PLL_OP_FREQ_HZ SMIAPP_REG_MK_F32(0x1118)
+#define SMIAPP_REG_F32_MAX_PLL_OP_FREQ_HZ SMIAPP_REG_MK_F32(0x111c)
+#define SMIAPP_REG_U16_MIN_VT_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x1120)
+#define SMIAPP_REG_U16_MAX_VT_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x1122)
+#define SMIAPP_REG_F32_MIN_VT_SYS_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1124)
+#define SMIAPP_REG_F32_MAX_VT_SYS_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1128)
+#define SMIAPP_REG_F32_MIN_VT_PIX_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x112c)
+#define SMIAPP_REG_F32_MAX_VT_PIX_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1130)
+#define SMIAPP_REG_U16_MIN_VT_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x1134)
+#define SMIAPP_REG_U16_MAX_VT_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x1136)
+#define SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES SMIAPP_REG_MK_U16(0x1140)
+#define SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES SMIAPP_REG_MK_U16(0x1142)
+#define SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK SMIAPP_REG_MK_U16(0x1144)
+#define SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK SMIAPP_REG_MK_U16(0x1146)
+#define SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK SMIAPP_REG_MK_U16(0x1148)
+#define SMIAPP_REG_U16_MIN_FRAME_BLANKING_LINES SMIAPP_REG_MK_U16(0x114a)
+#define SMIAPP_REG_U8_MIN_LINE_LENGTH_PCK_STEP_SIZE SMIAPP_REG_MK_U8(0x114c)
+#define SMIAPP_REG_U16_MIN_OP_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x1160)
+#define SMIAPP_REG_U16_MAX_OP_SYS_CLK_DIV SMIAPP_REG_MK_U16(0x1162)
+#define SMIAPP_REG_F32_MIN_OP_SYS_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1164)
+#define SMIAPP_REG_F32_MAX_OP_SYS_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1168)
+#define SMIAPP_REG_U16_MIN_OP_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x116c)
+#define SMIAPP_REG_U16_MAX_OP_PIX_CLK_DIV SMIAPP_REG_MK_U16(0x116e)
+#define SMIAPP_REG_F32_MIN_OP_PIX_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1170)
+#define SMIAPP_REG_F32_MAX_OP_PIX_CLK_FREQ_HZ SMIAPP_REG_MK_F32(0x1174)
+#define SMIAPP_REG_U16_X_ADDR_MIN SMIAPP_REG_MK_U16(0x1180)
+#define SMIAPP_REG_U16_Y_ADDR_MIN SMIAPP_REG_MK_U16(0x1182)
+#define SMIAPP_REG_U16_X_ADDR_MAX SMIAPP_REG_MK_U16(0x1184)
+#define SMIAPP_REG_U16_Y_ADDR_MAX SMIAPP_REG_MK_U16(0x1186)
+#define SMIAPP_REG_U16_MIN_X_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x1188)
+#define SMIAPP_REG_U16_MIN_Y_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x118a)
+#define SMIAPP_REG_U16_MAX_X_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x118c)
+#define SMIAPP_REG_U16_MAX_Y_OUTPUT_SIZE SMIAPP_REG_MK_U16(0x118e)
+#define SMIAPP_REG_U16_MIN_EVEN_INC SMIAPP_REG_MK_U16(0x11c0)
+#define SMIAPP_REG_U16_MAX_EVEN_INC SMIAPP_REG_MK_U16(0x11c2)
+#define SMIAPP_REG_U16_MIN_ODD_INC SMIAPP_REG_MK_U16(0x11c4)
+#define SMIAPP_REG_U16_MAX_ODD_INC SMIAPP_REG_MK_U16(0x11c6)
+#define SMIAPP_REG_U16_SCALING_CAPABILITY SMIAPP_REG_MK_U16(0x1200)
+#define SMIAPP_REG_U16_SCALER_M_MIN SMIAPP_REG_MK_U16(0x1204)
+#define SMIAPP_REG_U16_SCALER_M_MAX SMIAPP_REG_MK_U16(0x1206)
+#define SMIAPP_REG_U16_SCALER_N_MIN SMIAPP_REG_MK_U16(0x1208)
+#define SMIAPP_REG_U16_SCALER_N_MAX SMIAPP_REG_MK_U16(0x120a)
+#define SMIAPP_REG_U16_SPATIAL_SAMPLING_CAPABILITY SMIAPP_REG_MK_U16(0x120c)
+#define SMIAPP_REG_U8_DIGITAL_CROP_CAPABILITY SMIAPP_REG_MK_U8(0x120e)
+#define SMIAPP_REG_U16_COMPRESSION_CAPABILITY SMIAPP_REG_MK_U16(0x1300)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINRED SMIAPP_REG_MK_U16(0x1400)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINRED SMIAPP_REG_MK_U16(0x1402)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINRED SMIAPP_REG_MK_U16(0x1404)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINGREEN SMIAPP_REG_MK_U16(0x1406)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINGREEN SMIAPP_REG_MK_U16(0x1408)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINGREEN SMIAPP_REG_MK_U16(0x140a)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_REDINBLUE SMIAPP_REG_MK_U16(0x140c)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_GREENINBLUE SMIAPP_REG_MK_U16(0x140e)
+#define SMIAPP_REG_U16_MATRIX_ELEMENT_BLUEINBLUE SMIAPP_REG_MK_U16(0x1410)
+#define SMIAPP_REG_U16_FIFO_SIZE_PIXELS SMIAPP_REG_MK_U16(0x1500)
+#define SMIAPP_REG_U8_FIFO_SUPPORT_CAPABILITY SMIAPP_REG_MK_U8(0x1502)
+#define SMIAPP_REG_U8_DPHY_CTRL_CAPABILITY SMIAPP_REG_MK_U8(0x1600)
+#define SMIAPP_REG_U8_CSI_LANE_MODE_CAPABILITY SMIAPP_REG_MK_U8(0x1601)
+#define SMIAPP_REG_U8_CSI_SIGNALLING_MODE_CAPABILITY SMIAPP_REG_MK_U8(0x1602)
+#define SMIAPP_REG_U8_FAST_STANDBY_CAPABILITY SMIAPP_REG_MK_U8(0x1603)
+#define SMIAPP_REG_U8_CCI_ADDRESS_CONTROL_CAPABILITY SMIAPP_REG_MK_U8(0x1604)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_1_LANE_MODE_MBPS SMIAPP_REG_MK_U32(0x1608)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_2_LANE_MODE_MBPS SMIAPP_REG_MK_U32(0x160c)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_3_LANE_MODE_MBPS SMIAPP_REG_MK_U32(0x1610)
+#define SMIAPP_REG_U32_MAX_PER_LANE_BITRATE_4_LANE_MODE_MBPS SMIAPP_REG_MK_U32(0x1614)
+#define SMIAPP_REG_U8_TEMP_SENSOR_CAPABILITY SMIAPP_REG_MK_U8(0x1618)
+#define SMIAPP_REG_U16_MIN_FRAME_LENGTH_LINES_BIN SMIAPP_REG_MK_U16(0x1700)
+#define SMIAPP_REG_U16_MAX_FRAME_LENGTH_LINES_BIN SMIAPP_REG_MK_U16(0x1702)
+#define SMIAPP_REG_U16_MIN_LINE_LENGTH_PCK_BIN SMIAPP_REG_MK_U16(0x1704)
+#define SMIAPP_REG_U16_MAX_LINE_LENGTH_PCK_BIN SMIAPP_REG_MK_U16(0x1706)
+#define SMIAPP_REG_U16_MIN_LINE_BLANKING_PCK_BIN SMIAPP_REG_MK_U16(0x1708)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MIN_BIN SMIAPP_REG_MK_U16(0x170a)
+#define SMIAPP_REG_U16_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN SMIAPP_REG_MK_U16(0x170c)
+#define SMIAPP_REG_U8_BINNING_CAPABILITY SMIAPP_REG_MK_U8(0x1710)
+#define SMIAPP_REG_U8_BINNING_WEIGHTING_CAPABILITY SMIAPP_REG_MK_U8(0x1711)
+#define SMIAPP_REG_U8_BINNING_SUBTYPES SMIAPP_REG_MK_U8(0x1712)
+#define SMIAPP_REG_U8_BINNING_TYPE_n(n) SMIAPP_REG_MK_U8(0x1713 + (n)) /* 1 <= n <= 237 */
+#define SMIAPP_REG_U8_DATA_TRANSFER_IF_CAPABILITY SMIAPP_REG_MK_U8(0x1800)
+#define SMIAPP_REG_U8_SHADING_CORRECTION_CAPABILITY SMIAPP_REG_MK_U8(0x1900)
+#define SMIAPP_REG_U8_GREEN_IMBALANCE_CAPABILITY SMIAPP_REG_MK_U8(0x1901)
+#define SMIAPP_REG_U8_BLACK_LEVEL_CAPABILITY SMIAPP_REG_MK_U8(0x1902)
+#define SMIAPP_REG_U8_MODULE_SPECIFIC_CORRECTION_CAPABILITY SMIAPP_REG_MK_U8(0x1903)
+#define SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY SMIAPP_REG_MK_U16(0x1904)
+#define SMIAPP_REG_U16_DEFECT_CORRECTION_CAPABILITY_2 SMIAPP_REG_MK_U16(0x1906)
+#define SMIAPP_REG_U8_EDOF_CAPABILITY SMIAPP_REG_MK_U8(0x1980)
+#define SMIAPP_REG_U8_ESTIMATION_FRAMES SMIAPP_REG_MK_U8(0x1981)
+#define SMIAPP_REG_U8_SUPPORTS_SHARPNESS_ADJ SMIAPP_REG_MK_U8(0x1982)
+#define SMIAPP_REG_U8_SUPPORTS_DENOISING_ADJ SMIAPP_REG_MK_U8(0x1983)
+#define SMIAPP_REG_U8_SUPPORTS_MODULE_SPECIFIC_ADJ SMIAPP_REG_MK_U8(0x1984)
+#define SMIAPP_REG_U8_SUPPORTS_DEPTH_OF_FIELD_ADJ SMIAPP_REG_MK_U8(0x1985)
+#define SMIAPP_REG_U8_SUPPORTS_FOCUS_DISTANCE_ADJ SMIAPP_REG_MK_U8(0x1986)
+#define SMIAPP_REG_U8_COLOUR_FEEDBACK_CAPABILITY SMIAPP_REG_MK_U8(0x1987)
+#define SMIAPP_REG_U8_EDOF_SUPPORT_AB_NXM SMIAPP_REG_MK_U8(0x1988)
+#define SMIAPP_REG_U8_ESTIMATION_MODE_CAPABILITY SMIAPP_REG_MK_U8(0x19c0)
+#define SMIAPP_REG_U8_ESTIMATION_ZONE_CAPABILITY SMIAPP_REG_MK_U8(0x19c1)
+#define SMIAPP_REG_U16_EST_DEPTH_OF_FIELD SMIAPP_REG_MK_U16(0x19c2)
+#define SMIAPP_REG_U16_EST_FOCUS_DISTANCE SMIAPP_REG_MK_U16(0x19c4)
+#define SMIAPP_REG_U16_CAPABILITY_TRDY_MIN SMIAPP_REG_MK_U16(0x1a00)
+#define SMIAPP_REG_U8_FLASH_MODE_CAPABILITY SMIAPP_REG_MK_U8(0x1a02)
+#define SMIAPP_REG_U16_MECH_SHUT_AND_ACT_START_ADDR SMIAPP_REG_MK_U16(0x1b02)
+#define SMIAPP_REG_U8_ACTUATOR_CAPABILITY SMIAPP_REG_MK_U8(0x1b04)
+#define SMIAPP_REG_U16_ACTUATOR_TYPE SMIAPP_REG_MK_U16(0x1b40)
+#define SMIAPP_REG_U8_AF_DEVICE_ADDRESS SMIAPP_REG_MK_U8(0x1b42)
+#define SMIAPP_REG_U16_FOCUS_CHANGE_ADDRESS SMIAPP_REG_MK_U16(0x1b44)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_1 SMIAPP_REG_MK_U8(0x1c00)
+#define SMIAPP_REG_U8_BRACKETING_LUT_CAPABILITY_2 SMIAPP_REG_MK_U8(0x1c01)
+#define SMIAPP_REG_U8_BRACKETING_LUT_SIZE SMIAPP_REG_MK_U8(0x1c02)
diff --git a/drivers/media/video/smiapp/smiapp-reg.h b/drivers/media/video/smiapp/smiapp-reg.h
new file mode 100644
index 000000000000..d0167aa17534
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-reg.h
@@ -0,0 +1,122 @@
+/*
+ * drivers/media/video/smiapp/smiapp-reg.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SMIAPP_REG_H_
+#define __SMIAPP_REG_H_
+
+#include "smiapp-reg-defs.h"
+
+/* Bit definitions for the registers declared in smiapp-reg-defs.h */
+#define SMIAPP_IMAGE_ORIENTATION_HFLIP (1 << 0)
+#define SMIAPP_IMAGE_ORIENTATION_VFLIP (1 << 1)
+
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN (1 << 0)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN (0 << 1)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN (1 << 1)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR (1 << 2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY (1 << 0)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY (1 << 1)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA (1 << 2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE (1 << 3)
+
+#define SMIAPP_SOFTWARE_RESET (1 << 0)
+
+#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE (1 << 0)
+#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE (1 << 1)
+
+#define SMIAPP_DPHY_CTRL_AUTOMATIC 0
+/* DPHY control based on REQUESTED_LINK_BIT_RATE_MBPS */
+#define SMIAPP_DPHY_CTRL_UI 1
+#define SMIAPP_DPHY_CTRL_REGISTER 2
+
+#define SMIAPP_COMPRESSION_MODE_SIMPLE_PREDICTOR 1
+#define SMIAPP_COMPRESSION_MODE_ADVANCED_PREDICTOR 2
+
+#define SMIAPP_MODE_SELECT_SOFTWARE_STANDBY 0
+#define SMIAPP_MODE_SELECT_STREAMING 1
+
+#define SMIAPP_SCALING_MODE_NONE 0
+#define SMIAPP_SCALING_MODE_HORIZONTAL 1
+#define SMIAPP_SCALING_MODE_BOTH 2
+
+#define SMIAPP_SCALING_CAPABILITY_NONE 0
+#define SMIAPP_SCALING_CAPABILITY_HORIZONTAL 1
+#define SMIAPP_SCALING_CAPABILITY_BOTH 2 /* horizontal/both */
+
+/* digital crop right before scaler */
+#define SMIAPP_DIGITAL_CROP_CAPABILITY_NONE 0
+#define SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP 1
+
+#define SMIAPP_BINNING_CAPABILITY_NO 0
+#define SMIAPP_BINNING_CAPABILITY_YES 1
+
+/* Maximum number of binning subtypes */
+#define SMIAPP_BINNING_SUBTYPES 253
+
+#define SMIAPP_PIXEL_ORDER_GRBG 0
+#define SMIAPP_PIXEL_ORDER_RGGB 1
+#define SMIAPP_PIXEL_ORDER_BGGR 2
+#define SMIAPP_PIXEL_ORDER_GBRG 3
+
+#define SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL 1
+#define SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED 2
+#define SMIAPP_DATA_FORMAT_MODEL_TYPE_NORMAL_N 8
+#define SMIAPP_DATA_FORMAT_MODEL_TYPE_EXTENDED_N 16
+
+#define SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE 0x01
+#define SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE 0x02
+#define SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NROWS_MASK 0x0f
+#define SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NCOLS_MASK 0xf0
+#define SMIAPP_FRAME_FORMAT_MODEL_SUBTYPE_NCOLS_SHIFT 4
+
+#define SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_MASK 0xf000
+#define SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_SHIFT 12
+#define SMIAPP_FRAME_FORMAT_DESC_2_PIXELS_MASK 0x0fff
+
+#define SMIAPP_FRAME_FORMAT_DESC_4_PIXELCODE_MASK 0xf0000000
+#define SMIAPP_FRAME_FORMAT_DESC_4_PIXELCODE_SHIFT 28
+#define SMIAPP_FRAME_FORMAT_DESC_4_PIXELS_MASK 0x0000ffff
+
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED 1
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_DUMMY 2
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_BLACK 3
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_DARK 4
+#define SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE 5
+
+#define SMIAPP_FAST_STANDBY_CTRL_COMPLETE_FRAMES 0
+#define SMIAPP_FAST_STANDBY_CTRL_IMMEDIATE 1
+
+/* Scaling N factor */
+#define SMIAPP_SCALE_N 16
+
+/* Image statistics registers */
+/* Registers 0x2000 to 0x2fff are reserved for future
+ * use for statistics features.
+ */
+
+/* Manufacturer Specific Registers: 0x3000 to 0x3fff
+ * The manufacturer specifies these as a black box.
+ */
+
+#endif /* __SMIAPP_REG_H_ */
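The frame format descriptor constants above pack a pixel code and a pixel count into each descriptor word: for the 2-byte model the code sits in the top four bits and the count in the low twelve. As a quick standalone illustration (the descriptor value below is made up, not taken from any sensor), decoding one such word looks like this:

#include <stdint.h>
#include <stdio.h>

#define DESC_2_PIXELCODE_MASK	0xf000	/* SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_MASK */
#define DESC_2_PIXELCODE_SHIFT	12	/* SMIAPP_FRAME_FORMAT_DESC_2_PIXELCODE_SHIFT */
#define DESC_2_PIXELS_MASK	0x0fff	/* SMIAPP_FRAME_FORMAT_DESC_2_PIXELS_MASK */

int main(void)
{
	uint16_t desc = 0x5280;	/* hypothetical 2-byte frame format descriptor */
	unsigned int code = (desc & DESC_2_PIXELCODE_MASK) >> DESC_2_PIXELCODE_SHIFT;
	unsigned int pixels = desc & DESC_2_PIXELS_MASK;

	/* prints "code 5, 640 pixels"; code 5 is the VISIBLE pixel code above */
	printf("code %u, %u pixels\n", code, pixels);
	return 0;
}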
diff --git a/drivers/media/video/smiapp/smiapp-regs.c b/drivers/media/video/smiapp/smiapp-regs.c
new file mode 100644
index 000000000000..b1812b17a407
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-regs.c
@@ -0,0 +1,273 @@
+/*
+ * drivers/media/video/smiapp/smiapp-regs.c
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+
+#include "smiapp.h"
+#include "smiapp-regs.h"
+
+static uint32_t float_to_u32_mul_1000000(struct i2c_client *client,
+ uint32_t phloat)
+{
+ int32_t exp;
+ uint64_t man;
+
+ if (phloat >= 0x80000000) {
+ dev_err(&client->dev, "this is a negative number\n");
+ return 0;
+ }
+
+ if (phloat == 0x7f800000)
+ return ~0; /* Inf. */
+
+ if ((phloat & 0x7f800000) == 0x7f800000) {
+ dev_err(&client->dev, "NaN or other special number\n");
+ return 0;
+ }
+
+ /* Valid cases begin here */
+ if (phloat == 0)
+ return 0; /* Valid zero */
+
+ if (phloat > 0x4f800000)
+ return ~0; /* larger than 4294967295 */
+
+ /*
+ * Unbias exponent (note how phloat is now guaranteed to
+ * have 0 in the high bit)
+ */
+ exp = ((int32_t)phloat >> 23) - 127;
+
+ /* Extract the mantissa, add the implicit '1' bit; the value is scaled by 1000000 */
+ man = ((phloat & 0x7fffff) | 0x800000) * 1000000ULL;
+
+ if (exp < 0)
+ man >>= -exp;
+ else
+ man <<= exp;
+
+ man >>= 23; /* drop the 23 fractional mantissa bits */
+
+ return man & 0xffffffff;
+}
+
+
+/*
+ * Read an 8/16/32-bit i2c register. The value is returned in 'val'.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int ____smiapp_read(struct smiapp_sensor *sensor, u16 reg,
+ u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct i2c_msg msg;
+ unsigned char data[4];
+ u16 offset = reg;
+ int r;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8) (offset >> 8);
+ data[1] = (u8) offset;
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r != 1) {
+ if (r >= 0)
+ r = -EBUSY;
+ goto err;
+ }
+
+ msg.len = len;
+ msg.flags = I2C_M_RD;
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r != 1) {
+ if (r >= 0)
+ r = -EBUSY;
+ goto err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ switch (len) {
+ case SMIA_REG_32BIT:
+ *val = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) +
+ data[3];
+ break;
+ case SMIA_REG_16BIT:
+ *val = (data[0] << 8) + data[1];
+ break;
+ case SMIA_REG_8BIT:
+ *val = data[0];
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+
+err:
+ dev_err(&client->dev, "read from offset 0x%x error %d\n", offset, r);
+
+ return r;
+}
+
+/* Read a register using 8-bit access only. */
+static int ____smiapp_read_8only(struct smiapp_sensor *sensor, u16 reg,
+ u16 len, u32 *val)
+{
+ unsigned int i;
+ int rval;
+
+ *val = 0;
+
+ for (i = 0; i < len; i++) {
+ u32 val8;
+
+ rval = ____smiapp_read(sensor, reg + i, 1, &val8);
+ if (rval < 0)
+ return rval;
+ *val |= val8 << ((len - i - 1) << 3);
+ }
+
+ return 0;
+}
+
+/*
+ * Read an 8/16/32-bit i2c register. The value is returned in 'val'.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int __smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val,
+ bool only8)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int len = (u8)(reg >> 16);
+ int rval;
+
+ if (len != SMIA_REG_8BIT && len != SMIA_REG_16BIT
+ && len != SMIA_REG_32BIT)
+ return -EINVAL;
+
+ if (smiapp_quirk_reg(sensor, reg, val))
+ goto found_quirk;
+
+ if (len == SMIA_REG_8BIT && !only8)
+ rval = ____smiapp_read(sensor, (u16)reg, len, val);
+ else
+ rval = ____smiapp_read_8only(sensor, (u16)reg, len, val);
+ if (rval < 0)
+ return rval;
+
+found_quirk:
+ if (reg & SMIA_REG_FLAG_FLOAT)
+ *val = float_to_u32_mul_1000000(client, *val);
+
+ return 0;
+}
+
+int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+{
+ return __smiapp_read(
+ sensor, reg, val,
+ smiapp_needs_quirk(sensor,
+ SMIAPP_QUIRK_FLAG_8BIT_READ_ONLY));
+}
+
+int smiapp_read_8only(struct smiapp_sensor *sensor, u32 reg, u32 *val)
+{
+ return __smiapp_read(sensor, reg, val, true);
+}
+
+/*
+ * Write to an 8/16/32-bit register.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+int smiapp_write(struct smiapp_sensor *sensor, u32 reg, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ struct i2c_msg msg;
+ unsigned char data[6];
+ unsigned int retries;
+ unsigned int flags = reg >> 24;
+ unsigned int len = (u8)(reg >> 16);
+ u16 offset = reg;
+ int r;
+
+ if ((len != SMIA_REG_8BIT && len != SMIA_REG_16BIT &&
+ len != SMIA_REG_32BIT) || flags)
+ return -EINVAL;
+
+ msg.addr = client->addr;
+ msg.flags = 0; /* Write */
+ msg.len = 2 + len;
+ msg.buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8) (reg >> 8);
+ data[1] = (u8) (reg & 0xff);
+
+ switch (len) {
+ case SMIA_REG_8BIT:
+ data[2] = val;
+ break;
+ case SMIA_REG_16BIT:
+ data[2] = val >> 8;
+ data[3] = val;
+ break;
+ case SMIA_REG_32BIT:
+ data[2] = val >> 24;
+ data[3] = val >> 16;
+ data[4] = val >> 8;
+ data[5] = val;
+ break;
+ default:
+ BUG();
+ }
+
+ for (retries = 0; retries < 5; retries++) {
+ /*
+ * For an unknown reason the sensor sometimes stops
+ * responding. This retry loop is a temporary solution
+ * until the root cause is found.
+ */
+ r = i2c_transfer(client->adapter, &msg, 1);
+ if (r == 1) {
+ if (retries)
+ dev_err(&client->dev,
+ "sensor i2c stall encountered. "
+ "retries: %d\n", retries);
+ return 0;
+ }
+
+ usleep_range(2000, 2000);
+ }
+
+ dev_err(&client->dev,
+ "wrote 0x%x to offset 0x%x error %d\n", val, offset, r);
+
+ return r;
+}
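A note on the helpers above: the u32 'reg' argument is a packed descriptor rather than a bare address. Judging from the read and write paths (len = (u8)(reg >> 16), flags = reg >> 24, offset = (u16)reg), the CCI offset lives in the low 16 bits, the access width in bytes in bits 16..23 and flags such as SMIA_REG_FLAG_FLOAT above that; the SMIAPP_REG_MK_* macros that actually build the descriptors are in smiapp-reg-defs.h and are not shown here, so treat the exact packing as an inference. As a sanity check of float_to_u32_mul_1000000(), the encoding 0x41200000 (IEEE 754 for 10.0) comes out as 10000000. A minimal sketch of the descriptor decoding under those assumptions:

#include <stdint.h>
#include <stdio.h>

#define REG_FLAG_FLOAT	(1u << 24)	/* mirrors SMIA_REG_FLAG_FLOAT */

int main(void)
{
	/* hypothetical descriptor: a 32-bit float register at offset 0x1100 */
	uint32_t reg = REG_FLAG_FLOAT | (4u << 16) | 0x1100;

	printf("offset 0x%04x, %u bytes, float flag %d\n",
	       (unsigned int)(reg & 0xffff),
	       (unsigned int)((reg >> 16) & 0xff),
	       !!(reg & REG_FLAG_FLOAT));
	return 0;
}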
diff --git a/drivers/media/video/smiapp/smiapp-regs.h b/drivers/media/video/smiapp/smiapp-regs.h
new file mode 100644
index 000000000000..7f9013b47971
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp-regs.h
@@ -0,0 +1,49 @@
+/*
+ * drivers/media/video/smiapp/smiapp-regs.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2011--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef SMIAPP_REGS_H
+#define SMIAPP_REGS_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+/* Use upper 8 bits of the type field for flags */
+#define SMIA_REG_FLAG_FLOAT (1 << 24)
+
+#define SMIA_REG_8BIT 1
+#define SMIA_REG_16BIT 2
+#define SMIA_REG_32BIT 4
+struct smia_reg {
+ u16 type;
+ u16 reg; /* 16-bit offset */
+ u32 val; /* 8/16/32-bit value */
+};
+
+struct smiapp_sensor;
+
+int smiapp_read(struct smiapp_sensor *sensor, u32 reg, u32 *val);
+int smiapp_read_8only(struct smiapp_sensor *sensor, u32 reg, u32 *val);
+int smiapp_write(struct smiapp_sensor *sensor, u32 reg, u32 val);
+
+#endif
diff --git a/drivers/media/video/smiapp/smiapp.h b/drivers/media/video/smiapp/smiapp.h
new file mode 100644
index 000000000000..587f7f11238d
--- /dev/null
+++ b/drivers/media/video/smiapp/smiapp.h
@@ -0,0 +1,252 @@
+/*
+ * drivers/media/video/smiapp/smiapp.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2010--2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SMIAPP_PRIV_H_
+#define __SMIAPP_PRIV_H_
+
+#include <linux/mutex.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include <media/smiapp.h>
+
+#include "smiapp-pll.h"
+#include "smiapp-reg.h"
+#include "smiapp-regs.h"
+#include "smiapp-quirk.h"
+
+/*
+ * Standard SMIA++ constants
+ */
+#define SMIA_VERSION_1 10
+#define SMIAPP_VERSION_0_8 8 /* Draft 0.8 */
+#define SMIAPP_VERSION_0_9 9 /* Draft 0.9 */
+#define SMIAPP_VERSION_1 10
+
+#define SMIAPP_PROFILE_0 0
+#define SMIAPP_PROFILE_1 1
+#define SMIAPP_PROFILE_2 2
+
+#define SMIAPP_NVM_PAGE_SIZE 64 /* bytes */
+
+#define SMIAPP_RESET_DELAY_CLOCKS 2400
+#define SMIAPP_RESET_DELAY(clk) \
+ (1000 + (SMIAPP_RESET_DELAY_CLOCKS * 1000 \
+ + (clk) / 1000 - 1) / ((clk) / 1000))
+
+#include "smiapp-limits.h"
+
+struct smiapp_quirk;
+
+#define SMIAPP_MODULE_IDENT_FLAG_REV_LE (1 << 0)
+
+struct smiapp_module_ident {
+ u8 manufacturer_id;
+ u16 model_id;
+ u8 revision_number_major;
+
+ u8 flags;
+
+ char *name;
+ const struct smiapp_quirk *quirk;
+};
+
+struct smiapp_module_info {
+ u32 manufacturer_id;
+ u32 model_id;
+ u32 revision_number_major;
+ u32 revision_number_minor;
+
+ u32 module_year;
+ u32 module_month;
+ u32 module_day;
+
+ u32 sensor_manufacturer_id;
+ u32 sensor_model_id;
+ u32 sensor_revision_number;
+ u32 sensor_firmware_version;
+
+ u32 smia_version;
+ u32 smiapp_version;
+
+ u32 smiapp_profile;
+
+ char *name;
+ const struct smiapp_quirk *quirk;
+};
+
+#define SMIAPP_IDENT_FQ(manufacturer, model, rev, fl, _name, _quirk) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = fl, \
+ .name = _name, \
+ .quirk = _quirk, }
+
+#define SMIAPP_IDENT_LQ(manufacturer, model, rev, _name, _quirk) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = SMIAPP_MODULE_IDENT_FLAG_REV_LE, \
+ .name = _name, \
+ .quirk = _quirk, }
+
+#define SMIAPP_IDENT_L(manufacturer, model, rev, _name) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = SMIAPP_MODULE_IDENT_FLAG_REV_LE, \
+ .name = _name, }
+
+#define SMIAPP_IDENT_Q(manufacturer, model, rev, _name, _quirk) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = 0, \
+ .name = _name, \
+ .quirk = _quirk, }
+
+#define SMIAPP_IDENT(manufacturer, model, rev, _name) \
+ { .manufacturer_id = manufacturer, \
+ .model_id = model, \
+ .revision_number_major = rev, \
+ .flags = 0, \
+ .name = _name, }
+
+struct smiapp_reg_limits {
+ u32 addr;
+ char *what;
+};
+
+extern struct smiapp_reg_limits smiapp_reg_limits[];
+
+struct smiapp_csi_data_format {
+ u32 code;
+ u8 width;
+ u8 compressed;
+ u8 pixel_order;
+};
+
+#define SMIAPP_SUBDEVS 3
+
+#define SMIAPP_PA_PAD_SRC 0
+#define SMIAPP_PAD_SINK 0
+#define SMIAPP_PAD_SRC 1
+#define SMIAPP_PADS 2
+
+struct smiapp_binning_subtype {
+ u8 horizontal:4;
+ u8 vertical:4;
+} __packed;
+
+struct smiapp_subdev {
+ struct v4l2_subdev sd;
+ struct media_pad pads[2];
+ struct v4l2_rect sink_fmt;
+ struct v4l2_rect crop[2];
+ struct v4l2_rect compose; /* compose on sink */
+ unsigned short sink_pad;
+ unsigned short source_pad;
+ int npads;
+ struct smiapp_sensor *sensor;
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/*
+ * struct smiapp_sensor - Main device structure
+ */
+struct smiapp_sensor {
+ /*
+ * "mutex" is used to serialise access to all fields here
+ * except v4l2_ctrls at the end of the struct. "mutex" is also
+ * used to serialise access to file handle specific
+ * information. The exception to this rule is the power_mutex
+ * below.
+ */
+ struct mutex mutex;
+ /*
+ * power_mutex is used to serialise power management related
+ * activities. Acquiring "mutex" at that time isn't necessary
+ * since there are no other users anyway.
+ */
+ struct mutex power_mutex;
+ struct smiapp_subdev ssds[SMIAPP_SUBDEVS];
+ u32 ssds_used;
+ struct smiapp_subdev *src;
+ struct smiapp_subdev *binner;
+ struct smiapp_subdev *scaler;
+ struct smiapp_subdev *pixel_array;
+ struct smiapp_platform_data *platform_data;
+ struct regulator *vana;
+ struct clk *ext_clk;
+ u32 limits[SMIAPP_LIMIT_LAST];
+ u8 nbinning_subtypes;
+ struct smiapp_binning_subtype binning_subtypes[SMIAPP_BINNING_SUBTYPES];
+ u32 mbus_frame_fmts;
+ const struct smiapp_csi_data_format *csi_format;
+ const struct smiapp_csi_data_format *internal_csi_format;
+ u32 default_mbus_frame_fmts;
+ int default_pixel_order;
+
+ u8 binning_horizontal;
+ u8 binning_vertical;
+
+ u8 scale_m;
+ u8 scaling_mode;
+
+ u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
+ u8 flash_capability;
+ u8 frame_skip;
+
+ int power_count;
+
+ bool streaming;
+ bool dev_init_done;
+
+ u8 *nvm; /* nvm memory buffer */
+ unsigned int nvm_size; /* bytes */
+
+ struct smiapp_module_info minfo;
+
+ struct smiapp_pll pll;
+
+ /* Pixel array controls */
+ struct v4l2_ctrl *analog_gain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *pixel_rate_parray;
+ /* src controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate_csi;
+};
+
+#define to_smiapp_subdev(_sd) \
+ container_of(_sd, struct smiapp_subdev, sd)
+
+#define to_smiapp_sensor(_sd) \
+ (to_smiapp_subdev(_sd)->sensor)
+
+#endif /* __SMIAPP_PRIV_H_ */
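The SMIAPP_RESET_DELAY() macro above converts the 2400-external-clock start-up requirement into a delay value, apparently in microseconds: a fixed 1000 plus the 2400 clock periods rounded up. A quick standalone check with a hypothetical 9.6 MHz external clock:

#include <stdio.h>

#define SMIAPP_RESET_DELAY_CLOCKS	2400
#define SMIAPP_RESET_DELAY(clk) \
	(1000 + (SMIAPP_RESET_DELAY_CLOCKS * 1000 \
		 + (clk) / 1000 - 1) / ((clk) / 1000))

int main(void)
{
	/* 2400 cycles at 9.6 MHz are 250 us, so this prints 1250 */
	printf("%d\n", SMIAPP_RESET_DELAY(9600000));
	return 0;
}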
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index c2882fa5be85..19ea780b16ff 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -995,10 +995,8 @@ static int sn9c102_stop_transfer(struct sn9c102_device* cam)
static int sn9c102_stream_interrupt(struct sn9c102_device* cam)
{
- long timeout;
-
cam->stream = STREAM_INTERRUPT;
- timeout = wait_event_timeout(cam->wait_stream,
+ wait_event_timeout(cam->wait_stream,
(cam->stream == STREAM_OFF) ||
(cam->state & DEV_DISCONNECTED),
SN9C102_URB_TIMEOUT);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index aedb970d13f6..0421bf9453b4 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -164,35 +164,38 @@ static int soc_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);
- pix->bytesperline = 0;
- pix->sizeimage = 0;
+ if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
+ pix->bytesperline = 0;
+ pix->sizeimage = 0;
+ }
ret = ici->ops->try_fmt(icd, f);
if (ret < 0)
return ret;
- if (!pix->sizeimage) {
- if (!pix->bytesperline) {
- const struct soc_camera_format_xlate *xlate;
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate)
+ return -EINVAL;
+
+ ret = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ pix->bytesperline = max_t(u32, pix->bytesperline, ret);
- xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
- if (!xlate)
- return -EINVAL;
+ ret = soc_mbus_image_size(xlate->host_fmt, pix->bytesperline,
+ pix->height);
+ if (ret < 0)
+ return ret;
- ret = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (ret > 0)
- pix->bytesperline = ret;
- }
- if (pix->bytesperline)
- pix->sizeimage = pix->bytesperline * pix->height;
- }
+ pix->sizeimage = max_t(u32, pix->sizeimage, ret);
return 0;
}
@@ -257,13 +260,13 @@ static int soc_camera_g_std(struct file *file, void *priv, v4l2_std_id *a)
return v4l2_subdev_call(sd, core, g_std, a);
}
-static int soc_camera_enum_fsizes(struct file *file, void *fh,
+static int soc_camera_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- return ici->ops->enum_fsizes(icd, fsize);
+ return ici->ops->enum_framesizes(icd, fsize);
}
static int soc_camera_reqbufs(struct file *file, void *priv,
@@ -1244,8 +1247,8 @@ static int default_s_parm(struct soc_camera_device *icd,
return v4l2_subdev_call(sd, video, s_parm, parm);
}
-static int default_enum_fsizes(struct soc_camera_device *icd,
- struct v4l2_frmsizeenum *fsize)
+static int default_enum_framesizes(struct soc_camera_device *icd,
+ struct v4l2_frmsizeenum *fsize)
{
int ret;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
@@ -1259,7 +1262,7 @@ static int default_enum_fsizes(struct soc_camera_device *icd,
/* map xlate-code to pixel_format, sensor only handle xlate-code*/
fsize_mbus.pixel_format = xlate->code;
- ret = v4l2_subdev_call(sd, video, enum_mbus_fsizes, &fsize_mbus);
+ ret = v4l2_subdev_call(sd, video, enum_framesizes, &fsize_mbus);
if (ret < 0)
return ret;
@@ -1298,8 +1301,8 @@ int soc_camera_host_register(struct soc_camera_host *ici)
ici->ops->set_parm = default_s_parm;
if (!ici->ops->get_parm)
ici->ops->get_parm = default_g_parm;
- if (!ici->ops->enum_fsizes)
- ici->ops->enum_fsizes = default_enum_fsizes;
+ if (!ici->ops->enum_framesizes)
+ ici->ops->enum_framesizes = default_enum_framesizes;
mutex_lock(&list_lock);
list_for_each_entry(ix, &hosts, list) {
@@ -1390,7 +1393,7 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
.vidioc_s_input = soc_camera_s_input,
.vidioc_s_std = soc_camera_s_std,
.vidioc_g_std = soc_camera_g_std,
- .vidioc_enum_framesizes = soc_camera_enum_fsizes,
+ .vidioc_enum_framesizes = soc_camera_enum_framesizes,
.vidioc_reqbufs = soc_camera_reqbufs,
.vidioc_querybuf = soc_camera_querybuf,
.vidioc_qbuf = soc_camera_qbuf,
@@ -1429,6 +1432,10 @@ static int video_dev_create(struct soc_camera_device *icd)
vdev->tvnorms = V4L2_STD_UNKNOWN;
vdev->ctrl_handler = &icd->ctrl_handler;
vdev->lock = &icd->video_lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags);
icd->vdev = vdev;
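With the soc_camera_try_fmt() change above, a caller-supplied bytesperline/sizeimage is only zeroed when the host lacks SOCAM_HOST_CAP_STRIDE; after the host's try_fmt, the values derived from soc_mbus_bytes_per_line() and soc_mbus_image_size() act as a lower bound via max_t(). A minimal sketch of that clamping, with illustrative names (min_bpl/min_size stand in for the mbus-derived results):

#include <stdint.h>

static void clamp_try_fmt(uint32_t *bytesperline, uint32_t *sizeimage,
			  uint32_t min_bpl, uint32_t min_size)
{
	/* pix->bytesperline = max_t(u32, pix->bytesperline, ret); */
	if (*bytesperline < min_bpl)
		*bytesperline = min_bpl;
	/* pix->sizeimage = max_t(u32, pix->sizeimage, ret); */
	if (*sizeimage < min_size)
		*sizeimage = min_size;
}

int main(void)
{
	uint32_t bpl = 0, size = 0;

	clamp_try_fmt(&bpl, &size, 1280, 1280 * 480);
	return (bpl == 1280 && size == 614400) ? 0 : 1;
}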
diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
index cf7f2194ded4..89dce097a827 100644
--- a/drivers/media/video/soc_mediabus.c
+++ b/drivers/media/video/soc_mediabus.c
@@ -24,6 +24,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YVYU8_2X8,
@@ -33,6 +34,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_UYVY8_2X8,
@@ -42,6 +44,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_VYUY8_2X8,
@@ -51,6 +54,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
@@ -60,6 +64,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
@@ -69,6 +74,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_LE,
@@ -78,6 +84,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_BE,
@@ -87,6 +94,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR8_1X8,
@@ -96,6 +104,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_1X10,
@@ -105,6 +114,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_Y8_1X8,
@@ -114,6 +124,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_Y10_1X10,
@@ -123,6 +134,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
@@ -132,6 +144,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
@@ -141,6 +154,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADLO,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
@@ -150,6 +164,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
@@ -159,6 +174,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADLO,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_JPEG_1X8,
@@ -168,6 +184,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_VARIABLE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE,
@@ -177,6 +194,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_2X8_PADHI,
.order = SOC_MBUS_ORDER_BE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YUYV8_1_5X8,
@@ -186,6 +204,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YVYU8_1_5X8,
@@ -195,6 +214,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_1_5X8,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_UYVY8_1X16,
@@ -204,6 +224,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_VYUY8_1X16,
@@ -213,6 +234,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YUYV8_1X16,
@@ -222,6 +244,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_YVYU8_1X16,
@@ -231,6 +254,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 16,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGRBG8_1X8,
@@ -240,6 +264,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
@@ -249,6 +274,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 8,
.packing = SOC_MBUS_PACKING_NONE,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGBRG10_1X10,
@@ -258,6 +284,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGRBG10_1X10,
@@ -267,6 +294,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SRGGB10_1X10,
@@ -276,6 +304,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 10,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SBGGR12_1X12,
@@ -285,6 +314,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGBRG12_1X12,
@@ -294,6 +324,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SGRBG12_1X12,
@@ -303,6 +334,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
}, {
.code = V4L2_MBUS_FMT_SRGGB12_1X12,
@@ -312,6 +344,7 @@ static const struct soc_mbus_lookup mbus_fmt[] = {
.bits_per_sample = 12,
.packing = SOC_MBUS_PACKING_EXTEND16,
.order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
},
},
};
@@ -345,6 +378,9 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
{
+ if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
+ return width * mf->bits_per_sample / 8;
+
switch (mf->packing) {
case SOC_MBUS_PACKING_NONE:
return width * mf->bits_per_sample / 8;
@@ -361,6 +397,24 @@ s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
}
EXPORT_SYMBOL(soc_mbus_bytes_per_line);
+s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
+ u32 bytes_per_line, u32 height)
+{
+ if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
+ return bytes_per_line * height;
+
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ return bytes_per_line * height * 2;
+ case SOC_MBUS_PACKING_1_5X8:
+ return bytes_per_line * height * 3 / 2;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(soc_mbus_image_size);
+
const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
enum v4l2_mbus_pixelcode code,
const struct soc_mbus_lookup *lookup,
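soc_mbus_image_size() above makes the layout distinction explicit: packed formats occupy bytesperline * height, while the planar 2X8 padded packings take twice that and the 1.5x8 packing takes three halves of it. A small standalone check of the arithmetic, assuming a 1280-byte line and 480 lines:

#include <stdio.h>

int main(void)
{
	int bpl = 1280, height = 480;

	printf("packed:       %d\n", bpl * height);		/* 614400 */
	printf("planar 2x8:   %d\n", bpl * height * 2);		/* 1228800 */
	printf("planar 1.5x8: %d\n", bpl * height * 3 / 2);	/* 921600 */
	return 0;
}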
diff --git a/drivers/media/video/sta2x11_vip.c b/drivers/media/video/sta2x11_vip.c
new file mode 100644
index 000000000000..4c10205264d4
--- /dev/null
+++ b/drivers/media/video/sta2x11_vip.c
@@ -0,0 +1,1550 @@
+/*
+ * This is the driver for the STA2x11 Video Input Port.
+ *
+ * Copyright (C) 2010 WindRiver Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Author: Andreas Kies <andreas.kies@windriver.com>
+ * Vlad Lungu <vlad.lungu@windriver.com>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+
+#include <linux/videodev2.h>
+
+#include <linux/kmod.h>
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-dma-contig.h>
+
+#include "sta2x11_vip.h"
+
+#define DRV_NAME "sta2x11_vip"
+#define DRV_VERSION "1.3"
+
+#ifndef PCI_DEVICE_ID_STMICRO_VIP
+#define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D
+#endif
+
+#define MAX_FRAMES 4
+
+/*Register offsets*/
+#define DVP_CTL 0x00
+#define DVP_TFO 0x04
+#define DVP_TFS 0x08
+#define DVP_BFO 0x0C
+#define DVP_BFS 0x10
+#define DVP_VTP 0x14
+#define DVP_VBP 0x18
+#define DVP_VMP 0x1C
+#define DVP_ITM 0x98
+#define DVP_ITS 0x9C
+#define DVP_STA 0xA0
+#define DVP_HLFLN 0xA8
+#define DVP_RGB 0xC0
+#define DVP_PKZ 0xF0
+
+/*Register fields*/
+#define DVP_CTL_ENA 0x00000001
+#define DVP_CTL_RST 0x80000000
+#define DVP_CTL_DIS (~0x00040001)
+
+#define DVP_IT_VSB 0x00000008
+#define DVP_IT_VST 0x00000010
+#define DVP_IT_FIFO 0x00000020
+
+#define DVP_HLFLN_SD 0x00000001
+
+#define REG_WRITE(vip, reg, value) iowrite32((value), (vip->iomem)+(reg))
+#define REG_READ(vip, reg) ioread32((vip->iomem)+(reg))
+
+#define SAVE_COUNT 8
+#define AUX_COUNT 3
+#define IRQ_COUNT 1
+
+/**
+ * struct sta2x11_vip - All internal data for one instance of device
+ * @v4l2_dev: device registered in v4l layer
+ * @video_dev: properties of our device
+ * @pdev: PCI device
+ * @adapter: contains I2C adapter information
+ * @register_save_area: All relevant registers are saved here during suspend
+ * @decoder: contains information about video DAC
+ * @format: pixel format, fixed UYVY
+ * @std: video standard (e.g. PAL/NTSC)
+ * @input: input line for the video signal (0 or 1)
+ * @users: Number of opens of the device (max. 1)
+ * @disabled: Device is in power down state
+ * @mutex: ensures exclusive opening of device
+ * @slock: spin lock for exclusive access to registers
+ * @vb_vidq: queue maintained by videobuf layer
+ * @capture: linked list of capture buffer
+ * @active: struct videobuf_buffer currently being filled
+ * @started: device is ready to capture frame
+ * @closing: device will be shut down
+ * @tcount: Number of top frames
+ * @bcount: Number of bottom frames
+ * @overflow: Number of FIFO overflows
+ * @mem_spare: small spare buffer for an unused frame
+ * @dma_spare: DMA address of mem_spare
+ * @iomem: hardware base address
+ * @config: I2C and gpio config from platform
+ *
+ * All non-local data is accessed via this structure.
+ */
+
+struct sta2x11_vip {
+ struct v4l2_device v4l2_dev;
+ struct video_device *video_dev;
+ struct pci_dev *pdev;
+ struct i2c_adapter *adapter;
+ unsigned int register_save_area[IRQ_COUNT + SAVE_COUNT + AUX_COUNT];
+ struct v4l2_subdev *decoder;
+ struct v4l2_pix_format format;
+ v4l2_std_id std;
+ unsigned int input;
+ int users;
+ int disabled;
+ struct mutex mutex; /* exclusive access during open */
+ spinlock_t slock; /* spin lock for hardware and queue access */
+ struct videobuf_queue vb_vidq;
+ struct list_head capture;
+ struct videobuf_buffer *active;
+ int started, closing, tcount, bcount;
+ int overflow;
+ void *mem_spare;
+ dma_addr_t dma_spare;
+ void *iomem;
+ struct vip_config *config;
+};
+
+static const unsigned int registers_to_save[AUX_COUNT] = {
+ DVP_HLFLN, DVP_RGB, DVP_PKZ
+};
+
+static struct v4l2_pix_format formats_50[] = {
+ { /*PAL interlaced */
+ .width = 720,
+ .height = 576,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_INTERLACED,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 576,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+ { /*PAL top */
+ .width = 720,
+ .height = 288,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_TOP,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 288,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+ { /*PAL bottom */
+ .width = 720,
+ .height = 288,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_BOTTOM,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 288,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+
+};
+
+static struct v4l2_pix_format formats_60[] = {
+ { /*NTSC interlaced */
+ .width = 720,
+ .height = 480,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_INTERLACED,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 480,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+ { /*NTSC top */
+ .width = 720,
+ .height = 240,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_TOP,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 240,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+ { /*NTSC bottom */
+ .width = 720,
+ .height = 240,
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .field = V4L2_FIELD_BOTTOM,
+ .bytesperline = 720 * 2,
+ .sizeimage = 720 * 2 * 240,
+ .colorspace = V4L2_COLORSPACE_SMPTE170M},
+};
+
+/**
+ * buf_setup - Get size and number of video buffers
+ * @vq: queue in videobuf
+ * @count: Number of buffers (1..MAX_FRAMES).
+ * 0 selects the default value.
+ * @size: size of buffer in bytes
+ *
+ * returns size and number of buffers
+ * a preset value of 0 returns the default number.
+ * return value: 0, always successful.
+ */
+static int buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct sta2x11_vip *vip = vq->priv_data;
+
+ *size = vip->format.width * vip->format.height * 2;
+ if (0 == *count || MAX_FRAMES < *count)
+ *count = MAX_FRAMES;
+ return 0;
+};
+
+/**
+ * buf_prepare - prepare buffer for usage
+ * @vq: queue in videobuf layer
+ * @vb: buffer to be prepared
+ * @field: type of video data (interlaced/non-interlaced)
+ *
+ * Allocate or realloc buffer
+ * return value: 0, successful.
+ *
+ * -EINVAL, supplied buffer is too small.
+ *
+ * other, buffer could not be locked.
+ */
+static int buf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct sta2x11_vip *vip = vq->priv_data;
+ int ret;
+
+ vb->size = vip->format.width * vip->format.height * 2;
+ if ((0 != vb->baddr) && (vb->bsize < vb->size))
+ return -EINVAL;
+ vb->width = vip->format.width;
+ vb->height = vip->format.height;
+ vb->field = field;
+
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ ret = videobuf_iolock(vq, vb, NULL);
+ if (ret)
+ goto fail;
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ return 0;
+fail:
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ return ret;
+}
+
+/**
+ * buf_queue - queue buffer for filling
+ * @vq: queue in videobuf layer
+ * @vb: buffer to be queued
+ *
+ * if capturing is already running, the buffer will be queued. Otherwise
+ * capture is started and the buffer is used directly.
+ */
+static void buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct sta2x11_vip *vip = vq->priv_data;
+ u32 dma;
+
+ vb->state = VIDEOBUF_QUEUED;
+
+ if (vip->active) {
+ list_add_tail(&vb->queue, &vip->capture);
+ return;
+ }
+
+ vip->started = 1;
+ vip->tcount = 0;
+ vip->bcount = 0;
+ vip->active = vb;
+ vb->state = VIDEOBUF_ACTIVE;
+
+ dma = videobuf_to_dma_contig(vb);
+
+ REG_WRITE(vip, DVP_TFO, (0 << 16) | (0));
+ /* regardless of interlace mode, top and bottom frame offsets start at zero */
+ REG_WRITE(vip, DVP_BFO, (0 << 16) | (0));
+
+ switch (vip->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ REG_WRITE(vip, DVP_TFS,
+ ((vip->format.height / 2 - 1) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_BFS, ((vip->format.height / 2 - 1) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma + vip->format.width * 2);
+ REG_WRITE(vip, DVP_VMP, 4 * vip->format.width);
+ break;
+ case V4L2_FIELD_TOP:
+ REG_WRITE(vip, DVP_TFS,
+ ((vip->format.height - 1) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_BFS, ((0) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma);
+ REG_WRITE(vip, DVP_VMP, 2 * vip->format.width);
+ break;
+ case V4L2_FIELD_BOTTOM:
+ REG_WRITE(vip, DVP_TFS, ((0) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_BFS,
+ ((vip->format.height) << 16) |
+ (2 * vip->format.width - 1));
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma);
+ REG_WRITE(vip, DVP_VMP, 2 * vip->format.width);
+ break;
+
+ default:
+ pr_warning("VIP: unknown field format\n");
+ return;
+ }
+
+ REG_WRITE(vip, DVP_CTL, DVP_CTL_ENA);
+}
+
+/**
+ * buf_release - release buffer
+ * @vq: queue in videobuf layer
+ * @vb: buffer to be released
+ *
+ * release buffer in videobuf layer
+ */
+static void buf_release(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+
+ videobuf_dma_contig_free(vq, vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops vip_qops = {
+ .buf_setup = buf_setup,
+ .buf_prepare = buf_prepare,
+ .buf_queue = buf_queue,
+ .buf_release = buf_release,
+};
+
+/**
+ * vip_open - open video device
+ * @file: descriptor of device
+ *
+ * open device, make sure it is only opened once.
+ * return value: 0, no error.
+ *
+ * -EBUSY, device is already opened
+ *
+ * -ENOMEM, no memory for auxiliary DMA buffer
+ */
+static int vip_open(struct file *file)
+{
+ struct video_device *dev = video_devdata(file);
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ mutex_lock(&vip->mutex);
+ vip->users++;
+
+ if (vip->users > 1) {
+ vip->users--;
+ mutex_unlock(&vip->mutex);
+ return -EBUSY;
+ }
+
+ file->private_data = dev;
+ vip->overflow = 0;
+ vip->started = 0;
+ vip->closing = 0;
+ vip->active = NULL;
+
+ INIT_LIST_HEAD(&vip->capture);
+ vip->mem_spare = dma_alloc_coherent(&vip->pdev->dev, 64,
+ &vip->dma_spare, GFP_KERNEL);
+ if (!vip->mem_spare) {
+ vip->users--;
+ mutex_unlock(&vip->mutex);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&vip->mutex);
+ videobuf_queue_dma_contig_init_cached(&vip->vb_vidq,
+ &vip_qops,
+ &vip->pdev->dev,
+ &vip->slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct videobuf_buffer),
+ vip, NULL);
+ REG_READ(vip, DVP_ITS);
+ REG_WRITE(vip, DVP_HLFLN, DVP_HLFLN_SD);
+ REG_WRITE(vip, DVP_ITM, DVP_IT_VSB | DVP_IT_VST);
+ REG_WRITE(vip, DVP_CTL, DVP_CTL_RST);
+ REG_WRITE(vip, DVP_CTL, 0);
+ REG_READ(vip, DVP_ITS);
+ return 0;
+}
+
+/**
+ * vip_close - close video device
+ * @file: descriptor of device
+ *
+ * close video device, wait until all pending operations are finished
+ * (at most MAX_FRAMES buffers pending)
+ * Turn off interrupts.
+ *
+ * return value: 0, always successful.
+ */
+static int vip_close(struct file *file)
+{
+ struct video_device *dev = video_devdata(file);
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ vip->closing = 1;
+ if (vip->active)
+ videobuf_waiton(&vip->vb_vidq, vip->active, 0, 0);
+ spin_lock_irq(&vip->slock);
+
+ REG_WRITE(vip, DVP_ITM, 0);
+ REG_WRITE(vip, DVP_CTL, DVP_CTL_RST);
+ REG_WRITE(vip, DVP_CTL, 0);
+ REG_READ(vip, DVP_ITS);
+
+ vip->started = 0;
+ vip->active = NULL;
+
+ spin_unlock_irq(&vip->slock);
+
+ videobuf_stop(&vip->vb_vidq);
+ videobuf_mmap_free(&vip->vb_vidq);
+
+ dma_free_coherent(&vip->pdev->dev, 64, vip->mem_spare, vip->dma_spare);
+ file->private_data = NULL;
+ mutex_lock(&vip->mutex);
+ vip->users--;
+ mutex_unlock(&vip->mutex);
+ return 0;
+}
+
+/**
+ * vip_read - read from video input
+ * @file: descriptor of device
+ * @data: user buffer
+ * @count: number of bytes to be read
+ * @ppos: position within stream
+ *
+ * read video data from video device.
+ * handling is done in generic videobuf layer
+ * return value: provided by videobuf layer
+ */
+static ssize_t vip_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *dev = file->private_data;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_read_stream(&vip->vb_vidq, data, count, ppos, 0,
+ file->f_flags & O_NONBLOCK);
+}
+
+/**
+ * vip_mmap - map user buffer
+ * @file: descriptor of device
+ * @vma: user buffer
+ *
+ * map the driver's capture buffers into user space, including their DMA memory.
+ * handling is done in generic videobuf layer.
+ * return value: provided by videobuf layer
+ */
+static int vip_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *dev = file->private_data;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_mmap_mapper(&vip->vb_vidq, vma);
+}
+
+/**
+ * vip_poll - poll for event
+ * @file: descriptor of device
+ * @wait: contains events to be waited for
+ *
+ * wait for event related to video device.
+ * handling is done in generic videobuf layer.
+ * return value: provided by videobuf layer
+ */
+static unsigned int vip_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct video_device *dev = file->private_data;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_poll_stream(file, &vip->vb_vidq, wait);
+}
+
+/**
+ * vidioc_querycap - return capabilities of device
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @cap: contains return values
+ *
+ * the capabilities of the device are returned
+ *
+ * return value: 0, no error.
+ */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ memset(cap, 0, sizeof(struct v4l2_capability));
+ strcpy(cap->driver, DRV_NAME);
+ strcpy(cap->card, DRV_NAME);
+ cap->version = 0;
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+ pci_name(vip->pdev));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+
+ return 0;
+}
+
+/**
+ * vidioc_s_std - set video standard
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @std: contains standard to be set
+ *
+ * the video standard is set
+ *
+ * return value: 0, no error.
+ *
+ * -EIO, no input signal detected
+ *
+ * other, returned from video DAC.
+ */
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+ v4l2_std_id oldstd = vip->std, newstd;
+ int status;
+
+ if (V4L2_STD_ALL == *std) {
+ v4l2_subdev_call(vip->decoder, core, s_std, *std);
+ ssleep(2);
+ v4l2_subdev_call(vip->decoder, video, querystd, &newstd);
+ v4l2_subdev_call(vip->decoder, video, g_input_status, &status);
+ if (status & V4L2_IN_ST_NO_SIGNAL)
+ return -EIO;
+ *std = vip->std = newstd;
+ if (oldstd != *std) {
+ if (V4L2_STD_525_60 & (*std))
+ vip->format = formats_60[0];
+ else
+ vip->format = formats_50[0];
+ }
+ return 0;
+ }
+
+ if (oldstd != *std) {
+ if (V4L2_STD_525_60 & (*std))
+ vip->format = formats_60[0];
+ else
+ vip->format = formats_50[0];
+ }
+
+ return v4l2_subdev_call(vip->decoder, core, s_std, *std);
+}
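As an illustration of the autodetect path above, here is a minimal user-space sketch (not part of the patch; the /dev/video0 node name is an assumption). Passing V4L2_STD_ALL to VIDIOC_S_STD asks the driver to query the decoder, and VIDIOC_G_STD then reads back what was detected:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	v4l2_std_id std = V4L2_STD_ALL;		/* ask the driver to autodetect */
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_S_STD, &std) < 0)	/* fails with EIO if no signal */
		perror("VIDIOC_S_STD");
	else if (ioctl(fd, VIDIOC_G_STD, &std) == 0)
		printf("detected a %s standard\n",
		       (std & V4L2_STD_525_60) ? "60 Hz" : "50 Hz");
	close(fd);
	return 0;
}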
+
+/**
+ * vidioc_g_std - get video standard
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @std: contains return values
+ *
+ * the current video standard is returned
+ *
+ * return value: 0, no error.
+ */
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ *std = vip->std;
+ return 0;
+}
+
+/**
+ * vidioc_querystd - get possible video standards
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @std: contains return values
+ *
+ * all possible video standards are returned
+ *
+ * return value: delivered by video DAC routine.
+ */
+static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return v4l2_subdev_call(vip->decoder, video, querystd, std);
+}
+
+/**
+ * vidioc_queryctrl - get possible control settings
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @ctrl: contains return values
+ *
+ * return possible values for a control
+ * return value: delivered by video DAC routine.
+ */
+static int vidioc_queryctrl(struct file *file, void *priv,
+ struct v4l2_queryctrl *ctrl)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return v4l2_subdev_call(vip->decoder, core, queryctrl, ctrl);
+}
+
+/**
+ * vidioc_g_ctrl - get control value
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @ctrl: contains return values
+ *
+ * return setting for a control value
+ * return value: delivered by video DAC routine.
+ */
+static int vidioc_g_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return v4l2_subdev_call(vip->decoder, core, g_ctrl, ctrl);
+}
+
+/**
+ * vidioc_s_ctrl - set control value
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @ctrl: contains value to be set
+ *
+ * set value for a specific control
+ * return value: delivered by video DAC routine.
+ */
+static int vidioc_s_ctrl(struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return v4l2_subdev_call(vip->decoder, core, s_ctrl, ctrl);
+}
+
+/**
+ * vidioc_enum_input - return name of input line
+ * @file: descriptor of device (not used)
+ * @priv: points to current videodevice
+ * @inp: contains return values
+ *
+ * the user friendly name of the input line is returned
+ *
+ * return value: 0, no error.
+ *
+ * -EINVAL, input line number out of range
+ */
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ if (inp->index > 1)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_ALL;
+ sprintf(inp->name, "Camera %u", inp->index);
+
+ return 0;
+}
+
+/**
+ * vidioc_s_input - set input line
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @i: new input line number
+ *
+ * the current active input line is set
+ *
+ * return value: 0, no error.
+ *
+ * -EINVAL, line number out of range
+ *
+ * other, errors from the decoder routing call
+ */
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+ int ret;
+
+ if (i > 1)
+ return -EINVAL;
+ ret = v4l2_subdev_call(vip->decoder, video, s_routing, i, 0, 0);
+
+ if (!ret)
+ vip->input = i;
+
+ return ret;
+}
+
+/**
+ * vidioc_g_input - return input line
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @i: returned input line number
+ *
+ * the current active input line is returned
+ *
+ * return value: always 0.
+ */
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ *i = vip->input;
+ return 0;
+}
+
+/**
+ * vidioc_enum_fmt_vid_cap - return video capture format
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @f: returned format information
+ *
+ * returns name and format of video capture
+ * Only UYVY is supported by hardware.
+ *
+ * return value: 0, no error.
+ *
+ * -EINVAL, index out of range
+ */
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+
+ if (f->index != 0)
+ return -EINVAL;
+
+ strcpy(f->description, "4:2:2, packed, UYVY");
+ f->pixelformat = V4L2_PIX_FMT_UYVY;
+ f->flags = 0;
+ return 0;
+}
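For reference (illustrative, not from the patch): UYVY 4:2:2 packs two horizontally adjacent pixels into four bytes, which is why this is the only format enumerated and why the size computations below use two bytes per pixel.

/*
 * UYVY byte order for two adjacent pixels:
 *   U0 Y0 V0 Y1   ->   4 bytes / 2 pixels = 2 bytes per pixel
 */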
+
+/**
+ * vidioc_try_fmt_vid_cap - check and adjust video capture format
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @f: new format
+ *
+ * the proposed format is adjusted to the hardware limits: the width is
+ * fixed to 720 (no scaling), only UYVY is supported, and the height is
+ * clamped between 200 and 576 (PAL) lines. The field type is resolved
+ * from the requested height.
+ *
+ * return value: 0, no error
+ *
+ * -EINVAL, pixel or field format not supported
+ *
+ */
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+ int interlace_lim;
+
+ if (V4L2_PIX_FMT_UYVY != f->fmt.pix.pixelformat)
+ return -EINVAL;
+
+ if (V4L2_STD_525_60 & vip->std)
+ interlace_lim = 240;
+ else
+ interlace_lim = 288;
+
+ switch (f->fmt.pix.field) {
+ case V4L2_FIELD_ANY:
+ if (interlace_lim < f->fmt.pix.height)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ else
+ f->fmt.pix.field = V4L2_FIELD_BOTTOM;
+ break;
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ if (interlace_lim < f->fmt.pix.height)
+ f->fmt.pix.height = interlace_lim;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ f->fmt.pix.height &= ~1;
+ if (2 * interlace_lim < f->fmt.pix.height)
+ f->fmt.pix.height = 2 * interlace_lim;
+ if (200 > f->fmt.pix.height)
+ f->fmt.pix.height = 200;
+ f->fmt.pix.width = 720;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
+ f->fmt.pix.sizeimage = f->fmt.pix.width * 2 * f->fmt.pix.height;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.priv = 0;
+ return 0;
+}
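Worked example of the clamping above (numbers are illustrative): on a 60 Hz standard interlace_lim is 240, so a requested 720x480 frame with V4L2_FIELD_ANY becomes V4L2_FIELD_INTERLACED and is reported with bytesperline = 720 * 2 = 1440 and sizeimage = 1440 * 480 = 691200 bytes. On a 50 Hz standard, any request taller than 2 * 288 = 576 lines is cut back to 576, and a single-field (TOP or BOTTOM) request taller than 288 lines is cut back to 288.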
+
+/**
+ * vidioc_s_fmt_vid_cap - set current video format parameters
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @f: new format to be set
+ *
+ * set new capture format
+ * return value: 0, no error
+ *
+ * other, delivered by video DAC routine.
+ */
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+ int ret;
+
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ memcpy(&vip->format, &f->fmt.pix, sizeof(struct v4l2_pix_format));
+ return 0;
+}
+
+/**
+ * vidioc_g_fmt_vid_cap - get current video format parameters
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @f: contains format information
+ *
+ * returns current video format parameters
+ *
+ * return value: 0, always successful
+ */
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ memcpy(&f->fmt.pix, &vip->format, sizeof(struct v4l2_pix_format));
+ return 0;
+}
+
+/**
+ * vidioc_reqbufs - request buffers
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @p: video buffer
+ *
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_reqbufs(&vip->vb_vidq, p);
+}
+
+/**
+ * vidioc_querybuf - query buffer
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @p: video buffer
+ *
+ * query buffer state.
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_querybuf(&vip->vb_vidq, p);
+}
+
+/**
+ * vidioc_qbuf - queue a buffer
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @p: video buffer
+ *
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_qbuf(&vip->vb_vidq, p);
+}
+
+/**
+ * vidioc_dqbuf - dequeue a buffer
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @p: video buffer
+ *
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_dqbuf(&vip->vb_vidq, p, file->f_flags & O_NONBLOCK);
+}
+
+/**
+ * vidioc_streamon - turn on streaming
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @type: type of capture
+ *
+ * turn on streaming.
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_streamon(&vip->vb_vidq);
+}
+
+/**
+ * vidioc_streamoff - turn off streaming
+ * @file: descriptor of device ( not used)
+ * @priv: points to current videodevice
+ * @type: type of capture
+ *
+ * turn off streaming.
+ * Handling is done in generic videobuf layer.
+ */
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct video_device *dev = priv;
+ struct sta2x11_vip *vip = video_get_drvdata(dev);
+
+ return videobuf_streamoff(&vip->vb_vidq);
+}
+
+static const struct v4l2_file_operations vip_fops = {
+ .owner = THIS_MODULE,
+ .open = vip_open,
+ .release = vip_close,
+ .ioctl = video_ioctl2,
+ .read = vip_read,
+ .mmap = vip_mmap,
+ .poll = vip_poll
+};
+
+static const struct v4l2_ioctl_ops vip_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_querystd = vidioc_querystd,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+};
+
+static struct video_device video_dev_template = {
+ .name = DRV_NAME,
+ .release = video_device_release,
+ .fops = &vip_fops,
+ .ioctl_ops = &vip_ioctl_ops,
+ .tvnorms = V4L2_STD_ALL,
+};
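The tables above route the streaming ioctls straight into videobuf, so user space can follow the standard V4L2 mmap capture pattern. A minimal sketch, assuming the device registers as /dev/video0 and queuing a single buffer for brevity:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	void *mem;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)	/* -> vidioc_reqbufs() */
		return 1;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)	/* -> vidioc_querybuf() */
		return 1;

	mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, buf.m.offset);			/* -> vip_mmap() */
	if (mem == MAP_FAILED)
		return 1;

	if (ioctl(fd, VIDIOC_QBUF, &buf) < 0 ||
	    ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		return 1;

	/* blocks until vip_irq() marks the buffer done */
	if (ioctl(fd, VIDIOC_DQBUF, &buf) == 0) {
		/* buf.bytesused bytes of UYVY data are now available in mem */
	}

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	munmap(mem, buf.length);
	close(fd);
	return 0;
}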
+
+/**
+ * vip_irq - interrupt routine
+ * @irq: Number of interrupt ( not used, correct number is assumed )
+ * @vip: local data structure containing all information
+ *
+ * check for both frame interrupts set ( top and bottom ).
+ * check FIFO overflow, but limit number of log messages after open.
+ * signal a complete buffer if done.
+ * dequeue a new buffer if available.
+ * disable VIP if no buffer available.
+ *
+ * return value: IRQ_NONE, interrupt was not generated by VIP
+ *
+ * IRQ_HANDLED, interrupt done.
+ */
+static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
+{
+ u32 status, dma;
+ unsigned long flags;
+ struct videobuf_buffer *vb;
+
+ status = REG_READ(vip, DVP_ITS);
+
+ if (!status) {
+ pr_debug("VIP: irq ignored\n");
+ return IRQ_NONE;
+ }
+
+ if (!vip->started)
+ return IRQ_HANDLED;
+
+ if (status & DVP_IT_VSB)
+ vip->bcount++;
+
+ if (status & DVP_IT_VST)
+ vip->tcount++;
+
+ if ((DVP_IT_VSB | DVP_IT_VST) == (status & (DVP_IT_VST | DVP_IT_VSB))) {
+ /* this is bad, we are too slow, hope the condition is gone
+ * on the next frame */
+ pr_info("VIP: both irqs\n");
+ return IRQ_HANDLED;
+ }
+
+ if (status & DVP_IT_FIFO) {
+ if (5 > vip->overflow++)
+ pr_info("VIP: fifo overflow\n");
+ }
+
+ if (2 > vip->tcount)
+ return IRQ_HANDLED;
+
+ if (status & DVP_IT_VSB)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&vip->slock, flags);
+
+ REG_WRITE(vip, DVP_CTL, REG_READ(vip, DVP_CTL) & ~DVP_CTL_ENA);
+ if (vip->active) {
+ do_gettimeofday(&vip->active->ts);
+ vip->active->field_count++;
+ vip->active->state = VIDEOBUF_DONE;
+ wake_up(&vip->active->done);
+ vip->active = NULL;
+ }
+ if (!vip->closing) {
+ if (list_empty(&vip->capture))
+ goto done;
+
+ vb = list_first_entry(&vip->capture, struct videobuf_buffer,
+ queue);
+ if (NULL == vb) {
+ pr_info("VIP: no buffer\n");
+ goto done;
+ }
+ vb->state = VIDEOBUF_ACTIVE;
+ list_del(&vb->queue);
+ vip->active = vb;
+ dma = videobuf_to_dma_contig(vb);
+ switch (vip->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma + vip->format.width * 2);
+ break;
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ REG_WRITE(vip, DVP_VTP, dma);
+ REG_WRITE(vip, DVP_VBP, dma);
+ break;
+ default:
+ pr_warning("VIP: unknown field format\n");
+ goto done;
+ }
+ REG_WRITE(vip, DVP_CTL, REG_READ(vip, DVP_CTL) | DVP_CTL_ENA);
+ }
+done:
+ spin_unlock_irqrestore(&vip->slock, flags);
+ return IRQ_HANDLED;
+}
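To make the field programming above concrete (a worked example; the assumption that the VIP advances each field by two lines per transfer, so that the fields weave into one frame, is inferred from the width * 2 offset and not stated in the patch): for an interlaced 720-pixel-wide UYVY frame captured at DMA address D, the handler programs DVP_VTP = D and DVP_VBP = D + 1440, i.e. the bottom field starts exactly one bytesperline below the top field, so top- and bottom-field lines interleave in the same destination buffer.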
+
+/**
+ * vip_gpio_reserve - reserve gpio pin
+ * @dev: device
+ * @pin: GPIO pin number
+ * @dir: initial output value of the pin
+ * @name: GPIO pin name
+ *
+ */
+static int vip_gpio_reserve(struct device *dev, int pin, int dir,
+ const char *name)
+{
+ int ret;
+
+ if (pin == -1)
+ return 0;
+
+ ret = gpio_request(pin, name);
+ if (ret) {
+ dev_err(dev, "Failed to allocate pin %d (%s)\n", pin, name);
+ return ret;
+ }
+
+ ret = gpio_direction_output(pin, dir);
+ if (ret) {
+ dev_err(dev, "Failed to set direction for pin %d (%s)\n",
+ pin, name);
+ gpio_free(pin);
+ return ret;
+ }
+
+ ret = gpio_export(pin, false);
+ if (ret) {
+ dev_err(dev, "Failed to export pin %d (%s)\n", pin, name);
+ gpio_free(pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vip_gpio_release - release gpio pin
+ * @dev: device
+ * @pin: GPIO pin number
+ * @name: GPIO pin name
+ *
+ */
+static void vip_gpio_release(struct device *dev, int pin, const char *name)
+{
+ if (pin != -1) {
+ dev_dbg(dev, "releasing pin %d (%s)\n", pin, name);
+ gpio_unexport(pin);
+ gpio_free(pin);
+ }
+}
+
+/**
+ * sta2x11_vip_init_one - init one instance of video device
+ * @pdev: PCI device
+ * @ent: (not used)
+ *
+ * allocate power and reset pins for the DAC.
+ * reset the video DAC via its reset line.
+ * allocate memory for managing device
+ * request interrupt
+ * map IO region
+ * register device
+ * find and initialize video DAC
+ *
+ * return value: 0, no error
+ *
+ * -ENOMEM, no memory
+ *
+ * -ENODEV, device could not be detected or registered
+ */
+static int __devinit sta2x11_vip_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+ struct sta2x11_vip *vip;
+ struct vip_config *config;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ config = dev_get_platdata(&pdev->dev);
+ if (!config) {
+ dev_info(&pdev->dev, "VIP slot disabled\n");
+ ret = -EINVAL;
+ goto disable;
+ }
+
+ ret = vip_gpio_reserve(&pdev->dev, config->pwr_pin, 0,
+ config->pwr_name);
+ if (ret)
+ goto disable;
+
+ if (config->reset_pin >= 0) {
+ ret = vip_gpio_reserve(&pdev->dev, config->reset_pin, 0,
+ config->reset_name);
+ if (ret) {
+ vip_gpio_release(&pdev->dev, config->pwr_pin,
+ config->pwr_name);
+ goto disable;
+ }
+ }
+
+ if (config->pwr_pin != -1) {
+ /* Datasheet says 5ms between PWR and RST */
+ usleep_range(5000, 25000);
+ ret = gpio_direction_output(config->pwr_pin, 1);
+ }
+
+ if (config->reset_pin != -1) {
+ /* Datasheet says 5ms between PWR and RST */
+ usleep_range(5000, 25000);
+ ret = gpio_direction_output(config->reset_pin, 1);
+ }
+ usleep_range(5000, 25000);
+
+ vip = kzalloc(sizeof(struct sta2x11_vip), GFP_KERNEL);
+ if (!vip) {
+ ret = -ENOMEM;
+ goto release_gpios;
+ }
+
+ vip->pdev = pdev;
+ vip->std = V4L2_STD_PAL;
+ vip->format = formats_50[0];
+ vip->config = config;
+
+ ret = v4l2_device_register(&pdev->dev, &vip->v4l2_dev);
+ if (ret)
+ goto free_mem;
+
+ dev_dbg(&pdev->dev, "BAR #0 at 0x%lx 0x%lx irq %d\n",
+ (unsigned long)pci_resource_start(pdev, 0),
+ (unsigned long)pci_resource_len(pdev, 0), pdev->irq);
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, DRV_NAME);
+ if (ret)
+ goto unreg;
+
+ vip->iomem = pci_iomap(pdev, 0, 0x100);
+ if (!vip->iomem) {
+ ret = -ENOMEM; /* FIXME */
+ goto release;
+ }
+
+ pci_enable_msi(pdev);
+
+ INIT_LIST_HEAD(&vip->capture);
+ spin_lock_init(&vip->slock);
+ mutex_init(&vip->mutex);
+ vip->started = 0;
+ vip->disabled = 0;
+
+ ret = request_irq(pdev->irq,
+ (irq_handler_t) vip_irq,
+ IRQF_SHARED, DRV_NAME, vip);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ ret = -ENODEV;
+ goto unmap;
+ }
+
+ vip->video_dev = video_device_alloc();
+ if (!vip->video_dev) {
+ ret = -ENOMEM;
+ goto release_irq;
+ }
+
+ *(vip->video_dev) = video_dev_template;
+ video_set_drvdata(vip->video_dev, vip);
+
+ ret = video_register_device(vip->video_dev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto vrelease;
+
+ vip->adapter = i2c_get_adapter(vip->config->i2c_id);
+ if (!vip->adapter) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no I2C adapter found\n");
+ goto vunreg;
+ }
+
+ vip->decoder = v4l2_i2c_new_subdev(&vip->v4l2_dev, vip->adapter,
+ "adv7180", vip->config->i2c_addr,
+ NULL);
+ if (!vip->decoder) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "no decoder found\n");
+ goto vunreg;
+ }
+
+ i2c_put_adapter(vip->adapter);
+
+ v4l2_subdev_call(vip->decoder, core, init, 0);
+
+ pr_info("STA2X11 Video Input Port (VIP) loaded\n");
+ return 0;
+
+vunreg:
+ video_set_drvdata(vip->video_dev, NULL);
+vrelease:
+ if (video_is_registered(vip->video_dev))
+ video_unregister_device(vip->video_dev);
+ else
+ video_device_release(vip->video_dev);
+release_irq:
+ free_irq(pdev->irq, vip);
+ pci_disable_msi(pdev);
+unmap:
+ pci_iounmap(pdev, vip->iomem);
+ mutex_destroy(&vip->mutex);
+release:
+ pci_release_regions(pdev);
+unreg:
+ v4l2_device_unregister(&vip->v4l2_dev);
+free_mem:
+ kfree(vip);
+release_gpios:
+ vip_gpio_release(&pdev->dev, config->reset_pin, config->reset_name);
+ vip_gpio_release(&pdev->dev, config->pwr_pin, config->pwr_name);
+disable:
+ /*
+ * do not call pci_disable_device on sta2x11 because it breaks all
+ * other bus masters on this EP
+ */
+ return ret;
+}
+
+/**
+ * sta2x11_vip_remove_one - release device
+ * @pdev: PCI device
+ *
+ * Undo everything done in .._init_one
+ *
+ * unregister video device
+ * free interrupt
+ * unmap I/O addresses
+ * free memory
+ * free GPIO pins
+ */
+static void __devexit sta2x11_vip_remove_one(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
+ struct sta2x11_vip *vip =
+ container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
+
+ video_set_drvdata(vip->video_dev, NULL);
+ video_unregister_device(vip->video_dev);
+ /* do not call video_device_release() here, it is already done */
+ free_irq(pdev->irq, vip);
+ pci_disable_msi(pdev);
+ pci_iounmap(pdev, vip->iomem);
+ pci_release_regions(pdev);
+
+ v4l2_device_unregister(&vip->v4l2_dev);
+ mutex_destroy(&vip->mutex);
+
+ vip_gpio_release(&pdev->dev, vip->config->pwr_pin,
+ vip->config->pwr_name);
+ vip_gpio_release(&pdev->dev, vip->config->reset_pin,
+ vip->config->reset_name);
+
+ kfree(vip);
+ /*
+ * do not call pci_disable_device on sta2x11 because it breaks all
+ * other bus masters on this EP
+ */
+}
+
+#ifdef CONFIG_PM
+
+/**
+ * sta2x11_vip_suspend - set device into power save mode
+ * @pdev: PCI device
+ * @state: new state of device
+ *
+ * all relevant registers are saved and an attempt to set a new state is made.
+ *
+ * return value: 0 always indicates success, even if the device could not
+ * be disabled or does not support a power down mode (workaround for a
+ * hardware limitation).
+ */
+static int sta2x11_vip_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
+ struct sta2x11_vip *vip =
+ container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&vip->slock, flags);
+ vip->register_save_area[0] = REG_READ(vip, DVP_CTL);
+ REG_WRITE(vip, DVP_CTL, vip->register_save_area[0] & DVP_CTL_DIS);
+ vip->register_save_area[SAVE_COUNT] = REG_READ(vip, DVP_ITM);
+ REG_WRITE(vip, DVP_ITM, 0);
+ for (i = 1; i < SAVE_COUNT; i++)
+ vip->register_save_area[i] = REG_READ(vip, 4 * i);
+ for (i = 0; i < AUX_COUNT; i++)
+ vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i] =
+ REG_READ(vip, registers_to_save[i]);
+ spin_unlock_irqrestore(&vip->slock, flags);
+ /* save pci state */
+ pci_save_state(pdev);
+ if (pci_set_power_state(pdev, pci_choose_state(pdev, state))) {
+ /*
+ * do not call pci_disable_device on sta2x11 because it
+ * breaks all other bus masters on this EP
+ */
+ vip->disabled = 1;
+ }
+
+ pr_info("VIP: suspend\n");
+ return 0;
+}
+
+/**
+ * sta2x11_vip_resume - resume device operation
+ * @pdev: PCI device
+ *
+ * re-enable device, set PCI state to powered and restore registers.
+ * resume normal device operation afterwards.
+ *
+ * return value: 0, no error.
+ *
+ * other, could not set device to power on state.
+ */
+static int sta2x11_vip_resume(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev);
+ struct sta2x11_vip *vip =
+ container_of(v4l2_dev, struct sta2x11_vip, v4l2_dev);
+ unsigned long flags;
+ int ret, i;
+
+ pr_info("VIP: resume\n");
+ /* restore pci state */
+ if (vip->disabled) {
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pr_warning("VIP: Can't enable device.\n");
+ return ret;
+ }
+ vip->disabled = 0;
+ }
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (ret) {
+ /*
+ * do not call pci_disable_device on sta2x11 because it
+ * breaks all other bus masters on this EP
+ */
+ pr_warning("VIP: Can't enable device.\n");
+ vip->disabled = 1;
+ return ret;
+ }
+
+ pci_restore_state(pdev);
+
+ spin_lock_irqsave(&vip->slock, flags);
+ for (i = 1; i < SAVE_COUNT; i++)
+ REG_WRITE(vip, 4 * i, vip->register_save_area[i]);
+ for (i = 0; i < AUX_COUNT; i++)
+ REG_WRITE(vip, registers_to_save[i],
+ vip->register_save_area[SAVE_COUNT + IRQ_COUNT + i]);
+ REG_WRITE(vip, DVP_CTL, vip->register_save_area[0]);
+ REG_WRITE(vip, DVP_ITM, vip->register_save_area[SAVE_COUNT]);
+ spin_unlock_irqrestore(&vip->slock, flags);
+ return 0;
+}
+
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(sta2x11_vip_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIP)},
+ {0,}
+};
+
+static struct pci_driver sta2x11_vip_driver = {
+ .name = DRV_NAME,
+ .probe = sta2x11_vip_init_one,
+ .remove = __devexit_p(sta2x11_vip_remove_one),
+ .id_table = sta2x11_vip_pci_tbl,
+#ifdef CONFIG_PM
+ .suspend = sta2x11_vip_suspend,
+ .resume = sta2x11_vip_resume,
+#endif
+};
+
+static int __init sta2x11_vip_init_module(void)
+{
+ return pci_register_driver(&sta2x11_vip_driver);
+}
+
+static void __exit sta2x11_vip_exit_module(void)
+{
+ pci_unregister_driver(&sta2x11_vip_driver);
+}
+
+#ifdef MODULE
+module_init(sta2x11_vip_init_module);
+module_exit(sta2x11_vip_exit_module);
+#else
+late_initcall_sync(sta2x11_vip_init_module);
+#endif
+
+MODULE_DESCRIPTION("STA2X11 Video Input Port driver");
+MODULE_AUTHOR("Wind River");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("sta2x11 video input");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl);
diff --git a/drivers/media/video/sta2x11_vip.h b/drivers/media/video/sta2x11_vip.h
new file mode 100644
index 000000000000..4f81a13666eb
--- /dev/null
+++ b/drivers/media/video/sta2x11_vip.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2011 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Anders Wallin <anders.wallin@windriver.com>
+ *
+ */
+
+#ifndef __STA2X11_VIP_H
+#define __STA2X11_VIP_H
+
+/**
+ * struct vip_config - video input configuration data
+ * @pwr_name: ADV powerdown name
+ * @pwr_pin: ADV powerdown pin
+ * @reset_name: ADV reset name
+ * @reset_pin: ADV reset pin
+ * @i2c_id: ADV i2c adapter (bus) number
+ * @i2c_addr: ADV i2c address
+ */
+struct vip_config {
+ const char *pwr_name;
+ int pwr_pin;
+ const char *reset_name;
+ int reset_pin;
+ int i2c_id;
+ int i2c_addr;
+};
+
+#endif /* __STA2X11_VIP_H */
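A minimal sketch of the platform data a board file might hand to this driver; every value below (pin numbers, GPIO names, adapter number, I2C address) is an illustrative assumption, not taken from any real board:

#include "sta2x11_vip.h"

/* hypothetical board support code */
static struct vip_config example_vip_config = {
	.pwr_name	= "vip-adv-pwrdn",	/* ADV7180 powerdown line */
	.pwr_pin	= 100,
	.reset_name	= "vip-adv-reset",	/* ADV7180 reset line */
	.reset_pin	= 101,
	.i2c_id		= 0,			/* adapter hosting the decoder */
	.i2c_addr	= 0x21,			/* 7-bit decoder address */
};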
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index d427f8436c70..86a0fc56c330 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -38,13 +38,13 @@
#include "stk-webcam.h"
-static bool hflip = 1;
+static bool hflip;
module_param(hflip, bool, 0444);
-MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 1");
+MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 0");
-static bool vflip = 1;
+static bool vflip;
module_param(vflip, bool, 0444);
-MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 1");
+MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 0");
static int debug;
module_param(debug, int, 0444);
diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
index 465d7086babf..3d7ddd93282d 100644
--- a/drivers/media/video/tda9840.c
+++ b/drivers/media/video/tda9840.c
@@ -66,29 +66,53 @@ static void tda9840_write(struct v4l2_subdev *sd, u8 reg, u8 val)
val, reg);
}
+static int tda9840_status(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 byte;
+
+ if (1 != i2c_master_recv(client, &byte, 1)) {
+ v4l2_dbg(1, debug, sd,
+ "i2c_master_recv() failed\n");
+ return -EIO;
+ }
+
+ if (byte & 0x80) {
+ v4l2_dbg(1, debug, sd,
+ "TDA9840_DETECT: register contents invalid\n");
+ return -EINVAL;
+ }
+
+ v4l2_dbg(1, debug, sd, "TDA9840_DETECT: byte: 0x%02x\n", byte);
+ return byte & 0x60;
+}
+
static int tda9840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
{
+ int stat = tda9840_status(sd);
int byte;
if (t->index)
return -EINVAL;
- switch (t->audmode) {
- case V4L2_TUNER_MODE_STEREO:
- byte = TDA9840_SET_STEREO;
- break;
- case V4L2_TUNER_MODE_LANG1_LANG2:
- byte = TDA9840_SET_BOTH;
- break;
- case V4L2_TUNER_MODE_LANG1:
- byte = TDA9840_SET_LANG1;
- break;
- case V4L2_TUNER_MODE_LANG2:
- byte = TDA9840_SET_LANG2;
- break;
- default:
+ stat = stat < 0 ? 0 : stat;
+ if (stat == 0 || stat == 0x60) /* mono input */
byte = TDA9840_SET_MONO;
- break;
+ else if (stat == 0x40) /* stereo input */
+ byte = (t->audmode == V4L2_TUNER_MODE_MONO) ?
+ TDA9840_SET_MONO : TDA9840_SET_STEREO;
+ else { /* bilingual */
+ switch (t->audmode) {
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ byte = TDA9840_SET_BOTH;
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ byte = TDA9840_SET_LANG2;
+ break;
+ default:
+ byte = TDA9840_SET_LANG1;
+ break;
+ }
}
v4l2_dbg(1, debug, sd, "TDA9840_SWITCH: 0x%02x\n", byte);
tda9840_write(sd, SWITCH, byte);
@@ -97,25 +121,14 @@ static int tda9840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
static int tda9840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 byte;
-
- t->rxsubchans = V4L2_TUNER_SUB_MONO;
- if (1 != i2c_master_recv(client, &byte, 1)) {
- v4l2_dbg(1, debug, sd,
- "i2c_master_recv() failed\n");
- return -EIO;
- }
+ int stat = tda9840_status(sd);
- if (byte & 0x80) {
- v4l2_dbg(1, debug, sd,
- "TDA9840_DETECT: register contents invalid\n");
- return -EINVAL;
- }
+ if (stat < 0)
+ return stat;
- v4l2_dbg(1, debug, sd, "TDA9840_DETECT: byte: 0x%02x\n", byte);
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
- switch (byte & 0x60) {
+ switch (stat & 0x60) {
case 0x00:
t->rxsubchans = V4L2_TUNER_SUB_MONO;
break;
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index a794ae62aebf..bfbf9e56b0a4 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -150,7 +150,6 @@ static int vidioc_querycap(struct file *file, void *fh,
strcpy(cap->driver, "tele-video");
strcpy(cap->card, "Telegent Poseidon");
usb_make_path(p->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = KERNEL_VERSION(0, 0, 1);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE | V4L2_CAP_VBI_CAPTURE;
diff --git a/drivers/media/video/tm6000/tm6000-input.c b/drivers/media/video/tm6000/tm6000-input.c
index 859eb90e4d56..e80b7e190471 100644
--- a/drivers/media/video/tm6000/tm6000-input.c
+++ b/drivers/media/video/tm6000/tm6000-input.c
@@ -168,7 +168,6 @@ static void tm6000_ir_urb_received(struct urb *urb)
struct tm6000_IR *ir = dev->ir;
struct tm6000_ir_poll_result poll_result;
char *buf;
- int rc;
dprintk(2, "%s\n",__func__);
if (urb->status < 0 || urb->actual_length <= 0) {
@@ -192,7 +191,7 @@ static void tm6000_ir_urb_received(struct urb *urb)
dprintk(1, "%s, scancode: 0x%04x\n",__func__, poll_result.rc_data);
rc_keydown(ir->rc, poll_result.rc_data, 0);
- rc = usb_submit_urb(urb, GFP_ATOMIC);
+ usb_submit_urb(urb, GFP_ATOMIC);
/*
* Flash the led. We can't do it here, as it is running on IRQ context.
* So, use the scheduler to do it, in a few ms.
diff --git a/drivers/media/video/tm6000/tm6000-stds.c b/drivers/media/video/tm6000/tm6000-stds.c
index 9dc0831d813f..5e28d6a2412f 100644
--- a/drivers/media/video/tm6000/tm6000-stds.c
+++ b/drivers/media/video/tm6000/tm6000-stds.c
@@ -338,7 +338,6 @@ static int tm6000_set_audio_std(struct tm6000_core *dev)
uint8_t areg_02 = 0x04; /* GC1 Fixed gain 0dB */
uint8_t areg_05 = 0x01; /* Auto 4.5 = M Japan, Auto 6.5 = DK */
uint8_t areg_06 = 0x02; /* Auto de-emphasis, mannual channel mode */
- uint8_t nicam_flag = 0; /* No NICAM */
if (dev->radio) {
tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00);
@@ -398,7 +397,6 @@ static int tm6000_set_audio_std(struct tm6000_core *dev)
} else {
areg_05 = 0x07;
}
- nicam_flag = 1;
break;
/* other */
case 3:
diff --git a/drivers/media/video/tm6000/tm6000-video.c b/drivers/media/video/tm6000/tm6000-video.c
index bc13db736e24..f7034df94e0a 100644
--- a/drivers/media/video/tm6000/tm6000-video.c
+++ b/drivers/media/video/tm6000/tm6000-video.c
@@ -169,7 +169,6 @@ static inline void get_next_buf(struct tm6000_dmaqueue *dma_q,
struct tm6000_buffer **buf)
{
struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
- char *outp;
if (list_empty(&dma_q->active)) {
dprintk(dev, V4L2_DEBUG_QUEUE, "No active queue to serve\n");
@@ -179,11 +178,6 @@ static inline void get_next_buf(struct tm6000_dmaqueue *dma_q,
*buf = list_entry(dma_q->active.next,
struct tm6000_buffer, vb.queue);
-
- /* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
-
- return;
}
/*
@@ -211,7 +205,7 @@ static int copy_streams(u8 *data, unsigned long len,
{
struct tm6000_dmaqueue *dma_q = urb->context;
struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
- u8 *ptr = data, *endp = data+len, c;
+ u8 *ptr = data, *endp = data+len;
unsigned long header = 0;
int rc = 0;
unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0;
@@ -264,7 +258,6 @@ static int copy_streams(u8 *data, unsigned long len,
}
/* split the header fields */
- c = (header >> 24) & 0xff;
size = ((header & 0x7e) << 1);
if (size > 0)
size -= 4;
@@ -889,7 +882,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->driver, "tm6000", sizeof(cap->driver));
strlcpy(cap->card, "Trident TVMaster TM5600/6000/6010", sizeof(cap->card));
- cap->version = TM6000_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
V4L2_CAP_AUDIO |
@@ -1732,6 +1724,10 @@ static struct video_device *vdev_init(struct tm6000_core *dev,
vfd->release = video_device_release;
vfd->debug = tm6000_debug;
vfd->lock = &dev->lock;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
diff --git a/drivers/media/video/tm6000/tm6000.h b/drivers/media/video/tm6000/tm6000.h
index 27ba659cfa85..6df418658c9c 100644
--- a/drivers/media/video/tm6000/tm6000.h
+++ b/drivers/media/video/tm6000/tm6000.h
@@ -33,8 +33,6 @@
#include "dvb_frontend.h"
#include "dmxdev.h"
-#define TM6000_VERSION KERNEL_VERSION(0, 0, 2)
-
/* Inputs */
enum tm6000_itype {
TM6000_INPUT_TV = 1,
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index a5c6397ad591..1ad5ab6ce5cf 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -1178,7 +1178,7 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
if (vt->type == t->mode && analog_ops->get_afc)
vt->afc = analog_ops->get_afc(&t->fe);
- if (t->mode != V4L2_TUNER_RADIO) {
+ if (vt->type != V4L2_TUNER_RADIO) {
vt->capability |= V4L2_TUNER_CAP_NORM;
vt->rangelow = tv_range[0] * 16;
vt->rangehigh = tv_range[1] * 16;
@@ -1241,8 +1241,10 @@ static int tuner_log_status(struct v4l2_subdev *sd)
return 0;
}
-static int tuner_suspend(struct i2c_client *c, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tuner_suspend(struct device *dev)
{
+ struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
@@ -1254,8 +1256,9 @@ static int tuner_suspend(struct i2c_client *c, pm_message_t state)
return 0;
}
-static int tuner_resume(struct i2c_client *c)
+static int tuner_resume(struct device *dev)
{
+ struct i2c_client *c = to_i2c_client(dev);
struct tuner *t = to_tuner(i2c_get_clientdata(c));
tuner_dbg("resume\n");
@@ -1266,6 +1269,7 @@ static int tuner_resume(struct i2c_client *c)
return 0;
}
+#endif
static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
{
@@ -1310,6 +1314,10 @@ static const struct v4l2_subdev_ops tuner_ops = {
* I2C structs and module init functions
*/
+static const struct dev_pm_ops tuner_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume)
+};
+
static const struct i2c_device_id tuner_id[] = {
{ "tuner", }, /* autodetect */
{ }
@@ -1320,12 +1328,11 @@ static struct i2c_driver tuner_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "tuner",
+ .pm = &tuner_pm_ops,
},
.probe = tuner_probe,
.remove = tuner_remove,
.command = tuner_command,
- .suspend = tuner_suspend,
- .resume = tuner_resume,
.id_table = tuner_id,
};
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 1326e11cf4a9..b7867427e5c4 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -822,7 +822,7 @@ static int tvp5150_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
if (index)
return -EINVAL;
- *code = V4L2_MBUS_FMT_YUYV8_2X8;
+ *code = V4L2_MBUS_FMT_UYVY8_2X8;
return 0;
}
@@ -830,23 +830,16 @@ static int tvp5150_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *f)
{
struct tvp5150 *decoder = to_tvp5150(sd);
- v4l2_std_id std;
if (f == NULL)
return -EINVAL;
tvp5150_reset(sd, 0);
- /* Calculate height and width based on current standard */
- if (decoder->norm == V4L2_STD_ALL)
- std = tvp5150_read_std(sd);
- else
- std = decoder->norm;
-
f->width = decoder->rect.width;
f->height = decoder->rect.height;
- f->code = V4L2_MBUS_FMT_YUYV8_2X8;
+ f->code = V4L2_MBUS_FMT_UYVY8_2X8;
f->field = V4L2_FIELD_SEQ_TB;
f->colorspace = V4L2_COLORSPACE_SMPTE170M;
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index d7676d85c4df..fb6a5b57eb83 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/module.h>
+#include <linux/v4l2-dv-timings.h>
#include <media/tvp7002.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
@@ -328,6 +329,7 @@ static const struct i2c_reg_value tvp7002_parms_720P50[] = {
/* Preset definition for handling device operation */
struct tvp7002_preset_definition {
u32 preset;
+ struct v4l2_dv_timings timings;
const struct i2c_reg_value *p_settings;
enum v4l2_colorspace color_space;
enum v4l2_field scanmode;
@@ -341,6 +343,7 @@ struct tvp7002_preset_definition {
static const struct tvp7002_preset_definition tvp7002_presets[] = {
{
V4L2_DV_720P60,
+ V4L2_DV_BT_CEA_1280X720P60,
tvp7002_parms_720P60,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_NONE,
@@ -351,6 +354,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_1080I60,
+ V4L2_DV_BT_CEA_1920X1080I60,
tvp7002_parms_1080I60,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_INTERLACED,
@@ -361,6 +365,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_1080I50,
+ V4L2_DV_BT_CEA_1920X1080I50,
tvp7002_parms_1080I50,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_INTERLACED,
@@ -371,6 +376,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_720P50,
+ V4L2_DV_BT_CEA_1280X720P50,
tvp7002_parms_720P50,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_NONE,
@@ -381,6 +387,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_1080P60,
+ V4L2_DV_BT_CEA_1920X1080P60,
tvp7002_parms_1080P60,
V4L2_COLORSPACE_REC709,
V4L2_FIELD_NONE,
@@ -391,6 +398,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_480P59_94,
+ V4L2_DV_BT_CEA_720X480P59_94,
tvp7002_parms_480P,
V4L2_COLORSPACE_SMPTE170M,
V4L2_FIELD_NONE,
@@ -401,6 +409,7 @@ static const struct tvp7002_preset_definition tvp7002_presets[] = {
},
{
V4L2_DV_576P50,
+ V4L2_DV_BT_CEA_720X576P50,
tvp7002_parms_576P,
V4L2_COLORSPACE_SMPTE170M,
V4L2_FIELD_NONE,
@@ -605,6 +614,35 @@ static int tvp7002_s_dv_preset(struct v4l2_subdev *sd,
return -EINVAL;
}
+static int tvp7002_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct tvp7002 *device = to_tvp7002(sd);
+ const struct v4l2_bt_timings *bt = &dv_timings->bt;
+ int i;
+
+ if (dv_timings->type != V4L2_DV_BT_656_1120)
+ return -EINVAL;
+ for (i = 0; i < NUM_PRESETS; i++) {
+ const struct v4l2_bt_timings *t = &tvp7002_presets[i].timings.bt;
+
+ if (!memcmp(bt, t, &bt->standards - &bt->width)) {
+ device->current_preset = &tvp7002_presets[i];
+ return tvp7002_write_inittab(sd, tvp7002_presets[i].p_settings);
+ }
+ }
+ return -EINVAL;
+}
+
+static int tvp7002_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *dv_timings)
+{
+ struct tvp7002 *device = to_tvp7002(sd);
+
+ *dv_timings = device->current_preset->timings;
+ return 0;
+}
+
/*
* tvp7002_s_ctrl() - Set a control
* @ctrl: ptr to v4l2_ctrl struct
@@ -666,11 +704,9 @@ static int tvp7002_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *f
* Returns the current DV preset by TVP7002. If no active input is
* detected, returns -EINVAL
*/
-static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
- struct v4l2_dv_preset *qpreset)
+static int tvp7002_query_dv(struct v4l2_subdev *sd, int *index)
{
const struct tvp7002_preset_definition *presets = tvp7002_presets;
- struct tvp7002 *device;
u8 progressive;
u32 lpfr;
u32 cpln;
@@ -679,12 +715,9 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
u8 lpf_msb;
u8 cpl_lsb;
u8 cpl_msb;
- int index;
-
- /* Return invalid preset if no active input is detected */
- qpreset->preset = V4L2_DV_INVALID;
- device = to_tvp7002(sd);
+ /* Return invalid index if no active input is detected */
+ *index = NUM_PRESETS;
/* Read standards from device registers */
tvp7002_read_err(sd, TVP7002_L_FRAME_STAT_LSBS, &lpf_lsb, &error);
@@ -705,8 +738,8 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
progressive = (lpf_msb & TVP7002_INPR_MASK) >> TVP7002_IP_SHIFT;
/* Do checking of video modes */
- for (index = 0; index < NUM_PRESETS; index++, presets++)
- if (lpfr == presets->lines_per_frame &&
+ for (*index = 0; *index < NUM_PRESETS; (*index)++, presets++)
+ if (lpfr == presets->lines_per_frame &&
progressive == presets->progressive) {
if (presets->cpl_min == 0xffff)
break;
@@ -714,17 +747,42 @@ static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
break;
}
- if (index == NUM_PRESETS) {
+ if (*index == NUM_PRESETS) {
v4l2_dbg(1, debug, sd, "detection failed: lpf = %x, cpl = %x\n",
lpfr, cpln);
- return 0;
+ return -ENOLINK;
}
- /* Set values in found preset */
- qpreset->preset = presets->preset;
-
/* Update lines per frame and clocks per line info */
- v4l2_dbg(1, debug, sd, "detected preset: %d\n", presets->preset);
+ v4l2_dbg(1, debug, sd, "detected preset: %d\n", *index);
+ return 0;
+}
+
+static int tvp7002_query_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *qpreset)
+{
+ int index;
+ int err = tvp7002_query_dv(sd, &index);
+
+ if (err || index == NUM_PRESETS) {
+ qpreset->preset = V4L2_DV_INVALID;
+ if (err == -ENOLINK)
+ err = 0;
+ return err;
+ }
+ qpreset->preset = tvp7002_presets[index].preset;
+ return 0;
+}
+
+static int tvp7002_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ int index;
+ int err = tvp7002_query_dv(sd, &index);
+
+ if (err)
+ return err;
+ *timings = tvp7002_presets[index].timings;
return 0;
}
@@ -894,6 +952,17 @@ static int tvp7002_enum_dv_presets(struct v4l2_subdev *sd,
return v4l_fill_dv_preset_info(tvp7002_presets[preset->index].preset, preset);
}
+static int tvp7002_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ /* Check requested format index is within range */
+ if (timings->index >= NUM_PRESETS)
+ return -EINVAL;
+
+ timings->timings = tvp7002_presets[timings->index].timings;
+ return 0;
+}
+
static const struct v4l2_ctrl_ops tvp7002_ctrl_ops = {
.s_ctrl = tvp7002_s_ctrl,
};
@@ -920,6 +989,10 @@ static const struct v4l2_subdev_video_ops tvp7002_video_ops = {
.enum_dv_presets = tvp7002_enum_dv_presets,
.s_dv_preset = tvp7002_s_dv_preset,
.query_dv_preset = tvp7002_query_dv_preset,
+ .g_dv_timings = tvp7002_g_dv_timings,
+ .s_dv_timings = tvp7002_s_dv_timings,
+ .enum_dv_timings = tvp7002_enum_dv_timings,
+ .query_dv_timings = tvp7002_query_dv_timings,
.s_stream = tvp7002_s_stream,
.g_mbus_fmt = tvp7002_mbus_fmt,
.try_mbus_fmt = tvp7002_mbus_fmt,
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index f344411a4578..c9b2042f8bdf 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -601,13 +601,12 @@ static int usbvision_decompress(struct usb_usbvision *usbvision, unsigned char *
unsigned char *decompressed, int *start_pos,
int *block_typestart_pos, int len)
{
- int rest_pixel, idx, max_pos, pos, extra_pos, block_len, block_type_pos, block_type_len;
+ int rest_pixel, idx, pos, extra_pos, block_len, block_type_pos, block_type_len;
unsigned char block_byte, block_code, block_type, block_type_byte, integrator;
integrator = 0;
pos = *start_pos;
block_type_pos = *block_typestart_pos;
- max_pos = 396; /* pos + len; */
extra_pos = pos;
block_len = 0;
block_byte = 0;
@@ -702,7 +701,7 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
unsigned char strip_data[USBVISION_STRIP_LEN_MAX];
unsigned char strip_header[USBVISION_STRIP_HEADER_LEN];
int idx, idx_end, strip_len, strip_ptr, startblock_pos, block_pos, block_type_pos;
- int clipmask_index, bytes_per_pixel, rc;
+ int clipmask_index;
int image_size;
unsigned char rv, gv, bv;
static unsigned char *Y, *U, *V;
@@ -769,7 +768,6 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
return parse_state_next_frame;
}
- bytes_per_pixel = frame->v4l2_format.bytes_per_pixel;
clipmask_index = frame->curline * MAX_FRAME_WIDTH;
scratch_get(usbvision, strip_data, strip_len);
@@ -781,14 +779,14 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
usbvision->block_pos = block_pos;
- rc = usbvision_decompress(usbvision, strip_data, Y, &block_pos, &block_type_pos, idx_end);
+ usbvision_decompress(usbvision, strip_data, Y, &block_pos, &block_type_pos, idx_end);
if (strip_len > usbvision->max_strip_len)
usbvision->max_strip_len = strip_len;
if (frame->curline % 2)
- rc = usbvision_decompress(usbvision, strip_data, V, &block_pos, &block_type_pos, idx_end / 2);
+ usbvision_decompress(usbvision, strip_data, V, &block_pos, &block_type_pos, idx_end / 2);
else
- rc = usbvision_decompress(usbvision, strip_data, U, &block_pos, &block_type_pos, idx_end / 2);
+ usbvision_decompress(usbvision, strip_data, U, &block_pos, &block_type_pos, idx_end / 2);
if (block_pos > usbvision->comprblock_pos)
usbvision->comprblock_pos = block_pos;
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 5a74f5e07d7d..9bd8f084f348 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1296,6 +1296,10 @@ static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
if (NULL == vdev)
return NULL;
*vdev = *vdev_template;
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags);
vdev->lock = &usbvision->v4l2_lock;
vdev->v4l2_dev = &usbvision->v4l2_dev;
snprintf(vdev->name, sizeof(vdev->name), "%s", name);
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 0efd3b10b353..af26bbe6f76e 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -21,6 +21,7 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <media/v4l2-ctrls.h>
#include "uvcvideo.h"
@@ -420,6 +421,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ .master_id = V4L2_CID_HUE_AUTO,
+ .master_manual = 0,
},
{
.id = V4L2_CID_SATURATION,
@@ -492,6 +495,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ .slave_ids = { V4L2_CID_HUE, },
},
{
.id = V4L2_CID_EXPOSURE_AUTO,
@@ -504,6 +508,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_BITMASK,
.menu_info = exposure_auto_controls,
.menu_count = ARRAY_SIZE(exposure_auto_controls),
+ .slave_ids = { V4L2_CID_EXPOSURE_ABSOLUTE, },
},
{
.id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
@@ -524,6 +529,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .master_id = V4L2_CID_EXPOSURE_AUTO,
+ .master_manual = V4L2_EXPOSURE_MANUAL,
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
@@ -534,6 +541,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ .slave_ids = { V4L2_CID_WHITE_BALANCE_TEMPERATURE, },
},
{
.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
@@ -544,6 +552,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .master_id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .master_manual = 0,
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
@@ -554,6 +564,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ .slave_ids = { V4L2_CID_BLUE_BALANCE,
+ V4L2_CID_RED_BALANCE },
},
{
.id = V4L2_CID_BLUE_BALANCE,
@@ -564,6 +576,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ .master_id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .master_manual = 0,
},
{
.id = V4L2_CID_RED_BALANCE,
@@ -574,6 +588,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 16,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_SIGNED,
+ .master_id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .master_manual = 0,
},
{
.id = V4L2_CID_FOCUS_ABSOLUTE,
@@ -584,6 +600,8 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_INTEGER,
.data_type = UVC_CTRL_DATA_TYPE_UNSIGNED,
+ .master_id = V4L2_CID_FOCUS_AUTO,
+ .master_manual = 0,
},
{
.id = V4L2_CID_FOCUS_AUTO,
@@ -594,6 +612,7 @@ static struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_BOOLEAN,
.data_type = UVC_CTRL_DATA_TYPE_BOOLEAN,
+ .slave_ids = { V4L2_CID_FOCUS_ABSOLUTE, },
},
{
.id = V4L2_CID_IRIS_ABSOLUTE,
@@ -899,25 +918,54 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
return 0;
}
-int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
- struct v4l2_queryctrl *v4l2_ctrl)
+static int __uvc_ctrl_get(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
+ s32 *value)
{
- struct uvc_control *ctrl;
- struct uvc_control_mapping *mapping;
struct uvc_menu_info *menu;
unsigned int i;
int ret;
- ret = mutex_lock_interruptible(&chain->ctrl_mutex);
- if (ret < 0)
- return -ERESTARTSYS;
+ if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
+ return -EINVAL;
- ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
- if (ctrl == NULL) {
- ret = -EINVAL;
- goto done;
+ if (!ctrl->loaded) {
+ ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
+ chain->dev->intfnum, ctrl->info.selector,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
+ ctrl->info.size);
+ if (ret < 0)
+ return ret;
+
+ ctrl->loaded = 1;
}
+ *value = mapping->get(mapping, UVC_GET_CUR,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+
+ if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
+ menu = mapping->menu_info;
+ for (i = 0; i < mapping->menu_count; ++i, ++menu) {
+ if (menu->value == *value) {
+ *value = i;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_queryctrl *v4l2_ctrl)
+{
+ struct uvc_control_mapping *master_map = NULL;
+ struct uvc_control *master_ctrl = NULL;
+ struct uvc_menu_info *menu;
+ unsigned int i;
+
memset(v4l2_ctrl, 0, sizeof *v4l2_ctrl);
v4l2_ctrl->id = mapping->id;
v4l2_ctrl->type = mapping->v4l2_type;
@@ -929,10 +977,23 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ if (mapping->master_id)
+ __uvc_find_control(ctrl->entity, mapping->master_id,
+ &master_map, &master_ctrl, 0);
+ if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) {
+ s32 val;
+ int ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != mapping->master_manual)
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ }
+
if (!ctrl->cached) {
- ret = uvc_ctrl_populate_cache(chain, ctrl);
+ int ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
- goto done;
+ return ret;
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) {
@@ -954,19 +1015,19 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
}
- goto done;
+ return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 1;
v4l2_ctrl->step = 1;
- goto done;
+ return 0;
case V4L2_CTRL_TYPE_BUTTON:
v4l2_ctrl->minimum = 0;
v4l2_ctrl->maximum = 0;
v4l2_ctrl->step = 0;
- goto done;
+ return 0;
default:
break;
@@ -984,6 +1045,27 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ return 0;
+}
+
+int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ struct v4l2_queryctrl *v4l2_ctrl)
+{
+ struct uvc_control *ctrl;
+ struct uvc_control_mapping *mapping;
+ int ret;
+
+ ret = mutex_lock_interruptible(&chain->ctrl_mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
+ ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
+ if (ctrl == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl);
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;
@@ -1054,6 +1136,174 @@ done:
return ret;
}
+/* --------------------------------------------------------------------------
+ * Ctrl event handling
+ */
+
+static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
+ struct v4l2_event *ev,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ s32 value, u32 changes)
+{
+ struct v4l2_queryctrl v4l2_ctrl;
+
+ __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
+
+ memset(ev->reserved, 0, sizeof(ev->reserved));
+ ev->type = V4L2_EVENT_CTRL;
+ ev->id = v4l2_ctrl.id;
+ ev->u.ctrl.value = value;
+ ev->u.ctrl.changes = changes;
+ ev->u.ctrl.type = v4l2_ctrl.type;
+ ev->u.ctrl.flags = v4l2_ctrl.flags;
+ ev->u.ctrl.minimum = v4l2_ctrl.minimum;
+ ev->u.ctrl.maximum = v4l2_ctrl.maximum;
+ ev->u.ctrl.step = v4l2_ctrl.step;
+ ev->u.ctrl.default_value = v4l2_ctrl.default_value;
+}
+
+static void uvc_ctrl_send_event(struct uvc_fh *handle,
+ struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
+ s32 value, u32 changes)
+{
+ struct v4l2_subscribed_event *sev;
+ struct v4l2_event ev;
+
+ if (list_empty(&mapping->ev_subs))
+ return;
+
+ uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, value, changes);
+
+ list_for_each_entry(sev, &mapping->ev_subs, node) {
+ if (sev->fh && (sev->fh != &handle->vfh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK) ||
+ (changes & V4L2_EVENT_CTRL_CH_FLAGS)))
+ v4l2_event_queue_fh(sev->fh, &ev);
+ }
+}
+
+static void uvc_ctrl_send_slave_event(struct uvc_fh *handle,
+ struct uvc_control *master, u32 slave_id,
+ const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
+{
+ struct uvc_control_mapping *mapping = NULL;
+ struct uvc_control *ctrl = NULL;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+ unsigned int i;
+ s32 val = 0;
+
+ /*
+ * We can skip sending an event for the slave if the slave
+ * is being modified in the same transaction.
+ */
+ for (i = 0; i < xctrls_count; i++) {
+ if (xctrls[i].id == slave_id)
+ return;
+ }
+
+ __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0);
+ if (ctrl == NULL)
+ return;
+
+ if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+
+ uvc_ctrl_send_event(handle, ctrl, mapping, val, changes);
+}
+
+static void uvc_ctrl_send_events(struct uvc_fh *handle,
+ const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
+{
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+ u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ unsigned int i;
+ unsigned int j;
+
+ for (i = 0; i < xctrls_count; ++i) {
+ ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+
+ for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) {
+ if (!mapping->slave_ids[j])
+ break;
+ uvc_ctrl_send_slave_event(handle, ctrl,
+ mapping->slave_ids[j],
+ xctrls, xctrls_count);
+ }
+
+ /*
+ * If the master is being modified in the same transaction,
+ * flags may change too.
+ */
+ if (mapping->master_id) {
+ for (j = 0; j < xctrls_count; j++) {
+ if (xctrls[j].id == mapping->master_id) {
+ changes |= V4L2_EVENT_CTRL_CH_FLAGS;
+ break;
+ }
+ }
+ }
+
+ uvc_ctrl_send_event(handle, ctrl, mapping, xctrls[i].value,
+ changes);
+ }
+}
+
+static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
+{
+ struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+ int ret;
+
+ ret = mutex_lock_interruptible(&handle->chain->ctrl_mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
+ ctrl = uvc_find_control(handle->chain, sev->id, &mapping);
+ if (ctrl == NULL) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ list_add_tail(&sev->node, &mapping->ev_subs);
+ if (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL) {
+ struct v4l2_event ev;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+ s32 val = 0;
+
+ if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+
+ uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val,
+ changes);
+ /* Mark the queue as active, allowing this initial
+ event to be accepted. */
+ sev->elems = elems;
+ v4l2_event_queue_fh(sev->fh, &ev);
+ }
+
+done:
+ mutex_unlock(&handle->chain->ctrl_mutex);
+ return ret;
+}
+
+static void uvc_ctrl_del_event(struct v4l2_subscribed_event *sev)
+{
+ struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
+
+ mutex_lock(&handle->chain->ctrl_mutex);
+ list_del(&sev->node);
+ mutex_unlock(&handle->chain->ctrl_mutex);
+}
+
+const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops = {
+ .add = uvc_ctrl_add_event,
+ .del = uvc_ctrl_del_event,
+ .replace = v4l2_ctrl_replace,
+ .merge = v4l2_ctrl_merge,
+};
/* --------------------------------------------------------------------------
* Control transactions
@@ -1101,9 +1351,12 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
/* Reset the loaded flag for auto-update controls that were
* marked as loaded in uvc_ctrl_get/uvc_ctrl_set to prevent
- * uvc_ctrl_get from using the cached value.
+ * uvc_ctrl_get from using the cached value, and for write-only
+ * controls to prevent uvc_ctrl_set from setting bits not
+ * explicitly set by the user.
*/
- if (ctrl->info.flags & UVC_CTRL_FLAG_AUTO_UPDATE)
+ if (ctrl->info.flags & UVC_CTRL_FLAG_AUTO_UPDATE ||
+ !(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
ctrl->loaded = 0;
if (!ctrl->dirty)
@@ -1131,8 +1384,11 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
return 0;
}
-int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback)
+int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count)
{
+ struct uvc_video_chain *chain = handle->chain;
struct uvc_entity *entity;
int ret = 0;
@@ -1143,6 +1399,8 @@ int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback)
goto done;
}
+ if (!rollback)
+ uvc_ctrl_send_events(handle, xctrls, xctrls_count);
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;
@@ -1153,39 +1411,12 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
- struct uvc_menu_info *menu;
- unsigned int i;
- int ret;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
+ if (ctrl == NULL)
return -EINVAL;
- if (!ctrl->loaded) {
- ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id,
- chain->dev->intfnum, ctrl->info.selector,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info.size);
- if (ret < 0)
- return ret;
-
- ctrl->loaded = 1;
- }
-
- xctrl->value = mapping->get(mapping, UVC_GET_CUR,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
-
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- menu = mapping->menu_info;
- for (i = 0; i < mapping->menu_count; ++i, ++menu) {
- if (menu->value == xctrl->value) {
- xctrl->value = i;
- break;
- }
- }
- }
-
- return 0;
+ return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
}
int uvc_ctrl_set(struct uvc_video_chain *chain,
@@ -1641,6 +1872,8 @@ static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
if (map == NULL)
return -ENOMEM;
+ INIT_LIST_HEAD(&map->ev_subs);
+
size = sizeof(*mapping->menu_info) * mapping->menu_count;
map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
if (map->menu_info == NULL) {
@@ -1653,7 +1886,6 @@ static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
if (map->set == NULL)
map->set = uvc_set_le_value;
- map->ctrl = &ctrl->info;
list_add_tail(&map->list, &ctrl->info.mappings);
uvc_trace(UVC_TRACE_CONTROL,
"Adding mapping '%s' to control %pUl/%u.\n",
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 8f54e24e3f35..9288fbd5001b 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -207,6 +207,19 @@ int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
return ret;
}
+#ifndef CONFIG_MMU
+unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
+ unsigned long pgoff)
+{
+ unsigned long ret;
+
+ mutex_lock(&queue->mutex);
+ ret = vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
+ mutex_unlock(&queue->mutex);
+ return ret;
+}
+#endif
+
unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
@@ -237,36 +250,6 @@ int uvc_queue_allocated(struct uvc_video_queue *queue)
return allocated;
}
-#ifndef CONFIG_MMU
-/*
- * Get unmapped area.
- *
- * NO-MMU arch need this function to make mmap() work correctly.
- */
-unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
- unsigned long pgoff)
-{
- struct uvc_buffer *buffer;
- unsigned int i;
- unsigned long ret;
-
- mutex_lock(&queue->mutex);
- for (i = 0; i < queue->count; ++i) {
- buffer = &queue->buffer[i];
- if ((buffer->buf.m.offset >> PAGE_SHIFT) == pgoff)
- break;
- }
- if (i == queue->count) {
- ret = -EINVAL;
- goto done;
- }
- ret = (unsigned long)buffer->mem;
-done:
- mutex_unlock(&queue->mutex);
- return ret;
-}
-#endif
-
/*
* Enable or disable the video buffers queue.
*
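On no-MMU systems the helper above is reached from the V4L2 file operations; a hedged sketch of the driver-side wrapper (the field names follow the uvc_fh/uvc_streaming structures visible elsewhere in this patch, but this is illustrative rather than the exact uvc_v4l2.c code):

#ifndef CONFIG_MMU
static unsigned long uvc_v4l2_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct uvc_fh *handle = file->private_data;
	struct uvc_streaming *stream = handle->stream;

	/* Delegate the offset-to-address lookup to videobuf2. */
	return uvc_queue_get_unmapped_area(&stream->queue, pgoff);
}
#endif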
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index ff2cdddf9bc6..759bef8897e9 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -25,6 +25,8 @@
#include <linux/atomic.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include "uvcvideo.h"
@@ -505,6 +507,8 @@ static int uvc_v4l2_open(struct file *file)
}
}
+ v4l2_fh_init(&handle->vfh, stream->vdev);
+ v4l2_fh_add(&handle->vfh);
handle->chain = stream->chain;
handle->stream = stream;
handle->state = UVC_HANDLE_PASSIVE;
@@ -528,6 +532,8 @@ static int uvc_v4l2_release(struct file *file)
/* Release the file handle. */
uvc_dismiss_privileges(handle);
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
kfree(handle);
file->private_data = NULL;
@@ -584,7 +590,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return ret;
ret = uvc_ctrl_get(chain, &xctrl);
- uvc_ctrl_rollback(chain);
+ uvc_ctrl_rollback(handle);
if (ret >= 0)
ctrl->value = xctrl.value;
break;
@@ -605,10 +611,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ret = uvc_ctrl_set(chain, &xctrl);
if (ret < 0) {
- uvc_ctrl_rollback(chain);
+ uvc_ctrl_rollback(handle);
return ret;
}
- ret = uvc_ctrl_commit(chain);
+ ret = uvc_ctrl_commit(handle, &xctrl, 1);
if (ret == 0)
ctrl->value = xctrl.value;
break;
@@ -630,13 +636,13 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
ret = uvc_ctrl_get(chain, ctrl);
if (ret < 0) {
- uvc_ctrl_rollback(chain);
+ uvc_ctrl_rollback(handle);
ctrls->error_idx = i;
return ret;
}
}
ctrls->error_idx = 0;
- ret = uvc_ctrl_rollback(chain);
+ ret = uvc_ctrl_rollback(handle);
break;
}
@@ -654,7 +660,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
ret = uvc_ctrl_set(chain, ctrl);
if (ret < 0) {
- uvc_ctrl_rollback(chain);
+ uvc_ctrl_rollback(handle);
ctrls->error_idx = i;
return ret;
}
@@ -663,9 +669,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
ctrls->error_idx = 0;
if (cmd == VIDIOC_S_EXT_CTRLS)
- ret = uvc_ctrl_commit(chain);
+ ret = uvc_ctrl_commit(handle,
+ ctrls->controls, ctrls->count);
else
- ret = uvc_ctrl_rollback(chain);
+ ret = uvc_ctrl_rollback(handle);
break;
}
@@ -687,7 +694,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
pin = iterm->id;
- } else if (pin < selector->bNrInPins) {
+ } else if (index < selector->bNrInPins) {
pin = selector->baSourceID[index];
list_for_each_entry(iterm, &chain->entities, chain) {
if (!UVC_ENTITY_IS_ITERM(iterm))
@@ -990,6 +997,26 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return uvc_video_enable(stream, 0);
}
+ case VIDIOC_SUBSCRIBE_EVENT:
+ {
+ struct v4l2_event_subscription *sub = arg;
+
+ switch (sub->type) {
+ case V4L2_EVENT_CTRL:
+ return v4l2_event_subscribe(&handle->vfh, sub, 0,
+ &uvc_ctrl_sub_ev_ops);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_event_unsubscribe(&handle->vfh, arg);
+
+ case VIDIOC_DQEVENT:
+ return v4l2_event_dequeue(&handle->vfh, arg,
+ file->f_flags & O_NONBLOCK);
+
/* Analog video standards make no sense for digital cameras. */
case VIDIOC_ENUMSTD:
case VIDIOC_QUERYSTD:
@@ -1097,7 +1124,8 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
__put_user(kp->menu_count, &up->menu_count))
return -EFAULT;
- __clear_user(up->reserved, sizeof(up->reserved));
+ if (__clear_user(up->reserved, sizeof(up->reserved)))
+ return -EFAULT;
if (kp->menu_count == 0)
return 0;
@@ -1105,8 +1133,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
if (get_user(p, &up->menu_info))
return -EFAULT;
umenus = compat_ptr(p);
- if (!access_ok(VERIFY_WRITE, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
return -EFAULT;
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 67f88d85bb16..7c3d082505b7 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -13,6 +13,8 @@
#include <linux/videodev2.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
#include <media/videobuf2-core.h>
/* --------------------------------------------------------------------------
@@ -153,8 +155,7 @@ struct uvc_control_info {
struct uvc_control_mapping {
struct list_head list;
-
- struct uvc_control_info *ctrl;
+ struct list_head ev_subs;
__u32 id;
__u8 name[32];
@@ -169,6 +170,10 @@ struct uvc_control_mapping {
struct uvc_menu_info *menu_info;
__u32 menu_count;
+ __u32 master_id;
+ __s32 master_manual;
+ __u32 slave_ids[2];
+
__s32 (*get) (struct uvc_control_mapping *mapping, __u8 query,
const __u8 *data);
void (*set) (struct uvc_control_mapping *mapping, __s32 value,
@@ -524,6 +529,7 @@ enum uvc_handle_state {
};
struct uvc_fh {
+ struct v4l2_fh vfh;
struct uvc_video_chain *chain;
struct uvc_streaming *stream;
enum uvc_handle_state state;
@@ -643,6 +649,8 @@ extern int uvc_status_suspend(struct uvc_device *dev);
extern int uvc_status_resume(struct uvc_device *dev);
/* Controls */
+extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
+
extern int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct v4l2_queryctrl *v4l2_ctrl);
extern int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
@@ -655,14 +663,18 @@ extern void uvc_ctrl_cleanup_device(struct uvc_device *dev);
extern int uvc_ctrl_resume_device(struct uvc_device *dev);
extern int uvc_ctrl_begin(struct uvc_video_chain *chain);
-extern int __uvc_ctrl_commit(struct uvc_video_chain *chain, int rollback);
-static inline int uvc_ctrl_commit(struct uvc_video_chain *chain)
+extern int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
+ const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count);
+static inline int uvc_ctrl_commit(struct uvc_fh *handle,
+ const struct v4l2_ext_control *xctrls,
+ unsigned int xctrls_count)
{
- return __uvc_ctrl_commit(chain, 0);
+ return __uvc_ctrl_commit(handle, 0, xctrls, xctrls_count);
}
-static inline int uvc_ctrl_rollback(struct uvc_video_chain *chain)
+static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
{
- return __uvc_ctrl_commit(chain, 1);
+ return __uvc_ctrl_commit(handle, 1, NULL, 0);
}
extern int uvc_ctrl_get(struct uvc_video_chain *chain,
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 2829d256e4b7..5327ad3a6390 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -37,7 +37,7 @@ struct v4l2_clip32 {
struct v4l2_window32 {
struct v4l2_rect w;
- enum v4l2_field field;
+ __u32 field; /* enum v4l2_field */
__u32 chromakey;
compat_caddr_t clips; /* actually struct v4l2_clip32 * */
__u32 clipcount;
@@ -147,7 +147,7 @@ static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp,
}
struct v4l2_format32 {
- enum v4l2_buf_type type;
+ __u32 type; /* enum v4l2_buf_type */
union {
struct v4l2_pix_format pix;
struct v4l2_pix_format_mplane pix_mp;
@@ -170,7 +170,7 @@ struct v4l2_format32 {
struct v4l2_create_buffers32 {
__u32 index;
__u32 count;
- enum v4l2_memory memory;
+ __u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
__u32 reserved[8];
};
@@ -311,16 +311,16 @@ struct v4l2_plane32 {
struct v4l2_buffer32 {
__u32 index;
- enum v4l2_buf_type type;
+ __u32 type; /* enum v4l2_buf_type */
__u32 bytesused;
__u32 flags;
- enum v4l2_field field;
+ __u32 field; /* enum v4l2_field */
struct compat_timeval timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
/* memory location */
- enum v4l2_memory memory;
+ __u32 memory; /* enum v4l2_memory */
union {
__u32 offset;
compat_long_t userptr;
@@ -1023,6 +1023,9 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_UNSUBSCRIBE_EVENT:
case VIDIOC_CREATE_BUFS32:
case VIDIOC_PREPARE_BUF32:
+ case VIDIOC_ENUM_DV_TIMINGS:
+ case VIDIOC_QUERY_DV_TIMINGS:
+ case VIDIOC_DV_TIMINGS_CAP:
ret = do_video_ioctl(file, cmd, arg);
break;
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 18015c0a8d31..9abd9abd4502 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -230,6 +230,19 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Aperture Priority Mode",
NULL
};
+ static const char * const camera_exposure_metering[] = {
+ "Average",
+ "Center Weighted",
+ "Spot",
+ NULL
+ };
+ static const char * const camera_auto_focus_range[] = {
+ "Auto",
+ "Normal",
+ "Macro",
+ "Infinity",
+ NULL
+ };
static const char * const colorfx[] = {
"None",
"Black & White",
@@ -241,6 +254,47 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Grass Green",
"Skin Whiten",
"Vivid",
+ "Aqua",
+ "Art Freeze",
+ "Silhouette",
+ "Solarization",
+ "Antique",
+ "Set Cb/Cr",
+ NULL
+ };
+ static const char * const auto_n_preset_white_balance[] = {
+ "Manual",
+ "Auto",
+ "Incandescent",
+ "Fluorescent",
+ "Fluorescent H",
+ "Horizon",
+ "Daylight",
+ "Flash",
+ "Cloudy",
+ "Shade",
+ NULL,
+ };
+ static const char * const camera_iso_sensitivity_auto[] = {
+ "Manual",
+ "Auto",
+ NULL
+ };
+ static const char * const scene_mode[] = {
+ "None",
+ "Backlight",
+ "Beach/Snow",
+ "Candle Light",
+ "Dusk/Dawn",
+ "Fall Colors",
+ "Fireworks",
+ "Landscape",
+ "Night",
+ "Party/Indoor",
+ "Portrait",
+ "Sports",
+ "Sunset",
+ "Text",
NULL
};
static const char * const tune_preemphasis[] = {
@@ -410,8 +464,18 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return camera_power_line_frequency;
case V4L2_CID_EXPOSURE_AUTO:
return camera_exposure_auto;
+ case V4L2_CID_EXPOSURE_METERING:
+ return camera_exposure_metering;
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ return camera_auto_focus_range;
case V4L2_CID_COLORFX:
return colorfx;
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ return auto_n_preset_white_balance;
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ return camera_iso_sensitivity_auto;
+ case V4L2_CID_SCENE_MODE:
+ return scene_mode;
case V4L2_CID_TUNE_PREEMPHASIS:
return tune_preemphasis;
case V4L2_CID_FLASH_LED_MODE:
@@ -493,6 +557,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
+ case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
/* MPEG controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -590,13 +655,26 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
- case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic";
+ case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
case V4L2_CID_PRIVACY: return "Privacy";
case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
+ case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
+ case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
+ case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
+ case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
+ case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
+ case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
+ case V4L2_CID_SCENE_MODE: return "Scene Mode";
+ case V4L2_CID_3A_LOCK: return "3A Lock";
+ case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
+ case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
+ case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
+ case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
/* FM Radio Modulator control */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -644,6 +722,17 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
+ /* Image source controls */
+ case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
+ case V4L2_CID_VBLANK: return "Vertical Blanking";
+ case V4L2_CID_HBLANK: return "Horizontal Blanking";
+ case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
+
+ /* Image processing controls */
+ case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
+ case V4L2_CID_LINK_FREQ: return "Link Frequency";
+ case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
+
default:
return NULL;
}
@@ -688,6 +777,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ case V4L2_CID_IMAGE_STABILIZATION:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
@@ -696,6 +787,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_TILT_RESET:
case V4L2_CID_FLASH_STROBE:
case V4L2_CID_FLASH_STROBE_STOP:
+ case V4L2_CID_AUTO_FOCUS_START:
+ case V4L2_CID_AUTO_FOCUS_STOP:
*type = V4L2_CTRL_TYPE_BUTTON;
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
*min = *max = *step = *def = 0;
@@ -719,7 +812,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_STREAM_TYPE:
case V4L2_CID_MPEG_STREAM_VBI_FMT:
case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_AUTO_FOCUS_RANGE:
case V4L2_CID_COLORFX:
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
case V4L2_CID_TUNE_PREEMPHASIS:
case V4L2_CID_FLASH_LED_MODE:
case V4L2_CID_FLASH_STROBE_SOURCE:
@@ -733,18 +828,30 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_SCENE_MODE:
*type = V4L2_CTRL_TYPE_MENU;
break;
+ case V4L2_CID_LINK_FREQ:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
case V4L2_CID_RDS_TX_PS_NAME:
case V4L2_CID_RDS_TX_RADIO_TEXT:
*type = V4L2_CTRL_TYPE_STRING;
break;
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
case V4L2_CID_USER_CLASS:
case V4L2_CID_CAMERA_CLASS:
case V4L2_CID_MPEG_CLASS:
case V4L2_CID_FM_TX_CLASS:
case V4L2_CID_FLASH_CLASS:
case V4L2_CID_JPEG_CLASS:
+ case V4L2_CID_IMAGE_SOURCE_CLASS:
+ case V4L2_CID_IMAGE_PROC_CLASS:
*type = V4L2_CTRL_TYPE_CTRL_CLASS;
/* You can neither read nor write these */
*flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -759,6 +866,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
break;
case V4L2_CID_FLASH_FAULT:
case V4L2_CID_JPEG_ACTIVE_MARKER:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
*type = V4L2_CTRL_TYPE_BITMASK;
break;
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
@@ -768,8 +877,12 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
break;
case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
case V4L2_CID_MPEG_VIDEO_DEC_PTS:
+ *flags |= V4L2_CTRL_FLAG_VOLATILE;
+ /* Fall through */
+ case V4L2_CID_PIXEL_RATE:
*type = V4L2_CTRL_TYPE_INTEGER64;
- *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *max = *step = *def = 0;
break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
@@ -817,6 +930,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
break;
case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
case V4L2_CID_FLASH_READY:
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
@@ -852,7 +966,8 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
ev->u.ctrl.value64 = ctrl->cur.val64;
ev->u.ctrl.minimum = ctrl->minimum;
ev->u.ctrl.maximum = ctrl->maximum;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU)
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU
+ || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
ev->u.ctrl.step = 1;
else
ev->u.ctrl.step = ctrl->step;
@@ -1083,10 +1198,13 @@ static int validate_new_int(const struct v4l2_ctrl *ctrl, s32 *pval)
return 0;
case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
if (val < ctrl->minimum || val > ctrl->maximum)
return -ERANGE;
- if (ctrl->qmenu[val][0] == '\0' ||
- (ctrl->menu_skip_mask & (1 << val)))
+ if (ctrl->menu_skip_mask & (1 << val))
+ return -EINVAL;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
+ ctrl->qmenu[val][0] == '\0')
return -EINVAL;
return 0;
@@ -1114,6 +1232,7 @@ static int validate_new(const struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
@@ -1152,7 +1271,8 @@ static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
int v4l2_ctrl_handler_init(struct v4l2_ctrl_handler *hdl,
unsigned nr_of_controls_hint)
{
- mutex_init(&hdl->lock);
+ hdl->lock = &hdl->_lock;
+ mutex_init(hdl->lock);
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
@@ -1173,7 +1293,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
if (hdl == NULL || hdl->buckets == NULL)
return;
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
/* Free all nodes */
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
list_del(&ref->node);
@@ -1190,7 +1310,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
hdl->buckets = NULL;
hdl->cached = NULL;
hdl->error = 0;
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_free);
@@ -1255,9 +1375,9 @@ static struct v4l2_ctrl_ref *find_ref_lock(
struct v4l2_ctrl_ref *ref = NULL;
if (hdl) {
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
ref = find_ref(hdl, id);
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
}
return ref;
}
@@ -1304,7 +1424,7 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
INIT_LIST_HEAD(&new_ref->node);
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
/* Add immediately at the end of the list if the list is empty, or if
the last element in the list has a lower ID.
@@ -1334,7 +1454,7 @@ insert_in_hash:
hdl->buckets[bucket] = new_ref;
unlock:
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
return 0;
}
@@ -1343,7 +1463,8 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, const char *name, enum v4l2_ctrl_type type,
s32 min, s32 max, u32 step, s32 def,
- u32 flags, const char * const *qmenu, void *priv)
+ u32 flags, const char * const *qmenu,
+ const s64 *qmenu_int, void *priv)
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra = 0;
@@ -1356,6 +1477,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
(type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
(type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
(type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL) ||
(type == V4L2_CTRL_TYPE_STRING && max == 0)) {
handler_set_err(hdl, -ERANGE);
return NULL;
@@ -1366,6 +1488,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
}
if ((type == V4L2_CTRL_TYPE_INTEGER ||
type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU ||
type == V4L2_CTRL_TYPE_BOOLEAN) &&
(def < min || def > max)) {
handler_set_err(hdl, -ERANGE);
@@ -1400,7 +1523,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->minimum = min;
ctrl->maximum = max;
ctrl->step = step;
- ctrl->qmenu = qmenu;
+ if (type == V4L2_CTRL_TYPE_MENU)
+ ctrl->qmenu = qmenu;
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ctrl->qmenu_int = qmenu_int;
ctrl->priv = priv;
ctrl->cur.val = ctrl->val = ctrl->default_value = def;
@@ -1414,9 +1540,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
kfree(ctrl);
return NULL;
}
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
list_add_tail(&ctrl->node, &hdl->ctrls);
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
return ctrl;
}
@@ -1427,6 +1553,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl *ctrl;
const char *name = cfg->name;
const char * const *qmenu = cfg->qmenu;
+ const s64 *qmenu_int = cfg->qmenu_int;
enum v4l2_ctrl_type type = cfg->type;
u32 flags = cfg->flags;
s32 min = cfg->min;
@@ -1438,18 +1565,24 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
&def, &flags);
- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU);
+ is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
+ cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
if (is_menu)
WARN_ON(step);
else
WARN_ON(cfg->menu_skip_mask);
- if (is_menu && qmenu == NULL)
+ if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
qmenu = v4l2_ctrl_get_menu(cfg->id);
+ else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
+ qmenu_int == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->id, name,
type, min, max,
is_menu ? cfg->menu_skip_mask : step,
- def, flags, qmenu, priv);
+ def, flags, qmenu, qmenu_int, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
@@ -1466,12 +1599,13 @@ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
u32 flags;
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type == V4L2_CTRL_TYPE_MENU) {
+ if (type == V4L2_CTRL_TYPE_MENU
+ || type == V4L2_CTRL_TYPE_INTEGER_MENU) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, id, name, type,
- min, max, step, def, flags, NULL, NULL);
+ min, max, step, def, flags, NULL, NULL, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
@@ -1493,10 +1627,31 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
return NULL;
}
return v4l2_ctrl_new(hdl, ops, id, name, type,
- 0, max, mask, def, flags, qmenu, NULL);
+ 0, max, mask, def, flags, qmenu, NULL, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
+/* Helper function for standard integer menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s32 max, s32 def, const s64 *qmenu_int)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s32 min;
+ s32 step;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, id, name, type,
+ 0, max, 0, def, flags, NULL, qmenu_int, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
+
/* Add a control from another handler to this handler */
struct v4l2_ctrl *v4l2_ctrl_add_ctrl(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl *ctrl)
@@ -1525,7 +1680,7 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
return 0;
if (hdl->error)
return hdl->error;
- mutex_lock(&add->lock);
+ mutex_lock(add->lock);
list_for_each_entry(ref, &add->ctrl_refs, node) {
struct v4l2_ctrl *ctrl = ref->ctrl;
@@ -1539,7 +1694,7 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
if (ret)
break;
}
- mutex_unlock(&add->lock);
+ mutex_unlock(add->lock);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_add_handler);
@@ -1659,6 +1814,9 @@ static void log_ctrl(const struct v4l2_ctrl *ctrl,
case V4L2_CTRL_TYPE_MENU:
printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]);
break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ printk(KERN_CONT "%lld", ctrl->qmenu_int[ctrl->cur.val]);
+ break;
case V4L2_CTRL_TYPE_BITMASK:
printk(KERN_CONT "0x%08x", ctrl->cur.val);
break;
@@ -1700,11 +1858,11 @@ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
len = strlen(prefix);
if (len && prefix[len - 1] != ' ')
colon = ": ";
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
list_for_each_entry(ctrl, &hdl->ctrls, node)
if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
log_ctrl(ctrl, prefix, colon);
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
@@ -1716,7 +1874,7 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
if (hdl == NULL)
return 0;
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
list_for_each_entry(ctrl, &hdl->ctrls, node)
ctrl->done = false;
@@ -1741,7 +1899,7 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
if (ret)
break;
}
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
@@ -1756,7 +1914,7 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
if (hdl == NULL)
return -EINVAL;
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
/* Try to find it */
ref = find_ref(hdl, id);
@@ -1781,7 +1939,7 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
break;
}
}
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
if (!ref)
return -EINVAL;
@@ -1795,7 +1953,8 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
qc->minimum = ctrl->minimum;
qc->maximum = ctrl->maximum;
qc->default_value = ctrl->default_value;
- if (ctrl->type == V4L2_CTRL_TYPE_MENU)
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU
+ || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
qc->step = 1;
else
qc->step = ctrl->step;
@@ -1825,16 +1984,33 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
qm->reserved = 0;
/* Sanity checks */
- if (ctrl->qmenu == NULL ||
- i < ctrl->minimum || i > ctrl->maximum)
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_MENU:
+ if (ctrl->qmenu == NULL)
+ return -EINVAL;
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (ctrl->qmenu_int == NULL)
+ return -EINVAL;
+ break;
+ default:
return -EINVAL;
+ }
+
+ if (i < ctrl->minimum || i > ctrl->maximum)
+ return -EINVAL;
+
/* Use mask to see if this menu item should be skipped */
if (ctrl->menu_skip_mask & (1 << i))
return -EINVAL;
/* Empty menu items should also be skipped */
- if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
- return -EINVAL;
- strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+ if (ctrl->qmenu[i] == NULL || ctrl->qmenu[i][0] == '\0')
+ return -EINVAL;
+ strlcpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ } else {
+ qm->value = ctrl->qmenu_int[i];
+ }
return 0;
}
EXPORT_SYMBOL(v4l2_querymenu);
@@ -1940,7 +2116,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
belong to the same cluster. */
/* This has to be done with the handler lock taken. */
- mutex_lock(&hdl->lock);
+ mutex_lock(hdl->lock);
/* First zero the helper field in the master control references */
for (i = 0; i < cs->count; i++)
@@ -1962,7 +2138,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
/* Point the mref helper to the current helper struct. */
mref->helper = h;
}
- mutex_unlock(&hdl->lock);
+ mutex_unlock(hdl->lock);
return 0;
}
@@ -1996,7 +2172,8 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
return class_check(hdl, cs->ctrl_class);
if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kmalloc(sizeof(helper[0]) * cs->count, GFP_KERNEL);
+ helpers = kmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
if (helpers == NULL)
return -ENOMEM;
}
@@ -2218,7 +2395,8 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return class_check(hdl, cs->ctrl_class);
if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kmalloc(sizeof(helper[0]) * cs->count, GFP_KERNEL);
+ helpers = kmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
if (!helpers)
return -ENOMEM;
}
@@ -2381,9 +2559,13 @@ int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
-void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
- struct v4l2_subscribed_event *sev)
+static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (ctrl == NULL)
+ return -EINVAL;
+
v4l2_ctrl_lock(ctrl);
list_add_tail(&sev->node, &ctrl->ev_subs);
if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
@@ -2394,20 +2576,46 @@ void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
changes |= V4L2_EVENT_CTRL_CH_VALUE;
fill_event(&ev, ctrl, changes);
+ /* Mark the queue as active, allowing this initial
+ event to be accepted. */
+ sev->elems = elems;
v4l2_event_queue_fh(sev->fh, &ev);
}
v4l2_ctrl_unlock(ctrl);
+ return 0;
}
-EXPORT_SYMBOL(v4l2_ctrl_add_event);
-void v4l2_ctrl_del_event(struct v4l2_ctrl *ctrl,
- struct v4l2_subscribed_event *sev)
+static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
v4l2_ctrl_lock(ctrl);
list_del(&sev->node);
v4l2_ctrl_unlock(ctrl);
}
-EXPORT_SYMBOL(v4l2_ctrl_del_event);
+
+void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.ctrl.changes;
+
+ old->u.ctrl = new->u.ctrl;
+ old->u.ctrl.changes |= old_changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_replace);
+
+void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
+{
+ new->u.ctrl.changes |= old->u.ctrl.changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_merge);
+
+const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
+ .add = v4l2_ctrl_add_event,
+ .del = v4l2_ctrl_del_event,
+ .replace = v4l2_ctrl_replace,
+ .merge = v4l2_ctrl_merge,
+};
+EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
int v4l2_ctrl_log_status(struct file *file, void *fh)
{
@@ -2425,7 +2633,7 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
if (sub->type == V4L2_EVENT_CTRL)
- return v4l2_event_subscribe(fh, sub, 0);
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
return -EINVAL;
}
EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
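As a usage note for the new v4l2_ctrl_new_int_menu() helper above, a hedged driver-side sketch; the frequency values and the example_* names are made up for illustration:

#include <linux/kernel.h>
#include <media/v4l2-ctrls.h>

static const s64 example_link_freqs[] = {
	150000000,	/* illustrative values, not from a real sensor */
	300000000,
};

static int example_register_link_freq(struct v4l2_ctrl_handler *hdl,
				      const struct v4l2_ctrl_ops *ops)
{
	struct v4l2_ctrl *ctrl;

	/* max is the last valid menu index, def selects the initial entry. */
	ctrl = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
				      ARRAY_SIZE(example_link_freqs) - 1, 0,
				      example_link_freqs);
	if (ctrl)
		ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
	return hdl->error;
}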
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 70bec548d904..0cbada18f6f5 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -274,11 +274,12 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
if (!vdev->fops->read)
return -EINVAL;
- if (vdev->lock && mutex_lock_interruptible(vdev->lock))
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) &&
+ mutex_lock_interruptible(vdev->lock))
return -ERESTARTSYS;
if (video_is_registered(vdev))
ret = vdev->fops->read(filp, buf, sz, off);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
return ret;
}
@@ -291,11 +292,12 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
if (!vdev->fops->write)
return -EINVAL;
- if (vdev->lock && mutex_lock_interruptible(vdev->lock))
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) &&
+ mutex_lock_interruptible(vdev->lock))
return -ERESTARTSYS;
if (video_is_registered(vdev))
ret = vdev->fops->write(filp, buf, sz, off);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
return ret;
}
@@ -307,11 +309,11 @@ static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
if (!vdev->fops->poll)
return DEFAULT_POLLMASK;
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_lock(vdev->lock);
if (video_is_registered(vdev))
ret = vdev->fops->poll(filp, poll);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
return ret;
}
@@ -322,11 +324,19 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int ret = -ENODEV;
if (vdev->fops->unlocked_ioctl) {
- if (vdev->lock && mutex_lock_interruptible(vdev->lock))
- return -ERESTARTSYS;
+ bool locked = false;
+
+ if (vdev->lock) {
+ /* always lock unless the cmd is marked as "don't use lock" */
+ locked = !v4l2_is_known_ioctl(cmd) ||
+ !test_bit(_IOC_NR(cmd), vdev->disable_locking);
+
+ if (locked && mutex_lock_interruptible(vdev->lock))
+ return -ERESTARTSYS;
+ }
if (video_is_registered(vdev))
ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
- if (vdev->lock)
+ if (locked)
mutex_unlock(vdev->lock);
} else if (vdev->fops->ioctl) {
/* This code path is a replacement for the BKL. It is a major
@@ -391,11 +401,12 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
if (!vdev->fops->mmap)
return ret;
- if (vdev->lock && mutex_lock_interruptible(vdev->lock))
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) &&
+ mutex_lock_interruptible(vdev->lock))
return -ERESTARTSYS;
if (video_is_registered(vdev))
ret = vdev->fops->mmap(filp, vm);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
return ret;
}
@@ -418,7 +429,8 @@ static int v4l2_open(struct inode *inode, struct file *filp)
video_get(vdev);
mutex_unlock(&videodev_lock);
if (vdev->fops->open) {
- if (vdev->lock && mutex_lock_interruptible(vdev->lock)) {
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) &&
+ mutex_lock_interruptible(vdev->lock)) {
ret = -ERESTARTSYS;
goto err;
}
@@ -426,7 +438,7 @@ static int v4l2_open(struct inode *inode, struct file *filp)
ret = vdev->fops->open(filp);
else
ret = -ENODEV;
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
}
@@ -444,10 +456,10 @@ static int v4l2_release(struct inode *inode, struct file *filp)
int ret = 0;
if (vdev->fops->release) {
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_lock(vdev->lock);
vdev->fops->release(filp);
- if (vdev->lock)
+ if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
}
/* decrease the refcount unconditionally since the release()
@@ -508,6 +520,178 @@ static int get_index(struct video_device *vdev)
return find_first_zero_bit(used, VIDEO_NUM_DEVICES);
}
+#define SET_VALID_IOCTL(ops, cmd, op) \
+ if (ops->op) \
+ set_bit(_IOC_NR(cmd), valid_ioctls)
+
+/* This determines which ioctls are actually implemented in the driver.
+ It's a one-time thing which simplifies video_ioctl2 as it can just do
+ a bit test.
+
+ Note that drivers can override this by setting bits to 1 in
+ vdev->valid_ioctls. If an ioctl is marked as 1 when this function is
+ called, then that ioctl will actually be marked as unimplemented.
+
+ It does that by first setting up the local valid_ioctls bitmap, and
+ at the end doing:
+
+ vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls)
+ */
+static void determine_valid_ioctls(struct video_device *vdev)
+{
+ DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
+ const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
+
+ bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
+
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
+ if (ops->vidioc_g_priority ||
+ test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
+ set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
+ if (ops->vidioc_s_priority ||
+ test_bit(V4L2_FL_USE_FH_PRIO, &vdev->flags))
+ set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
+ if (ops->vidioc_enum_fmt_vid_cap ||
+ ops->vidioc_enum_fmt_vid_out ||
+ ops->vidioc_enum_fmt_vid_cap_mplane ||
+ ops->vidioc_enum_fmt_vid_out_mplane ||
+ ops->vidioc_enum_fmt_vid_overlay ||
+ ops->vidioc_enum_fmt_type_private)
+ set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ if (ops->vidioc_g_fmt_vid_cap ||
+ ops->vidioc_g_fmt_vid_out ||
+ ops->vidioc_g_fmt_vid_cap_mplane ||
+ ops->vidioc_g_fmt_vid_out_mplane ||
+ ops->vidioc_g_fmt_vid_overlay ||
+ ops->vidioc_g_fmt_vbi_cap ||
+ ops->vidioc_g_fmt_vid_out_overlay ||
+ ops->vidioc_g_fmt_vbi_out ||
+ ops->vidioc_g_fmt_sliced_vbi_cap ||
+ ops->vidioc_g_fmt_sliced_vbi_out ||
+ ops->vidioc_g_fmt_type_private)
+ set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if (ops->vidioc_s_fmt_vid_cap ||
+ ops->vidioc_s_fmt_vid_out ||
+ ops->vidioc_s_fmt_vid_cap_mplane ||
+ ops->vidioc_s_fmt_vid_out_mplane ||
+ ops->vidioc_s_fmt_vid_overlay ||
+ ops->vidioc_s_fmt_vbi_cap ||
+ ops->vidioc_s_fmt_vid_out_overlay ||
+ ops->vidioc_s_fmt_vbi_out ||
+ ops->vidioc_s_fmt_sliced_vbi_cap ||
+ ops->vidioc_s_fmt_sliced_vbi_out ||
+ ops->vidioc_s_fmt_type_private)
+ set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if (ops->vidioc_try_fmt_vid_cap ||
+ ops->vidioc_try_fmt_vid_out ||
+ ops->vidioc_try_fmt_vid_cap_mplane ||
+ ops->vidioc_try_fmt_vid_out_mplane ||
+ ops->vidioc_try_fmt_vid_overlay ||
+ ops->vidioc_try_fmt_vbi_cap ||
+ ops->vidioc_try_fmt_vid_out_overlay ||
+ ops->vidioc_try_fmt_vbi_out ||
+ ops->vidioc_try_fmt_sliced_vbi_cap ||
+ ops->vidioc_try_fmt_sliced_vbi_out ||
+ ops->vidioc_try_fmt_type_private)
+ set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
+ SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
+ if (vdev->tvnorms)
+ set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
+ if (ops->vidioc_g_std || vdev->current_norm)
+ set_bit(_IOC_NR(VIDIOC_G_STD), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ /* Note: the control handler can also be passed through the filehandle,
+ and that can't be tested here. If the bit for these control ioctls
+ is set, then the ioctl is valid. But if it is 0, then it can still
+ be valid if the control handler was passed through the filehandle. */
+ if (vdev->ctrl_handler || ops->vidioc_queryctrl)
+ set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls)
+ set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_querymenu)
+ set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
+ SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
+ SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout);
+ SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
+ SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
+ SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
+ SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
+ if (ops->vidioc_g_crop || ops->vidioc_g_selection)
+ set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
+ if (ops->vidioc_s_crop || ops->vidioc_s_selection)
+ set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
+ SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
+ if (ops->vidioc_cropcap || ops->vidioc_g_selection)
+ set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp);
+ SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp);
+ SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index);
+ SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
+ if (ops->vidioc_g_parm || vdev->vfl_type == VFL_TYPE_GRABBER)
+ set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
+ SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ SET_VALID_IOCTL(ops, VIDIOC_DBG_G_REGISTER, vidioc_g_register);
+ SET_VALID_IOCTL(ops, VIDIOC_DBG_S_REGISTER, vidioc_s_register);
+#endif
+ SET_VALID_IOCTL(ops, VIDIOC_DBG_G_CHIP_IDENT, vidioc_g_chip_ident);
+ SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_PRESETS, vidioc_enum_dv_presets);
+ SET_VALID_IOCTL(ops, VIDIOC_S_DV_PRESET, vidioc_s_dv_preset);
+ SET_VALID_IOCTL(ops, VIDIOC_G_DV_PRESET, vidioc_g_dv_preset);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
+ SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
+ /* yes, really vidioc_subscribe_event */
+ SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
+ SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
+ bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
+ BASE_VIDIOC_PRIVATE);
+}
+
/**
* __video_register_device - register video4linux devices
* @vdev: video device structure we want to register
@@ -654,6 +838,13 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
WARN_ON(video_device[vdev->minor] != NULL);
vdev->index = get_index(vdev);
mutex_unlock(&videodev_lock);
+ /* if no lock was passed, then make sure the LOCK_ALL_FOPS bit is
+ clear and warn if it wasn't. */
+ if (vdev->lock == NULL)
+ WARN_ON(test_and_clear_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags));
+
+ if (vdev->ioctl_ops)
+ determine_valid_ioctls(vdev);
/* Part 3: Initialize the character device */
vdev->cdev = cdev_alloc();
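To show how a driver would opt in to the finer-grained locking introduced above, a hedged registration-time sketch; only vdev->lock, vdev->flags and vdev->disable_locking from this patch are relied upon, the surrounding function is illustrative:

#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

static void example_setup_locking(struct video_device *vdev, struct mutex *lock)
{
	vdev->lock = lock;
	/* Also serialize read/write/poll/mmap/open/release with the ioctls. */
	set_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags);
	/* But let the potentially blocking DQBUF ioctl run without the lock. */
	set_bit(_IOC_NR(VIDIOC_DQBUF), vdev->disable_locking);
}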
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index c26ad9637143..ef2a33c94045 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -25,7 +25,6 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-ctrls.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -120,6 +119,14 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;
+ /*
+ * If the event has been added to the fh->subscribed list, but its
+ * add op has not completed yet, elems will be 0; treat this as
+ * not being subscribed.
+ */
+ if (!sev->elems)
+ return;
+
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -132,14 +139,14 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
sev->first = sev_pos(sev, 1);
fh->navailable--;
if (sev->elems == 1) {
- if (sev->replace) {
- sev->replace(&kev->event, ev);
+ if (sev->ops && sev->ops->replace) {
+ sev->ops->replace(&kev->event, ev);
copy_payload = false;
}
- } else if (sev->merge) {
+ } else if (sev->ops && sev->ops->merge) {
struct v4l2_kevent *second_oldest =
sev->events + sev_pos(sev, 0);
- sev->merge(&kev->event, &second_oldest->event);
+ sev->ops->merge(&kev->event, &second_oldest->event);
}
}
@@ -195,24 +202,11 @@ int v4l2_event_pending(struct v4l2_fh *fh)
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
-static void ctrls_replace(struct v4l2_event *old, const struct v4l2_event *new)
-{
- u32 old_changes = old->u.ctrl.changes;
-
- old->u.ctrl = new->u.ctrl;
- old->u.ctrl.changes |= old_changes;
-}
-
-static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
-{
- new->u.ctrl.changes |= old->u.ctrl.changes;
-}
-
int v4l2_event_subscribe(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub, unsigned elems)
+ struct v4l2_event_subscription *sub, unsigned elems,
+ const struct v4l2_subscribed_event_ops *ops)
{
struct v4l2_subscribed_event *sev, *found_ev;
- struct v4l2_ctrl *ctrl = NULL;
unsigned long flags;
unsigned i;
@@ -221,11 +215,6 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
if (elems < 1)
elems = 1;
- if (sub->type == V4L2_EVENT_CTRL) {
- ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
- if (ctrl == NULL)
- return -EINVAL;
- }
sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
if (!sev)
@@ -236,11 +225,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->id = sub->id;
sev->flags = sub->flags;
sev->fh = fh;
- sev->elems = elems;
- if (ctrl) {
- sev->replace = ctrls_replace;
- sev->merge = ctrls_merge;
- }
+ sev->ops = ops;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -248,11 +233,22 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- /* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
- if (found_ev)
+ if (found_ev) {
kfree(sev);
- else if (ctrl)
- v4l2_ctrl_add_event(ctrl, sev);
+ return 0; /* Already listening */
+ }
+
+ if (sev->ops && sev->ops->add) {
+ int ret = sev->ops->add(sev, elems);
+ if (ret) {
+ sev->ops = NULL;
+ v4l2_event_unsubscribe(fh, sub);
+ return ret;
+ }
+ }
+
+ /* Mark as ready for use */
+ sev->elems = elems;
return 0;
}
@@ -306,12 +302,9 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
}
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- if (sev && sev->type == V4L2_EVENT_CTRL) {
- struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);
- if (ctrl)
- v4l2_ctrl_del_event(ctrl, sev);
- }
+ if (sev && sev->ops && sev->ops->del)
+ sev->ops->del(sev);
kfree(sev);
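The subscription ops introduced here generalize what used to be control-specific hooks; a hedged sketch of a driver wiring up both control events and a private event type (the example_* names and the use of V4L2_EVENT_PRIVATE_START are assumptions for illustration):

#include <linux/videodev2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>

static int example_ev_add(struct v4l2_subscribed_event *sev, unsigned elems)
{
	/* Set up any per-subscription state; returning non-zero aborts it. */
	return 0;
}

static void example_ev_del(struct v4l2_subscribed_event *sev)
{
	/* Tear down whatever example_ev_add() created. */
}

static const struct v4l2_subscribed_event_ops example_ev_ops = {
	.add = example_ev_add,
	.del = example_ev_del,
};

static int example_subscribe_event(struct v4l2_fh *fh,
				   struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_CTRL:
		return v4l2_ctrl_subscribe_event(fh, sub);
	case V4L2_EVENT_PRIVATE_START:
		/* Queue depth of 8 events for the driver's own event type. */
		return v4l2_event_subscribe(fh, sub, 8, &example_ev_ops);
	default:
		return -EINVAL;
	}
}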
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 5b2ec1fd2d0a..d7fa8962d8b3 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -55,19 +55,6 @@
memset((u8 *)(p) + offsetof(typeof(*(p)), field) + sizeof((p)->field), \
0, sizeof(*(p)) - offsetof(typeof(*(p)), field) - sizeof((p)->field))
-#define have_fmt_ops(foo) ( \
- ops->vidioc_##foo##_fmt_vid_cap || \
- ops->vidioc_##foo##_fmt_vid_out || \
- ops->vidioc_##foo##_fmt_vid_cap_mplane || \
- ops->vidioc_##foo##_fmt_vid_out_mplane || \
- ops->vidioc_##foo##_fmt_vid_overlay || \
- ops->vidioc_##foo##_fmt_vbi_cap || \
- ops->vidioc_##foo##_fmt_vid_out_overlay || \
- ops->vidioc_##foo##_fmt_vbi_out || \
- ops->vidioc_##foo##_fmt_sliced_vbi_cap || \
- ops->vidioc_##foo##_fmt_sliced_vbi_out || \
- ops->vidioc_##foo##_fmt_type_private)
-
struct std_descr {
v4l2_std_id std;
const char *descr;
@@ -195,93 +182,118 @@ static const char *v4l2_memory_names[] = {
/* ------------------------------------------------------------------ */
/* debug help functions */
-static const char *v4l2_ioctls[] = {
- [_IOC_NR(VIDIOC_QUERYCAP)] = "VIDIOC_QUERYCAP",
- [_IOC_NR(VIDIOC_RESERVED)] = "VIDIOC_RESERVED",
- [_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT",
- [_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT",
- [_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT",
- [_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS",
- [_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF",
- [_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF",
- [_IOC_NR(VIDIOC_S_FBUF)] = "VIDIOC_S_FBUF",
- [_IOC_NR(VIDIOC_OVERLAY)] = "VIDIOC_OVERLAY",
- [_IOC_NR(VIDIOC_QBUF)] = "VIDIOC_QBUF",
- [_IOC_NR(VIDIOC_DQBUF)] = "VIDIOC_DQBUF",
- [_IOC_NR(VIDIOC_STREAMON)] = "VIDIOC_STREAMON",
- [_IOC_NR(VIDIOC_STREAMOFF)] = "VIDIOC_STREAMOFF",
- [_IOC_NR(VIDIOC_G_PARM)] = "VIDIOC_G_PARM",
- [_IOC_NR(VIDIOC_S_PARM)] = "VIDIOC_S_PARM",
- [_IOC_NR(VIDIOC_G_STD)] = "VIDIOC_G_STD",
- [_IOC_NR(VIDIOC_S_STD)] = "VIDIOC_S_STD",
- [_IOC_NR(VIDIOC_ENUMSTD)] = "VIDIOC_ENUMSTD",
- [_IOC_NR(VIDIOC_ENUMINPUT)] = "VIDIOC_ENUMINPUT",
- [_IOC_NR(VIDIOC_G_CTRL)] = "VIDIOC_G_CTRL",
- [_IOC_NR(VIDIOC_S_CTRL)] = "VIDIOC_S_CTRL",
- [_IOC_NR(VIDIOC_G_TUNER)] = "VIDIOC_G_TUNER",
- [_IOC_NR(VIDIOC_S_TUNER)] = "VIDIOC_S_TUNER",
- [_IOC_NR(VIDIOC_G_AUDIO)] = "VIDIOC_G_AUDIO",
- [_IOC_NR(VIDIOC_S_AUDIO)] = "VIDIOC_S_AUDIO",
- [_IOC_NR(VIDIOC_QUERYCTRL)] = "VIDIOC_QUERYCTRL",
- [_IOC_NR(VIDIOC_QUERYMENU)] = "VIDIOC_QUERYMENU",
- [_IOC_NR(VIDIOC_G_INPUT)] = "VIDIOC_G_INPUT",
- [_IOC_NR(VIDIOC_S_INPUT)] = "VIDIOC_S_INPUT",
- [_IOC_NR(VIDIOC_G_OUTPUT)] = "VIDIOC_G_OUTPUT",
- [_IOC_NR(VIDIOC_S_OUTPUT)] = "VIDIOC_S_OUTPUT",
- [_IOC_NR(VIDIOC_ENUMOUTPUT)] = "VIDIOC_ENUMOUTPUT",
- [_IOC_NR(VIDIOC_G_AUDOUT)] = "VIDIOC_G_AUDOUT",
- [_IOC_NR(VIDIOC_S_AUDOUT)] = "VIDIOC_S_AUDOUT",
- [_IOC_NR(VIDIOC_G_MODULATOR)] = "VIDIOC_G_MODULATOR",
- [_IOC_NR(VIDIOC_S_MODULATOR)] = "VIDIOC_S_MODULATOR",
- [_IOC_NR(VIDIOC_G_FREQUENCY)] = "VIDIOC_G_FREQUENCY",
- [_IOC_NR(VIDIOC_S_FREQUENCY)] = "VIDIOC_S_FREQUENCY",
- [_IOC_NR(VIDIOC_CROPCAP)] = "VIDIOC_CROPCAP",
- [_IOC_NR(VIDIOC_G_CROP)] = "VIDIOC_G_CROP",
- [_IOC_NR(VIDIOC_S_CROP)] = "VIDIOC_S_CROP",
- [_IOC_NR(VIDIOC_G_SELECTION)] = "VIDIOC_G_SELECTION",
- [_IOC_NR(VIDIOC_S_SELECTION)] = "VIDIOC_S_SELECTION",
- [_IOC_NR(VIDIOC_G_JPEGCOMP)] = "VIDIOC_G_JPEGCOMP",
- [_IOC_NR(VIDIOC_S_JPEGCOMP)] = "VIDIOC_S_JPEGCOMP",
- [_IOC_NR(VIDIOC_QUERYSTD)] = "VIDIOC_QUERYSTD",
- [_IOC_NR(VIDIOC_TRY_FMT)] = "VIDIOC_TRY_FMT",
- [_IOC_NR(VIDIOC_ENUMAUDIO)] = "VIDIOC_ENUMAUDIO",
- [_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT",
- [_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY",
- [_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY",
- [_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP",
- [_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS",
- [_IOC_NR(VIDIOC_G_EXT_CTRLS)] = "VIDIOC_G_EXT_CTRLS",
- [_IOC_NR(VIDIOC_S_EXT_CTRLS)] = "VIDIOC_S_EXT_CTRLS",
- [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS",
-#if 1
- [_IOC_NR(VIDIOC_ENUM_FRAMESIZES)] = "VIDIOC_ENUM_FRAMESIZES",
- [_IOC_NR(VIDIOC_ENUM_FRAMEINTERVALS)] = "VIDIOC_ENUM_FRAMEINTERVALS",
- [_IOC_NR(VIDIOC_G_ENC_INDEX)] = "VIDIOC_G_ENC_INDEX",
- [_IOC_NR(VIDIOC_ENCODER_CMD)] = "VIDIOC_ENCODER_CMD",
- [_IOC_NR(VIDIOC_TRY_ENCODER_CMD)] = "VIDIOC_TRY_ENCODER_CMD",
-
- [_IOC_NR(VIDIOC_DECODER_CMD)] = "VIDIOC_DECODER_CMD",
- [_IOC_NR(VIDIOC_TRY_DECODER_CMD)] = "VIDIOC_TRY_DECODER_CMD",
- [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
- [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
-
- [_IOC_NR(VIDIOC_DBG_G_CHIP_IDENT)] = "VIDIOC_DBG_G_CHIP_IDENT",
- [_IOC_NR(VIDIOC_S_HW_FREQ_SEEK)] = "VIDIOC_S_HW_FREQ_SEEK",
+
+struct v4l2_ioctl_info {
+ unsigned int ioctl;
+ u16 flags;
+ const char * const name;
+};
+
+/* This control needs a priority check */
+#define INFO_FL_PRIO (1 << 0)
+/* This control can be valid if the filehandle passes a control handler. */
+#define INFO_FL_CTRL (1 << 1)
+
+#define IOCTL_INFO(_ioctl, _flags) [_IOC_NR(_ioctl)] = { \
+ .ioctl = _ioctl, \
+ .flags = _flags, \
+ .name = #_ioctl, \
+}
+
+static struct v4l2_ioctl_info v4l2_ioctls[] = {
+ IOCTL_INFO(VIDIOC_QUERYCAP, 0),
+ IOCTL_INFO(VIDIOC_ENUM_FMT, 0),
+ IOCTL_INFO(VIDIOC_G_FMT, 0),
+ IOCTL_INFO(VIDIOC_S_FMT, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_REQBUFS, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYBUF, 0),
+ IOCTL_INFO(VIDIOC_G_FBUF, 0),
+ IOCTL_INFO(VIDIOC_S_FBUF, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_OVERLAY, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QBUF, 0),
+ IOCTL_INFO(VIDIOC_DQBUF, 0),
+ IOCTL_INFO(VIDIOC_STREAMON, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_STREAMOFF, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_PARM, 0),
+ IOCTL_INFO(VIDIOC_S_PARM, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_STD, 0),
+ IOCTL_INFO(VIDIOC_S_STD, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUMSTD, 0),
+ IOCTL_INFO(VIDIOC_ENUMINPUT, 0),
+ IOCTL_INFO(VIDIOC_G_CTRL, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_S_CTRL, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_TUNER, 0),
+ IOCTL_INFO(VIDIOC_S_TUNER, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_AUDIO, 0),
+ IOCTL_INFO(VIDIOC_S_AUDIO, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYCTRL, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_QUERYMENU, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_INPUT, 0),
+ IOCTL_INFO(VIDIOC_S_INPUT, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_OUTPUT, 0),
+ IOCTL_INFO(VIDIOC_S_OUTPUT, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUMOUTPUT, 0),
+ IOCTL_INFO(VIDIOC_G_AUDOUT, 0),
+ IOCTL_INFO(VIDIOC_S_AUDOUT, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_MODULATOR, 0),
+ IOCTL_INFO(VIDIOC_S_MODULATOR, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_FREQUENCY, 0),
+ IOCTL_INFO(VIDIOC_S_FREQUENCY, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_CROPCAP, 0),
+ IOCTL_INFO(VIDIOC_G_CROP, 0),
+ IOCTL_INFO(VIDIOC_S_CROP, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_SELECTION, 0),
+ IOCTL_INFO(VIDIOC_S_SELECTION, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_JPEGCOMP, 0),
+ IOCTL_INFO(VIDIOC_S_JPEGCOMP, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYSTD, 0),
+ IOCTL_INFO(VIDIOC_TRY_FMT, 0),
+ IOCTL_INFO(VIDIOC_ENUMAUDIO, 0),
+ IOCTL_INFO(VIDIOC_ENUMAUDOUT, 0),
+ IOCTL_INFO(VIDIOC_G_PRIORITY, 0),
+ IOCTL_INFO(VIDIOC_S_PRIORITY, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, 0),
+ IOCTL_INFO(VIDIOC_LOG_STATUS, 0),
+ IOCTL_INFO(VIDIOC_G_EXT_CTRLS, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_S_EXT_CTRLS, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, 0),
+ IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, 0),
+ IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, 0),
+ IOCTL_INFO(VIDIOC_G_ENC_INDEX, 0),
+ IOCTL_INFO(VIDIOC_ENCODER_CMD, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, 0),
+ IOCTL_INFO(VIDIOC_DECODER_CMD, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, 0),
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ IOCTL_INFO(VIDIOC_DBG_S_REGISTER, 0),
+ IOCTL_INFO(VIDIOC_DBG_G_REGISTER, 0),
#endif
- [_IOC_NR(VIDIOC_ENUM_DV_PRESETS)] = "VIDIOC_ENUM_DV_PRESETS",
- [_IOC_NR(VIDIOC_S_DV_PRESET)] = "VIDIOC_S_DV_PRESET",
- [_IOC_NR(VIDIOC_G_DV_PRESET)] = "VIDIOC_G_DV_PRESET",
- [_IOC_NR(VIDIOC_QUERY_DV_PRESET)] = "VIDIOC_QUERY_DV_PRESET",
- [_IOC_NR(VIDIOC_S_DV_TIMINGS)] = "VIDIOC_S_DV_TIMINGS",
- [_IOC_NR(VIDIOC_G_DV_TIMINGS)] = "VIDIOC_G_DV_TIMINGS",
- [_IOC_NR(VIDIOC_DQEVENT)] = "VIDIOC_DQEVENT",
- [_IOC_NR(VIDIOC_SUBSCRIBE_EVENT)] = "VIDIOC_SUBSCRIBE_EVENT",
- [_IOC_NR(VIDIOC_UNSUBSCRIBE_EVENT)] = "VIDIOC_UNSUBSCRIBE_EVENT",
- [_IOC_NR(VIDIOC_CREATE_BUFS)] = "VIDIOC_CREATE_BUFS",
- [_IOC_NR(VIDIOC_PREPARE_BUF)] = "VIDIOC_PREPARE_BUF",
+ IOCTL_INFO(VIDIOC_DBG_G_CHIP_IDENT, 0),
+ IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUM_DV_PRESETS, 0),
+ IOCTL_INFO(VIDIOC_S_DV_PRESET, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_DV_PRESET, 0),
+ IOCTL_INFO(VIDIOC_QUERY_DV_PRESET, 0),
+ IOCTL_INFO(VIDIOC_S_DV_TIMINGS, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_DV_TIMINGS, 0),
+ IOCTL_INFO(VIDIOC_DQEVENT, 0),
+ IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, 0),
+ IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, 0),
+ IOCTL_INFO(VIDIOC_CREATE_BUFS, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_PREPARE_BUF, 0),
+ IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, 0),
+ IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, 0),
+ IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, 0),
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
+bool v4l2_is_known_ioctl(unsigned int cmd)
+{
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS)
+ return false;
+ return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
+}
+
/* Common ioctl debug function. This function can be used by
external ioctl messages as well as internal V4L ioctl */
void v4l_printk_ioctl(unsigned int cmd)
@@ -297,7 +309,7 @@ void v4l_printk_ioctl(unsigned int cmd)
type = "v4l2";
break;
}
- printk("%s", v4l2_ioctls[_IOC_NR(cmd)]);
+ printk("%s", v4l2_ioctls[_IOC_NR(cmd)].name);
return;
default:
type = "unknown";
@@ -359,6 +371,34 @@ static inline void dbgrect(struct video_device *vfd, char *s,
r->width, r->height);
};
+static void dbgtimings(struct video_device *vfd,
+ const struct v4l2_dv_timings *p)
+{
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ dbgarg2("bt-656/1120:interlaced=%d,"
+ " pixelclock=%lld,"
+ " width=%d, height=%d, polarities=%x,"
+ " hfrontporch=%d, hsync=%d,"
+ " hbackporch=%d, vfrontporch=%d,"
+ " vsync=%d, vbackporch=%d,"
+ " il_vfrontporch=%d, il_vsync=%d,"
+ " il_vbackporch=%d, standards=%x, flags=%x\n",
+ p->bt.interlaced, p->bt.pixelclock,
+ p->bt.width, p->bt.height,
+ p->bt.polarities, p->bt.hfrontporch,
+ p->bt.hsync, p->bt.hbackporch,
+ p->bt.vfrontporch, p->bt.vsync,
+ p->bt.vbackporch, p->bt.il_vfrontporch,
+ p->bt.il_vsync, p->bt.il_vbackporch,
+ p->bt.standards, p->bt.flags);
+ break;
+ default:
+ dbgarg2("Unknown type %d!\n", p->type);
+ break;
+ }
+}
+
static inline void v4l_print_pix_fmt(struct video_device *vfd,
struct v4l2_pix_format *fmt)
{
@@ -504,7 +544,6 @@ static long __video_do_ioctl(struct file *file,
void *fh = file->private_data;
struct v4l2_fh *vfh = NULL;
int use_fh_prio = 0;
- long ret_prio = 0;
long ret = -ENOTTY;
if (ops == NULL) {
@@ -513,19 +552,30 @@ static long __video_do_ioctl(struct file *file,
return ret;
}
- if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
- !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
- v4l_print_ioctl(vfd->name, cmd);
- printk(KERN_CONT "\n");
- }
-
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
vfh = file->private_data;
use_fh_prio = test_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
}
- if (use_fh_prio)
- ret_prio = v4l2_prio_check(vfd->prio, vfh->prio);
+ if (v4l2_is_known_ioctl(cmd)) {
+ struct v4l2_ioctl_info *info = &v4l2_ioctls[_IOC_NR(cmd)];
+
+ if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
+ !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
+ return -ENOTTY;
+
+ if (use_fh_prio && (info->flags & INFO_FL_PRIO)) {
+ ret = v4l2_prio_check(vfd->prio, vfh->prio);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
+ !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
+ v4l_print_ioctl(vfd->name, cmd);
+ printk(KERN_CONT "\n");
+ }
switch (cmd) {
@@ -534,9 +584,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_capability *cap = (struct v4l2_capability *)arg;
- if (!ops->vidioc_querycap)
- break;
-
cap->version = LINUX_VERSION_CODE;
ret = ops->vidioc_querycap(file, fh, cap);
if (!ret)
@@ -570,14 +617,11 @@ static long __video_do_ioctl(struct file *file,
{
enum v4l2_priority *p = arg;
- if (!ops->vidioc_s_priority && !use_fh_prio)
- break;
dbgarg(cmd, "setting priority to %d\n", *p);
if (ops->vidioc_s_priority)
ret = ops->vidioc_s_priority(file, fh, *p);
else
- ret = ret_prio ? ret_prio :
- v4l2_prio_change(&vfd->v4l2_dev->prio,
+ ret = v4l2_prio_change(&vfd->v4l2_dev->prio,
&vfh->prio, *p);
break;
}
@@ -587,6 +631,7 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_fmtdesc *f = arg;
+ ret = -EINVAL;
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (likely(ops->vidioc_enum_fmt_vid_cap))
@@ -619,7 +664,7 @@ static long __video_do_ioctl(struct file *file,
default:
break;
}
- if (likely (!ret))
+ if (likely(!ret))
dbgarg(cmd, "index=%d, type=%d, flags=%d, "
"pixelformat=%c%c%c%c, description='%s'\n",
f->index, f->type, f->flags,
@@ -628,14 +673,6 @@ static long __video_do_ioctl(struct file *file,
(f->pixelformat >> 16) & 0xff,
(f->pixelformat >> 24) & 0xff,
f->description);
- else if (ret == -ENOTTY &&
- (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_vid_cap_mplane ||
- ops->vidioc_enum_fmt_vid_out_mplane ||
- ops->vidioc_enum_fmt_vid_overlay ||
- ops->vidioc_enum_fmt_type_private))
- ret = -EINVAL;
break;
}
case VIDIOC_G_FMT:
@@ -645,6 +682,7 @@ static long __video_do_ioctl(struct file *file,
/* FIXME: Should be one dump per type */
dbgarg(cmd, "type=%s\n", prt_names(f->type, v4l2_type_names));
+ ret = -EINVAL;
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
if (ops->vidioc_g_fmt_vid_cap)
@@ -706,21 +744,12 @@ static long __video_do_ioctl(struct file *file,
fh, f);
break;
}
- if (unlikely(ret == -ENOTTY && have_fmt_ops(g)))
- ret = -EINVAL;
-
break;
}
case VIDIOC_S_FMT:
{
struct v4l2_format *f = (struct v4l2_format *)arg;
- if (!have_fmt_ops(s))
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = -EINVAL;
/* FIXME: Should be one dump per type */
@@ -804,6 +833,7 @@ static long __video_do_ioctl(struct file *file,
/* FIXME: Should be one dump per type */
dbgarg(cmd, "type=%s\n", prt_names(f->type,
v4l2_type_names));
+ ret = -EINVAL;
switch (f->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
CLEAR_AFTER_FIELD(f, fmt.pix);
@@ -876,8 +906,6 @@ static long __video_do_ioctl(struct file *file,
fh, f);
break;
}
- if (unlikely(ret == -ENOTTY && have_fmt_ops(try)))
- ret = -EINVAL;
break;
}
/* FIXME: Those buf reqs could be handled here,
@@ -888,12 +916,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_requestbuffers *p = arg;
- if (!ops->vidioc_reqbufs)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -912,8 +934,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_buffer *p = arg;
- if (!ops->vidioc_querybuf)
- break;
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -927,8 +947,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_buffer *p = arg;
- if (!ops->vidioc_qbuf)
- break;
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -942,8 +960,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_buffer *p = arg;
- if (!ops->vidioc_dqbuf)
- break;
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -957,12 +973,6 @@ static long __video_do_ioctl(struct file *file,
{
int *i = arg;
- if (!ops->vidioc_overlay)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_overlay(file, fh, *i);
break;
@@ -971,8 +981,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_framebuffer *p = arg;
- if (!ops->vidioc_g_fbuf)
- break;
ret = ops->vidioc_g_fbuf(file, fh, arg);
if (!ret) {
dbgarg(cmd, "capability=0x%x, flags=%d, base=0x%08lx\n",
@@ -986,12 +994,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_framebuffer *p = arg;
- if (!ops->vidioc_s_fbuf)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "capability=0x%x, flags=%d, base=0x%08lx\n",
p->capability, p->flags, (unsigned long)p->base);
v4l_print_pix_fmt(vfd, &p->fmt);
@@ -1002,12 +1004,6 @@ static long __video_do_ioctl(struct file *file,
{
enum v4l2_buf_type i = *(int *)arg;
- if (!ops->vidioc_streamon)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
ret = ops->vidioc_streamon(file, fh, i);
break;
@@ -1016,12 +1012,6 @@ static long __video_do_ioctl(struct file *file,
{
enum v4l2_buf_type i = *(int *)arg;
- if (!ops->vidioc_streamoff)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
ret = ops->vidioc_streamoff(file, fh, i);
break;
@@ -1091,13 +1081,6 @@ static long __video_do_ioctl(struct file *file,
dbgarg(cmd, "std=%08Lx\n", (long long unsigned)*id);
- if (!ops->vidioc_s_std)
- break;
-
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = -EINVAL;
norm = (*id) & vfd->tvnorms;
if (vfd->tvnorms && !norm) /* Check if std is supported */
@@ -1115,8 +1098,6 @@ static long __video_do_ioctl(struct file *file,
{
v4l2_std_id *p = arg;
- if (!ops->vidioc_querystd)
- break;
/*
* If nothing detected, it should return all supported
* Drivers just need to mask the std argument, in order
@@ -1150,9 +1131,6 @@ static long __video_do_ioctl(struct file *file,
if (ops->vidioc_s_dv_timings)
p->capabilities |= V4L2_IN_CAP_CUSTOM_TIMINGS;
- if (!ops->vidioc_enum_input)
- break;
-
ret = ops->vidioc_enum_input(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1168,8 +1146,6 @@ static long __video_do_ioctl(struct file *file,
{
unsigned int *i = arg;
- if (!ops->vidioc_g_input)
- break;
ret = ops->vidioc_g_input(file, fh, i);
if (!ret)
dbgarg(cmd, "value=%d\n", *i);
@@ -1179,12 +1155,6 @@ static long __video_do_ioctl(struct file *file,
{
unsigned int *i = arg;
- if (!ops->vidioc_s_input)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_s_input(file, fh, *i);
break;
@@ -1195,9 +1165,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_output *p = arg;
- if (!ops->vidioc_enum_output)
- break;
-
/*
* We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
* CAP_STD here based on ioctl handler provided by the
@@ -1224,8 +1191,6 @@ static long __video_do_ioctl(struct file *file,
{
unsigned int *i = arg;
- if (!ops->vidioc_g_output)
- break;
ret = ops->vidioc_g_output(file, fh, i);
if (!ret)
dbgarg(cmd, "value=%d\n", *i);
@@ -1235,12 +1200,6 @@ static long __video_do_ioctl(struct file *file,
{
unsigned int *i = arg;
- if (!ops->vidioc_s_output)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "value=%d\n", *i);
ret = ops->vidioc_s_output(file, fh, *i);
break;
@@ -1310,10 +1269,6 @@ static long __video_do_ioctl(struct file *file,
if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
!ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
@@ -1369,10 +1324,6 @@ static long __video_do_ioctl(struct file *file,
if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
!ops->vidioc_s_ext_ctrls)
break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
v4l_print_ext_ctrls(cmd, vfd, p, 1);
if (vfh && vfh->ctrl_handler)
ret = v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
@@ -1428,8 +1379,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audio *p = arg;
- if (!ops->vidioc_enumaudio)
- break;
ret = ops->vidioc_enumaudio(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
@@ -1443,9 +1392,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audio *p = arg;
- if (!ops->vidioc_g_audio)
- break;
-
ret = ops->vidioc_g_audio(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
@@ -1459,12 +1405,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audio *p = arg;
- if (!ops->vidioc_s_audio)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
"mode=0x%x\n", p->index, p->name,
p->capability, p->mode);
@@ -1475,8 +1415,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audioout *p = arg;
- if (!ops->vidioc_enumaudout)
- break;
dbgarg(cmd, "Enum for index=%d\n", p->index);
ret = ops->vidioc_enumaudout(file, fh, p);
if (!ret)
@@ -1489,9 +1427,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audioout *p = arg;
- if (!ops->vidioc_g_audout)
- break;
-
ret = ops->vidioc_g_audout(file, fh, p);
if (!ret)
dbgarg2("index=%d, name=%s, capability=%d, "
@@ -1503,12 +1438,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_audioout *p = arg;
- if (!ops->vidioc_s_audout)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "index=%d, name=%s, capability=%d, "
"mode=%d\n", p->index, p->name,
p->capability, p->mode);
@@ -1520,8 +1449,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_modulator *p = arg;
- if (!ops->vidioc_g_modulator)
- break;
ret = ops->vidioc_g_modulator(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, "
@@ -1536,12 +1463,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_modulator *p = arg;
- if (!ops->vidioc_s_modulator)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "index=%d, name=%s, capability=%d, "
"rangelow=%d, rangehigh=%d, txsubchans=%d\n",
p->index, p->name, p->capability, p->rangelow,
@@ -1553,9 +1474,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_crop *p = arg;
- if (!ops->vidioc_g_crop && !ops->vidioc_g_selection)
- break;
-
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
if (ops->vidioc_g_crop) {
@@ -1587,13 +1505,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_crop *p = arg;
- if (!ops->vidioc_s_crop && !ops->vidioc_s_selection)
- break;
-
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
dbgrect(vfd, "", &p->c);
@@ -1620,9 +1531,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_selection *p = arg;
- if (!ops->vidioc_g_selection)
- break;
-
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
ret = ops->vidioc_g_selection(file, fh, p);
@@ -1634,13 +1542,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_selection *p = arg;
- if (!ops->vidioc_s_selection)
- break;
-
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
dbgrect(vfd, "", &p->r);
@@ -1653,9 +1554,6 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_cropcap *p = arg;
/*FIXME: Should also show v4l2_fract pixelaspect */
- if (!ops->vidioc_cropcap && !ops->vidioc_g_selection)
- break;
-
dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
if (ops->vidioc_cropcap) {
ret = ops->vidioc_cropcap(file, fh, p);
@@ -1699,9 +1597,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_jpegcompression *p = arg;
- if (!ops->vidioc_g_jpegcomp)
- break;
-
ret = ops->vidioc_g_jpegcomp(file, fh, p);
if (!ret)
dbgarg(cmd, "quality=%d, APPn=%d, "
@@ -1715,12 +1610,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_jpegcompression *p = arg;
- if (!ops->vidioc_g_jpegcomp)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
dbgarg(cmd, "quality=%d, APPn=%d, APP_len=%d, "
"COM_len=%d, jpeg_markers=%d\n",
p->quality, p->APPn, p->APP_len,
@@ -1732,8 +1621,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_enc_idx *p = arg;
- if (!ops->vidioc_g_enc_index)
- break;
ret = ops->vidioc_g_enc_index(file, fh, p);
if (!ret)
dbgarg(cmd, "entries=%d, entries_cap=%d\n",
@@ -1744,12 +1631,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_encoder_cmd *p = arg;
- if (!ops->vidioc_encoder_cmd)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = ops->vidioc_encoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1759,8 +1640,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_encoder_cmd *p = arg;
- if (!ops->vidioc_try_encoder_cmd)
- break;
ret = ops->vidioc_try_encoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1770,12 +1649,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_decoder_cmd *p = arg;
- if (!ops->vidioc_decoder_cmd)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = ops->vidioc_decoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1785,8 +1658,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_decoder_cmd *p = arg;
- if (!ops->vidioc_try_decoder_cmd)
- break;
ret = ops->vidioc_try_decoder_cmd(file, fh, p);
if (!ret)
dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
@@ -1796,8 +1667,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_streamparm *p = arg;
- if (!ops->vidioc_g_parm && !vfd->current_norm)
- break;
if (ops->vidioc_g_parm) {
ret = check_fmt(ops, p->type);
if (ret)
@@ -1811,6 +1680,7 @@ static long __video_do_ioctl(struct file *file,
break;
ret = 0;
+ p->parm.capture.readbuffers = 2;
if (ops->vidioc_g_std)
ret = ops->vidioc_g_std(file, fh, &std);
if (ret == 0)
@@ -1825,12 +1695,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_streamparm *p = arg;
- if (!ops->vidioc_s_parm)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = check_fmt(ops, p->type);
if (ret)
break;
@@ -1843,9 +1707,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_tuner *p = arg;
- if (!ops->vidioc_g_tuner)
- break;
-
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_tuner(file, fh, p);
@@ -1864,12 +1725,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_tuner *p = arg;
- if (!ops->vidioc_s_tuner)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1887,9 +1742,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_frequency *p = arg;
- if (!ops->vidioc_g_frequency)
- break;
-
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_frequency(file, fh, p);
@@ -1903,12 +1755,6 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_frequency *p = arg;
enum v4l2_tuner_type type;
- if (!ops->vidioc_s_frequency)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
@@ -1923,9 +1769,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_sliced_vbi_cap *p = arg;
- if (!ops->vidioc_g_sliced_vbi_cap)
- break;
-
/* Clear up to type, everything after type is zerod already */
memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
@@ -1937,8 +1780,6 @@ static long __video_do_ioctl(struct file *file,
}
case VIDIOC_LOG_STATUS:
{
- if (!ops->vidioc_log_status)
- break;
if (vfd->v4l2_dev)
pr_info("%s: ================= START STATUS =================\n",
vfd->v4l2_dev->name);
@@ -1948,38 +1789,34 @@ static long __video_do_ioctl(struct file *file,
vfd->v4l2_dev->name);
break;
}
-#ifdef CONFIG_VIDEO_ADV_DEBUG
case VIDIOC_DBG_G_REGISTER:
{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
- if (ops->vidioc_g_register) {
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else
- ret = ops->vidioc_g_register(file, fh, p);
- }
+ if (!capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ret = ops->vidioc_g_register(file, fh, p);
+#endif
break;
}
case VIDIOC_DBG_S_REGISTER:
{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
- if (ops->vidioc_s_register) {
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else
- ret = ops->vidioc_s_register(file, fh, p);
- }
+ if (!capable(CAP_SYS_ADMIN))
+ ret = -EPERM;
+ else
+ ret = ops->vidioc_s_register(file, fh, p);
+#endif
break;
}
-#endif
case VIDIOC_DBG_G_CHIP_IDENT:
{
struct v4l2_dbg_chip_ident *p = arg;
- if (!ops->vidioc_g_chip_ident)
- break;
p->ident = V4L2_IDENT_NONE;
p->revision = 0;
ret = ops->vidioc_g_chip_ident(file, fh, p);
@@ -1992,12 +1829,6 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_hw_freq_seek *p = arg;
enum v4l2_tuner_type type;
- if (!ops->vidioc_s_hw_freq_seek)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd,
@@ -2013,9 +1844,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_frmsizeenum *p = arg;
- if (!ops->vidioc_enum_framesizes)
- break;
-
ret = ops->vidioc_enum_framesizes(file, fh, p);
dbgarg(cmd,
"index=%d, pixelformat=%c%c%c%c, type=%d ",
@@ -2049,9 +1877,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_frmivalenum *p = arg;
- if (!ops->vidioc_enum_frameintervals)
- break;
-
ret = ops->vidioc_enum_frameintervals(file, fh, p);
dbgarg(cmd,
"index=%d, pixelformat=%d, width=%d, height=%d, type=%d ",
@@ -2084,9 +1909,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_enum_preset *p = arg;
- if (!ops->vidioc_enum_dv_presets)
- break;
-
ret = ops->vidioc_enum_dv_presets(file, fh, p);
if (!ret)
dbgarg(cmd,
@@ -2100,13 +1922,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_preset *p = arg;
- if (!ops->vidioc_s_dv_preset)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
-
dbgarg(cmd, "preset=%d\n", p->preset);
ret = ops->vidioc_s_dv_preset(file, fh, p);
break;
@@ -2115,9 +1930,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_preset *p = arg;
- if (!ops->vidioc_g_dv_preset)
- break;
-
ret = ops->vidioc_g_dv_preset(file, fh, p);
if (!ret)
dbgarg(cmd, "preset=%d\n", p->preset);
@@ -2127,9 +1939,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_preset *p = arg;
- if (!ops->vidioc_query_dv_preset)
- break;
-
ret = ops->vidioc_query_dv_preset(file, fh, p);
if (!ret)
dbgarg(cmd, "preset=%d\n", p->preset);
@@ -2139,32 +1948,13 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_timings *p = arg;
- if (!ops->vidioc_s_dv_timings)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
-
+ dbgtimings(vfd, p);
switch (p->type) {
case V4L2_DV_BT_656_1120:
- dbgarg2("bt-656/1120:interlaced=%d, pixelclock=%lld,"
- " width=%d, height=%d, polarities=%x,"
- " hfrontporch=%d, hsync=%d, hbackporch=%d,"
- " vfrontporch=%d, vsync=%d, vbackporch=%d,"
- " il_vfrontporch=%d, il_vsync=%d,"
- " il_vbackporch=%d\n",
- p->bt.interlaced, p->bt.pixelclock,
- p->bt.width, p->bt.height, p->bt.polarities,
- p->bt.hfrontporch, p->bt.hsync,
- p->bt.hbackporch, p->bt.vfrontporch,
- p->bt.vsync, p->bt.vbackporch,
- p->bt.il_vfrontporch, p->bt.il_vsync,
- p->bt.il_vbackporch);
ret = ops->vidioc_s_dv_timings(file, fh, p);
break;
default:
- dbgarg2("Unknown type %d!\n", p->type);
+ ret = -EINVAL;
break;
}
break;
@@ -2173,43 +1963,68 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_dv_timings *p = arg;
- if (!ops->vidioc_g_dv_timings)
+ ret = ops->vidioc_g_dv_timings(file, fh, p);
+ if (!ret)
+ dbgtimings(vfd, p);
+ break;
+ }
+ case VIDIOC_ENUM_DV_TIMINGS:
+ {
+ struct v4l2_enum_dv_timings *p = arg;
+
+ if (!ops->vidioc_enum_dv_timings)
break;
- ret = ops->vidioc_g_dv_timings(file, fh, p);
+ ret = ops->vidioc_enum_dv_timings(file, fh, p);
if (!ret) {
- switch (p->type) {
- case V4L2_DV_BT_656_1120:
- dbgarg2("bt-656/1120:interlaced=%d,"
- " pixelclock=%lld,"
- " width=%d, height=%d, polarities=%x,"
- " hfrontporch=%d, hsync=%d,"
- " hbackporch=%d, vfrontporch=%d,"
- " vsync=%d, vbackporch=%d,"
- " il_vfrontporch=%d, il_vsync=%d,"
- " il_vbackporch=%d\n",
- p->bt.interlaced, p->bt.pixelclock,
- p->bt.width, p->bt.height,
- p->bt.polarities, p->bt.hfrontporch,
- p->bt.hsync, p->bt.hbackporch,
- p->bt.vfrontporch, p->bt.vsync,
- p->bt.vbackporch, p->bt.il_vfrontporch,
- p->bt.il_vsync, p->bt.il_vbackporch);
- break;
- default:
- dbgarg2("Unknown type %d!\n", p->type);
- break;
- }
+ dbgarg(cmd, "index=%d: ", p->index);
+ dbgtimings(vfd, &p->timings);
}
break;
}
- case VIDIOC_DQEVENT:
+ case VIDIOC_QUERY_DV_TIMINGS:
{
- struct v4l2_event *ev = arg;
+ struct v4l2_dv_timings *p = arg;
+
+ if (!ops->vidioc_query_dv_timings)
+ break;
- if (!ops->vidioc_subscribe_event)
+ ret = ops->vidioc_query_dv_timings(file, fh, p);
+ if (!ret)
+ dbgtimings(vfd, p);
+ break;
+ }
+ case VIDIOC_DV_TIMINGS_CAP:
+ {
+ struct v4l2_dv_timings_cap *p = arg;
+
+ if (!ops->vidioc_dv_timings_cap)
break;
+ ret = ops->vidioc_dv_timings_cap(file, fh, p);
+ if (ret)
+ break;
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ dbgarg(cmd,
+ "type=%d, width=%u-%u, height=%u-%u, "
+ "pixelclock=%llu-%llu, standards=%x, capabilities=%x ",
+ p->type,
+ p->bt.min_width, p->bt.max_width,
+ p->bt.min_height, p->bt.max_height,
+ p->bt.min_pixelclock, p->bt.max_pixelclock,
+ p->bt.standards, p->bt.capabilities);
+ break;
+ default:
+ dbgarg(cmd, "unknown type ");
+ break;
+ }
+ break;
+ }
+ case VIDIOC_DQEVENT:
+ {
+ struct v4l2_event *ev = arg;
+
ret = v4l2_event_dequeue(fh, ev, file->f_flags & O_NONBLOCK);
if (ret < 0) {
dbgarg(cmd, "no pending events?");
@@ -2226,9 +2041,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_event_subscription *sub = arg;
- if (!ops->vidioc_subscribe_event)
- break;
-
ret = ops->vidioc_subscribe_event(fh, sub);
if (ret < 0) {
dbgarg(cmd, "failed, ret=%ld", ret);
@@ -2241,9 +2053,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_event_subscription *sub = arg;
- if (!ops->vidioc_unsubscribe_event)
- break;
-
ret = ops->vidioc_unsubscribe_event(fh, sub);
if (ret < 0) {
dbgarg(cmd, "failed, ret=%ld", ret);
@@ -2256,12 +2065,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_create_buffers *create = arg;
- if (!ops->vidioc_create_bufs)
- break;
- if (ret_prio) {
- ret = ret_prio;
- break;
- }
ret = check_fmt(ops, create->format.type);
if (ret)
break;
@@ -2275,8 +2078,6 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_buffer *b = arg;
- if (!ops->vidioc_prepare_buf)
- break;
ret = check_fmt(ops, b->type);
if (ret)
break;
@@ -2289,7 +2090,9 @@ static long __video_do_ioctl(struct file *file,
default:
if (!ops->vidioc_default)
break;
- ret = ops->vidioc_default(file, fh, ret_prio >= 0, cmd, arg);
+ ret = ops->vidioc_default(file, fh, use_fh_prio ?
+ v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
+ cmd, arg);
break;
} /* switch */
@@ -2463,7 +2266,9 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
err = -EFAULT;
goto out_array_args;
}
- if (err < 0)
+ /* VIDIOC_QUERY_DV_TIMINGS can return an error, but still have valid
+ results that must be returned. */
+ if (err < 0 && cmd != VIDIOC_QUERY_DV_TIMINGS)
goto out;
out_array_args:
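
The v4l2-ioctl.c hunks above replace the flat array of ioctl name strings with a v4l2_ioctl_info table that also carries per-ioctl flags (INFO_FL_PRIO, INFO_FL_CTRL), so the priority check and the "is this ioctl implemented at all" check move out of the individual case blocks and into one place before the switch. A minimal standalone sketch of the same table-driven pattern follows; the command numbers, names and error values are invented for illustration, not the real VIDIOC_* ones.

/* Table-driven command dispatch: one entry per command, flags checked
 * centrally instead of in every case block. */
#include <stdbool.h>
#include <stdio.h>

#define CMD_GET 0			/* hypothetical "get" command */
#define CMD_SET 1			/* hypothetical "set" command */

#define FL_PRIO (1 << 0)		/* command requires a priority check */

struct cmd_info {
	unsigned int cmd;
	unsigned int flags;
	const char *name;
};

#define CMD_INFO(_cmd, _flags) \
	[_cmd] = { .cmd = _cmd, .flags = _flags, .name = #_cmd }

static const struct cmd_info cmd_table[] = {
	CMD_INFO(CMD_GET, 0),
	CMD_INFO(CMD_SET, FL_PRIO),
};

#define NCMDS (sizeof(cmd_table) / sizeof(cmd_table[0]))

static bool is_known_cmd(unsigned int cmd)
{
	/* Holes in the table are zero-filled; comparing the stored command
	 * number filters them out (the kernel stores the full, non-zero
	 * ioctl number for the same reason). */
	return cmd < NCMDS && cmd_table[cmd].cmd == cmd;
}

static int do_cmd(unsigned int cmd, int caller_prio, int owner_prio)
{
	if (!is_known_cmd(cmd))
		return -1;	/* "unknown ioctl"; -ENOTTY in the kernel */
	if ((cmd_table[cmd].flags & FL_PRIO) && caller_prio < owner_prio)
		return -2;	/* central priority check failed */
	printf("dispatching %s\n", cmd_table[cmd].name);
	return 0;
}

int main(void)
{
	do_cmd(CMD_GET, 0, 1);	/* ok: no priority required */
	do_cmd(CMD_SET, 0, 1);	/* refused: caller priority too low */
	return 0;
}
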
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index 6fe88e965a8c..db6e859b93d4 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -35,14 +35,9 @@
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- /* Allocate try format and crop in the same memory block */
- fh->try_fmt = kzalloc((sizeof(*fh->try_fmt) + sizeof(*fh->try_crop))
- * sd->entity.num_pads, GFP_KERNEL);
- if (fh->try_fmt == NULL)
+ fh->pad = kzalloc(sizeof(*fh->pad) * sd->entity.num_pads, GFP_KERNEL);
+ if (fh->pad == NULL)
return -ENOMEM;
-
- fh->try_crop = (struct v4l2_rect *)
- (fh->try_fmt + sd->entity.num_pads);
#endif
return 0;
}
@@ -50,9 +45,8 @@ static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- kfree(fh->try_fmt);
- fh->try_fmt = NULL;
- fh->try_crop = NULL;
+ kfree(fh->pad);
+ fh->pad = NULL;
#endif
}
@@ -234,6 +228,8 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_G_CROP: {
struct v4l2_subdev_crop *crop = arg;
+ struct v4l2_subdev_selection sel;
+ int rval;
if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -242,11 +238,27 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (crop->pad >= sd->entity.num_pads)
return -EINVAL;
- return v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
+ rval = v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
+ if (rval != -ENOIOCTLCMD)
+ return rval;
+
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
+ sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+
+ rval = v4l2_subdev_call(
+ sd, pad, get_selection, subdev_fh, &sel);
+
+ crop->rect = sel.r;
+
+ return rval;
}
case VIDIOC_SUBDEV_S_CROP: {
struct v4l2_subdev_crop *crop = arg;
+ struct v4l2_subdev_selection sel;
+ int rval;
if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -255,7 +267,22 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (crop->pad >= sd->entity.num_pads)
return -EINVAL;
- return v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
+ rval = v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
+ if (rval != -ENOIOCTLCMD)
+ return rval;
+
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
+ sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+ sel.r = crop->rect;
+
+ rval = v4l2_subdev_call(
+ sd, pad, set_selection, subdev_fh, &sel);
+
+ crop->rect = sel.r;
+
+ return rval;
}
case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
@@ -293,6 +320,34 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh,
fie);
}
+
+ case VIDIOC_SUBDEV_G_SELECTION: {
+ struct v4l2_subdev_selection *sel = arg;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
+ sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (sel->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(
+ sd, pad, get_selection, subdev_fh, sel);
+ }
+
+ case VIDIOC_SUBDEV_S_SELECTION: {
+ struct v4l2_subdev_selection *sel = arg;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
+ sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ if (sel->pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ return v4l2_subdev_call(
+ sd, pad, set_selection, subdev_fh, sel);
+ }
#endif
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
@@ -332,6 +387,70 @@ const struct v4l2_file_operations v4l2_subdev_fops = {
.poll = subdev_poll,
};
+#ifdef CONFIG_MEDIA_CONTROLLER
+int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ if (source_fmt->format.width != sink_fmt->format.width
+ || source_fmt->format.height != sink_fmt->format.height
+ || source_fmt->format.code != sink_fmt->format.code)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
+
+static int
+v4l2_subdev_link_validate_get_format(struct media_pad *pad,
+ struct v4l2_subdev_format *fmt)
+{
+ switch (media_entity_type(pad->entity)) {
+ case MEDIA_ENT_T_V4L2_SUBDEV:
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+ return v4l2_subdev_call(media_entity_to_v4l2_subdev(
+ pad->entity),
+ pad, get_fmt, NULL, fmt);
+ default:
+ WARN(1, "Driver bug! Wrong media entity type %d, entity %s\n",
+ media_entity_type(pad->entity), pad->entity->name);
+ /* Fall through */
+ case MEDIA_ENT_T_DEVNODE_V4L:
+ return -EINVAL;
+ }
+}
+
+int v4l2_subdev_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev *sink;
+ struct v4l2_subdev_format sink_fmt, source_fmt;
+ int rval;
+
+ rval = v4l2_subdev_link_validate_get_format(
+ link->source, &source_fmt);
+ if (rval < 0)
+ return 0;
+
+ rval = v4l2_subdev_link_validate_get_format(
+ link->sink, &sink_fmt);
+ if (rval < 0)
+ return 0;
+
+ sink = media_entity_to_v4l2_subdev(link->sink->entity);
+
+ rval = v4l2_subdev_call(sink, pad, link_validate, link,
+ &source_fmt, &sink_fmt);
+ if (rval != -ENOIOCTLCMD)
+ return rval;
+
+ return v4l2_subdev_link_validate_default(
+ sink, link, &source_fmt, &sink_fmt);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
INIT_LIST_HEAD(&sd->list);
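
The v4l2-subdev.c hunks above let VIDIOC_SUBDEV_G_CROP/S_CROP fall back to the new get_selection/set_selection pad ops when the driver returns -ENOIOCTLCMD, and v4l2_subdev_link_validate() tries the sink subdev's own link_validate op before falling back to the default width/height/code comparison. A small standalone sketch of that "specific op first, generic op as fallback" pattern; the function names and the error value are made up for illustration.

#include <stdio.h>

#define ENOIMPL 1000	/* stand-in for -ENOIOCTLCMD */

struct rect { int w, h; };

/* Hypothetical driver op: this driver has no dedicated crop op. */
static int driver_get_crop(struct rect *r)
{
	(void)r;
	return -ENOIMPL;
}

/* Hypothetical generic selection op that is always implemented. */
static int driver_get_selection(struct rect *r)
{
	r->w = 640;
	r->h = 480;
	return 0;
}

static int get_crop(struct rect *r)
{
	int ret = driver_get_crop(r);

	if (ret != -ENOIMPL)
		return ret;		/* driver handled (or failed) it */
	return driver_get_selection(r);	/* fall back to the generic op */
}

int main(void)
{
	struct rect r;

	if (!get_crop(&r))
		printf("crop %dx%d\n", r.w, r.h);
	return 0;
}
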
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 20f7237b8242..308e150a39bc 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -18,6 +18,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
+#include <media/ov7670.h>
#include <media/videobuf-dma-sg.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -1347,11 +1348,21 @@ static __devinit bool viacam_serial_is_enabled(void)
return false;
}
+static struct ov7670_config sensor_cfg = {
+ /* The XO-1.5 (only known user) clocks the camera at 90MHz. */
+ .clock_speed = 90,
+};
+
static __devinit int viacam_probe(struct platform_device *pdev)
{
int ret;
struct i2c_adapter *sensor_adapter;
struct viafb_dev *viadev = pdev->dev.platform_data;
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42 >> 1,
+ .platform_data = &sensor_cfg,
+ };
/*
* Note that there are actually two capture channels on
@@ -1433,8 +1444,8 @@ static __devinit int viacam_probe(struct platform_device *pdev)
* is OLPC-specific. 0x42 assumption is ov7670-specific.
*/
sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31);
- cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, sensor_adapter,
- "ov7670", 0x42 >> 1, NULL);
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, sensor_adapter,
+ &ov7670_info, NULL);
if (cam->sensor == NULL) {
dev_err(&pdev->dev, "Unable to find the sensor!\n");
ret = -ENODEV;
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index de4fa4eb8844..ffdf59cfe405 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -1129,6 +1129,7 @@ unsigned int videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct videobuf_buffer *buf = NULL;
unsigned int rc = 0;
@@ -1137,7 +1138,7 @@ unsigned int videobuf_poll_stream(struct file *file,
if (!list_empty(&q->stream))
buf = list_entry(q->stream.next,
struct videobuf_buffer, stream);
- } else {
+ } else if (req_events & (POLLIN | POLLRDNORM)) {
if (!q->reading)
__videobuf_read_start(q);
if (!q->reading) {
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index c9691115f2d2..b6b5cc1a43cb 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -27,6 +27,7 @@ struct videobuf_dma_contig_memory {
u32 magic;
void *vaddr;
dma_addr_t dma_handle;
+ bool cached;
unsigned long size;
};
@@ -37,8 +38,58 @@ struct videobuf_dma_contig_memory {
BUG(); \
}
-static void
-videobuf_vm_open(struct vm_area_struct *vma)
+static int __videobuf_dc_alloc(struct device *dev,
+ struct videobuf_dma_contig_memory *mem,
+ unsigned long size, unsigned long flags)
+{
+ mem->size = size;
+ if (mem->cached) {
+ mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
+ if (mem->vaddr) {
+ int err;
+
+ mem->dma_handle = dma_map_single(dev, mem->vaddr,
+ mem->size,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(dev, mem->dma_handle);
+ if (err) {
+ dev_err(dev, "dma_map_single failed\n");
+
+ free_pages_exact(mem->vaddr, mem->size);
+ mem->vaddr = 0;
+ return err;
+ }
+ }
+ } else
+ mem->vaddr = dma_alloc_coherent(dev, mem->size,
+ &mem->dma_handle, flags);
+
+ if (!mem->vaddr) {
+ dev_err(dev, "memory alloc size %ld failed\n", mem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
+
+ return 0;
+}
+
+static void __videobuf_dc_free(struct device *dev,
+ struct videobuf_dma_contig_memory *mem)
+{
+ if (mem->cached) {
+ if (!mem->vaddr)
+ return;
+ dma_unmap_single(dev, mem->dma_handle, mem->size,
+ DMA_FROM_DEVICE);
+ free_pages_exact(mem->vaddr, mem->size);
+ } else
+ dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
+
+ mem->vaddr = NULL;
+}
+
+static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
@@ -91,12 +142,11 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dev_dbg(q->dev, "buf[%d] freeing %p\n",
i, mem->vaddr);
- dma_free_coherent(q->dev, mem->size,
- mem->vaddr, mem->dma_handle);
+ __videobuf_dc_free(q->dev, mem);
mem->vaddr = NULL;
}
- q->bufs[i]->map = NULL;
+ q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
}
@@ -107,8 +157,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
}
static const struct vm_operations_struct videobuf_vm_ops = {
- .open = videobuf_vm_open,
- .close = videobuf_vm_close,
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
};
/**
@@ -178,26 +228,38 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
pages_done++;
}
- out_up:
+out_up:
up_read(&current->mm->mmap_sem);
return ret;
}
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (vb) {
- mem = vb->priv = ((char *)vb) + size;
+ vb->priv = ((char *)vb) + size;
+ mem = vb->priv;
mem->magic = MAGIC_DC_MEM;
+ mem->cached = cached;
}
return vb;
}
+static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
+{
+ return __videobuf_alloc_vb(size, false);
+}
+
+static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
+{
+ return __videobuf_alloc_vb(size, true);
+}
+
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
@@ -235,28 +297,32 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return videobuf_dma_contig_user_get(mem, vb);
/* allocate memory for the read() method */
- mem->size = PAGE_ALIGN(vb->size);
- mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
- &mem->dma_handle, GFP_KERNEL);
- if (!mem->vaddr) {
- dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
- mem->size);
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
+ GFP_KERNEL))
return -ENOMEM;
- }
-
- dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
- mem->vaddr, mem->size);
break;
case V4L2_MEMORY_OVERLAY:
default:
- dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
- __func__);
+ dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
return -EINVAL;
}
return 0;
}
+static int __videobuf_sync(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_contig_memory *mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
+ DMA_FROM_DEVICE);
+
+ return 0;
+}
+
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_buffer *buf,
struct vm_area_struct *vma)
@@ -265,6 +331,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_mapping *map;
int retval;
unsigned long size;
+ unsigned long pos, start = vma->vm_start;
+ struct page *page;
dev_dbg(q->dev, "%s\n", __func__);
@@ -282,41 +350,50 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
- mem->size = PAGE_ALIGN(buf->bsize);
- mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
- &mem->dma_handle, GFP_KERNEL);
- if (!mem->vaddr) {
- dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
- mem->size);
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
+ GFP_KERNEL | __GFP_COMP))
goto error;
- }
- dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
- mem->vaddr, mem->size);
/* Try to remap memory */
size = vma->vm_end - vma->vm_start;
size = (size < mem->size) ? size : mem->size;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- retval = remap_pfn_range(vma, vma->vm_start,
- mem->dma_handle >> PAGE_SHIFT,
- size, vma->vm_page_prot);
- if (retval) {
- dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
- dma_free_coherent(q->dev, mem->size,
- mem->vaddr, mem->dma_handle);
- goto error;
+ if (!mem->cached)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ pos = (unsigned long)mem->vaddr;
+
+ while (size > 0) {
+ page = virt_to_page((void *)pos);
+ if (NULL == page) {
+ dev_err(q->dev, "mmap: virt_to_page failed\n");
+ __videobuf_dc_free(q->dev, mem);
+ goto error;
+ }
+ retval = vm_insert_page(vma, start, page);
+ if (retval) {
+ dev_err(q->dev, "mmap: insert failed with error %d\n",
+ retval);
+ __videobuf_dc_free(q->dev, mem);
+ goto error;
+ }
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+
+ if (size > PAGE_SIZE)
+ size -= PAGE_SIZE;
+ else
+ size = 0;
}
- vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND;
+ vma->vm_ops = &videobuf_vm_ops;
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = map;
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
- (long int)buf->bsize,
- vma->vm_pgoff, buf->i);
+ (long int)buf->bsize, vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
@@ -328,12 +405,20 @@ error:
}
static struct videobuf_qtype_ops qops = {
- .magic = MAGIC_QTYPE_OPS,
+ .magic = MAGIC_QTYPE_OPS,
+ .alloc_vb = __videobuf_alloc_uncached,
+ .iolock = __videobuf_iolock,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = __videobuf_to_vaddr,
+};
- .alloc_vb = __videobuf_alloc_vb,
- .iolock = __videobuf_iolock,
- .mmap_mapper = __videobuf_mmap_mapper,
- .vaddr = __videobuf_to_vaddr,
+static struct videobuf_qtype_ops qops_cached = {
+ .magic = MAGIC_QTYPE_OPS,
+ .alloc_vb = __videobuf_alloc_cached,
+ .iolock = __videobuf_iolock,
+ .sync = __videobuf_sync,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = __videobuf_to_vaddr,
};
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
@@ -351,6 +436,20 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
+void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv, struct mutex *ext_lock)
+{
+ videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+ priv, &qops_cached, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);
+
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
@@ -389,7 +488,7 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
/* read() method */
if (mem->vaddr) {
- dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
+ __videobuf_dc_free(q->dev, mem);
mem->vaddr = NULL;
}
}
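
The videobuf-dma-contig.c hunks above add a cached allocation mode: alloc_pages_exact() plus dma_map_single() instead of dma_alloc_coherent(), with a new __videobuf_sync() hook, because a streaming DMA mapping must be synced before the CPU reads what the device wrote. A kernel-style fragment (not a standalone program; the function name is invented) showing that rule:

#include <linux/dma-mapping.h>

/* A buffer mapped with dma_map_single() is a streaming mapping, so the CPU
 * may only look at the data after dma_sync_single_for_cpu(); buffers from
 * dma_alloc_coherent() need no such step. */
static void *frame_for_cpu(struct device *dev, void *vaddr,
			   dma_addr_t handle, size_t size, bool cached)
{
	if (cached)
		dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
	return vaddr;		/* now safe for the CPU to read */
}
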
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 59cb54aa2946..94d83a41381b 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -45,7 +45,6 @@ static int videobuf_dvb_thread(void *data)
struct videobuf_dvb *dvb = data;
struct videobuf_buffer *buf;
unsigned long flags;
- int err;
void *outp;
dprintk("dvb thread started\n");
@@ -57,7 +56,7 @@ static int videobuf_dvb_thread(void *data)
buf = list_entry(dvb->dvbq.stream.next,
struct videobuf_buffer, stream);
list_del(&buf->stream);
- err = videobuf_waiton(&dvb->dvbq, buf, 0, 1);
+ videobuf_waiton(&dvb->dvbq, buf, 0, 1);
/* no more feeds left or stop_feed() asked us to quit */
if (0 == dvb->nfeeds)
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 2e8f1df775b6..9d4e9edbd2e7 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -19,6 +19,9 @@
#include <linux/slab.h>
#include <linux/sched.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/videobuf2-core.h>
static int debug;
@@ -1642,32 +1645,46 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q);
* For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
* will be reported as available for writing.
*
+ * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
+ * pending events.
+ *
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
- unsigned long flags;
- unsigned int ret;
+ struct video_device *vfd = video_devdata(file);
+ unsigned long req_events = poll_requested_events(wait);
struct vb2_buffer *vb = NULL;
+ unsigned int res = 0;
+ unsigned long flags;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ if (v4l2_event_pending(fh))
+ res = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->wait, wait);
+ }
/*
* Start file I/O emulator only if streaming API has not been used yet.
*/
if (q->num_buffers == 0 && q->fileio == NULL) {
- if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ)) {
- ret = __vb2_init_fileio(q, 1);
- if (ret)
- return POLLERR;
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
+ (req_events & (POLLIN | POLLRDNORM))) {
+ if (__vb2_init_fileio(q, 1))
+ return res | POLLERR;
}
- if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE)) {
- ret = __vb2_init_fileio(q, 0);
- if (ret)
- return POLLERR;
+ if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
+ (req_events & (POLLOUT | POLLWRNORM))) {
+ if (__vb2_init_fileio(q, 0))
+ return res | POLLERR;
/*
* Write to OUTPUT queue can be done immediately.
*/
- return POLLOUT | POLLWRNORM;
+ return res | POLLOUT | POLLWRNORM;
}
}
@@ -1675,7 +1692,7 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
* There is nothing to wait for if no buffers have already been queued.
*/
if (list_empty(&q->queued_list))
- return POLLERR;
+ return res | POLLERR;
poll_wait(file, &q->done_wq, wait);
@@ -1690,10 +1707,11 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
if (vb && (vb->state == VB2_BUF_STATE_DONE
|| vb->state == VB2_BUF_STATE_ERROR)) {
- return (V4L2_TYPE_IS_OUTPUT(q->type)) ? POLLOUT | POLLWRNORM :
- POLLIN | POLLRDNORM;
+ return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
+ res | POLLOUT | POLLWRNORM :
+ res | POLLIN | POLLRDNORM;
}
- return 0;
+ return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);
@@ -1839,7 +1857,6 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
* (multiplane buffers are not supported).
*/
if (q->bufs[0]->num_planes != 1) {
- fileio->req.count = 0;
ret = -EBUSY;
goto err_reqbufs;
}
@@ -1886,6 +1903,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
return ret;
err_reqbufs:
+ fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
err_kfree:
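
The videobuf2-core.c hunks above make vb2_poll() look at poll_requested_events(): pending V4L2 events are reported as POLLPRI, and the read/write file-I/O emulation is only set up when the caller actually asked for POLLIN/POLLRDNORM (or POLLOUT/POLLWRNORM). A toy userspace sketch of that idea; it is illustrative only, and the helpers stand in for the kernel ones.

#include <poll.h>
#include <stdbool.h>
#include <stdio.h>

static bool event_pending = true;	/* pretend an event is queued */
static bool fileio_started;

static short my_poll(short req_events)
{
	short res = 0;

	if (event_pending)
		res |= POLLPRI;		/* events are reported regardless */

	/* Only do the expensive read-emulation setup if the caller cares
	 * about readable data (stands in for __vb2_init_fileio()). */
	if ((req_events & (POLLIN | POLLRDNORM)) && !fileio_started) {
		fileio_started = true;
		printf("read emulation started\n");
	}
	return res;
}

int main(void)
{
	printf("poll(POLLPRI) -> %#x\n", my_poll(POLLPRI));
	printf("poll(POLLIN)  -> %#x\n", my_poll(POLLIN));
	return 0;
}
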
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 4d7391ec8001..aae1720b2f2d 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -2561,7 +2561,7 @@ static int vino_acquire_input(struct vino_channel_settings *vcs)
} else if (vino_drvdata->decoder
&& (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
int input;
- int data_norm;
+ int data_norm = 0;
v4l2_std_id norm;
input = VINO_INPUT_COMPOSITE;
@@ -2651,7 +2651,7 @@ static int vino_set_input(struct vino_channel_settings *vcs, int input)
}
if (vino_drvdata->decoder_owner == vcs->channel) {
- int data_norm;
+ int data_norm = 0;
v4l2_std_id norm;
ret = decoder_call(video, s_routing,
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 5e8b0710105b..08c10240e70f 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -95,6 +95,16 @@ static struct vivi_fmt formats[] = {
.depth = 16,
},
{
+ .name = "4:2:2, packed, YVYU",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = 16,
+ },
+ {
+ .name = "4:2:2, packed, VYUY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = 16,
+ },
+ {
.name = "RGB565 (LE)",
.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
.depth = 16,
@@ -114,6 +124,26 @@ static struct vivi_fmt formats[] = {
.fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
.depth = 16,
},
+ {
+ .name = "RGB24 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
+ .depth = 24,
+ },
+ {
+ .name = "RGB24 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
+ .depth = 24,
+ },
+ {
+ .name = "RGB32 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
+ .depth = 32,
+ },
+ {
+ .name = "RGB32 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR32, /* bgra */
+ .depth = 32,
+ },
};
static struct vivi_fmt *get_format(struct v4l2_format *f)
@@ -170,6 +200,7 @@ struct vivi_dev {
struct v4l2_ctrl *gain;
};
struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *alpha;
struct v4l2_ctrl *button;
struct v4l2_ctrl *boolean;
struct v4l2_ctrl *int32;
@@ -177,6 +208,7 @@ struct vivi_dev {
struct v4l2_ctrl *menu;
struct v4l2_ctrl *string;
struct v4l2_ctrl *bitmask;
+ struct v4l2_ctrl *int_menu;
spinlock_t slock;
struct mutex mutex;
@@ -203,8 +235,10 @@ struct vivi_dev {
enum v4l2_field field;
unsigned int field_count;
- u8 bars[9][3];
- u8 line[MAX_WIDTH * 4];
+ u8 bars[9][3];
+ u8 line[MAX_WIDTH * 8];
+ unsigned int pixelsize;
+ u8 alpha_component;
};
/* ------------------------------------------------------------------
@@ -283,6 +317,8 @@ static void precalculate_bars(struct vivi_dev *dev)
switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
is_yuv = 1;
break;
case V4L2_PIX_FMT_RGB565:
@@ -297,6 +333,11 @@ static void precalculate_bars(struct vivi_dev *dev)
g >>= 3;
b >>= 3;
break;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ break;
}
if (is_yuv) {
@@ -316,9 +357,11 @@ static void precalculate_bars(struct vivi_dev *dev)
#define TSTAMP_INPUT_X 10
#define TSTAMP_MIN_X (54 + TSTAMP_INPUT_X)
-static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
+/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */
+static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos, bool odd)
{
u8 r_y, g_u, b_v;
+ u8 alpha = dev->alpha_component;
int color;
u8 *p;
@@ -326,46 +369,56 @@ static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
g_u = dev->bars[colorpos][1]; /* G or precalculated U */
b_v = dev->bars[colorpos][2]; /* B or precalculated V */
- for (color = 0; color < 4; color++) {
+ for (color = 0; color < dev->pixelsize; color++) {
p = buf + color;
switch (dev->fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
switch (color) {
case 0:
- case 2:
*p = r_y;
break;
case 1:
- *p = g_u;
- break;
- case 3:
- *p = b_v;
+ *p = odd ? b_v : g_u;
break;
}
break;
case V4L2_PIX_FMT_UYVY:
switch (color) {
+ case 0:
+ *p = odd ? b_v : g_u;
+ break;
case 1:
- case 3:
*p = r_y;
break;
+ }
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ switch (color) {
case 0:
- *p = g_u;
+ *p = r_y;
break;
- case 2:
- *p = b_v;
+ case 1:
+ *p = odd ? g_u : b_v;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ switch (color) {
+ case 0:
+ *p = odd ? g_u : b_v;
+ break;
+ case 1:
+ *p = r_y;
break;
}
break;
case V4L2_PIX_FMT_RGB565:
switch (color) {
case 0:
- case 2:
*p = (g_u << 5) | b_v;
break;
case 1:
- case 3:
*p = (r_y << 3) | (g_u >> 3);
break;
}
@@ -373,11 +426,9 @@ static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
case V4L2_PIX_FMT_RGB565X:
switch (color) {
case 0:
- case 2:
*p = (r_y << 3) | (g_u >> 3);
break;
case 1:
- case 3:
*p = (g_u << 5) | b_v;
break;
}
@@ -385,24 +436,78 @@ static void gen_twopix(struct vivi_dev *dev, u8 *buf, int colorpos)
case V4L2_PIX_FMT_RGB555:
switch (color) {
case 0:
- case 2:
*p = (g_u << 5) | b_v;
break;
case 1:
- case 3:
- *p = (r_y << 2) | (g_u >> 3);
+ *p = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
break;
}
break;
case V4L2_PIX_FMT_RGB555X:
switch (color) {
case 0:
+ *p = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+ break;
+ case 1:
+ *p = (g_u << 5) | b_v;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ switch (color) {
+ case 0:
+ *p = r_y;
+ break;
+ case 1:
+ *p = g_u;
+ break;
+ case 2:
+ *p = b_v;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ switch (color) {
+ case 0:
+ *p = b_v;
+ break;
+ case 1:
+ *p = g_u;
+ break;
+ case 2:
+ *p = r_y;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ switch (color) {
+ case 0:
+ *p = alpha;
+ break;
+ case 1:
+ *p = r_y;
+ break;
case 2:
- *p = (r_y << 2) | (g_u >> 3);
+ *p = g_u;
+ break;
+ case 3:
+ *p = b_v;
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ switch (color) {
+ case 0:
+ *p = b_v;
break;
case 1:
+ *p = g_u;
+ break;
+ case 2:
+ *p = r_y;
+ break;
case 3:
- *p = (g_u << 5) | b_v;
+ *p = alpha;
break;
}
break;
@@ -414,10 +519,10 @@ static void precalculate_line(struct vivi_dev *dev)
{
int w;
- for (w = 0; w < dev->width * 2; w += 2) {
- int colorpos = (w / (dev->width / 8) % 8);
+ for (w = 0; w < dev->width * 2; w++) {
+ int colorpos = w / (dev->width / 8) % 8;
- gen_twopix(dev, dev->line + w * 2, colorpos);
+ gen_twopix(dev, dev->line + w * dev->pixelsize, colorpos, w & 1);
}
}
@@ -433,7 +538,7 @@ static void gen_text(struct vivi_dev *dev, char *basep,
/* Print stream time */
for (line = y; line < y + 16; line++) {
int j = 0;
- char *pos = basep + line * dev->width * 2 + x * 2;
+ char *pos = basep + line * dev->width * dev->pixelsize + x * dev->pixelsize;
char *s;
for (s = text; *s; s++) {
@@ -443,9 +548,9 @@ static void gen_text(struct vivi_dev *dev, char *basep,
for (i = 0; i < 7; i++, j++) {
/* Draw white font on black background */
if (chr & (1 << (7 - i)))
- gen_twopix(dev, pos + j * 2, WHITE);
+ gen_twopix(dev, pos + j * dev->pixelsize, WHITE, (x+y) & 1);
else
- gen_twopix(dev, pos + j * 2, TEXT_BLACK);
+ gen_twopix(dev, pos + j * dev->pixelsize, TEXT_BLACK, (x+y) & 1);
}
}
}
@@ -466,7 +571,9 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
return;
for (h = 0; h < hmax; h++)
- memcpy(vbuf + h * wmax * 2, dev->line + (dev->mv_count % wmax) * 2, wmax * 2);
+ memcpy(vbuf + h * wmax * dev->pixelsize,
+ dev->line + (dev->mv_count % wmax) * dev->pixelsize,
+ wmax * dev->pixelsize);
/* Updates stream time */
@@ -484,15 +591,16 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
gen_text(dev, vbuf, line++ * 16, 16, str);
gain = v4l2_ctrl_g_ctrl(dev->gain);
- mutex_lock(&dev->ctrl_handler.lock);
+ mutex_lock(dev->ctrl_handler.lock);
snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
dev->brightness->cur.val,
dev->contrast->cur.val,
dev->saturation->cur.val,
dev->hue->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d ",
- dev->autogain->cur.val, gain, dev->volume->cur.val);
+ snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d, alpha 0x%02x ",
+ dev->autogain->cur.val, gain, dev->volume->cur.val,
+ dev->alpha->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
dev->int32->cur.val,
@@ -503,8 +611,12 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->boolean->cur.val,
dev->menu->qmenu[dev->menu->cur.val],
dev->string->cur.string);
- mutex_unlock(&dev->ctrl_handler.lock);
gen_text(dev, vbuf, line++ * 16, 16, str);
+ snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
+ dev->int_menu->qmenu_int[dev->int_menu->cur.val],
+ dev->int_menu->cur.val);
+ gen_text(dev, vbuf, line++ * 16, 16, str);
+ mutex_unlock(dev->ctrl_handler.lock);
if (dev->button_pressed) {
dev->button_pressed--;
snprintf(str, sizeof(str), " button pressed!");
@@ -657,7 +769,7 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct vivi_dev *dev = vb2_get_drv_priv(vq);
unsigned long size;
- size = dev->width * dev->height * 2;
+ size = dev->width * dev->height * dev->pixelsize;
if (0 == *nbuffers)
*nbuffers = 32;
@@ -721,7 +833,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
dev->height < 32 || dev->height > MAX_HEIGHT)
return -EINVAL;
- size = dev->width * dev->height * 2;
+ size = dev->width * dev->height * dev->pixelsize;
if (vb2_plane_size(vb, 0) < size) {
dprintk(dev, 1, "%s data will not fit into plane (%lu < %lu)\n",
__func__, vb2_plane_size(vb, 0), size);
@@ -915,6 +1027,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
}
dev->fmt = get_format(f);
+ dev->pixelsize = dev->fmt->depth / 8;
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
dev->field = f->fmt.pix.field;
@@ -1016,8 +1129,15 @@ static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
- if (ctrl == dev->button)
- dev->button_pressed = 30;
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ dev->alpha_component = ctrl->val;
+ break;
+ default:
+ if (ctrl == dev->button)
+ dev->button_pressed = 30;
+ break;
+ }
return 0;
}
@@ -1029,27 +1149,24 @@ static ssize_t
vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
struct vivi_dev *dev = video_drvdata(file);
+ int err;
dprintk(dev, 1, "read called\n");
- return vb2_read(&dev->vb_vidq, data, count, ppos,
+ mutex_lock(&dev->mutex);
+ err = vb2_read(&dev->vb_vidq, data, count, ppos,
file->f_flags & O_NONBLOCK);
+ mutex_unlock(&dev->mutex);
+ return err;
}
static unsigned int
vivi_poll(struct file *file, struct poll_table_struct *wait)
{
struct vivi_dev *dev = video_drvdata(file);
- struct v4l2_fh *fh = file->private_data;
struct vb2_queue *q = &dev->vb_vidq;
- unsigned int res;
dprintk(dev, 1, "%s\n", __func__);
- res = vb2_poll(q, file, wait);
- if (v4l2_event_pending(fh))
- res |= POLLPRI;
- else
- poll_wait(file, &fh->wait, wait);
- return res;
+ return vb2_poll(q, file, wait);
}
static int vivi_close(struct file *file)
@@ -1165,6 +1282,22 @@ static const struct v4l2_ctrl_config vivi_ctrl_bitmask = {
.step = 0,
};
+static const s64 vivi_ctrl_int_menu_values[] = {
+ 1, 1, 2, 3, 5, 8, 13, 21, 42,
+};
+
+static const struct v4l2_ctrl_config vivi_ctrl_int_menu = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 7,
+ .name = "Integer menu",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 1,
+ .max = 8,
+ .def = 4,
+ .menu_skip_mask = 0x02,
+ .qmenu_int = vivi_ctrl_int_menu_values,
+};
+
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
@@ -1252,6 +1385,7 @@ static int __init vivi_create_instance(int inst)
dev->fmt = &formats[0];
dev->width = 640;
dev->height = 480;
+ dev->pixelsize = dev->fmt->depth / 8;
hdl = &dev->ctrl_handler;
v4l2_ctrl_handler_init(hdl, 11);
dev->volume = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
@@ -1268,6 +1402,8 @@ static int __init vivi_create_instance(int inst)
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
dev->gain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
V4L2_CID_GAIN, 0, 255, 1, 100);
+ dev->alpha = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL);
dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL);
dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL);
@@ -1275,6 +1411,7 @@ static int __init vivi_create_instance(int inst)
dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL);
dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL);
dev->bitmask = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_bitmask, NULL);
+ dev->int_menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int_menu, NULL);
if (hdl->error) {
ret = hdl->error;
goto unreg_dev;
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 7fd7ac567e1a..db2a6003a1c3 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -62,6 +62,9 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <linux/parport.h>
/*#define DEBUG*/ /* Undef me for production */
@@ -104,6 +107,7 @@
struct w9966 {
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
unsigned char dev_state;
unsigned char i2c_state;
unsigned short ppmode;
@@ -567,7 +571,8 @@ static int cam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
+ vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -595,67 +600,25 @@ static int cam_s_input(struct file *file, void *fh, unsigned int inp)
return (inp > 0) ? -EINVAL : 0;
}
-static int cam_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
+static int cam_s_ctrl(struct v4l2_ctrl *ctrl)
{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, -64, 64, 1, 64);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, -128, 127, 1, 0);
- }
- return -EINVAL;
-}
-
-static int cam_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct w9966 *cam = video_drvdata(file);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = cam->brightness;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = cam->contrast;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = cam->color;
- break;
- case V4L2_CID_HUE:
- ctrl->value = cam->hue;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int cam_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct w9966 *cam = video_drvdata(file);
+ struct w9966 *cam =
+ container_of(ctrl->handler, struct w9966, hdl);
int ret = 0;
mutex_lock(&cam->lock);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- cam->brightness = ctrl->value;
+ cam->brightness = ctrl->val;
break;
case V4L2_CID_CONTRAST:
- cam->contrast = ctrl->value;
+ cam->contrast = ctrl->val;
break;
case V4L2_CID_SATURATION:
- cam->color = ctrl->value;
+ cam->color = ctrl->val;
break;
case V4L2_CID_HUE:
- cam->hue = ctrl->value;
+ cam->hue = ctrl->val;
break;
default:
ret = -EINVAL;
@@ -813,6 +776,9 @@ out:
static const struct v4l2_file_operations w9966_fops = {
.owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
.unlocked_ioctl = video_ioctl2,
.read = w9966_v4l_read,
};
@@ -822,13 +788,17 @@ static const struct v4l2_ioctl_ops w9966_ioctl_ops = {
.vidioc_g_input = cam_g_input,
.vidioc_s_input = cam_s_input,
.vidioc_enum_input = cam_enum_input,
- .vidioc_queryctrl = cam_queryctrl,
- .vidioc_g_ctrl = cam_g_ctrl,
- .vidioc_s_ctrl = cam_s_ctrl,
.vidioc_enum_fmt_vid_cap = cam_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = cam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = cam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = cam_try_fmt_vid_cap,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops cam_ctrl_ops = {
+ .s_ctrl = cam_s_ctrl,
};
@@ -849,6 +819,20 @@ static int w9966_init(struct w9966 *cam, struct parport *port)
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
return -1;
}
+
+ v4l2_ctrl_handler_init(&cam->hdl, 4);
+ v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
+ V4L2_CID_CONTRAST, -64, 64, 1, 64);
+ v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
+ V4L2_CID_SATURATION, -64, 64, 1, 64);
+ v4l2_ctrl_new_std(&cam->hdl, &cam_ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ if (cam->hdl.error) {
+ v4l2_err(v4l2_dev, "couldn't register controls\n");
+ return -1;
+ }
cam->pport = port;
cam->brightness = 128;
cam->contrast = 64;
@@ -898,6 +882,8 @@ static int w9966_init(struct w9966 *cam, struct parport *port)
cam->vdev.fops = &w9966_fops;
cam->vdev.ioctl_ops = &w9966_ioctl_ops;
cam->vdev.release = video_device_release_empty;
+ cam->vdev.ctrl_handler = &cam->hdl;
+ set_bit(V4L2_FL_USE_FH_PRIO, &cam->vdev.flags);
video_set_drvdata(&cam->vdev, cam);
mutex_init(&cam->lock);
@@ -923,6 +909,8 @@ static void w9966_term(struct w9966 *cam)
w9966_set_state(cam, W9966_STATE_VDEV, 0);
}
+ v4l2_ctrl_handler_free(&cam->hdl);
+
/* Terminate from IEEE1284 mode and release pdev block */
if (w9966_get_state(cam, W9966_STATE_PDEV, W9966_STATE_PDEV)) {
w9966_pdev_claim(cam);
diff --git a/drivers/media/video/zoran/zoran_device.c b/drivers/media/video/zoran/zoran_device.c
index e86173bd1327..a4cd504b8eee 100644
--- a/drivers/media/video/zoran/zoran_device.c
+++ b/drivers/media/video/zoran/zoran_device.c
@@ -542,11 +542,9 @@ void write_overlay_mask(struct zoran_fh *fh, struct v4l2_clip *vp, int count)
u32 *mask;
int x, y, width, height;
unsigned i, j, k;
- u32 reg;
/* fill mask with one bits */
memset(fh->overlay_mask, ~0, mask_line_size * 4 * BUZ_MAX_HEIGHT);
- reg = 0;
for (i = 0; i < count; ++i) {
/* pick up local copy of clip */
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 4c09ab781ec3..c57310931810 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1131,8 +1131,14 @@ static int setup_fbuffer(struct zoran_fh *fh,
}
-static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height,
- struct v4l2_clip __user *clips, int clipcount, void __user *bitmap)
+static int setup_window(struct zoran_fh *fh,
+ int x,
+ int y,
+ int width,
+ int height,
+ struct v4l2_clip __user *clips,
+ unsigned int clipcount,
+ void __user *bitmap)
{
struct zoran *zr = fh->zr;
struct v4l2_clip *vcp = NULL;
@@ -1155,6 +1161,14 @@ static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height
return -EINVAL;
}
+ if (clipcount > 2048) {
+ dprintk(1,
+ KERN_ERR
+ "%s: %s - invalid clipcount\n",
+ ZR_DEVNAME(zr), __func__);
+ return -EINVAL;
+ }
+
/*
* The video front end needs 4-byte aligned line sizes; we correct that
* silently here if necessary
@@ -1218,7 +1232,7 @@ static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height
(width * height + 7) / 8)) {
return -EFAULT;
}
- } else if (clipcount > 0) {
+ } else if (clipcount) {
/* write our own bitmap from the clips */
vcp = vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4));
if (vcp == NULL) {
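[Editor's note] The new clipcount upper bound above matters because the clip list size feeds a vmalloc() of sizeof(struct v4l2_clip) * (clipcount + 4); an unchecked user-supplied count could overflow that multiplication or pin large amounts of memory. A hedged sketch of the same guard pattern, using an ERR_PTR-style helper that is illustrative only and not part of the patch:

#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/err.h>
#include <linux/errno.h>

#define MAX_USER_CLIPS 2048	/* same ceiling the patch uses */

static struct v4l2_clip *alloc_clip_array(unsigned int clipcount)
{
	/* Reject absurd counts before they reach the allocator. */
	if (clipcount > MAX_USER_CLIPS)
		return ERR_PTR(-EINVAL);

	return vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4));
}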
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index cd2e39fc4bf0..e44cb330bbc8 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -507,14 +507,12 @@ static void zr364xx_fillbuff(struct zr364xx_camera *cam,
const char *tmpbuf;
char *vbuf = videobuf_to_vmalloc(&buf->vb);
unsigned long last_frame;
- struct zr364xx_framei *frm;
if (!vbuf)
return;
last_frame = cam->last_frame;
if (last_frame != -1) {
- frm = &cam->buffer.frame[last_frame];
tmpbuf = (const char *)cam->buffer.frame[last_frame].lpvbits;
switch (buf->fmt->fourcc) {
case V4L2_PIX_FMT_JPEG:
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a5c591ffe395..d99db5623acf 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1653,7 +1653,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
unsigned long port;
u32 msize;
u32 psize;
- u8 revision;
int r = -ENODEV;
struct pci_dev *pdev;
@@ -1670,8 +1669,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
return r;
}
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask = dma_get_required_mask
(&pdev->dev);
@@ -1779,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
MPT_ADAPTER *ioc;
u8 cb_idx;
int r = -ENODEV;
- u8 revision;
u8 pcixcmd;
static int mpt_ids = 0;
#ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
ioc->name, &ioc->facts, &ioc->pfacts[0]));
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+ mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
+ ioc->prod_name);
switch (pdev->device)
{
@@ -1903,7 +1899,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
- if (revision < XL_929) {
+ if (pdev->revision < XL_929) {
/* 929X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
@@ -1934,7 +1930,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
- if (revision < C0_1030) {
+ if (pdev->revision < C0_1030) {
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
@@ -6483,6 +6479,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
printk(MYIOC_s_INFO_FMT "%s: host reset in"
" progress mpt_config timed out.!!\n",
__func__, ioc->name);
+ mutex_unlock(&ioc->mptbase_cmds.mutex);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 6e6e16aab9da..b383b6961e59 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1250,7 +1250,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
int iocnum;
unsigned int port;
int cim_rev;
- u8 revision;
struct scsi_device *sdev;
VirtDevice *vdevice;
@@ -1324,8 +1323,7 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
pdev = (struct pci_dev *) ioc->pcidev;
karg->pciId = pdev->device;
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- karg->hwRev = revision;
+ karg->hwRev = pdev->revision;
karg->subSystemDevice = pdev->subsystem_device;
karg->subSystemVendor = pdev->subsystem_vendor;
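[Editor's note] Both fusion hunks above replace a manual config-space read of PCI_CLASS_REVISION with the revision byte the PCI core already caches in struct pci_dev. A small sketch of the before/after, inside a hypothetical helper that is not part of the patch:

#include <linux/pci.h>

static void report_revision(struct pci_dev *pdev)
{
	u8 revision;

	/* Old style: read the low byte of the class/revision dword. */
	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);

	/* New style: the core filled pdev->revision at enumeration time. */
	revision = pdev->revision;

	dev_info(&pdev->dev, "chip revision 0x%02x\n", revision);
}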
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 6d115c7208ab..506c36f6e1db 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -283,7 +283,6 @@ static char *bus_strings[] = {
"Local Bus",
"ISA",
"EISA",
- "MCA",
"PCI",
"PCMCIA",
"NUBUS",
@@ -351,18 +350,6 @@ static int i2o_seq_show_hrt(struct seq_file *seq, void *v)
EisaSlotNumber);
break;
- case I2O_BUS_MCA:
- seq_printf(seq, " IOBase: %0#6x,",
- hrt->hrt_entry[i].bus.mca_bus.
- McaBaseIOPort);
- seq_printf(seq, " MemoryBase: %0#10x,",
- hrt->hrt_entry[i].bus.mca_bus.
- McaBaseMemoryAddress);
- seq_printf(seq, " Slot: %0#4x,",
- hrt->hrt_entry[i].bus.mca_bus.
- McaSlotNumber);
- break;
-
case I2O_BUS_PCI:
seq_printf(seq, " Bus: %0#4x",
hrt->hrt_entry[i].bus.pci_bus.
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 49ef8b0794ae..92144ed1ad46 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -106,6 +106,19 @@ config UCB1400_CORE
To compile this driver as a module, choose M here: the
module will be called ucb1400_core.
+config MFD_LM3533
+ tristate "LM3533 Lighting Power chip"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say yes here to enable support for National Semiconductor / TI
+ LM3533 Lighting Power chips.
+
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the LED,
+ backlight or ambient-light-sensor functionality of the device.
+
config TPS6105X
tristate "TPS61050/61052 Boost Converters"
depends on I2C
@@ -177,8 +190,8 @@ config MFD_TPS65910
bool "TPS65910 Power Management chip"
depends on I2C=y && GPIOLIB
select MFD_CORE
- select GPIO_TPS65910
select REGMAP_I2C
+ select IRQ_DOMAIN
help
if you say yes here you get support for the TPS65910 series of
Power Management chips.
@@ -273,6 +286,7 @@ config TWL6040_CORE
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
+ select IRQ_DOMAIN
default n
help
Say yes here if you want support for Texas Instruments TWL6040 audio
@@ -377,7 +391,6 @@ config PMIC_DA9052
config MFD_DA9052_SPI
bool "Support Dialog Semiconductor DA9052/53 PMIC variants with SPI"
- select IRQ_DOMAIN
select REGMAP_SPI
select REGMAP_IRQ
select PMIC_DA9052
@@ -390,7 +403,6 @@ config MFD_DA9052_SPI
config MFD_DA9052_I2C
bool "Support Dialog Semiconductor DA9052/53 PMIC variants with I2C"
- select IRQ_DOMAIN
select REGMAP_I2C
select REGMAP_IRQ
select PMIC_DA9052
@@ -411,13 +423,26 @@ config PMIC_ADP5520
 individual components like LCD backlight, LEDs, GPIOs and Keypad
under the corresponding menus.
+config MFD_MAX77693
+ bool "Maxim Semiconductor MAX77693 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+	  Say yes here to add support for the Maxim Semiconductor MAX77693.
+	  This is a companion Power Management IC with Flash, Haptic, Charger,
+	  and MUIC (Micro USB Interface Controller) controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX8925
bool "Maxim Semiconductor MAX8925 PMIC Support"
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
help
Say yes here to support for Maxim Semiconductor MAX8925. This is
- a Power Management IC. This driver provies common support for
+ a Power Management IC. This driver provides common support for
accessing the device, additional drivers must be enabled in order
to use the functionality of the device.
@@ -440,7 +465,7 @@ config MFD_MAX8998
help
Say yes here to support for Maxim Semiconductor MAX8998 and
National Semiconductor LP3974. This is a Power Management IC.
- This driver provies common support for accessing the device,
+ This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
of the device.
@@ -451,14 +476,14 @@ config MFD_S5M_CORE
select REGMAP_I2C
help
Support for the Samsung Electronics S5M MFD series.
- This driver provies common support for accessing the device,
+ This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
of the device
config MFD_WM8400
- tristate "Support Wolfson Microelectronics WM8400"
+ bool "Support Wolfson Microelectronics WM8400"
select MFD_CORE
- depends on I2C
+ depends on I2C=y
select REGMAP_I2C
help
Support for the Wolfson Microelecronics WM8400 PMIC and audio
@@ -475,6 +500,7 @@ config MFD_WM831X_I2C
select MFD_CORE
select MFD_WM831X
select REGMAP_I2C
+ select IRQ_DOMAIN
depends on I2C=y && GENERIC_HARDIRQS
help
Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -487,6 +513,7 @@ config MFD_WM831X_SPI
select MFD_CORE
select MFD_WM831X
select REGMAP_SPI
+ select IRQ_DOMAIN
depends on SPI_MASTER && GENERIC_HARDIRQS
help
Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -561,7 +588,6 @@ config MFD_WM8994
bool "Support Wolfson Microelectronics WM8994"
select MFD_CORE
select REGMAP_I2C
- select IRQ_DOMAIN
select REGMAP_IRQ
depends on I2C=y && GENERIC_HARDIRQS
help
@@ -600,17 +626,32 @@ config MFD_MC13783
tristate
config MFD_MC13XXX
- tristate "Support Freescale MC13783 and MC13892"
- depends on SPI_MASTER
+ tristate
+ depends on SPI_MASTER || I2C
select MFD_CORE
select MFD_MC13783
help
- Support for the Freescale (Atlas) PMIC and audio CODECs
- MC13783 and MC13892.
- This driver provides common support for accessing the device,
+ Enable support for the Freescale MC13783 and MC13892 PMICs.
+ This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_MC13XXX_SPI
+ tristate "Freescale MC13783 and MC13892 SPI interface"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select MFD_MC13XXX
+ help
+ Select this if your MC13xxx is connected via an SPI bus.
+
+config MFD_MC13XXX_I2C
+ tristate "Freescale MC13892 I2C interface"
+ depends on I2C
+ select REGMAP_I2C
+ select MFD_MC13XXX
+ help
+ Select this if your MC13xxx is connected via an I2C bus.
+
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
default y if ARCH_U300 || ARCH_U8500
@@ -654,7 +695,7 @@ config EZX_PCAP
config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
- depends on GENERIC_HARDIRQS && ABX500_CORE
+ depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
select MFD_CORE
help
Select this option to enable access to AB8500 power management
@@ -725,6 +766,16 @@ config LPC_SCH
LPC bridge function of the Intel SCH provides support for
System Management Bus and General Purpose I/O.
+config LPC_ICH
+ tristate "Intel ICH LPC"
+ depends on PCI
+ select MFD_CORE
+ help
+ The LPC bridge function of the Intel ICH provides support for
+ many functional units. This driver provides needed support for
+ other drivers to control these functions, currently GPIO and
+ watchdog.
+
config MFD_RDC321X
tristate "Support for RDC-R321x southbridge"
select MFD_CORE
@@ -857,6 +908,11 @@ config MFD_RC5T583
Additional drivers must be enabled in order to use the
different functionality of the device.
+config MFD_STA2X11
+ bool "STA2X11 multi function device support"
+ depends on STA2X11
+ select MFD_CORE
+
config MFD_ANATOP
bool "Support for Freescale i.MX on-chip ANATOP controller"
depends on SOC_IMX6Q
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 43672b87805a..75f6ed68a4b9 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
+obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
@@ -54,6 +55,8 @@ obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
+obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
+obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
@@ -75,6 +78,7 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+obj-$(CONFIG_MFD_MAX77693) += max77693.o max77693-irq.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
@@ -87,15 +91,15 @@ obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
-obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
-# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
-obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
+# ab8500-core need to come after db8500-prcmu (which provides the channel)
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
+obj-$(CONFIG_LPC_ICH) += lpc_ich.o
obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
diff --git a/drivers/mfd/ab5500-core.h b/drivers/mfd/ab5500-core.h
deleted file mode 100644
index 63b30b17e4f3..000000000000
--- a/drivers/mfd/ab5500-core.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2011 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Shared definitions and data structures for the AB5500 MFD driver
- */
-
-/* Read/write operation values. */
-#define AB5500_PERM_RD (0x01)
-#define AB5500_PERM_WR (0x02)
-
-/* Read/write permissions. */
-#define AB5500_PERM_RO (AB5500_PERM_RD)
-#define AB5500_PERM_RW (AB5500_PERM_RD | AB5500_PERM_WR)
-
-#define AB5500_MASK_BASE (0x60)
-#define AB5500_MASK_END (0x79)
-#define AB5500_CHIP_ID (0x20)
-
-/**
- * struct ab5500_reg_range
- * @first: the first address of the range
- * @last: the last address of the range
- * @perm: access permissions for the range
- */
-struct ab5500_reg_range {
- u8 first;
- u8 last;
- u8 perm;
-};
-
-/**
- * struct ab5500_i2c_ranges
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_ranges {
- u8 nranges;
- u8 bankid;
- const struct ab5500_reg_range *range;
-};
-
-/**
- * struct ab5500_i2c_banks
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_banks {
- u8 nbanks;
- const struct ab5500_i2c_ranges *bank;
-};
-
-/**
- * struct ab5500_bank
- * @slave_addr: I2C slave_addr found in AB5500 specification
- * @name: Documentation name of the bank. For reference
- */
-struct ab5500_bank {
- u8 slave_addr;
- const char *name;
-};
-
-static const struct ab5500_bank bankinfo[AB5500_NUM_BANKS] = {
- [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
- AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP, "VIT_IO_I2C_CLK_TST_OTP"},
- [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
- AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST, "VDDDIG_IO_I2C_CLK_TST"},
- [AB5500_BANK_VDENC] = {AB5500_ADDR_VDENC, "VDENC"},
- [AB5500_BANK_SIM_USBSIM] = {AB5500_ADDR_SIM_USBSIM, "SIM_USBSIM"},
- [AB5500_BANK_LED] = {AB5500_ADDR_LED, "LED"},
- [AB5500_BANK_ADC] = {AB5500_ADDR_ADC, "ADC"},
- [AB5500_BANK_RTC] = {AB5500_ADDR_RTC, "RTC"},
- [AB5500_BANK_STARTUP] = {AB5500_ADDR_STARTUP, "STARTUP"},
- [AB5500_BANK_DBI_ECI] = {AB5500_ADDR_DBI_ECI, "DBI-ECI"},
- [AB5500_BANK_CHG] = {AB5500_ADDR_CHG, "CHG"},
- [AB5500_BANK_FG_BATTCOM_ACC] = {
- AB5500_ADDR_FG_BATTCOM_ACC, "FG_BATCOM_ACC"},
- [AB5500_BANK_USB] = {AB5500_ADDR_USB, "USB"},
- [AB5500_BANK_IT] = {AB5500_ADDR_IT, "IT"},
- [AB5500_BANK_VIBRA] = {AB5500_ADDR_VIBRA, "VIBRA"},
- [AB5500_BANK_AUDIO_HEADSETUSB] = {
- AB5500_ADDR_AUDIO_HEADSETUSB, "AUDIO_HEADSETUSB"},
-};
-
-int ab5500_get_register_interruptible_raw(struct ab5500 *ab, u8 bank, u8 reg,
- u8 *value);
-int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
- u8 reg, u8 bitmask, u8 bitvalues);
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1f08704f7ae8..dac0e2998603 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -18,7 +18,10 @@
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/ab8500.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
/*
* Interrupt register offsets
@@ -91,12 +94,24 @@
#define AB8500_IT_MASK23_REG 0x56
#define AB8500_IT_MASK24_REG 0x57
+/*
+ * latch hierarchy registers
+ */
+#define AB8500_IT_LATCHHIER1_REG 0x60
+#define AB8500_IT_LATCHHIER2_REG 0x61
+#define AB8500_IT_LATCHHIER3_REG 0x62
+
+#define AB8500_IT_LATCHHIER_NUM 3
+
#define AB8500_REV_REG 0x80
#define AB8500_IC_NAME_REG 0x82
#define AB8500_SWITCH_OFF_STATUS 0x00
#define AB8500_TURN_ON_STATUS 0x00
+static bool no_bm; /* No battery management */
+module_param(no_bm, bool, S_IRUGO);
+
#define AB9540_MODEM_CTRL2_REG 0x23
#define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT BIT(2)
@@ -125,6 +140,41 @@ static const char ab8500_version_str[][7] = {
[AB8500_VERSION_AB8540] = "AB8540",
};
+static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ int ret;
+
+ ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+ if (ret < 0)
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+}
+
+static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
+ u8 data)
+{
+ int ret;
+
+ ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
+ &mask, 1);
+ if (ret < 0)
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+}
+
+static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
+{
+ int ret;
+ u8 data;
+
+ ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+ if (ret < 0) {
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+ }
+ return (int)data;
+}
+
static int ab8500_get_chip_id(struct device *dev)
{
struct ab8500 *ab8500;
@@ -161,9 +211,13 @@ static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
static int ab8500_set_register(struct device *dev, u8 bank,
u8 reg, u8 value)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return set_register_interruptible(ab8500, bank, reg, value);
+ atomic_inc(&ab8500->transfer_ongoing);
+ ret = set_register_interruptible(ab8500, bank, reg, value);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -192,9 +246,13 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
static int ab8500_get_register(struct device *dev, u8 bank,
u8 reg, u8 *value)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return get_register_interruptible(ab8500, bank, reg, value);
+ atomic_inc(&ab8500->transfer_ongoing);
+ ret = get_register_interruptible(ab8500, bank, reg, value);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static int mask_and_set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -241,11 +299,14 @@ out:
static int ab8500_mask_and_set_register(struct device *dev,
u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return mask_and_set_register_interruptible(ab8500, bank, reg,
- bitmask, bitvalues);
-
+ atomic_inc(&ab8500->transfer_ongoing);
+	ret = mask_and_set_register_interruptible(ab8500, bank, reg,
+ bitmask, bitvalues);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static struct abx500_ops ab8500_ops = {
@@ -264,6 +325,7 @@ static void ab8500_irq_lock(struct irq_data *data)
struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
mutex_lock(&ab8500->irq_lock);
+ atomic_inc(&ab8500->transfer_ongoing);
}
static void ab8500_irq_sync_unlock(struct irq_data *data)
@@ -292,7 +354,7 @@ static void ab8500_irq_sync_unlock(struct irq_data *data)
reg = AB8500_IT_MASK1_REG + ab8500->irq_reg_offset[i];
set_register_interruptible(ab8500, AB8500_INTERRUPT, reg, new);
}
-
+ atomic_dec(&ab8500->transfer_ongoing);
mutex_unlock(&ab8500->irq_lock);
}
@@ -325,6 +387,90 @@ static struct irq_chip ab8500_irq_chip = {
.irq_unmask = ab8500_irq_unmask,
};
+static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
+ int latch_offset, u8 latch_val)
+{
+ int int_bit = __ffs(latch_val);
+ int line, i;
+
+ do {
+ int_bit = __ffs(latch_val);
+
+ for (i = 0; i < ab8500->mask_size; i++)
+ if (ab8500->irq_reg_offset[i] == latch_offset)
+ break;
+
+ if (i >= ab8500->mask_size) {
+ dev_err(ab8500->dev, "Register offset 0x%2x not declared\n",
+ latch_offset);
+ return -ENXIO;
+ }
+
+ line = (i << 3) + int_bit;
+ latch_val &= ~(1 << int_bit);
+
+ handle_nested_irq(ab8500->irq_base + line);
+ } while (latch_val);
+
+ return 0;
+}
+
+static int ab8500_handle_hierarchical_latch(struct ab8500 *ab8500,
+ int hier_offset, u8 hier_val)
+{
+ int latch_bit, status;
+ u8 latch_offset, latch_val;
+
+ do {
+ latch_bit = __ffs(hier_val);
+ latch_offset = (hier_offset << 3) + latch_bit;
+
+ /* Fix inconsistent ITFromLatch25 bit mapping... */
+ if (unlikely(latch_offset == 17))
+ latch_offset = 24;
+
+ status = get_register_interruptible(ab8500,
+ AB8500_INTERRUPT,
+ AB8500_IT_LATCH1_REG + latch_offset,
+ &latch_val);
+ if (status < 0 || latch_val == 0)
+ goto discard;
+
+ status = ab8500_handle_hierarchical_line(ab8500,
+ latch_offset, latch_val);
+ if (status < 0)
+ return status;
+discard:
+ hier_val &= ~(1 << latch_bit);
+ } while (hier_val);
+
+ return 0;
+}
+
+static irqreturn_t ab8500_hierarchical_irq(int irq, void *dev)
+{
+ struct ab8500 *ab8500 = dev;
+ u8 i;
+
+ dev_vdbg(ab8500->dev, "interrupt\n");
+
+ /* Hierarchical interrupt version */
+ for (i = 0; i < AB8500_IT_LATCHHIER_NUM; i++) {
+ int status;
+ u8 hier_val;
+
+ status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
+ AB8500_IT_LATCHHIER1_REG + i, &hier_val);
+ if (status < 0 || hier_val == 0)
+ continue;
+
+ status = ab8500_handle_hierarchical_latch(ab8500, i, hier_val);
+ if (status < 0)
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
static irqreturn_t ab8500_irq(int irq, void *dev)
{
struct ab8500 *ab8500 = dev;
@@ -332,6 +478,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
dev_vdbg(ab8500->dev, "interrupt\n");
+ atomic_inc(&ab8500->transfer_ongoing);
+
for (i = 0; i < ab8500->mask_size; i++) {
int regoffset = ab8500->irq_reg_offset[i];
int status;
@@ -355,9 +503,10 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
handle_nested_irq(ab8500->irq_base + line);
value &= ~(1 << bit);
+
} while (value);
}
-
+ atomic_dec(&ab8500->transfer_ongoing);
return IRQ_HANDLED;
}
@@ -411,6 +560,14 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
}
}
+int ab8500_suspend(struct ab8500 *ab8500)
+{
+ if (atomic_read(&ab8500->transfer_ongoing))
+ return -EINVAL;
+ else
+ return 0;
+}
+
/* AB8500 GPIO Resources */
static struct resource __devinitdata ab8500_gpio_resources[] = {
{
@@ -744,6 +901,39 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
},
};
+static struct resource __devinitdata ab8505_iddet_resources[] = {
+ {
+ .name = "KeyDeglitch",
+ .start = AB8505_INT_KEYDEGLITCH,
+ .end = AB8505_INT_KEYDEGLITCH,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "KP",
+ .start = AB8505_INT_KP,
+ .end = AB8505_INT_KP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IKP",
+ .start = AB8505_INT_IKP,
+ .end = AB8505_INT_IKP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IKR",
+ .start = AB8505_INT_IKR,
+ .end = AB8505_INT_IKR,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "KeyStuck",
+ .start = AB8505_INT_KEYSTUCK,
+ .end = AB8505_INT_KEYSTUCK,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct resource __devinitdata ab8500_temp_resources[] = {
{
.name = "AB8500_TEMP_WARM",
@@ -778,35 +968,11 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
.resources = ab8500_rtc_resources,
},
{
- .name = "ab8500-charger",
- .num_resources = ARRAY_SIZE(ab8500_charger_resources),
- .resources = ab8500_charger_resources,
- },
- {
- .name = "ab8500-btemp",
- .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
- .resources = ab8500_btemp_resources,
- },
- {
- .name = "ab8500-fg",
- .num_resources = ARRAY_SIZE(ab8500_fg_resources),
- .resources = ab8500_fg_resources,
- },
- {
- .name = "ab8500-chargalg",
- .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
- .resources = ab8500_chargalg_resources,
- },
- {
.name = "ab8500-acc-det",
.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
.resources = ab8500_av_acc_detect_resources,
},
{
- .name = "ab8500-codec",
- },
-
- {
.name = "ab8500-poweron-key",
.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
.resources = ab8500_poweronkey_db_resources,
@@ -834,6 +1000,29 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
},
};
+static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
+ {
+ .name = "ab8500-charger",
+ .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+ .resources = ab8500_charger_resources,
+ },
+ {
+ .name = "ab8500-btemp",
+ .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+ .resources = ab8500_btemp_resources,
+ },
+ {
+ .name = "ab8500-fg",
+ .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+ .resources = ab8500_fg_resources,
+ },
+ {
+ .name = "ab8500-chargalg",
+ .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+ .resources = ab8500_chargalg_resources,
+ },
+};
+
static struct mfd_cell __devinitdata ab8500_devs[] = {
{
.name = "ab8500-gpio",
@@ -845,6 +1034,9 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
.resources = ab8500_usb_resources,
},
+ {
+ .name = "ab8500-codec",
+ },
};
static struct mfd_cell __devinitdata ab9540_devs[] = {
@@ -858,6 +1050,18 @@ static struct mfd_cell __devinitdata ab9540_devs[] = {
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
.resources = ab8500_usb_resources,
},
+ {
+ .name = "ab9540-codec",
+ },
+};
+
+/* Device list common to ab9540 and ab8505 */
+static struct mfd_cell __devinitdata ab9540_ab8505_devs[] = {
+ {
+ .name = "ab-iddet",
+ .num_resources = ARRAY_SIZE(ab8505_iddet_resources),
+ .resources = ab8505_iddet_resources,
+ },
};
static ssize_t show_chip_id(struct device *dev,
@@ -1003,18 +1207,66 @@ static struct attribute_group ab9540_attr_group = {
.attrs = ab9540_sysfs_entries,
};
-int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
+static const struct of_device_id ab8500_match[] = {
+ {
+ .compatible = "stericsson,ab8500",
+ .data = (void *)AB8500_VERSION_AB8500,
+ },
+ {},
+};
+
+static int __devinit ab8500_probe(struct platform_device *pdev)
{
- struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
+ struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+ enum ab8500_version version = AB8500_VERSION_UNDEFINED;
+ struct device_node *np = pdev->dev.of_node;
+ struct ab8500 *ab8500;
+ struct resource *resource;
int ret;
int i;
u8 value;
+ ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ if (!ab8500)
+ return -ENOMEM;
+
if (plat)
ab8500->irq_base = plat->irq_base;
+ else if (np)
+ ret = of_property_read_u32(np, "stericsson,irq-base", &ab8500->irq_base);
+
+ if (!ab8500->irq_base) {
+ dev_info(&pdev->dev, "couldn't find irq-base\n");
+ ret = -EINVAL;
+ goto out_free_ab8500;
+ }
+
+ ab8500->dev = &pdev->dev;
+
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!resource) {
+ ret = -ENODEV;
+ goto out_free_ab8500;
+ }
+
+ ab8500->irq = resource->start;
+
+ ab8500->read = ab8500_i2c_read;
+ ab8500->write = ab8500_i2c_write;
+ ab8500->write_masked = ab8500_i2c_write_masked;
mutex_init(&ab8500->lock);
mutex_init(&ab8500->irq_lock);
+ atomic_set(&ab8500->transfer_ongoing, 0);
+
+ platform_set_drvdata(pdev, ab8500);
+
+ if (platid)
+ version = platid->driver_data;
+ else if (np)
+ version = (unsigned int)
+ of_match_device(ab8500_match, &pdev->dev)->data;
if (version != AB8500_VERSION_UNDEFINED)
ab8500->version = version;
@@ -1022,7 +1274,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_IC_NAME_REG, &value);
if (ret < 0)
- return ret;
+ goto out_free_ab8500;
ab8500->version = value;
}
@@ -1030,7 +1282,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_REV_REG, &value);
if (ret < 0)
- return ret;
+ goto out_free_ab8500;
ab8500->chip_id = value;
@@ -1105,30 +1357,57 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
if (ret)
goto out_freeoldmask;
- ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+	/* Activate this feature only on ab9540 */
+	/* until tests are done on ab8500 1p2 or later */
+ if (is_ab9540(ab8500))
+ ret = request_threaded_irq(ab8500->irq, NULL,
+ ab8500_hierarchical_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
+ else
+ ret = request_threaded_irq(ab8500->irq, NULL,
+ ab8500_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
if (ret)
goto out_removeirq;
}
- ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
- ARRAY_SIZE(abx500_common_devs), NULL,
- ab8500->irq_base);
+ if (!np) {
+ ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
+ ARRAY_SIZE(abx500_common_devs), NULL,
+ ab8500->irq_base);
- if (ret)
- goto out_freeirq;
+ if (ret)
+ goto out_freeirq;
+
+ if (is_ab9540(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
+ ARRAY_SIZE(ab9540_devs), NULL,
+ ab8500->irq_base);
+ else
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
+ ARRAY_SIZE(ab8500_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
- if (is_ab9540(ab8500))
- ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
- ARRAY_SIZE(ab9540_devs), NULL,
- ab8500->irq_base);
- else
- ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
- ARRAY_SIZE(ab9540_devs), NULL,
- ab8500->irq_base);
- if (ret)
- goto out_freeirq;
+ if (is_ab9540(ab8500) || is_ab8505(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
+ ARRAY_SIZE(ab9540_ab8505_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
+ }
+
+ if (!no_bm) {
+ /* Add battery management devices */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+ ARRAY_SIZE(ab8500_bm_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ dev_err(ab8500->dev, "error adding bm devices\n");
+ }
if (is_ab9540(ab8500))
ret = sysfs_create_group(&ab8500->dev->kobj,
@@ -1151,12 +1430,16 @@ out_freeoldmask:
kfree(ab8500->oldmask);
out_freemask:
kfree(ab8500->mask);
+out_free_ab8500:
+ kfree(ab8500);
return ret;
}
-int __devexit ab8500_exit(struct ab8500 *ab8500)
+static int __devexit ab8500_remove(struct platform_device *pdev)
{
+ struct ab8500 *ab8500 = platform_get_drvdata(pdev);
+
if (is_ab9540(ab8500))
sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
else
@@ -1168,10 +1451,42 @@ int __devexit ab8500_exit(struct ab8500 *ab8500)
}
kfree(ab8500->oldmask);
kfree(ab8500->mask);
+ kfree(ab8500);
return 0;
}
+static const struct platform_device_id ab8500_id[] = {
+ { "ab8500-core", AB8500_VERSION_AB8500 },
+ { "ab8505-i2c", AB8500_VERSION_AB8505 },
+ { "ab9540-i2c", AB8500_VERSION_AB9540 },
+ { "ab8540-i2c", AB8500_VERSION_AB8540 },
+ { }
+};
+
+static struct platform_driver ab8500_core_driver = {
+ .driver = {
+ .name = "ab8500-core",
+ .owner = THIS_MODULE,
+ .of_match_table = ab8500_match,
+ },
+ .probe = ab8500_probe,
+ .remove = __devexit_p(ab8500_remove),
+ .id_table = ab8500_id,
+};
+
+static int __init ab8500_core_init(void)
+{
+ return platform_driver_register(&ab8500_core_driver);
+}
+
+static void __exit ab8500_core_exit(void)
+{
+ platform_driver_unregister(&ab8500_core_driver);
+}
+arch_initcall(ab8500_core_init);
+module_exit(ab8500_core_exit);
+
MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
MODULE_DESCRIPTION("AB8500 MFD core");
MODULE_LICENSE("GPL v2");
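[Editor's note] ab8500_handle_hierarchical_line() above walks a latch register with __ffs(), clearing each bit as its nested interrupt is dispatched. A small stand-alone sketch of that bit-walking idiom; the callback and function name are hypothetical and not part of the patch:

#include <linux/bitops.h>
#include <linux/types.h>

/* Invoke handle_bit() once for every bit set in 'val', lowest first.
 * The while() guard matters: __ffs() is undefined for a zero argument. */
static void dispatch_set_bits(u8 val, void (*handle_bit)(int bit))
{
	while (val) {
		int bit = __ffs(val);	/* index of the lowest set bit */

		handle_bit(bit);
		val &= ~(1U << bit);	/* clear it and look for the next */
	}
}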
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 9a0211aa8897..50c4c89ab220 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -608,10 +608,16 @@ static int __devexit ab8500_debug_remove(struct platform_device *plf)
return 0;
}
+static const struct of_device_id ab8500_debug_match[] = {
+ { .compatible = "stericsson,ab8500-debug", },
+ {}
+};
+
static struct platform_driver ab8500_debug_driver = {
.driver = {
.name = "ab8500-debug",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_debug_match,
},
.probe = ab8500_debug_probe,
.remove = __devexit_p(ab8500_debug_remove)
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index c39fc716e1dc..b86fd8e1ec3f 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -584,7 +584,7 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
if (gpadc->irq < 0) {
- dev_err(gpadc->dev, "failed to get platform irq-%d\n",
+ dev_err(&pdev->dev, "failed to get platform irq-%d\n",
gpadc->irq);
ret = gpadc->irq;
goto fail;
@@ -648,12 +648,18 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_gpadc_match[] = {
+ { .compatible = "stericsson,ab8500-gpadc", },
+ {}
+};
+
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
.remove = __devexit_p(ab8500_gpadc_remove),
.driver = {
.name = "ab8500-gpadc",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_gpadc_match,
},
};
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
deleted file mode 100644
index b83045f102be..000000000000
--- a/drivers/mfd/ab8500-i2c.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
- * License Terms: GNU General Public License v2
- * This file was based on drivers/mfd/ab8500-spi.c
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/dbx500-prcmu.h>
-
-static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
-{
- int ret;
-
- ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
- if (ret < 0)
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
-}
-
-static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
- u8 data)
-{
- int ret;
-
- ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
- &mask, 1);
- if (ret < 0)
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
-}
-
-static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
-{
- int ret;
- u8 data;
-
- ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
- if (ret < 0) {
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
- }
- return (int)data;
-}
-
-static int __devinit ab8500_i2c_probe(struct platform_device *plf)
-{
- const struct platform_device_id *platid = platform_get_device_id(plf);
- struct ab8500 *ab8500;
- struct resource *resource;
- int ret;
-
- ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
- if (!ab8500)
- return -ENOMEM;
-
- ab8500->dev = &plf->dev;
-
- resource = platform_get_resource(plf, IORESOURCE_IRQ, 0);
- if (!resource) {
- kfree(ab8500);
- return -ENODEV;
- }
-
- ab8500->irq = resource->start;
-
- ab8500->read = ab8500_i2c_read;
- ab8500->write = ab8500_i2c_write;
- ab8500->write_masked = ab8500_i2c_write_masked;
-
- platform_set_drvdata(plf, ab8500);
-
- ret = ab8500_init(ab8500, platid->driver_data);
- if (ret)
- kfree(ab8500);
-
-
- return ret;
-}
-
-static int __devexit ab8500_i2c_remove(struct platform_device *plf)
-{
- struct ab8500 *ab8500 = platform_get_drvdata(plf);
-
- ab8500_exit(ab8500);
- kfree(ab8500);
-
- return 0;
-}
-
-static const struct platform_device_id ab8500_id[] = {
- { "ab8500-i2c", AB8500_VERSION_AB8500 },
- { "ab8505-i2c", AB8500_VERSION_AB8505 },
- { "ab9540-i2c", AB8500_VERSION_AB9540 },
- { "ab8540-i2c", AB8500_VERSION_AB8540 },
- { }
-};
-
-static struct platform_driver ab8500_i2c_driver = {
- .driver = {
- .name = "ab8500-i2c",
- .owner = THIS_MODULE,
- },
- .probe = ab8500_i2c_probe,
- .remove = __devexit_p(ab8500_i2c_remove),
- .id_table = ab8500_id,
-};
-
-static int __init ab8500_i2c_init(void)
-{
- return platform_driver_register(&ab8500_i2c_driver);
-}
-
-static void __exit ab8500_i2c_exit(void)
-{
- platform_driver_unregister(&ab8500_i2c_driver);
-}
-arch_initcall(ab8500_i2c_init);
-module_exit(ab8500_i2c_exit);
-
-MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
-MODULE_DESCRIPTION("AB8500 Core access via PRCMU I2C");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index c28d4eb1eff0..5a3e51ccf258 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -61,10 +61,16 @@ static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_sysctrl_match[] = {
+ { .compatible = "stericsson,ab8500-sysctrl", },
+ {}
+};
+
static struct platform_driver ab8500_sysctrl_driver = {
.driver = {
.name = "ab8500-sysctrl",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_sysctrl_match,
},
.probe = ab8500_sysctrl_probe,
.remove = __devexit_p(ab8500_sysctrl_remove),
diff --git a/drivers/mfd/anatop-mfd.c b/drivers/mfd/anatop-mfd.c
index 2af42480635e..6da06341f6c9 100644
--- a/drivers/mfd/anatop-mfd.c
+++ b/drivers/mfd/anatop-mfd.c
@@ -41,39 +41,26 @@
#include <linux/of_address.h>
#include <linux/mfd/anatop.h>
-u32 anatop_get_bits(struct anatop *adata, u32 addr, int bit_shift,
- int bit_width)
+u32 anatop_read_reg(struct anatop *adata, u32 addr)
{
- u32 val, mask;
-
- if (bit_width == 32)
- mask = ~0;
- else
- mask = (1 << bit_width) - 1;
-
- val = readl(adata->ioreg + addr);
- val = (val >> bit_shift) & mask;
-
- return val;
+ return readl(adata->ioreg + addr);
}
-EXPORT_SYMBOL_GPL(anatop_get_bits);
+EXPORT_SYMBOL_GPL(anatop_read_reg);
-void anatop_set_bits(struct anatop *adata, u32 addr, int bit_shift,
- int bit_width, u32 data)
+void anatop_write_reg(struct anatop *adata, u32 addr, u32 data, u32 mask)
{
- u32 val, mask;
+ u32 val;
- if (bit_width == 32)
- mask = ~0;
- else
- mask = (1 << bit_width) - 1;
+ data &= mask;
spin_lock(&adata->reglock);
- val = readl(adata->ioreg + addr) & ~(mask << bit_shift);
- writel((data << bit_shift) | val, adata->ioreg + addr);
+ val = readl(adata->ioreg + addr);
+ val &= ~mask;
+ val |= data;
+ writel(val, adata->ioreg + addr);
spin_unlock(&adata->reglock);
}
-EXPORT_SYMBOL_GPL(anatop_set_bits);
+EXPORT_SYMBOL_GPL(anatop_write_reg);
static const struct of_device_id of_anatop_match[] = {
{ .compatible = "fsl,imx6q-anatop", },
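[Editor's note] The anatop change above collapses the old bit_shift/bit_width interface into a plain masked write. A short sketch of the read-modify-write pattern it now uses, for an MMIO register protected by a spinlock; the device struct and helper name are assumptions, not part of the patch:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct mmio_dev {
	void __iomem *base;
	spinlock_t lock;
};

/* Update only the bits selected by 'mask', leaving the rest untouched. */
static void mmio_update_bits(struct mmio_dev *d, u32 offset, u32 mask, u32 val)
{
	u32 reg;

	spin_lock(&d->lock);
	reg = readl(d->base + offset);
	reg = (reg & ~mask) | (val & mask);
	writel(reg, d->base + offset);
	spin_unlock(&d->lock);
}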
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 1582c3d95257..383421bf5760 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -353,12 +353,28 @@ static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
return 0;
}
+static int asic3_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct asic3 *asic = irq_data_get_irq_chip_data(data);
+ u32 bank, index;
+ u16 bit;
+
+ bank = asic3_irq_to_bank(asic, data->irq);
+ index = asic3_irq_to_index(asic, data->irq);
+ bit = 1<<index;
+
+ asic3_set_register(asic, bank + ASIC3_GPIO_SLEEP_MASK, bit, !on);
+
+ return 0;
+}
+
static struct irq_chip asic3_gpio_irq_chip = {
.name = "ASIC3-GPIO",
.irq_ack = asic3_mask_gpio_irq,
.irq_mask = asic3_mask_gpio_irq,
.irq_unmask = asic3_unmask_gpio_irq,
.irq_set_type = asic3_gpio_irq_type,
+ .irq_set_wake = asic3_gpio_irq_set_wake,
};
static struct irq_chip asic3_irq_chip = {
@@ -529,7 +545,7 @@ static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct asic3 *asic = container_of(chip, struct asic3, gpio);
- return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
+ return asic->irq_base + offset;
}
static __init int asic3_gpio_probe(struct platform_device *pdev,
@@ -894,10 +910,13 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
- ret = mfd_add_devices(&pdev->dev, pdev->id,
+ if (pdata->clock_rate) {
+ ds1wm_pdata.clock_rate = pdata->clock_rate;
+ ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base);
- if (ret < 0)
- goto out;
+ if (ret < 0)
+ goto out;
+ }
if (mem_sdio && (irq >= 0)) {
ret = mfd_add_devices(&pdev->dev, pdev->id,
@@ -1000,6 +1019,9 @@ static int __init asic3_probe(struct platform_device *pdev)
asic3_mfd_probe(pdev, pdata, mem);
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 1);
+
dev_info(asic->dev, "ASIC3 Core driver\n");
return 0;
@@ -1021,6 +1043,9 @@ static int __devexit asic3_remove(struct platform_device *pdev)
int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 0);
+
asic3_mfd_remove(pdev);
ret = asic3_gpio_remove(pdev);
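[Editor's note] The new asic3_gpio_irq_set_wake() above is what lets client drivers arm a GPIO interrupt as a wakeup source: the genirq core routes enable_irq_wake() to the chip's .irq_set_wake callback. A hedged usage sketch from a consumer driver's suspend path; the driver data and function names are hypothetical, not part of the patch:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct mydrv_data {
	int irq;	/* interrupt obtained at probe time */
};

static int mydrv_suspend(struct device *dev)
{
	struct mydrv_data *d = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(d->irq);	/* calls .irq_set_wake(data, 1) */
	return 0;
}

static int mydrv_resume(struct device *dev)
{
	struct mydrv_data *d = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(d->irq);	/* calls .irq_set_wake(data, 0) */
	return 0;
}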
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 315fef5d466a..3419e726de47 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -186,18 +186,7 @@ static struct pci_driver cs5535_mfd_driver = {
.remove = __devexit_p(cs5535_mfd_remove),
};
-static int __init cs5535_mfd_init(void)
-{
- return pci_register_driver(&cs5535_mfd_driver);
-}
-
-static void __exit cs5535_mfd_exit(void)
-{
- pci_unregister_driver(&cs5535_mfd_driver);
-}
-
-module_init(cs5535_mfd_init);
-module_exit(cs5535_mfd_exit);
+module_pci_driver(cs5535_mfd_driver);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
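[Editor's note] module_pci_driver() above removes the hand-written init/exit boilerplate. It expands, roughly, to the code below (paraphrased from the kernel's module_driver() helper and shown only to explain what the one-liner replaces):

/* What module_pci_driver(cs5535_mfd_driver) boils down to: */
static int __init cs5535_mfd_driver_init(void)
{
	return pci_register_driver(&cs5535_mfd_driver);
}
module_init(cs5535_mfd_driver_init);

static void __exit cs5535_mfd_driver_exit(void)
{
	pci_unregister_driver(&cs5535_mfd_driver);
}
module_exit(cs5535_mfd_driver_exit);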
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 7776aff46269..1f1313c90573 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -318,6 +318,135 @@ static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
}
}
+/*
+ * TBAT look-up table is computed from the R90 reg (8-bit register)
+ * reading as below. The battery temperature is in milli degrees Centigrade.
+ * TBAT = (1/(t1 + 1/298) - 273) * 1000 mC
+ * where t1 = (1/B) * ln((ADCval * 2.5)/(R25 * ITBAT * 255))
+ * Default values are R25 = 10e3, B = 3380, ITBAT = 50e-6
+ * Example:
+ * R25 = 10e3, B = 3380, ITBAT = 50e-6, ADCVAL = 62d calculates
+ * TBAT = 20015 milli degrees Centigrade
+ *
+ */
+static const int32_t tbat_lookup[255] = {
+ 183258, 144221, 124334, 111336, 101826, 94397, 88343, 83257,
+ 78889, 75071, 71688, 68656, 65914, 63414, 61120, 59001,
+ 57036, 55204, 53490, 51881, 50364, 48931, 47574, 46285,
+ 45059, 43889, 42772, 41703, 40678, 39694, 38748, 37838,
+ 36961, 36115, 35297, 34507, 33743, 33002, 32284, 31588,
+ 30911, 30254, 29615, 28994, 28389, 27799, 27225, 26664,
+ 26117, 25584, 25062, 24553, 24054, 23567, 23091, 22624,
+ 22167, 21719, 21281, 20851, 20429, 20015, 19610, 19211,
+ 18820, 18436, 18058, 17688, 17323, 16965, 16612, 16266,
+ 15925, 15589, 15259, 14933, 14613, 14298, 13987, 13681,
+ 13379, 13082, 12788, 12499, 12214, 11933, 11655, 11382,
+ 11112, 10845, 10582, 10322, 10066, 9812, 9562, 9315,
+ 9071, 8830, 8591, 8356, 8123, 7893, 7665, 7440,
+ 7218, 6998, 6780, 6565, 6352, 6141, 5933, 5726,
+ 5522, 5320, 5120, 4922, 4726, 4532, 4340, 4149,
+ 3961, 3774, 3589, 3406, 3225, 3045, 2867, 2690,
+ 2516, 2342, 2170, 2000, 1831, 1664, 1498, 1334,
+ 1171, 1009, 849, 690, 532, 376, 221, 67,
+ -84, -236, -386, -535, -683, -830, -975, -1119,
+ -1263, -1405, -1546, -1686, -1825, -1964, -2101, -2237,
+ -2372, -2506, -2639, -2771, -2902, -3033, -3162, -3291,
+ -3418, -3545, -3671, -3796, -3920, -4044, -4166, -4288,
+ -4409, -4529, -4649, -4767, -4885, -5002, -5119, -5235,
+ -5349, -5464, -5577, -5690, -5802, -5913, -6024, -6134,
+ -6244, -6352, -6461, -6568, -6675, -6781, -6887, -6992,
+ -7096, -7200, -7303, -7406, -7508, -7609, -7710, -7810,
+ -7910, -8009, -8108, -8206, -8304, -8401, -8497, -8593,
+ -8689, -8784, -8878, -8972, -9066, -9159, -9251, -9343,
+ -9435, -9526, -9617, -9707, -9796, -9886, -9975, -10063,
+ -10151, -10238, -10325, -10412, -10839, -10923, -11007, -11090,
+ -11173, -11256, -11338, -11420, -11501, -11583, -11663, -11744,
+ -11823, -11903, -11982
+};
+
+static const u8 chan_mux[DA9052_ADC_VBBAT + 1] = {
+ [DA9052_ADC_VDDOUT] = DA9052_ADC_MAN_MUXSEL_VDDOUT,
+ [DA9052_ADC_ICH] = DA9052_ADC_MAN_MUXSEL_ICH,
+ [DA9052_ADC_TBAT] = DA9052_ADC_MAN_MUXSEL_TBAT,
+ [DA9052_ADC_VBAT] = DA9052_ADC_MAN_MUXSEL_VBAT,
+ [DA9052_ADC_IN4] = DA9052_ADC_MAN_MUXSEL_AD4,
+ [DA9052_ADC_IN5] = DA9052_ADC_MAN_MUXSEL_AD5,
+ [DA9052_ADC_IN6] = DA9052_ADC_MAN_MUXSEL_AD6,
+ [DA9052_ADC_VBBAT] = DA9052_ADC_MAN_MUXSEL_VBBAT
+};
+
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel)
+{
+ int ret;
+ unsigned short calc_data;
+ unsigned short data;
+ unsigned char mux_sel;
+
+ if (channel > DA9052_ADC_VBBAT)
+ return -EINVAL;
+
+ mutex_lock(&da9052->auxadc_lock);
+
+ /* Channel gets activated on enabling the Conversion bit */
+ mux_sel = chan_mux[channel] | DA9052_ADC_MAN_MAN_CONV;
+
+ ret = da9052_reg_write(da9052, DA9052_ADC_MAN_REG, mux_sel);
+ if (ret < 0)
+ goto err;
+
+ /* Wait for an interrupt */
+ if (!wait_for_completion_timeout(&da9052->done,
+ msecs_to_jiffies(500))) {
+ dev_err(da9052->dev,
+ "timeout waiting for ADC conversion interrupt\n");
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_RES_H_REG);
+ if (ret < 0)
+ goto err;
+
+ calc_data = (unsigned short)ret;
+ data = calc_data << 2;
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_RES_L_REG);
+ if (ret < 0)
+ goto err;
+
+ calc_data = (unsigned short)(ret & DA9052_ADC_RES_LSB);
+ data |= calc_data;
+
+ ret = data;
+
+err:
+ mutex_unlock(&da9052->auxadc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
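The 10-bit result above is assembled from two byte-wide reads: the high register supplies bits 9:2 and the low register the two least-significant bits (assuming DA9052_ADC_RES_LSB masks those two bits). A tiny standalone sketch of that bit arithmetic, with made-up register values:

    #include <stdio.h>

    int main(void)
    {
            unsigned short res_h = 0xa5;  /* example DA9052_ADC_RES_H_REG byte */
            unsigned short res_l = 0x02;  /* example DA9052_ADC_RES_L_REG byte */
            unsigned short adc = (res_h << 2) | (res_l & 0x03);

            printf("adc = %u\n", adc);    /* (0xa5 << 2) | 0x02 = 662 */
            return 0;
    }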
+
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+ struct da9052 *da9052 = irq_data;
+
+ complete(&da9052->done);
+
+ return IRQ_HANDLED;
+}
+
+int da9052_adc_read_temp(struct da9052 *da9052)
+{
+ int tbat;
+
+ tbat = da9052_reg_read(da9052, DA9052_TBAT_RES_REG);
+ if (tbat <= 0)
+ return tbat;
+
+ /* ARRAY_SIZE check is not needed since TBAT is an 8-bit register */
+ return tbat_lookup[tbat - 1];
+}
+EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
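As a cross-check of the look-up table, the formula from the comment above can be evaluated directly. A minimal user-space sketch, assuming the default R25, B and ITBAT constants quoted there:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            double r25 = 10e3, b = 3380.0, itbat = 50e-6;
            int adcval = 62;  /* example reading from the comment */
            double t1 = (1.0 / b) * log((adcval * 2.5) / (r25 * itbat * 255));
            double tbat = (1.0 / (t1 + 1.0 / 298) - 273) * 1000;

            printf("TBAT = %.0f mC\n", tbat);  /* close to the 20015 mC quoted above */
            return 0;
    }

(Compile with -lm.)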
+
static struct resource da9052_rtc_resource = {
.name = "ALM",
.start = DA9052_IRQ_ALARM,
@@ -646,6 +775,9 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
struct irq_desc *desc;
int ret;
+ mutex_init(&da9052->auxadc_lock);
+ init_completion(&da9052->done);
+
if (pdata && pdata->init != NULL)
pdata->init(da9052);
@@ -665,6 +797,12 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
+ ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "adc irq", da9052);
+ if (ret != 0)
+ dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+
ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0);
if (ret)
@@ -673,6 +811,7 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
return 0;
err:
+ free_irq(DA9052_IRQ_ADC_EOM, da9052);
mfd_remove_devices(da9052->dev);
regmap_err:
return ret;
@@ -680,6 +819,7 @@ regmap_err:
void da9052_device_exit(struct da9052 *da9052)
{
+ free_irq(DA9052_IRQ_ADC_EOM, da9052);
regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
mfd_remove_devices(da9052->dev);
}
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 36b88e395499..82c9d6450286 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -22,6 +22,11 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_device.h>
+#endif
+
static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
{
int reg_val, ret;
@@ -41,13 +46,31 @@ static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
return 0;
}
+static struct i2c_device_id da9052_i2c_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id dialog_dt_ids[] = {
+ { .compatible = "dlg,da9052", .data = &da9052_i2c_id[0] },
+ { .compatible = "dlg,da9053-aa", .data = &da9052_i2c_id[1] },
+ { .compatible = "dlg,da9053-ab", .data = &da9052_i2c_id[2] },
+ { .compatible = "dlg,da9053-bb", .data = &da9052_i2c_id[3] },
+ { /* sentinel */ }
+};
+#endif
+
static int __devinit da9052_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct da9052 *da9052;
int ret;
- da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ da9052 = devm_kzalloc(&client->dev, sizeof(struct da9052), GFP_KERNEL);
if (!da9052)
return -ENOMEM;
@@ -55,8 +78,7 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
__func__);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
da9052->dev = &client->dev;
@@ -64,29 +86,39 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, da9052);
- da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
+ da9052->regmap = devm_regmap_init_i2c(client, &da9052_regmap_config);
if (IS_ERR(da9052->regmap)) {
ret = PTR_ERR(da9052->regmap);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = da9052_i2c_enable_multiwrite(da9052);
if (ret < 0)
- goto err_regmap;
+ return ret;
+
+#ifdef CONFIG_OF
+ if (!id) {
+ struct device_node *np = client->dev.of_node;
+ const struct of_device_id *deviceid;
+
+ deviceid = of_match_node(dialog_dt_ids, np);
+ id = (const struct i2c_device_id *)deviceid->data;
+ }
+#endif
+
+ if (!id) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "id is null.\n");
+ return ret;
+ }
ret = da9052_device_init(da9052, id->driver_data);
if (ret != 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(da9052->regmap);
-err:
- kfree(da9052);
- return ret;
}
static int __devexit da9052_i2c_remove(struct i2c_client *client)
@@ -94,20 +126,9 @@ static int __devexit da9052_i2c_remove(struct i2c_client *client)
struct da9052 *da9052 = i2c_get_clientdata(client);
da9052_device_exit(da9052);
- regmap_exit(da9052->regmap);
- kfree(da9052);
-
return 0;
}
-static struct i2c_device_id da9052_i2c_id[] = {
- {"da9052", DA9052},
- {"da9053-aa", DA9053_AA},
- {"da9053-ba", DA9053_BA},
- {"da9053-bb", DA9053_BB},
- {}
-};
-
static struct i2c_driver da9052_i2c_driver = {
.probe = da9052_i2c_probe,
.remove = __devexit_p(da9052_i2c_remove),
@@ -115,6 +136,9 @@ static struct i2c_driver da9052_i2c_driver = {
.driver = {
.name = "da9052",
.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+ .of_match_table = dialog_dt_ids,
+#endif
},
};
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 6faf149e8d94..dbeadc5a6436 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -25,8 +25,9 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
{
int ret;
const struct spi_device_id *id = spi_get_device_id(spi);
- struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ struct da9052 *da9052;
+ da9052 = devm_kzalloc(&spi->dev, sizeof(struct da9052), GFP_KERNEL);
if (!da9052)
return -ENOMEM;
@@ -42,25 +43,19 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
da9052_regmap_config.read_flag_mask = 1;
da9052_regmap_config.write_flag_mask = 0;
- da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+ da9052->regmap = devm_regmap_init_spi(spi, &da9052_regmap_config);
if (IS_ERR(da9052->regmap)) {
ret = PTR_ERR(da9052->regmap);
dev_err(&spi->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = da9052_device_init(da9052, id->driver_data);
if (ret != 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(da9052->regmap);
-err:
- kfree(da9052);
- return ret;
}
static int __devexit da9052_spi_remove(struct spi_device *spi)
@@ -68,9 +63,6 @@ static int __devexit da9052_spi_remove(struct spi_device *spi)
struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
da9052_device_exit(da9052);
- regmap_exit(da9052->regmap);
- kfree(da9052);
-
return 0;
}
@@ -88,7 +80,6 @@ static struct spi_driver da9052_spi_driver = {
.id_table = da9052_spi_id,
.driver = {
.name = "da9052",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5be32489714f..50e83dc5dc49 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2720,6 +2720,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
/* "v-mmc" changed to "vcore" in the mainline kernel */
REGULATOR_SUPPLY("vcore", "sdi0"),
REGULATOR_SUPPLY("vcore", "sdi1"),
@@ -2734,6 +2735,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("vcore", "uart2"),
REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
+ REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
};
static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
@@ -2958,9 +2960,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
* prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
*
*/
-static int __init db8500_prcmu_probe(struct platform_device *pdev)
+static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
{
- int err = 0;
+ struct device_node *np = pdev->dev.of_node;
+ int irq = 0, err = 0;
if (ux500_is_svp())
return -ENODEV;
@@ -2970,8 +2973,14 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
/* Clean up the mailbox interrupts after pre-kernel code. */
writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
- err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
- prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
+ if (np)
+ irq = platform_get_irq(pdev, 0);
+
+ if (!np || irq <= 0)
+ irq = IRQ_DB8500_PRCMU1;
+
+ err = request_threaded_irq(irq, prcmu_irq_handler,
+ prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
if (err < 0) {
pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
err = -EBUSY;
@@ -2981,14 +2990,16 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
if (cpu_is_u8500v20_or_later())
prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
- err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
- ARRAY_SIZE(db8500_prcmu_devs), NULL,
- 0);
+ if (!np) {
+ err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
+ ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
+ if (err) {
+ pr_err("prcmu: Failed to add subdevices\n");
+ return err;
+ }
+ }
- if (err)
- pr_err("prcmu: Failed to add subdevices\n");
- else
- pr_info("DB8500 PRCMU initialized\n");
+ pr_info("DB8500 PRCMU initialized\n");
no_irq_return:
return err;
@@ -2999,11 +3010,12 @@ static struct platform_driver db8500_prcmu_driver = {
.name = "db8500-prcmu",
.owner = THIS_MODULE,
},
+ .probe = db8500_prcmu_probe,
};
static int __init db8500_prcmu_init(void)
{
- return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
+ return platform_driver_register(&db8500_prcmu_driver);
}
arch_initcall(db8500_prcmu_init);
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index b76657eb0c51..59df5584cb58 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -406,7 +406,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
return -ENXIO;
}
- msic = kzalloc(sizeof(*msic), GFP_KERNEL);
+ msic = devm_kzalloc(&pdev->dev, sizeof(*msic), GFP_KERNEL);
if (!msic)
return -ENOMEM;
@@ -421,21 +421,13 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
- ret = -ENODEV;
- goto fail_free_msic;
+ return -ENODEV;
}
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- ret = -EBUSY;
- goto fail_free_msic;
- }
-
- msic->irq_base = ioremap_nocache(res->start, resource_size(res));
+ msic->irq_base = devm_request_and_ioremap(&pdev->dev, res);
if (!msic->irq_base) {
dev_err(&pdev->dev, "failed to map SRAM memory\n");
- ret = -ENOMEM;
- goto fail_release_region;
+ return -ENOMEM;
}
platform_set_drvdata(pdev, msic);
@@ -443,7 +435,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
ret = intel_msic_init_devices(msic);
if (ret) {
dev_err(&pdev->dev, "failed to initialize MSIC devices\n");
- goto fail_unmap_mem;
+ return ret;
}
dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n",
@@ -451,27 +443,14 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
msic->vendor);
return 0;
-
-fail_unmap_mem:
- iounmap(msic->irq_base);
-fail_release_region:
- release_mem_region(res->start, resource_size(res));
-fail_free_msic:
- kfree(msic);
-
- return ret;
}
static int __devexit intel_msic_remove(struct platform_device *pdev)
{
struct intel_msic *msic = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
intel_msic_remove_devices(msic);
platform_set_drvdata(pdev, NULL);
- iounmap(msic->irq_base);
- release_mem_region(res->start, resource_size(res));
- kfree(msic);
return 0;
}
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index a9223ed1b7c5..2ea99989551a 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -283,23 +283,8 @@ static struct pci_driver cmodio_pci_driver = {
.remove = __devexit_p(cmodio_pci_remove),
};
-/*
- * Module Init / Exit
- */
-
-static int __init cmodio_init(void)
-{
- return pci_register_driver(&cmodio_pci_driver);
-}
-
-static void __exit cmodio_exit(void)
-{
- pci_unregister_driver(&cmodio_pci_driver);
-}
+module_pci_driver(cmodio_pci_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
MODULE_LICENSE("GPL");
-
-module_init(cmodio_init);
-module_exit(cmodio_exit);
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
new file mode 100644
index 000000000000..0b2879b87fd9
--- /dev/null
+++ b/drivers/mfd/lm3533-core.c
@@ -0,0 +1,667 @@
+/*
+ * lm3533-core.c -- LM3533 Core
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_BOOST_OVP_MASK 0x06
+#define LM3533_BOOST_OVP_SHIFT 1
+
+#define LM3533_BOOST_FREQ_MASK 0x01
+#define LM3533_BOOST_FREQ_SHIFT 0
+
+#define LM3533_BL_ID_MASK 1
+#define LM3533_LED_ID_MASK 3
+#define LM3533_BL_ID_MAX 1
+#define LM3533_LED_ID_MAX 3
+
+#define LM3533_HVLED_ID_MAX 2
+#define LM3533_LVLED_ID_MAX 5
+
+#define LM3533_REG_OUTPUT_CONF1 0x10
+#define LM3533_REG_OUTPUT_CONF2 0x11
+#define LM3533_REG_BOOST_PWM 0x2c
+
+#define LM3533_REG_MAX 0xb2
+
+
+static struct mfd_cell lm3533_als_devs[] = {
+ {
+ .name = "lm3533-als",
+ .id = -1,
+ },
+};
+
+static struct mfd_cell lm3533_bl_devs[] = {
+ {
+ .name = "lm3533-backlight",
+ .id = 0,
+ },
+ {
+ .name = "lm3533-backlight",
+ .id = 1,
+ },
+};
+
+static struct mfd_cell lm3533_led_devs[] = {
+ {
+ .name = "lm3533-leds",
+ .id = 0,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 1,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 2,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 3,
+ },
+};
+
+int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val)
+{
+ int tmp;
+ int ret;
+
+ ret = regmap_read(lm3533->regmap, reg, &tmp);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to read register %02x: %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ *val = tmp;
+
+ dev_dbg(lm3533->dev, "read [%02x]: %02x\n", reg, *val);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_read);
+
+int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val)
+{
+ int ret;
+
+ dev_dbg(lm3533->dev, "write [%02x]: %02x\n", reg, val);
+
+ ret = regmap_write(lm3533->regmap, reg, val);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to write register %02x: %d\n",
+ reg, ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_write);
+
+int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask)
+{
+ int ret;
+
+ dev_dbg(lm3533->dev, "update [%02x]: %02x/%02x\n", reg, val, mask);
+
+ ret = regmap_update_bits(lm3533->regmap, reg, mask, val);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to update register %02x: %d\n",
+ reg, ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_update);
+
+static int lm3533_set_boost_freq(struct lm3533 *lm3533,
+ enum lm3533_boost_freq freq)
+{
+ int ret;
+
+ ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+ freq << LM3533_BOOST_FREQ_SHIFT,
+ LM3533_BOOST_FREQ_MASK);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set boost frequency\n");
+
+ return ret;
+}
+
+
+static int lm3533_set_boost_ovp(struct lm3533 *lm3533,
+ enum lm3533_boost_ovp ovp)
+{
+ int ret;
+
+ ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+ ovp << LM3533_BOOST_OVP_SHIFT,
+ LM3533_BOOST_OVP_MASK);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set boost ovp\n");
+
+ return ret;
+}
+
+/*
+ * HVLED output config -- output hvled controlled by backlight bl
+ */
+static int lm3533_set_hvled_config(struct lm3533 *lm3533, u8 hvled, u8 bl)
+{
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (hvled == 0 || hvled > LM3533_HVLED_ID_MAX)
+ return -EINVAL;
+
+ if (bl > LM3533_BL_ID_MAX)
+ return -EINVAL;
+
+ shift = hvled - 1;
+ mask = LM3533_BL_ID_MASK << shift;
+ val = bl << shift;
+
+ ret = lm3533_update(lm3533, LM3533_REG_OUTPUT_CONF1, val, mask);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set hvled config\n");
+
+ return ret;
+}
+
+/*
+ * LVLED output config -- output lvled controlled by LED led
+ */
+static int lm3533_set_lvled_config(struct lm3533 *lm3533, u8 lvled, u8 led)
+{
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (lvled == 0 || lvled > LM3533_LVLED_ID_MAX)
+ return -EINVAL;
+
+ if (led > LM3533_LED_ID_MAX)
+ return -EINVAL;
+
+ if (lvled < 4) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = 2 * lvled;
+ } else {
+ reg = LM3533_REG_OUTPUT_CONF2;
+ shift = 2 * (lvled - 4);
+ }
+
+ mask = LM3533_LED_ID_MASK << shift;
+ val = led << shift;
+
+ ret = lm3533_update(lm3533, reg, val, mask);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set lvled config\n");
+
+ return ret;
+}
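To make the bit layout concrete, the reg/shift pairs computed above work out as follows; this is a standalone re-derivation, not driver code:

    #include <stdio.h>

    int main(void)
    {
            int lvled;

            for (lvled = 1; lvled <= 5; lvled++) {
                    int reg = (lvled < 4) ? 0x10 : 0x11;  /* CONF1 : CONF2 */
                    int shift = (lvled < 4) ? 2 * lvled : 2 * (lvled - 4);

                    printf("lvled%d: reg %#x, shift %d\n", lvled, reg, shift);
            }
            return 0;
    }

So lvled1-3 occupy bits 7:2 of LM3533_REG_OUTPUT_CONF1 and lvled4-5 bits 3:0 of LM3533_REG_OUTPUT_CONF2, two bits per output.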
+
+static void lm3533_enable(struct lm3533 *lm3533)
+{
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_set_value(lm3533->gpio_hwen, 1);
+}
+
+static void lm3533_disable(struct lm3533 *lm3533)
+{
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_set_value(lm3533->gpio_hwen, 0);
+}
+
+enum lm3533_attribute_type {
+ LM3533_ATTR_TYPE_BACKLIGHT,
+ LM3533_ATTR_TYPE_LED,
+};
+
+struct lm3533_device_attribute {
+ struct device_attribute dev_attr;
+ enum lm3533_attribute_type type;
+ union {
+ struct {
+ u8 id;
+ } output;
+ } u;
+};
+
+#define to_lm3533_dev_attr(_attr) \
+ container_of(_attr, struct lm3533_device_attribute, dev_attr)
+
+static ssize_t show_output(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+ int id = lattr->u.output.id;
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = id - 1;
+ mask = LM3533_BL_ID_MASK << shift;
+ } else {
+ if (id < 4) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = 2 * id;
+ } else {
+ reg = LM3533_REG_OUTPUT_CONF2;
+ shift = 2 * (id - 4);
+ }
+ mask = LM3533_LED_ID_MASK << shift;
+ }
+
+ ret = lm3533_read(lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ val = (val & mask) >> shift;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_output(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+ int id = lattr->u.output.id;
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT)
+ ret = lm3533_set_hvled_config(lm3533, id, val);
+ else
+ ret = lm3533_set_lvled_config(lm3533, id, val);
+
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+#define LM3533_OUTPUT_ATTR(_name, _mode, _show, _store, _type, _id) \
+ struct lm3533_device_attribute lm3533_dev_attr_##_name = \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .type = _type, \
+ .u.output = { .id = _id }, }
+
+#define LM3533_OUTPUT_ATTR_RW(_name, _type, _id) \
+ LM3533_OUTPUT_ATTR(output_##_name, S_IRUGO | S_IWUSR, \
+ show_output, store_output, _type, _id)
+
+#define LM3533_OUTPUT_HVLED_ATTR_RW(_nr) \
+ LM3533_OUTPUT_ATTR_RW(hvled##_nr, LM3533_ATTR_TYPE_BACKLIGHT, _nr)
+#define LM3533_OUTPUT_LVLED_ATTR_RW(_nr) \
+ LM3533_OUTPUT_ATTR_RW(lvled##_nr, LM3533_ATTR_TYPE_LED, _nr)
+/*
+ * Output config:
+ *
+ * output_hvled<nr> 0-1
+ * output_lvled<nr> 0-3
+ */
+static LM3533_OUTPUT_HVLED_ATTR_RW(1);
+static LM3533_OUTPUT_HVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(1);
+static LM3533_OUTPUT_LVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(3);
+static LM3533_OUTPUT_LVLED_ATTR_RW(4);
+static LM3533_OUTPUT_LVLED_ATTR_RW(5);
+
+static struct attribute *lm3533_attributes[] = {
+ &lm3533_dev_attr_output_hvled1.dev_attr.attr,
+ &lm3533_dev_attr_output_hvled2.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled1.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled2.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled3.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled4.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled5.dev_attr.attr,
+ NULL,
+};
+
+#define to_dev_attr(_attr) \
+ container_of(_attr, struct device_attribute, attr)
+
+static umode_t lm3533_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct device_attribute *dattr = to_dev_attr(attr);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(dattr);
+ enum lm3533_attribute_type type = lattr->type;
+ umode_t mode = attr->mode;
+
+ if (!lm3533->have_backlights && type == LM3533_ATTR_TYPE_BACKLIGHT)
+ mode = 0;
+ else if (!lm3533->have_leds && type == LM3533_ATTR_TYPE_LED)
+ mode = 0;
+
+ return mode;
+};
+
+static struct attribute_group lm3533_attribute_group = {
+ .is_visible = lm3533_attr_is_visible,
+ .attrs = lm3533_attributes
+};
+
+static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int ret;
+
+ if (!pdata->als)
+ return 0;
+
+ lm3533_als_devs[0].platform_data = pdata->als;
+ lm3533_als_devs[0].pdata_size = sizeof(*pdata->als);
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add ALS device\n");
+ return ret;
+ }
+
+ lm3533->have_als = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int i;
+ int ret;
+
+ if (!pdata->backlights || pdata->num_backlights == 0)
+ return 0;
+
+ if (pdata->num_backlights > ARRAY_SIZE(lm3533_bl_devs))
+ pdata->num_backlights = ARRAY_SIZE(lm3533_bl_devs);
+
+ for (i = 0; i < pdata->num_backlights; ++i) {
+ lm3533_bl_devs[i].platform_data = &pdata->backlights[i];
+ lm3533_bl_devs[i].pdata_size = sizeof(pdata->backlights[i]);
+ }
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_bl_devs,
+ pdata->num_backlights, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add backlight devices\n");
+ return ret;
+ }
+
+ lm3533->have_backlights = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int i;
+ int ret;
+
+ if (!pdata->leds || pdata->num_leds == 0)
+ return 0;
+
+ if (pdata->num_leds > ARRAY_SIZE(lm3533_led_devs))
+ pdata->num_leds = ARRAY_SIZE(lm3533_led_devs);
+
+ for (i = 0; i < pdata->num_leds; ++i) {
+ lm3533_led_devs[i].platform_data = &pdata->leds[i];
+ lm3533_led_devs[i].pdata_size = sizeof(pdata->leds[i]);
+ }
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_led_devs,
+ pdata->num_leds, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add LED devices\n");
+ return ret;
+ }
+
+ lm3533->have_leds = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_setup(struct lm3533 *lm3533,
+ struct lm3533_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_set_boost_freq(lm3533, pdata->boost_freq);
+ if (ret)
+ return ret;
+
+ ret = lm3533_set_boost_ovp(lm3533, pdata->boost_ovp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int ret;
+
+ dev_dbg(lm3533->dev, "%s\n", __func__);
+
+ if (!pdata) {
+ dev_err(lm3533->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ lm3533->gpio_hwen = pdata->gpio_hwen;
+
+ dev_set_drvdata(lm3533->dev, lm3533);
+
+ if (gpio_is_valid(lm3533->gpio_hwen)) {
+ ret = gpio_request_one(lm3533->gpio_hwen, GPIOF_OUT_INIT_LOW,
+ "lm3533-hwen");
+ if (ret < 0) {
+ dev_err(lm3533->dev,
+ "failed to request HWEN GPIO %d\n",
+ lm3533->gpio_hwen);
+ return ret;
+ }
+ }
+
+ lm3533_enable(lm3533);
+
+ ret = lm3533_device_setup(lm3533, pdata);
+ if (ret)
+ goto err_disable;
+
+ lm3533_device_als_init(lm3533);
+ lm3533_device_bl_init(lm3533);
+ lm3533_device_led_init(lm3533);
+
+ ret = sysfs_create_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ mfd_remove_devices(lm3533->dev);
+err_disable:
+ lm3533_disable(lm3533);
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_free(lm3533->gpio_hwen);
+
+ return ret;
+}
+
+static void __devexit lm3533_device_exit(struct lm3533 *lm3533)
+{
+ dev_dbg(lm3533->dev, "%s\n", __func__);
+
+ sysfs_remove_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+
+ mfd_remove_devices(lm3533->dev);
+ lm3533_disable(lm3533);
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_free(lm3533->gpio_hwen);
+}
+
+static bool lm3533_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x10 ... 0x2c:
+ case 0x30 ... 0x38:
+ case 0x40 ... 0x45:
+ case 0x50 ... 0x57:
+ case 0x60 ... 0x6e:
+ case 0x70 ... 0x75:
+ case 0x80 ... 0x85:
+ case 0x90 ... 0x95:
+ case 0xa0 ... 0xa5:
+ case 0xb0 ... 0xb2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool lm3533_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x34 ... 0x36: /* zone */
+ case 0x37 ... 0x38: /* adc */
+ case 0xb0 ... 0xb1: /* fault */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool lm3533_precious_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x34: /* zone */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LM3533_REG_MAX,
+ .readable_reg = lm3533_readable_register,
+ .volatile_reg = lm3533_volatile_register,
+ .precious_reg = lm3533_precious_register,
+};
+
+static int __devinit lm3533_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct lm3533 *lm3533;
+ int ret;
+
+ dev_dbg(&i2c->dev, "%s\n", __func__);
+
+ lm3533 = devm_kzalloc(&i2c->dev, sizeof(*lm3533), GFP_KERNEL);
+ if (!lm3533)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, lm3533);
+
+ lm3533->regmap = devm_regmap_init_i2c(i2c, &regmap_config);
+ if (IS_ERR(lm3533->regmap))
+ return PTR_ERR(lm3533->regmap);
+
+ lm3533->dev = &i2c->dev;
+ lm3533->irq = i2c->irq;
+
+ ret = lm3533_device_init(lm3533);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __devexit lm3533_i2c_remove(struct i2c_client *i2c)
+{
+ struct lm3533 *lm3533 = i2c_get_clientdata(i2c);
+
+ dev_dbg(&i2c->dev, "%s\n", __func__);
+
+ lm3533_device_exit(lm3533);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3533_i2c_ids[] = {
+ { "lm3533", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, lm3533_i2c_ids);
+
+static struct i2c_driver lm3533_i2c_driver = {
+ .driver = {
+ .name = "lm3533",
+ .owner = THIS_MODULE,
+ },
+ .id_table = lm3533_i2c_ids,
+ .probe = lm3533_i2c_probe,
+ .remove = __devexit_p(lm3533_i2c_remove),
+};
+
+static int __init lm3533_i2c_init(void)
+{
+ return i2c_add_driver(&lm3533_i2c_driver);
+}
+subsys_initcall(lm3533_i2c_init);
+
+static void __exit lm3533_i2c_exit(void)
+{
+ i2c_del_driver(&lm3533_i2c_driver);
+}
+module_exit(lm3533_i2c_exit);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lm3533-ctrlbank.c b/drivers/mfd/lm3533-ctrlbank.c
new file mode 100644
index 000000000000..a4cb7a5220a7
--- /dev/null
+++ b/drivers/mfd/lm3533-ctrlbank.c
@@ -0,0 +1,148 @@
+/*
+ * lm3533-ctrlbank.c -- LM3533 Generic Control Bank interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_MAX_CURRENT_MIN 5000
+#define LM3533_MAX_CURRENT_MAX 29800
+#define LM3533_MAX_CURRENT_STEP 800
+
+#define LM3533_BRIGHTNESS_MAX 255
+#define LM3533_PWM_MAX 0x3f
+
+#define LM3533_REG_PWM_BASE 0x14
+#define LM3533_REG_MAX_CURRENT_BASE 0x1f
+#define LM3533_REG_CTRLBANK_ENABLE 0x27
+#define LM3533_REG_BRIGHTNESS_BASE 0x40
+
+
+static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
+{
+ return base + cb->id;
+}
+
+int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
+{
+ u8 mask;
+ int ret;
+
+ dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+ mask = 1 << cb->id;
+ ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
+ mask, mask);
+ if (ret)
+ dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_enable);
+
+int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
+{
+ u8 mask;
+ int ret;
+
+ dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+ mask = 1 << cb->id;
+ ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, 0, mask);
+ if (ret)
+ dev_err(cb->dev, "failed to disable ctrlbank %d\n", cb->id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_disable);
+
+/*
+ * Full-scale current.
+ *
+ * imax 5000 - 29800 uA (800 uA step)
+ */
+int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
+{
+ u8 reg;
+ u8 val;
+ int ret;
+
+ if (imax < LM3533_MAX_CURRENT_MIN || imax > LM3533_MAX_CURRENT_MAX)
+ return -EINVAL;
+
+ val = (imax - LM3533_MAX_CURRENT_MIN) / LM3533_MAX_CURRENT_STEP;
+
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_MAX_CURRENT_BASE);
+ ret = lm3533_write(cb->lm3533, reg, val);
+ if (ret)
+ dev_err(cb->dev, "failed to set max current\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current);
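For example, a requested full-scale current of 19400 uA (an arbitrary value within the 5000-29800 uA range) maps to register value 18 with the scaling used above; a quick sketch:

    #include <stdio.h>

    int main(void)
    {
            unsigned int imax = 19400;               /* requested current in uA */
            unsigned int val = (imax - 5000) / 800;  /* same scaling as above */

            printf("imax %u uA -> reg value %u\n", imax, val);  /* prints 18 */
            return 0;
    }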
+
+#define lm3533_ctrlbank_set(_name, _NAME) \
+int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val) \
+{ \
+ u8 reg; \
+ int ret; \
+ \
+ if (val > LM3533_##_NAME##_MAX) \
+ return -EINVAL; \
+ \
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
+ ret = lm3533_write(cb->lm3533, reg, val); \
+ if (ret) \
+ dev_err(cb->dev, "failed to set " #_name "\n"); \
+ \
+ return ret; \
+} \
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name);
+
+#define lm3533_ctrlbank_get(_name, _NAME) \
+int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val) \
+{ \
+ u8 reg; \
+ int ret; \
+ \
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
+ ret = lm3533_read(cb->lm3533, reg, val); \
+ if (ret) \
+ dev_err(cb->dev, "failed to get " #_name "\n"); \
+ \
+ return ret; \
+} \
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name);
+
+lm3533_ctrlbank_set(brightness, BRIGHTNESS);
+lm3533_ctrlbank_get(brightness, BRIGHTNESS);
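For readers following the macros, the brightness setter instantiated above expands to roughly the following (hand-expanded sketch, not the compiled source):

    int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val)
    {
            u8 reg;
            int ret;

            if (val > LM3533_BRIGHTNESS_MAX)  /* 255 */
                    return -EINVAL;

            reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE);  /* 0x40 + id */
            ret = lm3533_write(cb->lm3533, reg, val);
            if (ret)
                    dev_err(cb->dev, "failed to set brightness\n");

            return ret;
    }
    EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_brightness);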
+
+/*
+ * PWM-input control mask:
+ *
+ * bit 5 - PWM-input enabled in Zone 4
+ * bit 4 - PWM-input enabled in Zone 3
+ * bit 3 - PWM-input enabled in Zone 2
+ * bit 2 - PWM-input enabled in Zone 1
+ * bit 1 - PWM-input enabled in Zone 0
+ * bit 0 - PWM-input enabled
+ */
+lm3533_ctrlbank_set(pwm, PWM);
+lm3533_ctrlbank_get(pwm, PWM);
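As an illustration of the mask layout above, a value that enables the PWM input and allows it in zones 0 through 2 (a hypothetical argument for the generated lm3533_ctrlbank_set_pwm()):

    #include <stdio.h>

    int main(void)
    {
            /* bit 0 enables PWM input, bits 1-3 allow it in zones 0-2 */
            unsigned char pwm = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3);

            printf("pwm mask = %#x\n", pwm);  /* 0xf */
            return 0;
    }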
+
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Control Bank interface");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
new file mode 100644
index 000000000000..027cc8f86132
--- /dev/null
+++ b/drivers/mfd/lpc_ich.c
@@ -0,0 +1,888 @@
+/*
+ * lpc_ich.c - LPC interface for Intel ICH
+ *
+ * LPC bridge function of the Intel ICH contains many other
+ * functional units, such as Interrupt controllers, Timers,
+ * Power Management, System Management, GPIO, RTC, and LPC
+ * Configuration Registers.
+ *
+ * This driver is derived from lpc_sch.
+ *
+ * Copyright (c) 2011 Extreme Engineering Solution, Inc.
+ * Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * This driver supports the following I/O Controller hubs:
+ * (See the Intel documentation on http://developer.intel.com.)
+ * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICH0)
+ * document number 290687-002, 298242-027: 82801BA (ICH2)
+ * document number 290733-003, 290739-013: 82801CA (ICH3-S)
+ * document number 290716-001, 290718-007: 82801CAM (ICH3-M)
+ * document number 290744-001, 290745-025: 82801DB (ICH4)
+ * document number 252337-001, 252663-008: 82801DBM (ICH4-M)
+ * document number 273599-001, 273645-002: 82801E (C-ICH)
+ * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
+ * document number 300641-004, 300884-013: 6300ESB
+ * document number 301473-002, 301474-026: 82801F (ICH6)
+ * document number 313082-001, 313075-006: 631xESB, 632xESB
+ * document number 307013-003, 307014-024: 82801G (ICH7)
+ * document number 322896-001, 322897-001: NM10
+ * document number 313056-003, 313057-017: 82801H (ICH8)
+ * document number 316972-004, 316973-012: 82801I (ICH9)
+ * document number 319973-002, 319974-002: 82801J (ICH10)
+ * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
+ * document number 320066-003, 320257-008: EP80579 (IICH)
+ * document number 324645-001, 324646-001: Cougar Point (CPT)
+ * document number TBD : Patsburg (PBG)
+ * document number TBD : DH89xxCC
+ * document number TBD : Panther Point
+ * document number TBD : Lynx Point
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define ACPIBASE 0x40
+#define ACPIBASE_GPE_OFF 0x28
+#define ACPIBASE_GPE_END 0x2f
+#define ACPIBASE_SMI_OFF 0x30
+#define ACPIBASE_SMI_END 0x33
+#define ACPIBASE_TCO_OFF 0x60
+#define ACPIBASE_TCO_END 0x7f
+#define ACPICTRL 0x44
+
+#define ACPIBASE_GCS_OFF 0x3410
+#define ACPIBASE_GCS_END 0x3414
+
+#define GPIOBASE 0x48
+#define GPIOCTRL 0x4C
+
+#define RCBABASE 0xf0
+
+#define wdt_io_res(i) wdt_res(0, i)
+#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
+#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
+
+static int lpc_ich_acpi_save = -1;
+static int lpc_ich_gpio_save = -1;
+
+static struct resource wdt_ich_res[] = {
+ /* ACPI - TCO */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* ACPI - SMI */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* GCS */
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource gpio_ich_res[] = {
+ /* GPIO */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* ACPI - GPE0 */
+ {
+ .flags = IORESOURCE_IO,
+ },
+};
+
+enum lpc_cells {
+ LPC_WDT = 0,
+ LPC_GPIO,
+};
+
+static struct mfd_cell lpc_ich_cells[] = {
+ [LPC_WDT] = {
+ .name = "iTCO_wdt",
+ .num_resources = ARRAY_SIZE(wdt_ich_res),
+ .resources = wdt_ich_res,
+ .ignore_resource_conflicts = true,
+ },
+ [LPC_GPIO] = {
+ .name = "gpio_ich",
+ .num_resources = ARRAY_SIZE(gpio_ich_res),
+ .resources = gpio_ich_res,
+ .ignore_resource_conflicts = true,
+ },
+};
+
+/* chipset related info */
+enum lpc_chipsets {
+ LPC_ICH = 0, /* ICH */
+ LPC_ICH0, /* ICH0 */
+ LPC_ICH2, /* ICH2 */
+ LPC_ICH2M, /* ICH2-M */
+ LPC_ICH3, /* ICH3-S */
+ LPC_ICH3M, /* ICH3-M */
+ LPC_ICH4, /* ICH4 */
+ LPC_ICH4M, /* ICH4-M */
+ LPC_CICH, /* C-ICH */
+ LPC_ICH5, /* ICH5 & ICH5R */
+ LPC_6300ESB, /* 6300ESB */
+ LPC_ICH6, /* ICH6 & ICH6R */
+ LPC_ICH6M, /* ICH6-M */
+ LPC_ICH6W, /* ICH6W & ICH6RW */
+ LPC_631XESB, /* 631xESB/632xESB */
+ LPC_ICH7, /* ICH7 & ICH7R */
+ LPC_ICH7DH, /* ICH7DH */
+ LPC_ICH7M, /* ICH7-M & ICH7-U */
+ LPC_ICH7MDH, /* ICH7-M DH */
+ LPC_NM10, /* NM10 */
+ LPC_ICH8, /* ICH8 & ICH8R */
+ LPC_ICH8DH, /* ICH8DH */
+ LPC_ICH8DO, /* ICH8DO */
+ LPC_ICH8M, /* ICH8M */
+ LPC_ICH8ME, /* ICH8M-E */
+ LPC_ICH9, /* ICH9 */
+ LPC_ICH9R, /* ICH9R */
+ LPC_ICH9DH, /* ICH9DH */
+ LPC_ICH9DO, /* ICH9DO */
+ LPC_ICH9M, /* ICH9M */
+ LPC_ICH9ME, /* ICH9M-E */
+ LPC_ICH10, /* ICH10 */
+ LPC_ICH10R, /* ICH10R */
+ LPC_ICH10D, /* ICH10D */
+ LPC_ICH10DO, /* ICH10DO */
+ LPC_PCH, /* PCH Desktop Full Featured */
+ LPC_PCHM, /* PCH Mobile Full Featured */
+ LPC_P55, /* P55 */
+ LPC_PM55, /* PM55 */
+ LPC_H55, /* H55 */
+ LPC_QM57, /* QM57 */
+ LPC_H57, /* H57 */
+ LPC_HM55, /* HM55 */
+ LPC_Q57, /* Q57 */
+ LPC_HM57, /* HM57 */
+ LPC_PCHMSFF, /* PCH Mobile SFF Full Featured */
+ LPC_QS57, /* QS57 */
+ LPC_3400, /* 3400 */
+ LPC_3420, /* 3420 */
+ LPC_3450, /* 3450 */
+ LPC_EP80579, /* EP80579 */
+ LPC_CPT, /* Cougar Point */
+ LPC_CPTD, /* Cougar Point Desktop */
+ LPC_CPTM, /* Cougar Point Mobile */
+ LPC_PBG, /* Patsburg */
+ LPC_DH89XXCC, /* DH89xxCC */
+ LPC_PPT, /* Panther Point */
+ LPC_LPT, /* Lynx Point */
+};
+
+struct lpc_ich_info lpc_chipset_info[] __devinitdata = {
+ [LPC_ICH] = {
+ .name = "ICH",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH0] = {
+ .name = "ICH0",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH2] = {
+ .name = "ICH2",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH2M] = {
+ .name = "ICH2-M",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH3] = {
+ .name = "ICH3-S",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH3M] = {
+ .name = "ICH3-M",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH4] = {
+ .name = "ICH4",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH4M] = {
+ .name = "ICH4-M",
+ .iTCO_version = 1,
+ },
+ [LPC_CICH] = {
+ .name = "C-ICH",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH5] = {
+ .name = "ICH5 or ICH5R",
+ .iTCO_version = 1,
+ },
+ [LPC_6300ESB] = {
+ .name = "6300ESB",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH6] = {
+ .name = "ICH6 or ICH6R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH6M] = {
+ .name = "ICH6-M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH6W] = {
+ .name = "ICH6W or ICH6RW",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_631XESB] = {
+ .name = "631xESB/632xESB",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH7] = {
+ .name = "ICH7 or ICH7R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7DH] = {
+ .name = "ICH7DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7M] = {
+ .name = "ICH7-M or ICH7-U",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7MDH] = {
+ .name = "ICH7-M DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_NM10] = {
+ .name = "NM10",
+ .iTCO_version = 2,
+ },
+ [LPC_ICH8] = {
+ .name = "ICH8 or ICH8R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8DH] = {
+ .name = "ICH8DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8DO] = {
+ .name = "ICH8DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8M] = {
+ .name = "ICH8M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8ME] = {
+ .name = "ICH8M-E",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH9] = {
+ .name = "ICH9",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9R] = {
+ .name = "ICH9R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9DH] = {
+ .name = "ICH9DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9DO] = {
+ .name = "ICH9DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9M] = {
+ .name = "ICH9M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9ME] = {
+ .name = "ICH9M-E",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH10] = {
+ .name = "ICH10",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CONS_GPIO,
+ },
+ [LPC_ICH10R] = {
+ .name = "ICH10R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CONS_GPIO,
+ },
+ [LPC_ICH10D] = {
+ .name = "ICH10D",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CORP_GPIO,
+ },
+ [LPC_ICH10DO] = {
+ .name = "ICH10DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CORP_GPIO,
+ },
+ [LPC_PCH] = {
+ .name = "PCH Desktop Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PCHM] = {
+ .name = "PCH Mobile Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_P55] = {
+ .name = "P55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PM55] = {
+ .name = "PM55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_H55] = {
+ .name = "H55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_QM57] = {
+ .name = "QM57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_H57] = {
+ .name = "H57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_HM55] = {
+ .name = "HM55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_Q57] = {
+ .name = "Q57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_HM57] = {
+ .name = "HM57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PCHMSFF] = {
+ .name = "PCH Mobile SFF Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_QS57] = {
+ .name = "QS57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3400] = {
+ .name = "3400",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3420] = {
+ .name = "3420",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3450] = {
+ .name = "3450",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_EP80579] = {
+ .name = "EP80579",
+ .iTCO_version = 2,
+ },
+ [LPC_CPT] = {
+ .name = "Cougar Point",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_CPTD] = {
+ .name = "Cougar Point Desktop",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_CPTM] = {
+ .name = "Cougar Point Mobile",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PBG] = {
+ .name = "Patsburg",
+ .iTCO_version = 2,
+ },
+ [LPC_DH89XXCC] = {
+ .name = "DH89xxCC",
+ .iTCO_version = 2,
+ },
+ [LPC_PPT] = {
+ .name = "Panther Point",
+ .iTCO_version = 2,
+ },
+ [LPC_LPT] = {
+ .name = "Lynx Point",
+ .iTCO_version = 2,
+ },
+};
+
+/*
+ * This data only exists for exporting the supported PCI ids
+ * via MODULE_DEVICE_TABLE. We do not actually register a
+ * pci_driver, because the I/O Controller Hub also has other
+ * functions that will probably be registered by other drivers.
+ */
+static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
+ { PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
+ { PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
+ { PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
+ { PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
+ { PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
+ { PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
+ { PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
+ { PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
+ { PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
+ { PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
+ { PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
+ { PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
+ { PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
+ { PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
+ { PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
+ { PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
+ { PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
+ { PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
+ { PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
+ { PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
+ { PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
+ { PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
+ { PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
+ { PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
+ { PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
+ { PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
+ { PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
+ { PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
+ { PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
+ { PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
+ { PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
+ { PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
+ { PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
+ { PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
+ { PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
+ { PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
+ { PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
+ { PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
+ { PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
+ { PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
+ { PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
+ { PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
+ { PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
+ { PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
+ { PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
+ { PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
+ { PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
+ { PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
+ { PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
+ { PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
+ { PCI_VDEVICE(INTEL, 0x1c41), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c42), LPC_CPTD},
+ { PCI_VDEVICE(INTEL, 0x1c43), LPC_CPTM},
+ { PCI_VDEVICE(INTEL, 0x1c44), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c45), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c46), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c47), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c48), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c49), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4a), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4b), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4c), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4d), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4e), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4f), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c50), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c51), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c52), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c53), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c54), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c55), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c56), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c57), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c58), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c59), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5a), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5b), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5c), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5d), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5e), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5f), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1d40), LPC_PBG},
+ { PCI_VDEVICE(INTEL, 0x1d41), LPC_PBG},
+ { PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
+ { PCI_VDEVICE(INTEL, 0x1e40), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e41), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e42), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e43), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e44), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e45), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e46), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e47), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e48), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e49), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4a), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4b), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4c), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4d), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4e), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4f), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e50), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e51), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e52), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e53), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e54), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e55), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e56), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e57), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e58), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e59), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5a), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5b), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5c), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5d), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5e), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5f), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c43), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c44), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c45), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c46), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c47), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c48), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c49), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4a), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4b), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4c), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4d), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4e), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4f), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c50), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c51), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c52), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c53), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c54), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c55), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c56), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c57), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c58), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c59), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5a), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5b), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5c), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5d), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5e), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5f), LPC_LPT},
+ { 0, }, /* End of list */
+};
+MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
+
+static void lpc_ich_restore_config_space(struct pci_dev *dev)
+{
+ if (lpc_ich_acpi_save >= 0) {
+ pci_write_config_byte(dev, ACPICTRL, lpc_ich_acpi_save);
+ lpc_ich_acpi_save = -1;
+ }
+
+ if (lpc_ich_gpio_save >= 0) {
+ pci_write_config_byte(dev, GPIOCTRL, lpc_ich_gpio_save);
+ lpc_ich_gpio_save = -1;
+ }
+}
+
+static void __devinit lpc_ich_enable_acpi_space(struct pci_dev *dev)
+{
+ u8 reg_save;
+
+ pci_read_config_byte(dev, ACPICTRL, &reg_save);
+ pci_write_config_byte(dev, ACPICTRL, reg_save | 0x10);
+ lpc_ich_acpi_save = reg_save;
+}
+
+static void __devinit lpc_ich_enable_gpio_space(struct pci_dev *dev)
+{
+ u8 reg_save;
+
+ pci_read_config_byte(dev, GPIOCTRL, &reg_save);
+ pci_write_config_byte(dev, GPIOCTRL, reg_save | 0x10);
+ lpc_ich_gpio_save = reg_save;
+}
+
+static void __devinit lpc_ich_finalize_cell(struct mfd_cell *cell,
+ const struct pci_device_id *id)
+{
+ cell->platform_data = &lpc_chipset_info[id->driver_data];
+ cell->pdata_size = sizeof(struct lpc_ich_info);
+}
+
+static int __devinit lpc_ich_init_gpio(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ u32 base_addr_cfg;
+ u32 base_addr;
+ int ret;
+ bool acpi_conflict = false;
+ struct resource *res;
+
+ /* Setup power management base register */
+ pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ lpc_ich_cells[LPC_GPIO].num_resources--;
+ goto gpe0_done;
+ }
+
+ res = &gpio_ich_res[ICH_RES_GPE0];
+ res->start = base_addr + ACPIBASE_GPE_OFF;
+ res->end = base_addr + ACPIBASE_GPE_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ /*
+ * This isn't fatal for the GPIO, but we have to make sure that
+ * the platform_device subsystem doesn't see this resource
+ * or it will register an invalid region.
+ */
+ lpc_ich_cells[LPC_GPIO].num_resources--;
+ acpi_conflict = true;
+ } else {
+ lpc_ich_enable_acpi_space(dev);
+ }
+
+gpe0_done:
+ /* Setup GPIO base register */
+ pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+ ret = -ENODEV;
+ goto gpio_done;
+ }
+
+ /* Older devices provide fewer GPIOs and have a smaller resource size. */
+ res = &gpio_ich_res[ICH_RES_GPIO];
+ res->start = base_addr;
+ switch (lpc_chipset_info[id->driver_data].gpio_version) {
+ case ICH_V5_GPIO:
+ case ICH_V10CORP_GPIO:
+ res->end = res->start + 128 - 1;
+ break;
+ default:
+ res->end = res->start + 64 - 1;
+ break;
+ }
+
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ /* this isn't necessarily fatal for the GPIO */
+ acpi_conflict = true;
+ goto gpio_done;
+ }
+ lpc_ich_enable_gpio_space(dev);
+
+ lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
+ ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+ 1, NULL, 0);
+
+gpio_done:
+ if (acpi_conflict)
+ pr_warn("Resource conflict(s) found affecting %s\n",
+ lpc_ich_cells[LPC_GPIO].name);
+ return ret;
+}
+
+static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ u32 base_addr_cfg;
+ u32 base_addr;
+ int ret;
+ bool acpi_conflict = false;
+ struct resource *res;
+
+ /* Setup power management base register */
+ pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ ret = -ENODEV;
+ goto wdt_done;
+ }
+
+ res = wdt_io_res(ICH_RES_IO_TCO);
+ res->start = base_addr + ACPIBASE_TCO_OFF;
+ res->end = base_addr + ACPIBASE_TCO_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+
+ res = wdt_io_res(ICH_RES_IO_SMI);
+ res->start = base_addr + ACPIBASE_SMI_OFF;
+ res->end = base_addr + ACPIBASE_SMI_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+ lpc_ich_enable_acpi_space(dev);
+
+ /*
+ * Get the Memory-Mapped GCS register. To get access to it
+ * we have to read RCBA from PCI Config space 0xf0 and use
+ * it as base. GCS = RCBA + ICH6_GCS(0x3410).
+ */
+ if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+ pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0xffffc000;
+ if (!(base_addr_cfg & 1)) {
+ pr_err("RCBA is disabled by hardware/BIOS, "
+ "device disabled\n");
+ ret = -ENODEV;
+ goto wdt_done;
+ }
+ res = wdt_mem_res(ICH_RES_MEM_GCS);
+ res->start = base_addr + ACPIBASE_GCS_OFF;
+ res->end = base_addr + ACPIBASE_GCS_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+ }
+
+ lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
+ ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+ 1, NULL, 0);
+
+wdt_done:
+ if (acpi_conflict)
+ pr_warn("Resource conflict(s) found affecting %s\n",
+ lpc_ich_cells[LPC_WDT].name);
+ return ret;
+}
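
The RCBA comment in lpc_ich_init_wdt() packs in a fair amount of chipset detail; here is a minimal sketch of the address computation it describes, assuming ACPIBASE_GCS_OFF/ACPIBASE_GCS_END are the constants defined earlier in this file (they are outside this hunk):

    /* Sketch: derive the memory-mapped GCS window from the LPC bridge.
     * RCBABASE (PCI config offset 0xf0) holds the Root Complex Base
     * Address; bit 0 enables decode, bits 31:14 carry the 16 KiB-aligned
     * base, and GCS then sits at RCBA + 0x3410.
     */
    u32 rcba_cfg, rcba, gcs_start, gcs_end;

    pci_read_config_dword(dev, RCBABASE, &rcba_cfg);
    if (!(rcba_cfg & 1))
            return -ENODEV;              /* RCBA decode disabled by BIOS */
    rcba      = rcba_cfg & 0xffffc000;
    gcs_start = rcba + ACPIBASE_GCS_OFF;
    gcs_end   = rcba + ACPIBASE_GCS_END;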
+
+static int __devinit lpc_ich_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ bool cell_added = false;
+
+ ret = lpc_ich_init_wdt(dev, id);
+ if (!ret)
+ cell_added = true;
+
+ ret = lpc_ich_init_gpio(dev, id);
+ if (!ret)
+ cell_added = true;
+
+ /*
+ * All we need to know is whether at least one cell registered
+ * successfully; if none did, undo the config-space changes and
+ * fail the probe.
+ */
+ if (!cell_added) {
+ lpc_ich_restore_config_space(dev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __devexit lpc_ich_remove(struct pci_dev *dev)
+{
+ mfd_remove_devices(&dev->dev);
+ lpc_ich_restore_config_space(dev);
+}
+
+static struct pci_driver lpc_ich_driver = {
+ .name = "lpc_ich",
+ .id_table = lpc_ich_ids,
+ .probe = lpc_ich_probe,
+ .remove = __devexit_p(lpc_ich_remove),
+};
+
+static int __init lpc_ich_init(void)
+{
+ return pci_register_driver(&lpc_ich_driver);
+}
+
+static void __exit lpc_ich_exit(void)
+{
+ pci_unregister_driver(&lpc_ich_driver);
+}
+
+module_init(lpc_ich_init);
+module_exit(lpc_ich_exit);
+
+MODULE_AUTHOR("Aaron Sierra <asierra@xes-inc.com>");
+MODULE_DESCRIPTION("LPC interface for Intel ICH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index abc421364a45..9f20abc5e393 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -36,6 +36,7 @@
#define GPIOBASE 0x44
#define GPIO_IO_SIZE 64
+#define GPIO_IO_SIZE_CENTERTON 128
#define WDTBASE 0x84
#define WDT_IO_SIZE 64
@@ -68,7 +69,7 @@ static struct resource wdt_sch_resource = {
static struct mfd_cell tunnelcreek_cells[] = {
{
- .name = "tunnelcreek_wdt",
+ .name = "ie6xx_wdt",
.num_resources = 1,
.resources = &wdt_sch_resource,
},
@@ -77,6 +78,7 @@ static struct mfd_cell tunnelcreek_cells[] = {
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CENTERTON_ILB) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
@@ -115,7 +117,11 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
}
gpio_sch_resource.start = base_addr;
- gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
+
+ if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+ else
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
lpc_sch_cells[i].id = id->device;
@@ -125,7 +131,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
if (ret)
goto out_dev;
- if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
+ if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC
+ || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
@@ -167,18 +174,7 @@ static struct pci_driver lpc_sch_driver = {
.remove = __devexit_p(lpc_sch_remove),
};
-static int __init lpc_sch_init(void)
-{
- return pci_register_driver(&lpc_sch_driver);
-}
-
-static void __exit lpc_sch_exit(void)
-{
- pci_unregister_driver(&lpc_sch_driver);
-}
-
-module_init(lpc_sch_init);
-module_exit(lpc_sch_exit);
+module_pci_driver(lpc_sch_driver);
MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH");
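
For comparison, the module_pci_driver() helper used above generates the same init/exit boilerplate that this hunk deletes; it is roughly equivalent to the following (the real macro goes through module_driver(), names simplified here):

    static int __init lpc_sch_driver_init(void)
    {
            return pci_register_driver(&lpc_sch_driver);
    }
    module_init(lpc_sch_driver_init);

    static void __exit lpc_sch_driver_exit(void)
    {
            pci_unregister_driver(&lpc_sch_driver);
    }
    module_exit(lpc_sch_driver_exit);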
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
new file mode 100644
index 000000000000..2b403569e0a6
--- /dev/null
+++ b/drivers/mfd/max77693-irq.c
@@ -0,0 +1,309 @@
+/*
+ * max77693-irq.c - Interrupt controller support for MAX77693
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997-irq.c
+ */
+
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+
+static const u8 max77693_mask_reg[] = {
+ [LED_INT] = MAX77693_LED_REG_FLASH_INT_MASK,
+ [TOPSYS_INT] = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
+ [CHG_INT] = MAX77693_CHG_REG_CHG_INT_MASK,
+ [MUIC_INT1] = MAX77693_MUIC_REG_INTMASK1,
+ [MUIC_INT2] = MAX77693_MUIC_REG_INTMASK2,
+ [MUIC_INT3] = MAX77693_MUIC_REG_INTMASK3,
+};
+
+static struct regmap *max77693_get_regmap(struct max77693_dev *max77693,
+ enum max77693_irq_source src)
+{
+ switch (src) {
+ case LED_INT ... CHG_INT:
+ return max77693->regmap;
+ case MUIC_INT1 ... MUIC_INT3:
+ return max77693->regmap_muic;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+struct max77693_irq_data {
+ int mask;
+ enum max77693_irq_source group;
+};
+
+#define DECLARE_IRQ(idx, _group, _mask) \
+ [(idx)] = { .group = (_group), .mask = (_mask) }
+static const struct max77693_irq_data max77693_irqs[] = {
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_OPEN, LED_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_SHORT, LED_INT, 1 << 1),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_OPEN, LED_INT, 1 << 2),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_SHORT, LED_INT, 1 << 3),
+ DECLARE_IRQ(MAX77693_LED_IRQ_MAX_FLASH, LED_INT, 1 << 4),
+
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T120C_INT, TOPSYS_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T140C_INT, TOPSYS_INT, 1 << 1),
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_LOWSYS_INT, TOPSYS_INT, 1 << 3),
+
+ DECLARE_IRQ(MAX77693_CHG_IRQ_BYP_I, CHG_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_THM_I, CHG_INT, 1 << 2),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_BAT_I, CHG_INT, 1 << 3),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_CHG_I, CHG_INT, 1 << 4),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_CHGIN_I, CHG_INT, 1 << 6),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC, MUIC_INT1, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_LOW, MUIC_INT1, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_ERR, MUIC_INT1, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC1K, MUIC_INT1, 1 << 3),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGTYP, MUIC_INT2, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGDETREUN, MUIC_INT2, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DCDTMR, MUIC_INT2, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DXOVP, MUIC_INT2, 1 << 3),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VBVOLT, MUIC_INT2, 1 << 4),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VIDRM, MUIC_INT2, 1 << 5),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_EOC, MUIC_INT3, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CGMBC, MUIC_INT3, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_OVP, MUIC_INT3, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR, MUIC_INT3, 1 << 3),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, MUIC_INT3, 1 << 4),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_BAT_DET, MUIC_INT3, 1 << 5),
+};
+
+static void max77693_irq_lock(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+
+ mutex_lock(&max77693->irqlock);
+}
+
+static void max77693_irq_sync_unlock(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ int i;
+
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ u8 mask_reg = max77693_mask_reg[i];
+ struct regmap *map = max77693_get_regmap(max77693, i);
+
+ if (mask_reg == MAX77693_REG_INVALID ||
+ IS_ERR_OR_NULL(map))
+ continue;
+ max77693->irq_masks_cache[i] = max77693->irq_masks_cur[i];
+
+ max77693_write_reg(map, max77693_mask_reg[i],
+ max77693->irq_masks_cur[i]);
+ }
+
+ mutex_unlock(&max77693->irqlock);
+}
+
+static inline const struct max77693_irq_data *
+irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
+{
+ return &max77693_irqs[irq];
+}
+
+static void max77693_irq_mask(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ const struct max77693_irq_data *irq_data =
+ irq_to_max77693_irq(max77693, data->irq);
+
+ if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+ max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+ else
+ max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+}
+
+static void max77693_irq_unmask(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ const struct max77693_irq_data *irq_data =
+ irq_to_max77693_irq(max77693, data->irq);
+
+ if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+ max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+ else
+ max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+}
+
+static struct irq_chip max77693_irq_chip = {
+ .name = "max77693",
+ .irq_bus_lock = max77693_irq_lock,
+ .irq_bus_sync_unlock = max77693_irq_sync_unlock,
+ .irq_mask = max77693_irq_mask,
+ .irq_unmask = max77693_irq_unmask,
+};
+
+#define MAX77693_IRQSRC_CHG (1 << 0)
+#define MAX77693_IRQSRC_TOP (1 << 1)
+#define MAX77693_IRQSRC_FLASH (1 << 2)
+#define MAX77693_IRQSRC_MUIC (1 << 3)
+static irqreturn_t max77693_irq_thread(int irq, void *data)
+{
+ struct max77693_dev *max77693 = data;
+ u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
+ u8 irq_src;
+ int ret;
+ int i, cur_irq;
+
+ ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
+ &irq_src);
+ if (ret < 0) {
+ dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (irq_src & MAX77693_IRQSRC_CHG)
+ /* CHG_INT */
+ ret = max77693_read_reg(max77693->regmap, MAX77693_CHG_REG_CHG_INT,
+ &irq_reg[CHG_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_TOP)
+ /* TOPSYS_INT */
+ ret = max77693_read_reg(max77693->regmap,
+ MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_FLASH)
+ /* LED_INT */
+ ret = max77693_read_reg(max77693->regmap,
+ MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_MUIC)
+ /* MUIC INT1 ~ INT3 */
+ max77693_bulk_read(max77693->regmap, MAX77693_MUIC_REG_INT1,
+ MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
+
+ /* Apply masking */
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ if (i >= MUIC_INT1 && i <= MUIC_INT3)
+ irq_reg[i] &= max77693->irq_masks_cur[i];
+ else
+ irq_reg[i] &= ~max77693->irq_masks_cur[i];
+ }
+
+ /* Report */
+ for (i = 0; i < MAX77693_IRQ_NR; i++) {
+ if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
+ cur_irq = irq_find_mapping(max77693->irq_domain, i);
+ if (cur_irq)
+ handle_nested_irq(cur_irq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+int max77693_irq_resume(struct max77693_dev *max77693)
+{
+ if (max77693->irq)
+ max77693_irq_thread(0, max77693);
+
+ return 0;
+}
+
+static int max77693_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct max77693_dev *max77693 = d->host_data;
+
+ irq_set_chip_data(irq, max77693);
+ irq_set_chip_and_handler(irq, &max77693_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ return 0;
+}
+
+static struct irq_domain_ops max77693_irq_domain_ops = {
+ .map = max77693_irq_domain_map,
+};
+
+int max77693_irq_init(struct max77693_dev *max77693)
+{
+ struct irq_domain *domain;
+ int i;
+ int ret;
+
+ mutex_init(&max77693->irqlock);
+
+ /* Mask individual interrupt sources */
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ struct regmap *map;
+ /* MUIC IRQs: mask bit 0 = masked, 1 = enabled */
+ /* Other IRQs: mask bit 1 = masked, 0 = enabled */
+ if (i >= MUIC_INT1 && i <= MUIC_INT3) {
+ max77693->irq_masks_cur[i] = 0x00;
+ max77693->irq_masks_cache[i] = 0x00;
+ } else {
+ max77693->irq_masks_cur[i] = 0xff;
+ max77693->irq_masks_cache[i] = 0xff;
+ }
+ map = max77693_get_regmap(max77693, i);
+
+ if (IS_ERR_OR_NULL(map))
+ continue;
+ if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
+ continue;
+ if (i >= MUIC_INT1 && i <= MUIC_INT3)
+ max77693_write_reg(map, max77693_mask_reg[i], 0x00);
+ else
+ max77693_write_reg(map, max77693_mask_reg[i], 0xff);
+ }
+
+ domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR,
+ &max77693_irq_domain_ops, max77693);
+ if (!domain) {
+ dev_err(max77693->dev, "could not create irq domain\n");
+ return -ENODEV;
+ }
+ max77693->irq_domain = domain;
+
+ ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max77693-irq", max77693);
+
+ if (ret)
+ dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
+ max77693->irq, ret);
+
+ return ret;
+}
+
+void max77693_irq_exit(struct max77693_dev *max77693)
+{
+ if (max77693->irq)
+ free_irq(max77693->irq, max77693);
+}
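
The MUIC interrupt banks use the opposite mask polarity from the rest of the chip, which is why max77693_irq_mask()/unmask() and the status filtering in max77693_irq_thread() special-case MUIC_INT1..MUIC_INT3. A small worked example of the filtering step, with made-up values:

    u8 status = 0x0b;        /* example raw interrupt status            */
    u8 cur    = 0x0f;        /* example cached mask value for the group */

    u8 muic_pending  = status & cur;    /* MUIC bank:  1 = enabled -> 0x0b */
    u8 other_pending = status & ~cur;   /* other bank: 1 = masked  -> 0x00 */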
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
new file mode 100644
index 000000000000..e9e4278722f3
--- /dev/null
+++ b/drivers/mfd/max77693.c
@@ -0,0 +1,249 @@
+/*
+ * max77693.c - mfd core driver for the MAX 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_PMIC (0xCC >> 1) /* Charger, Flash LED */
+#define I2C_ADDR_MUIC (0x4A >> 1)
+#define I2C_ADDR_HAPTIC (0x90 >> 1)
+
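The three slave addresses above are the datasheet's 8-bit write addresses shifted down to the 7-bit form the Linux I2C core expects:

    /* 8-bit datasheet address  ->  7-bit I2C address          */
    /* 0xCC >> 1 == 0x66            PMIC (charger, flash LED)  */
    /* 0x4A >> 1 == 0x25            MUIC                       */
    /* 0x90 >> 1 == 0x48            haptic                     */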
+static struct mfd_cell max77693_devs[] = {
+ { .name = "max77693-pmic", },
+ { .name = "max77693-charger", },
+ { .name = "max77693-flash", },
+ { .name = "max77693-muic", },
+ { .name = "max77693-haptic", },
+};
+
+int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(map, reg, &val);
+ *dest = val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_read_reg);
+
+int max77693_bulk_read(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+ int ret;
+
+ ret = regmap_bulk_read(map, reg, buf, count);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_read);
+
+int max77693_write_reg(struct regmap *map, u8 reg, u8 value)
+{
+ int ret;
+
+ ret = regmap_write(map, reg, value);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_write_reg);
+
+int max77693_bulk_write(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+ int ret;
+
+ ret = regmap_bulk_write(map, reg, buf, count);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_write);
+
+int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask)
+{
+ int ret;
+
+ ret = regmap_update_bits(map, reg, mask, val);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_update_reg);
+
+static const struct regmap_config max77693_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77693_PMIC_REG_END,
+};
+
+static int max77693_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max77693_dev *max77693;
+ struct max77693_platform_data *pdata = i2c->dev.platform_data;
+ u8 reg_data;
+ int ret = 0;
+
+ max77693 = devm_kzalloc(&i2c->dev,
+ sizeof(struct max77693_dev), GFP_KERNEL);
+ if (max77693 == NULL)
+ return -ENOMEM;
+
+ max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+ if (IS_ERR(max77693->regmap)) {
+ ret = PTR_ERR(max77693->regmap);
+ dev_err(&i2c->dev, "failed to allocate register map: %d\n",
+ ret);
+ goto err_regmap;
+ }
+
+ i2c_set_clientdata(i2c, max77693);
+ max77693->dev = &i2c->dev;
+ max77693->i2c = i2c;
+ max77693->irq = i2c->irq;
+ max77693->type = id->driver_data;
+
+ if (!pdata) {
+ ret = -EINVAL;
+ goto err_regmap;
+ }
+
+ max77693->wakeup = pdata->wakeup;
+
+ mutex_init(&max77693->iolock);
+
+ if (max77693_read_reg(max77693->regmap,
+ MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+ dev_err(max77693->dev, "device not found on this channel\n");
+ ret = -ENODEV;
+ goto err_regmap;
+ } else
+ dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
+
+ max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+ i2c_set_clientdata(max77693->muic, max77693);
+
+ max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+ i2c_set_clientdata(max77693->haptic, max77693);
+
+ ret = max77693_irq_init(max77693);
+ if (ret < 0)
+ goto err_mfd;
+
+ pm_runtime_set_active(max77693->dev);
+
+ ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
+ ARRAY_SIZE(max77693_devs), NULL, 0);
+ if (ret < 0)
+ goto err_mfd;
+
+ device_init_wakeup(max77693->dev, pdata->wakeup);
+
+ return ret;
+
+err_mfd:
+ i2c_unregister_device(max77693->muic);
+ i2c_unregister_device(max77693->haptic);
+err_regmap:
+
+ return ret;
+}
+
+static int max77693_i2c_remove(struct i2c_client *i2c)
+{
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max77693->dev);
+ i2c_unregister_device(max77693->muic);
+ i2c_unregister_device(max77693->haptic);
+
+ return 0;
+}
+
+static const struct i2c_device_id max77693_i2c_id[] = {
+ { "max77693", TYPE_MAX77693 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max77693_i2c_id);
+
+static int max77693_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ irq_set_irq_wake(max77693->irq, 1);
+ return 0;
+}
+
+static int max77693_resume(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ irq_set_irq_wake(max77693->irq, 0);
+ return max77693_irq_resume(max77693);
+}
+
+static const struct dev_pm_ops max77693_pm = {
+ .suspend = max77693_suspend,
+ .resume = max77693_resume,
+};
+
+static struct i2c_driver max77693_i2c_driver = {
+ .driver = {
+ .name = "max77693",
+ .owner = THIS_MODULE,
+ .pm = &max77693_pm,
+ },
+ .probe = max77693_i2c_probe,
+ .remove = max77693_i2c_remove,
+ .id_table = max77693_i2c_id,
+};
+
+static int __init max77693_i2c_init(void)
+{
+ return i2c_add_driver(&max77693_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max77693_i2c_init);
+
+static void __exit max77693_i2c_exit(void)
+{
+ i2c_del_driver(&max77693_i2c_driver);
+}
+module_exit(max77693_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 77693 multi-function core driver");
+MODULE_AUTHOR("SangYoung, Son <hello.son@samsung.com>");
+MODULE_LICENSE("GPL");
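
Registering at subsys_initcall() time only matters when the driver is built in, where it runs before the ordinary module_init() level; built as a module the two behave the same. A sketch of the ordering as laid out in include/linux/init.h:

    /* Built-in initcall levels (sketch):
     *   subsys_initcall()  -> level 4   <- this driver registers here
     *   device_initcall()  -> level 6   <- plain module_init() drivers
     * so the PMIC core and its MFD children exist before the regulator,
     * charger and MUIC consumers start probing.
     */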
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 9fd4f63c45cc..f0ea3b8b3e4a 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -15,24 +15,13 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
-#include <linux/spi/spi.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mc13xxx.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-struct mc13xxx {
- struct spi_device *spidev;
- struct mutex lock;
- int irq;
- int flags;
-
- irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
- void *irqdata[MC13XXX_NUM_IRQ];
-
- int adcflags;
-};
+#include "mc13xxx.h"
#define MC13XXX_IRQSTAT0 0
#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0)
@@ -139,34 +128,29 @@ struct mc13xxx {
#define MC13XXX_ADC2 45
-#define MC13XXX_NUMREGS 0x3f
-
void mc13xxx_lock(struct mc13xxx *mc13xxx)
{
if (!mutex_trylock(&mc13xxx->lock)) {
- dev_dbg(&mc13xxx->spidev->dev, "wait for %s from %pf\n",
+ dev_dbg(mc13xxx->dev, "wait for %s from %pf\n",
__func__, __builtin_return_address(0));
mutex_lock(&mc13xxx->lock);
}
- dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+ dev_dbg(mc13xxx->dev, "%s from %pf\n",
__func__, __builtin_return_address(0));
}
EXPORT_SYMBOL(mc13xxx_lock);
void mc13xxx_unlock(struct mc13xxx *mc13xxx)
{
- dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+ dev_dbg(mc13xxx->dev, "%s from %pf\n",
__func__, __builtin_return_address(0));
mutex_unlock(&mc13xxx->lock);
}
EXPORT_SYMBOL(mc13xxx_unlock);
-#define MC13XXX_REGOFFSET_SHIFT 25
int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
{
- struct spi_transfer t;
- struct spi_message m;
int ret;
BUG_ON(!mutex_is_locked(&mc13xxx->lock));
@@ -174,84 +158,35 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
if (offset > MC13XXX_NUMREGS)
return -EINVAL;
- *val = offset << MC13XXX_REGOFFSET_SHIFT;
-
- memset(&t, 0, sizeof(t));
-
- t.tx_buf = val;
- t.rx_buf = val;
- t.len = sizeof(u32);
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(mc13xxx->spidev, &m);
-
- /* error in message.status implies error return from spi_sync */
- BUG_ON(!ret && m.status);
+ ret = regmap_read(mc13xxx->regmap, offset, val);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
- if (ret)
- return ret;
-
- *val &= 0xffffff;
-
- dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(mc13xxx_reg_read);
int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
{
- u32 buf;
- struct spi_transfer t;
- struct spi_message m;
- int ret;
-
BUG_ON(!mutex_is_locked(&mc13xxx->lock));
- dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
if (offset > MC13XXX_NUMREGS || val > 0xffffff)
return -EINVAL;
- buf = 1 << 31 | offset << MC13XXX_REGOFFSET_SHIFT | val;
-
- memset(&t, 0, sizeof(t));
-
- t.tx_buf = &buf;
- t.rx_buf = &buf;
- t.len = sizeof(u32);
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(mc13xxx->spidev, &m);
-
- BUG_ON(!ret && m.status);
-
- if (ret)
- return ret;
-
- return 0;
+ return regmap_write(mc13xxx->regmap, offset, val);
}
EXPORT_SYMBOL(mc13xxx_reg_write);
int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
u32 mask, u32 val)
{
- int ret;
- u32 valread;
-
+ BUG_ON(!mutex_is_locked(&mc13xxx->lock));
BUG_ON(val & ~mask);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
+ offset, val, mask);
- ret = mc13xxx_reg_read(mc13xxx, offset, &valread);
- if (ret)
- return ret;
-
- valread = (valread & ~mask) | val;
-
- return mc13xxx_reg_write(mc13xxx, offset, valread);
+ return regmap_update_bits(mc13xxx->regmap, offset, mask, val);
}
EXPORT_SYMBOL(mc13xxx_reg_rmw);
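
The open-coded read-modify-write that used to live here is now a single regmap_update_bits() call; internally it performs the same sequence, with locking still the caller's responsibility (hence the BUG_ON above). A simplified sketch of the equivalence:

    unsigned int old, new;

    regmap_read(mc13xxx->regmap, offset, &old);
    new = (old & ~mask) | (val & mask);
    regmap_write(mc13xxx->regmap, offset, new);
    /* the real helper may also skip the write when nothing changed */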
@@ -439,7 +374,7 @@ static int mc13xxx_irq_handle(struct mc13xxx *mc13xxx,
if (handled == IRQ_HANDLED)
num_handled++;
} else {
- dev_err(&mc13xxx->spidev->dev,
+ dev_err(mc13xxx->dev,
"BUG: irq %u but no handler\n",
baseirq + irq);
@@ -475,25 +410,23 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
return IRQ_RETVAL(handled);
}
-enum mc13xxx_id {
- MC13XXX_ID_MC13783,
- MC13XXX_ID_MC13892,
- MC13XXX_ID_INVALID,
-};
-
static const char *mc13xxx_chipname[] = {
[MC13XXX_ID_MC13783] = "mc13783",
[MC13XXX_ID_MC13892] = "mc13892",
};
#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
+static int mc13xxx_identify(struct mc13xxx *mc13xxx)
{
u32 icid;
u32 revision;
- const char *name;
int ret;
+ /*
+ * Get the generation ID from register 46, as apparently some older
+ * IC revisions only have this info at this location. Newer ICs seem to
+ * have both.
+ */
ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
if (ret)
return ret;
@@ -502,26 +435,23 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
switch (icid) {
case 2:
- *id = MC13XXX_ID_MC13783;
- name = "mc13783";
+ mc13xxx->ictype = MC13XXX_ID_MC13783;
break;
case 7:
- *id = MC13XXX_ID_MC13892;
- name = "mc13892";
+ mc13xxx->ictype = MC13XXX_ID_MC13892;
break;
default:
- *id = MC13XXX_ID_INVALID;
+ mc13xxx->ictype = MC13XXX_ID_INVALID;
break;
}
- if (*id == MC13XXX_ID_MC13783 || *id == MC13XXX_ID_MC13892) {
+ if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
+ mc13xxx->ictype == MC13XXX_ID_MC13892) {
ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
- if (ret)
- return ret;
- dev_info(&mc13xxx->spidev->dev, "%s: rev: %d.%d, "
+ dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
"fin: %d, fab: %d, icid: %d/%d\n",
- mc13xxx_chipname[*id],
+ mc13xxx_chipname[mc13xxx->ictype],
maskval(revision, MC13XXX_REVISION_REVFULL),
maskval(revision, MC13XXX_REVISION_REVMETAL),
maskval(revision, MC13XXX_REVISION_FIN),
@@ -530,26 +460,12 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
maskval(revision, MC13XXX_REVISION_ICIDCODE));
}
- if (*id != MC13XXX_ID_INVALID) {
- const struct spi_device_id *devid =
- spi_get_device_id(mc13xxx->spidev);
- if (!devid || devid->driver_data != *id)
- dev_warn(&mc13xxx->spidev->dev, "device id doesn't "
- "match auto detection!\n");
- }
-
- return 0;
+ return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
}
static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
{
- const struct spi_device_id *devid =
- spi_get_device_id(mc13xxx->spidev);
-
- if (!devid)
- return NULL;
-
- return mc13xxx_chipname[devid->driver_data];
+ return mc13xxx_chipname[mc13xxx->ictype];
}
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -592,7 +508,7 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
};
init_completion(&adcdone_data.done);
- dev_dbg(&mc13xxx->spidev->dev, "%s\n", __func__);
+ dev_dbg(mc13xxx->dev, "%s\n", __func__);
mc13xxx_lock(mc13xxx);
@@ -637,7 +553,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
adc1 |= ato << MC13783_ADC1_ATO_SHIFT;
if (atox)
adc1 |= MC13783_ADC1_ATOX;
- dev_dbg(&mc13xxx->spidev->dev, "%s: request irq\n", __func__);
+
+ dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
mc13xxx_handler_adcdone, __func__, &adcdone_data);
mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE);
@@ -695,7 +612,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
if (!cell.name)
return -ENOMEM;
- return mfd_add_devices(&mc13xxx->spidev->dev, -1, &cell, 1, NULL, 0);
+ return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0);
}
static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
@@ -706,7 +623,7 @@ static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
#ifdef CONFIG_OF
static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
{
- struct device_node *np = mc13xxx->spidev->dev.of_node;
+ struct device_node *np = mc13xxx->dev->of_node;
if (!np)
return -ENODEV;
@@ -732,55 +649,15 @@ static inline int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
}
#endif
-static const struct spi_device_id mc13xxx_device_id[] = {
- {
- .name = "mc13783",
- .driver_data = MC13XXX_ID_MC13783,
- }, {
- .name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
-
-static const struct of_device_id mc13xxx_dt_ids[] = {
- { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
- { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
-
-static int mc13xxx_probe(struct spi_device *spi)
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+ struct mc13xxx_platform_data *pdata, int irq)
{
- const struct of_device_id *of_id;
- struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
- struct mc13xxx *mc13xxx;
- struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
- enum mc13xxx_id id;
int ret;
- of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
- if (of_id)
- sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
-
- mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
- if (!mc13xxx)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, mc13xxx);
- spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
- spi->bits_per_word = 32;
- spi_setup(spi);
-
- mc13xxx->spidev = spi;
-
- mutex_init(&mc13xxx->lock);
mc13xxx_lock(mc13xxx);
- ret = mc13xxx_identify(mc13xxx, &id);
- if (ret || id == MC13XXX_ID_INVALID)
+ ret = mc13xxx_identify(mc13xxx);
+ if (ret)
goto err_revision;
/* mask all irqs */
@@ -792,18 +669,19 @@ static int mc13xxx_probe(struct spi_device *spi)
if (ret)
goto err_mask;
- ret = request_threaded_irq(spi->irq, NULL, mc13xxx_irq_thread,
+ ret = request_threaded_irq(irq, NULL, mc13xxx_irq_thread,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
if (ret) {
err_mask:
err_revision:
mc13xxx_unlock(mc13xxx);
- dev_set_drvdata(&spi->dev, NULL);
kfree(mc13xxx);
return ret;
}
+ mc13xxx->irq = irq;
+
mc13xxx_unlock(mc13xxx);
if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
@@ -813,7 +691,8 @@ err_revision:
mc13xxx_add_subdevice(mc13xxx, "%s-adc");
if (mc13xxx->flags & MC13XXX_USE_CODEC)
- mc13xxx_add_subdevice(mc13xxx, "%s-codec");
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-codec",
+ pdata->codec, sizeof(*pdata->codec));
if (mc13xxx->flags & MC13XXX_USE_RTC)
mc13xxx_add_subdevice(mc13xxx, "%s-rtc");
@@ -837,42 +716,19 @@ err_revision:
return 0;
}
+EXPORT_SYMBOL_GPL(mc13xxx_common_init);
-static int __devexit mc13xxx_remove(struct spi_device *spi)
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx)
{
- struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+ free_irq(mc13xxx->irq, mc13xxx);
- free_irq(mc13xxx->spidev->irq, mc13xxx);
+ mfd_remove_devices(mc13xxx->dev);
- mfd_remove_devices(&spi->dev);
+ regmap_exit(mc13xxx->regmap);
kfree(mc13xxx);
-
- return 0;
-}
-
-static struct spi_driver mc13xxx_driver = {
- .id_table = mc13xxx_device_id,
- .driver = {
- .name = "mc13xxx",
- .owner = THIS_MODULE,
- .of_match_table = mc13xxx_dt_ids,
- },
- .probe = mc13xxx_probe,
- .remove = __devexit_p(mc13xxx_remove),
-};
-
-static int __init mc13xxx_init(void)
-{
- return spi_register_driver(&mc13xxx_driver);
-}
-subsys_initcall(mc13xxx_init);
-
-static void __exit mc13xxx_exit(void)
-{
- spi_unregister_driver(&mc13xxx_driver);
}
-module_exit(mc13xxx_exit);
+EXPORT_SYMBOL_GPL(mc13xxx_common_cleanup);
MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
new file mode 100644
index 000000000000..d22501dad6a6
--- /dev/null
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2009-2010 Creative Product Design
+ * Marc Reilly marc@cpdesign.com.au
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include "mc13xxx.h"
+
+static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
+ {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+ {
+ .compatible = "fsl,mc13892",
+ .data = (void *) &mc13xxx_i2c_device_id[0],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
+static struct regmap_config mc13xxx_regmap_i2c_config = {
+ .reg_bits = 8,
+ .val_bits = 24,
+
+ .max_register = MC13XXX_NUMREGS,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct of_device_id *of_id;
+ struct i2c_driver *idrv = to_i2c_driver(client->dev.driver);
+ struct mc13xxx *mc13xxx;
+ struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
+ int ret;
+
+ of_id = of_match_device(mc13xxx_dt_ids, &client->dev);
+ if (of_id)
+ idrv->id_table = (const struct i2c_device_id *) of_id->data;
+
+ mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ if (!mc13xxx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&client->dev, mc13xxx);
+
+ mc13xxx->dev = &client->dev;
+ mutex_init(&mc13xxx->lock);
+
+ mc13xxx->regmap = regmap_init_i2c(client, &mc13xxx_regmap_i2c_config);
+ if (IS_ERR(mc13xxx->regmap)) {
+ ret = PTR_ERR(mc13xxx->regmap);
+ dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+ ret);
+ dev_set_drvdata(&client->dev, NULL);
+ kfree(mc13xxx);
+ return ret;
+ }
+
+ ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+
+ if (ret == 0 && (id->driver_data != mc13xxx->ictype))
+ dev_warn(mc13xxx->dev,
+ "device id doesn't match auto detection!\n");
+
+ return ret;
+}
+
+static int __devexit mc13xxx_i2c_remove(struct i2c_client *client)
+{
+ struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);
+
+ mc13xxx_common_cleanup(mc13xxx);
+
+ return 0;
+}
+
+static struct i2c_driver mc13xxx_i2c_driver = {
+ .id_table = mc13xxx_i2c_device_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mc13xxx",
+ .of_match_table = mc13xxx_dt_ids,
+ },
+ .probe = mc13xxx_i2c_probe,
+ .remove = __devexit_p(mc13xxx_i2c_remove),
+};
+
+static int __init mc13xxx_i2c_init(void)
+{
+ return i2c_add_driver(&mc13xxx_i2c_driver);
+}
+subsys_initcall(mc13xxx_i2c_init);
+
+static void __exit mc13xxx_i2c_exit(void)
+{
+ i2c_del_driver(&mc13xxx_i2c_driver);
+}
+module_exit(mc13xxx_i2c_exit);
+
+MODULE_DESCRIPTION("i2c driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Marc Reilly <marc@cpdesign.com.au");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
new file mode 100644
index 000000000000..03df422feb76
--- /dev/null
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * loosely based on an earlier driver that has
+ * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+
+#include "mc13xxx.h"
+
+static const struct spi_device_id mc13xxx_device_id[] = {
+ {
+ .name = "mc13783",
+ .driver_data = MC13XXX_ID_MC13783,
+ }, {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+ { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
+ { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
+static struct regmap_config mc13xxx_regmap_spi_config = {
+ .reg_bits = 7,
+ .pad_bits = 1,
+ .val_bits = 24,
+ .write_flag_mask = 0x80,
+
+ .max_register = MC13XXX_NUMREGS,
+
+ .cache_type = REGCACHE_NONE,
+ .use_single_rw = 1,
+};
+
+static int mc13xxx_spi_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ unsigned char w[4] = { *((unsigned char *) reg), 0, 0, 0};
+ unsigned char r[4];
+ unsigned char *p = val;
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_transfer t = {
+ .tx_buf = w,
+ .rx_buf = r,
+ .len = 4,
+ };
+
+ struct spi_message m;
+ int ret;
+
+ if (val_size != 3 || reg_size != 1)
+ return -ENOTSUPP;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+
+ memcpy(p, &r[1], 3);
+
+ return ret;
+}
+
+static int mc13xxx_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ if (count != 4)
+ return -ENOTSUPP;
+
+ return spi_write(spi, data, count);
+}
+
+/*
+ * We cannot use the generic regmap-spi bus implementation here.
+ * The MC13783 chip will get corrupted if the CS signal is deasserted,
+ * and on the i.MX31 SoC (the target SoC for the MC13783 PMIC) the SPI
+ * controller has the following erratum (DSPhl22960):
+ * "The CSPI negates SS when the FIFO becomes empty with
+ * SSCTL= 0. Software cannot guarantee that the FIFO will not
+ * drain because of higher priority interrupts and the
+ * non-realtime characteristics of the operating system. As a
+ * result, the SS will negate before all of the data has been
+ * transferred to/from the peripheral."
+ * We work around this by accessing the SPI controller with a
+ * single transfer.
+ */
+
+static struct regmap_bus regmap_mc13xxx_bus = {
+ .write = mc13xxx_spi_write,
+ .read = mc13xxx_spi_read,
+};
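
The custom bus above keeps every register access inside one 4-byte spi_sync() so CS stays asserted for the whole frame (the erratum quoted in the comment). The frame layout itself matches the open-coded path removed from mc13xxx-core.c earlier in this patch; a sketch:

    /* 32-bit MC13xxx frame (MC13XXX_REGOFFSET_SHIFT == 25 in the old code):
     *
     *   write frame = (1u << 31) | (reg << 25) | (val & 0xffffff);
     *   read frame  =              (reg << 25);   // value returned
     *                                             // in bits 23..0
     *
     * i.e. one register byte with the write flag in its MSB followed by a
     * 24-bit value, roughly what reg_bits = 7, pad_bits = 1, val_bits = 24
     * and write_flag_mask = 0x80 express in regmap terms.
     */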
+
+static int mc13xxx_spi_probe(struct spi_device *spi)
+{
+ const struct of_device_id *of_id;
+ struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
+ struct mc13xxx *mc13xxx;
+ struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
+ int ret;
+
+ of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
+ if (of_id)
+ sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
+
+ mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ if (!mc13xxx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, mc13xxx);
+ spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
+
+ mc13xxx->dev = &spi->dev;
+ mutex_init(&mc13xxx->lock);
+
+ mc13xxx->regmap = regmap_init(&spi->dev, &regmap_mc13xxx_bus, &spi->dev,
+ &mc13xxx_regmap_spi_config);
+
+ if (IS_ERR(mc13xxx->regmap)) {
+ ret = PTR_ERR(mc13xxx->regmap);
+ dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+ ret);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(mc13xxx);
+ return ret;
+ }
+
+ ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+
+ if (ret) {
+ dev_set_drvdata(&spi->dev, NULL);
+ } else {
+ const struct spi_device_id *devid =
+ spi_get_device_id(spi);
+ if (!devid || devid->driver_data != mc13xxx->ictype)
+ dev_warn(mc13xxx->dev,
+ "device id doesn't match auto detection!\n");
+ }
+
+ return ret;
+}
+
+static int __devexit mc13xxx_spi_remove(struct spi_device *spi)
+{
+ struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+
+ mc13xxx_common_cleanup(mc13xxx);
+
+ return 0;
+}
+
+static struct spi_driver mc13xxx_spi_driver = {
+ .id_table = mc13xxx_device_id,
+ .driver = {
+ .name = "mc13xxx",
+ .owner = THIS_MODULE,
+ .of_match_table = mc13xxx_dt_ids,
+ },
+ .probe = mc13xxx_spi_probe,
+ .remove = __devexit_p(mc13xxx_spi_remove),
+};
+
+static int __init mc13xxx_init(void)
+{
+ return spi_register_driver(&mc13xxx_spi_driver);
+}
+subsys_initcall(mc13xxx_init);
+
+static void __exit mc13xxx_exit(void)
+{
+ spi_unregister_driver(&mc13xxx_spi_driver);
+}
+module_exit(mc13xxx_exit);
+
+MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
new file mode 100644
index 000000000000..bbba06feea06
--- /dev/null
+++ b/drivers/mfd/mc13xxx.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Creative Product Design
+ * Marc Reilly <marc@cpdesign.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#ifndef __DRIVERS_MFD_MC13XXX_H
+#define __DRIVERS_MFD_MC13XXX_H
+
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mc13xxx.h>
+
+enum mc13xxx_id {
+ MC13XXX_ID_MC13783,
+ MC13XXX_ID_MC13892,
+ MC13XXX_ID_INVALID,
+};
+
+#define MC13XXX_NUMREGS 0x3f
+
+struct mc13xxx {
+ struct regmap *regmap;
+
+ struct device *dev;
+ enum mc13xxx_id ictype;
+
+ struct mutex lock;
+ int irq;
+ int flags;
+
+ irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
+ void *irqdata[MC13XXX_NUM_IRQ];
+
+ int adcflags;
+};
+
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+ struct mc13xxx_platform_data *pdata, int irq);
+
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx);
+
+#endif /* __DRIVERS_MFD_MC13XXX_H */
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 7e96bb229724..41088ecbb2a9 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,6 +25,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
+#include <linux/gpio.h>
#include <plat/cpu.h>
#include <plat/usb.h>
#include <linux/pm_runtime.h>
@@ -500,8 +501,21 @@ static void omap_usbhs_init(struct device *dev)
dev_dbg(dev, "starting TI HSUSB Controller\n");
pm_runtime_get_sync(dev);
- spin_lock_irqsave(&omap->lock, flags);
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+ GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+ GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+ /* Hold the PHY in RESET long enough for DIR to go high */
+ udelay(10);
+ }
+
+ spin_lock_irqsave(&omap->lock, flags);
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
@@ -581,9 +595,39 @@ static void omap_usbhs_init(struct device *dev)
}
spin_unlock_irqrestore(&omap->lock, flags);
+
+ if (pdata->ehci_data->phy_reset) {
+ /*
+ * Hold the PHY in RESET long enough for it to settle
+ * and become ready
+ */
+ udelay(10);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_set_value_cansleep
+ (pdata->ehci_data->reset_gpio_port[0], 1);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_set_value_cansleep
+ (pdata->ehci_data->reset_gpio_port[1], 1);
+ }
+
pm_runtime_put_sync(dev);
}
+static void omap_usbhs_deinit(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ }
+}
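
The reset handling added to omap_usbhs_init()/omap_usbhs_deinit() is the usual request-low, delay, drive-high pattern for an active-low PHY reset line; a minimal stand-alone sketch of that pattern using the same legacy gpio calls (the GPIO number and 10 us hold are illustrative only):

    int reset_gpio = 57;    /* hypothetical board-specific GPIO */

    if (gpio_is_valid(reset_gpio)) {
            gpio_request_one(reset_gpio, GPIOF_OUT_INIT_LOW, "PHY reset");
            udelay(10);                              /* hold in reset */
            gpio_set_value_cansleep(reset_gpio, 1);  /* release       */
            /* ...and gpio_free(reset_gpio) on teardown */
    }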
+
/**
* usbhs_omap_probe - initialize TI-based HCDs
@@ -767,6 +811,7 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
goto end_probe;
err_alloc:
+ omap_usbhs_deinit(&pdev->dev);
iounmap(omap->tll_base);
err_tll:
@@ -818,6 +863,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
{
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+ omap_usbhs_deinit(&pdev->dev);
iounmap(omap->tll_base);
iounmap(omap->uhh_base);
clk_put(omap->init_60m_fclk);
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 00c0aba7eba0..c4a69f193a1d 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -356,7 +356,14 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
}
}
- ret = regmap_add_irq_chip(palmas->regmap[1], palmas->irq,
+ /* Change IRQ into clear on read mode for efficiency */
+ slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
+ addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
+ reg = PALMAS_INT_CTRL_INT_CLEAR;
+
+ regmap_write(palmas->regmap[slave], addr, reg);
+
+ ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
IRQF_ONESHOT | IRQF_TRIGGER_LOW, -1, &palmas_irq_chip,
&palmas->irq_data);
if (ret < 0)
@@ -441,6 +448,9 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
goto err;
}
+ children[PALMAS_PMIC_ID].platform_data = pdata->pmic_pdata;
+ children[PALMAS_PMIC_ID].pdata_size = sizeof(*pdata->pmic_pdata);
+
ret = mfd_add_devices(palmas->dev, -1,
children, ARRAY_SIZE(palmas_children),
NULL, regmap_irq_chip_get_base(palmas->irq_data));
@@ -472,6 +482,7 @@ static const struct i2c_device_id palmas_i2c_id[] = {
{ "twl6035", },
{ "twl6037", },
{ "tps65913", },
+ { /* end */ }
};
MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 189c2f07b83f..29c122bf28ea 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -204,7 +204,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return -ENOENT;
}
- pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
+ pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
if (!pcf)
return -ENOMEM;
@@ -212,12 +212,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
mutex_init(&pcf->lock);
- pcf->regmap = regmap_init_i2c(client, &pcf50633_regmap_config);
+ pcf->regmap = devm_regmap_init_i2c(client, &pcf50633_regmap_config);
if (IS_ERR(pcf->regmap)) {
ret = PTR_ERR(pcf->regmap);
- dev_err(pcf->dev, "Failed to allocate register map: %d\n",
- ret);
- goto err_free;
+ dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
}
i2c_set_clientdata(client, pcf);
@@ -228,7 +227,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
if (version < 0 || variant < 0) {
dev_err(pcf->dev, "Unable to probe pcf50633\n");
ret = -ENODEV;
- goto err_regmap;
+ return ret;
}
dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -237,16 +236,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pcf50633_irq_init(pcf, client->irq);
/* Create sub devices */
- pcf50633_client_dev_register(pcf, "pcf50633-input",
- &pcf->input_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-rtc",
- &pcf->rtc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-mbc",
- &pcf->mbc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-adc",
- &pcf->adc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-backlight",
- &pcf->bl_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev);
for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
@@ -274,13 +268,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pdata->probe_done(pcf);
return 0;
-
-err_regmap:
- regmap_exit(pcf->regmap);
-err_free:
- kfree(pcf);
-
- return ret;
}
static int __devexit pcf50633_remove(struct i2c_client *client)
@@ -300,9 +287,6 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
- regmap_exit(pcf->regmap);
- kfree(pcf);
-
return 0;
}
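
The pcf50633 conversion above is the standard devm_* pattern: memory from devm_kzalloc() and the regmap from devm_regmap_init_i2c() are released by the driver core when probe fails or the device unbinds, so the explicit kfree()/regmap_exit() calls and their error labels disappear. A minimal probe skeleton showing the resulting shape (struct and config names here are illustrative, not the real driver):

    struct example_chip {                     /* hypothetical driver data */
            struct regmap *regmap;
    };

    static int example_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
    {
            struct example_chip *chip;

            chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
            if (!chip)
                    return -ENOMEM;

            chip->regmap = devm_regmap_init_i2c(client, &example_regmap_config);
            if (IS_ERR(chip->regmap))
                    return PTR_ERR(chip->regmap);  /* nothing to free by hand */

            i2c_set_clientdata(client, chip);
            return 0;
    }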
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 44afae0a69ce..cdc1df7fa0e9 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -75,6 +75,7 @@ static struct deepsleep_control_data deepsleep_data[] = {
(RC5T583_EXT_PWRREQ1_CONTROL | RC5T583_EXT_PWRREQ2_CONTROL)
static struct mfd_cell rc5t583_subdevs[] = {
+ {.name = "rc5t583-gpio",},
{.name = "rc5t583-regulator",},
{.name = "rc5t583-rtc", },
{.name = "rc5t583-key", }
@@ -267,7 +268,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
rc5t583->dev = &i2c->dev;
i2c_set_clientdata(i2c, rc5t583);
- rc5t583->regmap = regmap_init_i2c(i2c, &rc5t583_regmap_config);
+ rc5t583->regmap = devm_regmap_init_i2c(i2c, &rc5t583_regmap_config);
if (IS_ERR(rc5t583->regmap)) {
ret = PTR_ERR(rc5t583->regmap);
dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
@@ -276,7 +277,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
ret = rc5t583_clear_ext_power_req(rc5t583, pdata);
if (ret < 0)
- goto err_irq_init;
+ return ret;
if (i2c->irq) {
ret = rc5t583_irq_init(rc5t583, i2c->irq, pdata->irq_base);
@@ -299,8 +300,6 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
err_add_devs:
if (irq_init_success)
rc5t583_irq_exit(rc5t583);
-err_irq_init:
- regmap_exit(rc5t583->regmap);
return ret;
}
@@ -310,7 +309,6 @@ static int __devexit rc5t583_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(rc5t583->dev);
rc5t583_irq_exit(rc5t583);
- regmap_exit(rc5t583->regmap);
return 0;
}
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 809bd4a61089..685d61e431ad 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -108,18 +108,7 @@ static struct pci_driver rdc321x_sb_driver = {
.remove = __devexit_p(rdc321x_sb_remove),
};
-static int __init rdc321x_sb_init(void)
-{
- return pci_register_driver(&rdc321x_sb_driver);
-}
-
-static void __exit rdc321x_sb_exit(void)
-{
- pci_unregister_driver(&rdc321x_sb_driver);
-}
-
-module_init(rdc321x_sb_init);
-module_exit(rdc321x_sb_exit);
+module_pci_driver(rdc321x_sb_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/s5m-core.c b/drivers/mfd/s5m-core.c
index 48949d998d10..dd170307e60e 100644
--- a/drivers/mfd/s5m-core.c
+++ b/drivers/mfd/s5m-core.c
@@ -114,12 +114,12 @@ static int s5m87xx_i2c_probe(struct i2c_client *i2c,
s5m87xx->wakeup = pdata->wakeup;
}
- s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
+ s5m87xx->regmap = devm_regmap_init_i2c(i2c, &s5m_regmap_config);
if (IS_ERR(s5m87xx->regmap)) {
ret = PTR_ERR(s5m87xx->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
@@ -159,7 +159,6 @@ err:
mfd_remove_devices(s5m87xx->dev);
s5m_irq_exit(s5m87xx);
i2c_unregister_device(s5m87xx->rtc);
- regmap_exit(s5m87xx->regmap);
return ret;
}
@@ -170,7 +169,6 @@ static int s5m87xx_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(s5m87xx->dev);
s5m_irq_exit(s5m87xx);
i2c_unregister_device(s5m87xx->rtc);
- regmap_exit(s5m87xx->regmap);
return 0;
}
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
new file mode 100644
index 000000000000..d31fed07aefb
--- /dev/null
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+#include <asm/sta2x11.h>
+
+/* This describes the STA2X11 MFD chip for us; we may have several instances */
+struct sta2x11_mfd {
+ struct sta2x11_instance *instance;
+ spinlock_t lock;
+ struct list_head list;
+ void __iomem *sctl_regs;
+ void __iomem *apbreg_regs;
+};
+
+static LIST_HEAD(sta2x11_mfd_list);
+
+/* Three functions to act on the list */
+static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
+{
+ struct sta2x11_instance *instance;
+ struct sta2x11_mfd *mfd;
+
+ if (!pdev && !list_empty(&sta2x11_mfd_list)) {
+ pr_warning("%s: Unspecified device, "
+ "using first instance\n", __func__);
+ return list_entry(sta2x11_mfd_list.next,
+ struct sta2x11_mfd, list);
+ }
+
+ instance = sta2x11_get_instance(pdev);
+ if (!instance)
+ return NULL;
+ list_for_each_entry(mfd, &sta2x11_mfd_list, list) {
+ if (mfd->instance == instance)
+ return mfd;
+ }
+ return NULL;
+}
+
+static int __devinit sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ struct sta2x11_instance *instance;
+
+ if (mfd)
+ return -EBUSY;
+ instance = sta2x11_get_instance(pdev);
+ if (!instance)
+ return -EINVAL;
+ mfd = kzalloc(sizeof(*mfd), flags);
+ if (!mfd)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&mfd->list);
+ spin_lock_init(&mfd->lock);
+ mfd->instance = instance;
+ list_add(&mfd->list, &sta2x11_mfd_list);
+ return 0;
+}
+
+static int __devexit mfd_remove(struct pci_dev *pdev)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+
+ if (!mfd)
+ return -ENODEV;
+ list_del(&mfd->list);
+ kfree(mfd);
+ return 0;
+}
+
+/* These two functions are exported and are not expected to fail */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ u32 r;
+ unsigned long flags;
+
+ if (!mfd) {
+ dev_warn(&pdev->dev, ": can't access sctl regs\n");
+ return 0;
+ }
+ if (!mfd->sctl_regs) {
+ dev_warn(&pdev->dev, ": system ctl not initialized\n");
+ return 0;
+ }
+ spin_lock_irqsave(&mfd->lock, flags);
+ r = readl(mfd->sctl_regs + reg);
+ r &= ~mask;
+ r |= val;
+ if (mask)
+ writel(r, mfd->sctl_regs + reg);
+ spin_unlock_irqrestore(&mfd->lock, flags);
+ return r;
+}
+EXPORT_SYMBOL(sta2x11_sctl_mask);
+
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ u32 r;
+ unsigned long flags;
+
+ if (!mfd) {
+ dev_warn(&pdev->dev, ": can't access apb regs\n");
+ return 0;
+ }
+ if (!mfd->apbreg_regs) {
+ dev_warn(&pdev->dev, ": apb bridge not initialized\n");
+ return 0;
+ }
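+ /* Locked read-modify-write; when mask is 0 the register is only read */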
+ spin_lock_irqsave(&mfd->lock, flags);
+ r = readl(mfd->apbreg_regs + reg);
+ r &= ~mask;
+ r |= val;
+ if (mask)
+ writel(r, mfd->apbreg_regs + reg);
+ spin_unlock_irqrestore(&mfd->lock, flags);
+ return r;
+}
+EXPORT_SYMBOL(sta2x11_apbreg_mask);
+
+/* Two debugfs files, for our registers (FIXME: one instance only) */
+#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
+static struct debugfs_reg32 sta2x11_sctl_regs[] = {
+ REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
+ REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
+ REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
+ REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
+ REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
+ REG(SCCLKSTAT2), REG(SCRSTSTA),
+};
+#undef REG
+
+static struct debugfs_regset32 sctl_regset = {
+ .regs = sta2x11_sctl_regs,
+ .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
+};
+
+#define REG(regname) {.name = #regname, .offset = regname}
+static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
+ REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
+ REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+};
+#undef REG
+
+static struct debugfs_regset32 apbreg_regset = {
+ .regs = sta2x11_apbreg_regs,
+ .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+};
+
+static struct dentry *sta2x11_sctl_debugfs;
+static struct dentry *sta2x11_apbreg_debugfs;
+
+/* Probe for the two platform devices */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+ struct pci_dev **pdev;
+ struct sta2x11_mfd *mfd;
+ struct resource *res;
+
+ pdev = dev->dev.platform_data;
+ mfd = sta2x11_mfd_find(*pdev);
+ if (!mfd)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ if (!request_mem_region(res->start, resource_size(res),
+ "sta2x11-sctl"))
+ return -EBUSY;
+
+ mfd->sctl_regs = ioremap(res->start, resource_size(res));
+ if (!mfd->sctl_regs) {
+ release_mem_region(res->start, resource_size(res));
+ return -ENOMEM;
+ }
+ sctl_regset.base = mfd->sctl_regs;
+ sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
+ S_IFREG | S_IRUGO,
+ NULL, &sctl_regset);
+ return 0;
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+ struct pci_dev **pdev;
+ struct sta2x11_mfd *mfd;
+ struct resource *res;
+
+ pdev = dev->dev.platform_data;
+ dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
+ dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
+
+ mfd = sta2x11_mfd_find(*pdev);
+ if (!mfd)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ if (!request_mem_region(res->start, resource_size(res),
+ "sta2x11-apbreg"))
+ return -EBUSY;
+
+ mfd->apbreg_regs = ioremap(res->start, resource_size(res));
+ if (!mfd->apbreg_regs) {
+ release_mem_region(res->start, resource_size(res));
+ return -ENOMEM;
+ }
+ dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+
+ apbreg_regset.base = mfd->apbreg_regs;
+ sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
+ S_IFREG | S_IRUGO,
+ NULL, &apbreg_regset);
+ return 0;
+}
+
+/* The two platform drivers */
+static struct platform_driver sta2x11_sctl_platform_driver = {
+ .driver = {
+ .name = "sta2x11-sctl",
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_sctl_probe,
+};
+
+static int __init sta2x11_sctl_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_sctl_platform_driver);
+}
+
+static struct platform_driver sta2x11_platform_driver = {
+ .driver = {
+ .name = "sta2x11-apbreg",
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_apbreg_probe,
+};
+
+static int __init sta2x11_apbreg_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_platform_driver);
+}
+
+/*
+ * What follows is the PCI device that hosts the above two pdevs.
+ * Each logic block is 4kB and the blocks are consecutive; the resource
+ * tables below rely on this layout.
+ */
+
+/* Bar 0 */
+enum bar0_cells {
+ STA2X11_GPIO_0 = 0,
+ STA2X11_GPIO_1,
+ STA2X11_GPIO_2,
+ STA2X11_GPIO_3,
+ STA2X11_SCTL,
+ STA2X11_SCR,
+ STA2X11_TIME,
+};
+/* Bar 1 */
+enum bar1_cells {
+ STA2X11_APBREG = 0,
+};
+#define CELL_4K(_name, _cell) { \
+ .name = _name, \
+ .start = _cell * 4096, .end = _cell * 4096 + 4095, \
+ .flags = IORESOURCE_MEM, \
+ }
+
+static const __devinitconst struct resource gpio_resources[] = {
+ {
+ .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+ .start = 0,
+ .end = (4 * 4096) - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+static const __devinitconst struct resource sctl_resources[] = {
+ CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+};
+static const __devinitconst struct resource scr_resources[] = {
+ CELL_4K("sta2x11-scr", STA2X11_SCR),
+};
+static const __devinitconst struct resource time_resources[] = {
+ CELL_4K("sta2x11-time", STA2X11_TIME),
+};
+
+static const __devinitconst struct resource apbreg_resources[] = {
+ CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+};
+
+#define DEV(_name, _r) \
+ { .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar0[] = {
+ DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
+ DEV("sta2x11-sctl", sctl_resources),
+ DEV("sta2x11-scr", scr_resources),
+ DEV("sta2x11-time", time_resources),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar1[] = {
+ DEV("sta2x11-apbreg", apbreg_resources),
+};
+
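+/*
+ * Legacy PCI PM callbacks: save state and power down on suspend,
+ * re-enable and restore state on resume.
+ */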
+static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int sta2x11_mfd_resume(struct pci_dev *pdev)
+{
+ int err;
+
+ pci_set_power_state(pdev, 0);
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ pci_restore_state(pdev);
+
+ return 0;
+}
+
+static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ int err, i;
+ struct sta2x11_gpio_pdata *gpio_data;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Can't enable device.\n");
+ return err;
+ }
+
+ err = pci_enable_msi(pdev);
+ if (err)
+ dev_info(&pdev->dev, "Enable msi failed\n");
+
+ /* Read gpio config data as pci device's platform data */
+ gpio_data = dev_get_platdata(&pdev->dev);
+ if (!gpio_data)
+ dev_warn(&pdev->dev, "no gpio configuration\n");
+
+ dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
+ gpio_data, &gpio_data);
+ dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
+ pdev, &pdev);
+
+ /* platform data is the pci device for all of them */
+ for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
+ sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
+ sta2x11_mfd_bar0[i].platform_data = &pdev;
+ }
+ sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
+ sta2x11_mfd_bar1[0].platform_data = &pdev;
+
+ /* Record this pdev before mfd_add_devices: their probe looks for it */
+ sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+
+ err = mfd_add_devices(&pdev->dev, -1,
+ sta2x11_mfd_bar0,
+ ARRAY_SIZE(sta2x11_mfd_bar0),
+ &pdev->resource[0],
+ 0);
+ if (err) {
+ dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
+ goto err_disable;
+ }
+
+ err = mfd_add_devices(&pdev->dev, -1,
+ sta2x11_mfd_bar1,
+ ARRAY_SIZE(sta2x11_mfd_bar1),
+ &pdev->resource[1],
+ 0);
+ if (err) {
+ dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ mfd_remove_devices(&pdev->dev);
+ pci_disable_device(pdev);
+ pci_disable_msi(pdev);
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+ {0,},
+};
+
+static struct pci_driver sta2x11_mfd_driver = {
+ .name = "sta2x11-mfd",
+ .id_table = sta2x11_mfd_tbl,
+ .probe = sta2x11_mfd_probe,
+ .suspend = sta2x11_mfd_suspend,
+ .resume = sta2x11_mfd_resume,
+};
+
+static int __init sta2x11_mfd_init(void)
+{
+ pr_info("%s\n", __func__);
+ return pci_register_driver(&sta2x11_mfd_driver);
+}
+
+/*
+ * All of this must be ready before "normal" devices like MMCI appear.
+ * But the MFD (the PCI device) can't be registered too early. The following
+ * choice prepares the platform drivers very early and probes the PCI
+ * device later, but before other PCI devices.
+ */
+subsys_initcall(sta2x11_apbreg_init);
+subsys_initcall(sta2x11_sctl_init);
+rootfs_initcall(sta2x11_mfd_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wind River");
+MODULE_DESCRIPTION("STA2x11 mfd for GPIO, SCTL and APBREG");
+MODULE_DEVICE_TABLE(pci, sta2x11_mfd_tbl);
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 373f423b1181..947a06a1845f 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
*/
#include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index b58c43c7ea93..9edfe864cc05 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
* Copyright (C) ST Microelectronics SA 2011
*
* License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
*/
#include <linux/spi/spi.h>
@@ -122,7 +122,6 @@ MODULE_DEVICE_TABLE(spi, stmpe_id);
static struct spi_driver stmpe_spi_driver = {
.driver = {
.name = "stmpe-spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &stmpe_dev_pm_ops,
@@ -147,4 +146,4 @@ module_exit(stmpe_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 47f802bf1848..396b9d1b6bd6 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -283,27 +283,24 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client,
}
}
- tps65090->rmap = regmap_init_i2c(tps65090->client,
- &tps65090_regmap_config);
+ tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
+ &tps65090_regmap_config);
if (IS_ERR(tps65090->rmap)) {
- dev_err(&client->dev, "regmap_init failed with err: %ld\n",
- PTR_ERR(tps65090->rmap));
+ ret = PTR_ERR(tps65090->rmap);
+ dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
goto err_irq_exit;
- };
+ }
ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
ARRAY_SIZE(tps65090s), NULL, 0);
if (ret) {
dev_err(&client->dev, "add mfd devices failed with err: %d\n",
ret);
- goto err_regmap_exit;
+ goto err_irq_exit;
}
return 0;
-err_regmap_exit:
- regmap_exit(tps65090->rmap);
-
err_irq_exit:
if (client->irq)
free_irq(client->irq, tps65090);
@@ -316,29 +313,34 @@ static int __devexit tps65090_i2c_remove(struct i2c_client *client)
struct tps65090 *tps65090 = i2c_get_clientdata(client);
mfd_remove_devices(tps65090->dev);
- regmap_exit(tps65090->rmap);
if (client->irq)
free_irq(client->irq, tps65090);
return 0;
}
-#ifdef CONFIG_PM
-static int tps65090_i2c_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tps65090_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
if (client->irq)
disable_irq(client->irq);
return 0;
}
-static int tps65090_i2c_resume(struct i2c_client *client)
+static int tps65090_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
if (client->irq)
enable_irq(client->irq);
return 0;
}
#endif
+static const struct dev_pm_ops tps65090_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tps65090_suspend, tps65090_resume)
+};
+
static const struct i2c_device_id tps65090_id_table[] = {
{ "tps65090", 0 },
{ },
@@ -349,13 +351,10 @@ static struct i2c_driver tps65090_driver = {
.driver = {
.name = "tps65090",
.owner = THIS_MODULE,
+ .pm = &tps65090_pm_ops,
},
.probe = tps65090_i2c_probe,
.remove = __devexit_p(tps65090_i2c_remove),
-#ifdef CONFIG_PM
- .suspend = tps65090_i2c_suspend,
- .resume = tps65090_i2c_resume,
-#endif
.id_table = tps65090_id_table,
};
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index f7d854e4cc62..db194e433c08 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_write);
* @val: Value to write.
* @level: Password protected level
*/
-int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
+static int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
unsigned int mask, unsigned int val, unsigned int level)
{
int ret;
@@ -150,7 +150,7 @@ static int __devinit tps65217_probe(struct i2c_client *client,
return -ENOMEM;
tps->pdata = pdata;
- tps->regmap = regmap_init_i2c(client, &tps65217_regmap_config);
+ tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
if (IS_ERR(tps->regmap)) {
ret = PTR_ERR(tps->regmap);
dev_err(tps->dev, "Failed to allocate register map: %d\n",
@@ -163,9 +163,9 @@ static int __devinit tps65217_probe(struct i2c_client *client,
ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version);
if (ret < 0) {
- dev_err(tps->dev, "Failed to read revision"
- " register: %d\n", ret);
- goto err_regmap;
+ dev_err(tps->dev, "Failed to read revision register: %d\n",
+ ret);
+ return ret;
}
dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
@@ -190,11 +190,6 @@ static int __devinit tps65217_probe(struct i2c_client *client,
}
return 0;
-
-err_regmap:
- regmap_exit(tps->regmap);
-
- return ret;
}
static int __devexit tps65217_remove(struct i2c_client *client)
@@ -205,8 +200,6 @@ static int __devexit tps65217_remove(struct i2c_client *client)
for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
platform_device_unregister(tps->regulator_pdev[i]);
- regmap_exit(tps->regmap);
-
return 0;
}
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index c9ed5c00a621..09aab3e4776d 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -20,15 +20,10 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/gpio.h>
#include <linux/mfd/tps65910.h>
-static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
- int irq)
-{
- return (irq - tps65910->irq_base);
-}
-
/*
* This is a threaded IRQ handler so can access I2C/SPI. Since all
* interrupts are clear on read the IRQ line will be reasserted and
@@ -41,28 +36,28 @@ static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
static irqreturn_t tps65910_irq(int irq, void *irq_data)
{
struct tps65910 *tps65910 = irq_data;
+ unsigned int reg;
u32 irq_sts;
u32 irq_mask;
- u8 reg;
int i;
- tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
irq_sts = reg;
- tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
irq_sts |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
irq_sts |= reg << 16;
}
- tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
irq_mask = reg;
- tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
irq_mask |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
irq_mask |= reg << 16;
}
@@ -76,19 +71,19 @@ static irqreturn_t tps65910_irq(int irq, void *irq_data)
if (!(irq_sts & (1 << i)))
continue;
- handle_nested_irq(tps65910->irq_base + i);
+ handle_nested_irq(irq_find_mapping(tps65910->domain, i));
}
/* Write the STS register back to clear IRQs we handled */
reg = irq_sts & 0xFF;
irq_sts >>= 8;
- tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
reg = irq_sts & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
reg = irq_sts >> 8;
- tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
}
return IRQ_HANDLED;
@@ -105,27 +100,27 @@ static void tps65910_irq_sync_unlock(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
u32 reg_mask;
- u8 reg;
+ unsigned int reg;
- tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
reg_mask = reg;
- tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
reg_mask |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
reg_mask |= reg << 16;
}
if (tps65910->irq_mask != reg_mask) {
reg = tps65910->irq_mask & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
reg = tps65910->irq_mask >> 8 & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
reg = tps65910->irq_mask >> 16;
- tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
}
}
mutex_unlock(&tps65910->irq_lock);
@@ -135,14 +130,14 @@ static void tps65910_irq_enable(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- tps65910->irq_mask &= ~( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+ tps65910->irq_mask &= ~(1 << data->hwirq);
}
static void tps65910_irq_disable(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- tps65910->irq_mask |= ( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+ tps65910->irq_mask |= (1 << data->hwirq);
}
#ifdef CONFIG_PM_SLEEP
@@ -164,10 +159,35 @@ static struct irq_chip tps65910_irq_chip = {
.irq_set_wake = tps65910_irq_set_wake,
};
+static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct tps65910 *tps65910 = h->host_data;
+
+ irq_set_chip_data(virq, tps65910);
+ irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set it noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops tps65910_domain_ops = {
+ .map = tps65910_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
int tps65910_irq_init(struct tps65910 *tps65910, int irq,
struct tps65910_platform_data *pdata)
{
- int ret, cur_irq;
+ int ret;
int flags = IRQF_ONESHOT;
if (!irq) {
@@ -175,17 +195,11 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
return -EINVAL;
}
- if (!pdata || !pdata->irq_base) {
- dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
+ if (!pdata) {
+ dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
return -EINVAL;
}
- tps65910->irq_mask = 0xFFFFFF;
-
- mutex_init(&tps65910->irq_lock);
- tps65910->chip_irq = irq;
- tps65910->irq_base = pdata->irq_base;
-
switch (tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910->irq_num = TPS65910_NUM_IRQ;
@@ -195,22 +209,36 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
break;
}
- /* Register with genirq */
- for (cur_irq = tps65910->irq_base;
- cur_irq < tps65910->irq_num + tps65910->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, tps65910);
- irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
+ if (pdata->irq_base > 0) {
+ pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
+ tps65910->irq_num, -1);
+ if (pdata->irq_base < 0) {
+ dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
+ pdata->irq_base);
+ return pdata->irq_base;
+ }
+ }
+
+ tps65910->irq_mask = 0xFFFFFF;
+
+ mutex_init(&tps65910->irq_lock);
+ tps65910->chip_irq = irq;
+ tps65910->irq_base = pdata->irq_base;
+
+ if (pdata->irq_base > 0)
+ tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
+ tps65910->irq_num,
+ pdata->irq_base,
+ 0,
+ &tps65910_domain_ops, tps65910);
+ else
+ tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
+ tps65910->irq_num,
+ &tps65910_domain_ops, tps65910);
+
+ if (!tps65910->domain) {
+ dev_err(tps65910->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
}
ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index bf2b25ebf2ca..be9e07b77325 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -19,13 +19,16 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
static struct mfd_cell tps65910s[] = {
{
+ .name = "tps65910-gpio",
+ },
+ {
.name = "tps65910-pmic",
},
{
@@ -37,30 +40,6 @@ static struct mfd_cell tps65910s[] = {
};
-static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
- int bytes, void *dest)
-{
- return regmap_bulk_read(tps65910->regmap, reg, dest, bytes);
-}
-
-static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
- int bytes, void *src)
-{
- return regmap_bulk_write(tps65910->regmap, reg, src, bytes);
-}
-
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-EXPORT_SYMBOL_GPL(tps65910_set_bits);
-
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-EXPORT_SYMBOL_GPL(tps65910_clear_bits);
-
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -85,80 +64,197 @@ static const struct regmap_config tps65910_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_reg = is_volatile_reg,
- .max_register = TPS65910_MAX_REGISTER,
- .num_reg_defaults_raw = TPS65910_MAX_REGISTER,
+ .max_register = TPS65910_MAX_REGISTER - 1,
.cache_type = REGCACHE_RBTREE,
};
-static int tps65910_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int __devinit tps65910_sleepinit(struct tps65910 *tps65910,
+ struct tps65910_board *pmic_pdata)
+{
+ struct device *dev = NULL;
+ int ret = 0;
+
+ dev = tps65910->dev;
+
+ if (!pmic_pdata->en_dev_slp)
+ return 0;
+
+ /* enabling SLEEP device state */
+ ret = tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_SLP_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set dev_slp failed: %d\n", ret);
+ goto err_sleep_init;
+ }
+
+ /* Return if there is no sleep keepon data. */
+ if (!pmic_pdata->slp_keepon)
+ return 0;
+
+ if (pmic_pdata->slp_keepon->therm_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set therm_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pmic_pdata->slp_keepon->clkout32k_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set clkout32k_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pmic_pdata->slp_keepon->i2chs_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set i2chs_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ return 0;
+
+disable_dev_slp:
+ tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_SLP_MASK);
+
+err_sleep_init:
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id tps65910_of_match[] = {
+ { .compatible = "ti,tps65910", .data = (void *)TPS65910},
+ { .compatible = "ti,tps65911", .data = (void *)TPS65911},
+ { },
+};
+MODULE_DEVICE_TABLE(of, tps65910_of_match);
+
+static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+ int *chip_id)
+{
+ struct device_node *np = client->dev.of_node;
+ struct tps65910_board *board_info;
+ unsigned int prop;
+ const struct of_device_id *match;
+ int ret = 0;
+
+ match = of_match_device(tps65910_of_match, &client->dev);
+ if (!match) {
+ dev_err(&client->dev, "Failed to find matching dt id\n");
+ return NULL;
+ }
+
+ *chip_id = (int)match->data;
+
+ board_info = devm_kzalloc(&client->dev, sizeof(*board_info),
+ GFP_KERNEL);
+ if (!board_info) {
+ dev_err(&client->dev, "Failed to allocate pdata\n");
+ return NULL;
+ }
+
+ ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
+ if (!ret)
+ board_info->vmbch_threshold = prop;
+ else if (*chip_id == TPS65911)
+ dev_warn(&client->dev, "VMBCH-Threshold not specified");
+
+ ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
+ if (!ret)
+ board_info->vmbch2_threshold = prop;
+ else if (*chip_id == TPS65911)
+ dev_warn(&client->dev, "VMBCH2-Threshold not specified");
+
+ board_info->irq = client->irq;
+ board_info->irq_base = -1;
+
+ return board_info;
+}
+#else
+static inline
+struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+ int *chip_id)
+{
+ return NULL;
+}
+#endif
+
+static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct tps65910 *tps65910;
struct tps65910_board *pmic_plat_data;
+ struct tps65910_board *of_pmic_plat_data = NULL;
struct tps65910_platform_data *init_data;
int ret = 0;
+ int chip_id = id->driver_data;
pmic_plat_data = dev_get_platdata(&i2c->dev);
+
+ if (!pmic_plat_data && i2c->dev.of_node) {
+ pmic_plat_data = tps65910_parse_dt(i2c, &chip_id);
+ of_pmic_plat_data = pmic_plat_data;
+ }
+
if (!pmic_plat_data)
return -EINVAL;
- init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
+ init_data = devm_kzalloc(&i2c->dev, sizeof(*init_data), GFP_KERNEL);
if (init_data == NULL)
return -ENOMEM;
- tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
- if (tps65910 == NULL) {
- kfree(init_data);
+ tps65910 = devm_kzalloc(&i2c->dev, sizeof(*tps65910), GFP_KERNEL);
+ if (tps65910 == NULL)
return -ENOMEM;
- }
+ tps65910->of_plat_data = of_pmic_plat_data;
i2c_set_clientdata(i2c, tps65910);
tps65910->dev = &i2c->dev;
tps65910->i2c_client = i2c;
- tps65910->id = id->driver_data;
- tps65910->read = tps65910_i2c_read;
- tps65910->write = tps65910_i2c_write;
+ tps65910->id = chip_id;
mutex_init(&tps65910->io_mutex);
- tps65910->regmap = regmap_init_i2c(i2c, &tps65910_regmap_config);
+ tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
if (IS_ERR(tps65910->regmap)) {
ret = PTR_ERR(tps65910->regmap);
dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
- goto regmap_err;
+ return ret;
}
ret = mfd_add_devices(tps65910->dev, -1,
tps65910s, ARRAY_SIZE(tps65910s),
NULL, 0);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+ return ret;
+ }
init_data->irq = pmic_plat_data->irq;
init_data->irq_base = pmic_plat_data->irq_base;
- tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
-
tps65910_irq_init(tps65910, init_data->irq, init_data);
- kfree(init_data);
- return ret;
+ tps65910_sleepinit(tps65910, pmic_plat_data);
-err:
- regmap_exit(tps65910->regmap);
-regmap_err:
- kfree(tps65910);
- kfree(init_data);
return ret;
}
-static int tps65910_i2c_remove(struct i2c_client *i2c)
+static __devexit int tps65910_i2c_remove(struct i2c_client *i2c)
{
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
tps65910_irq_exit(tps65910);
mfd_remove_devices(tps65910->dev);
- regmap_exit(tps65910->regmap);
- kfree(tps65910);
return 0;
}
@@ -175,9 +271,10 @@ static struct i2c_driver tps65910_i2c_driver = {
.driver = {
.name = "tps65910",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tps65910_of_match),
},
.probe = tps65910_i2c_probe,
- .remove = tps65910_i2c_remove,
+ .remove = __devexit_p(tps65910_i2c_remove),
.id_table = tps65910_i2c_id,
};
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index e7ff783aa31e..5a62e6bf89ae 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -26,7 +26,7 @@
#define COMP1 1
#define COMP2 2
-/* Comparator 1 voltage selection table in milivolts */
+/* Comparator 1 voltage selection table in millivolts */
static const u16 COMP_VSEL_TABLE[] = {
0, 2500, 2500, 2500, 2500, 2550, 2600, 2650,
2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050,
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d656e814358..ad733d76207a 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -757,6 +757,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
dev_err(dev, "could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
}
+ enable_irq_wake(irq_num);
return irq_base;
fail_rqirq:
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 2d6bedadca09..4ded9e7aa246 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -27,7 +27,12 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -35,8 +40,24 @@
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
+#include <linux/regulator/consumer.h>
#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
+#define TWL6040_NUM_SUPPLIES (2)
+
+static bool twl6040_has_vibra(struct twl6040_platform_data *pdata,
+ struct device_node *node)
+{
+ if (pdata && pdata->vibra)
+ return true;
+
+#ifdef CONFIG_OF
+ if (of_find_node_by_name(node, "vibra"))
+ return true;
+#endif
+
+ return false;
+}
int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
{
@@ -502,17 +523,18 @@ static int __devinit twl6040_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct twl6040_platform_data *pdata = client->dev.platform_data;
+ struct device_node *node = client->dev.of_node;
struct twl6040 *twl6040;
struct mfd_cell *cell = NULL;
- int ret, children = 0;
+ int irq, ret, children = 0;
- if (!pdata) {
+ if (!pdata && !node) {
dev_err(&client->dev, "Platform data is missing\n");
return -EINVAL;
}
/* In order to operate correctly we need valid interrupt config */
- if (!client->irq || !pdata->irq_base) {
+ if (!client->irq) {
dev_err(&client->dev, "Invalid IRQ configuration\n");
return -EINVAL;
}
@@ -524,7 +546,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
goto err;
}
- twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config);
+ twl6040->regmap = devm_regmap_init_i2c(client, &twl6040_regmap_config);
if (IS_ERR(twl6040->regmap)) {
ret = PTR_ERR(twl6040->regmap);
goto err;
@@ -532,9 +554,23 @@ static int __devinit twl6040_probe(struct i2c_client *client,
i2c_set_clientdata(client, twl6040);
+ twl6040->supplies[0].supply = "vio";
+ twl6040->supplies[1].supply = "v2v1";
+ ret = regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
+ twl6040->supplies);
+ if (ret != 0) {
+ dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
+ goto regulator_get_err;
+ }
+
+ ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+ if (ret != 0) {
+ dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
+ goto power_err;
+ }
+
twl6040->dev = &client->dev;
twl6040->irq = client->irq;
- twl6040->irq_base = pdata->irq_base;
mutex_init(&twl6040->mutex);
mutex_init(&twl6040->io_mutex);
@@ -543,22 +579,26 @@ static int __devinit twl6040_probe(struct i2c_client *client,
twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
/* ERRATA: Automatic power-up is not possible in ES1.0 */
- if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
- twl6040->audpwron = pdata->audpwron_gpio;
- else
+ if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) {
+ if (pdata)
+ twl6040->audpwron = pdata->audpwron_gpio;
+ else
+ twl6040->audpwron = of_get_named_gpio(node,
+ "ti,audpwron-gpio", 0);
+ } else
twl6040->audpwron = -EINVAL;
if (gpio_is_valid(twl6040->audpwron)) {
ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
"audpwron");
if (ret)
- goto gpio1_err;
+ goto gpio_err;
}
/* codec interrupt */
ret = twl6040_irq_init(twl6040);
if (ret)
- goto gpio2_err;
+ goto irq_init_err;
ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
NULL, twl6040_naudint_handler, 0,
@@ -572,22 +612,27 @@ static int __devinit twl6040_probe(struct i2c_client *client,
/* dual-access registers controlled by I2C only */
twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
- if (pdata->codec) {
- int irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
-
- cell = &twl6040->cells[children];
- cell->name = "twl6040-codec";
- twl6040_codec_rsrc[0].start = irq;
- twl6040_codec_rsrc[0].end = irq;
- cell->resources = twl6040_codec_rsrc;
- cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+ /*
+ * The main functionality of twl6040 is to provide audio on OMAP4+ systems.
+ * We can add the ASoC codec child whenever this driver has been loaded.
+ * The ASoC codec can work without pdata, so pass the platform_data only if
+ * it has been provided.
+ */
+ irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-codec";
+ twl6040_codec_rsrc[0].start = irq;
+ twl6040_codec_rsrc[0].end = irq;
+ cell->resources = twl6040_codec_rsrc;
+ cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+ if (pdata && pdata->codec) {
cell->platform_data = pdata->codec;
cell->pdata_size = sizeof(*pdata->codec);
- children++;
}
+ children++;
- if (pdata->vibra) {
- int irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+ if (twl6040_has_vibra(pdata, node)) {
+ irq = twl6040->irq_base + TWL6040_IRQ_VIB;
cell = &twl6040->cells[children];
cell->name = "twl6040-vibra";
@@ -596,21 +641,17 @@ static int __devinit twl6040_probe(struct i2c_client *client,
cell->resources = twl6040_vibra_rsrc;
cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
- cell->platform_data = pdata->vibra;
- cell->pdata_size = sizeof(*pdata->vibra);
+ if (pdata && pdata->vibra) {
+ cell->platform_data = pdata->vibra;
+ cell->pdata_size = sizeof(*pdata->vibra);
+ }
children++;
}
- if (children) {
- ret = mfd_add_devices(&client->dev, -1, twl6040->cells,
- children, NULL, 0);
- if (ret)
- goto mfd_err;
- } else {
- dev_err(&client->dev, "No platform data found for children\n");
- ret = -ENODEV;
+ ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
+ NULL, 0);
+ if (ret)
goto mfd_err;
- }
return 0;
@@ -618,12 +659,15 @@ mfd_err:
free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
irq_err:
twl6040_irq_exit(twl6040);
-gpio2_err:
+irq_init_err:
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
-gpio1_err:
+gpio_err:
+ regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+power_err:
+ regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+regulator_get_err:
i2c_set_clientdata(client, NULL);
- regmap_exit(twl6040->regmap);
err:
return ret;
}
@@ -643,7 +687,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
mfd_remove_devices(&client->dev);
i2c_set_clientdata(client, NULL);
- regmap_exit(twl6040->regmap);
+
+ regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+ regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
return 0;
}
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
index b3f8ddaa28a8..4b42543da228 100644
--- a/drivers/mfd/twl6040-irq.c
+++ b/drivers/mfd/twl6040-irq.c
@@ -23,7 +23,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/err.h>
#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
@@ -138,7 +141,8 @@ static irqreturn_t twl6040_irq_thread(int irq, void *data)
int twl6040_irq_init(struct twl6040 *twl6040)
{
- int cur_irq, ret;
+ struct device_node *node = twl6040->dev->of_node;
+ int i, nr_irqs, irq_base, ret;
u8 val;
mutex_init(&twl6040->irq_mutex);
@@ -148,21 +152,31 @@ int twl6040_irq_init(struct twl6040 *twl6040)
twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
+ nr_irqs = ARRAY_SIZE(twl6040_irqs);
+
+ irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ dev_err(twl6040->dev, "Fail to allocate IRQ descs\n");
+ return irq_base;
+ }
+ twl6040->irq_base = irq_base;
+
+ irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
/* Register them with genirq */
- for (cur_irq = twl6040->irq_base;
- cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
- cur_irq++) {
- irq_set_chip_data(cur_irq, twl6040);
- irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
+ for (i = irq_base; i < irq_base + nr_irqs; i++) {
+ irq_set_chip_data(i, twl6040);
+ irq_set_chip_and_handler(i, &twl6040_irq_chip,
handle_level_irq);
- irq_set_nested_thread(cur_irq, 1);
+ irq_set_nested_thread(i, 1);
/* ARM needs us to explicitly flag the IRQ as valid
* and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
+ set_irq_flags(i, IRQF_VALID);
#else
- irq_set_noprobe(cur_irq);
+ irq_set_noprobe(i);
#endif
}
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index b73cc15e0081..872aff21e4be 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -131,17 +131,7 @@ static struct pci_driver vx855_pci_driver = {
.remove = __devexit_p(vx855_remove),
};
-static int vx855_init(void)
-{
- return pci_register_driver(&vx855_pci_driver);
-}
-module_init(vx855_init);
-
-static void vx855_exit(void)
-{
- pci_unregister_driver(&vx855_pci_driver);
-}
-module_exit(vx855_exit);
+module_pci_driver(vx855_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index 87210954a066..6ee3018d8653 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -280,11 +280,11 @@ void wm831x_auxadc_init(struct wm831x *wm831x)
mutex_init(&wm831x->auxadc_lock);
INIT_LIST_HEAD(&wm831x->auxadc_pending);
- if (wm831x->irq && wm831x->irq_base) {
+ if (wm831x->irq) {
wm831x->auxadc_read = wm831x_auxadc_read_irq;
- ret = request_threaded_irq(wm831x->irq_base +
- WM831X_IRQ_AUXADC_DATA,
+ ret = request_threaded_irq(wm831x_irq(wm831x,
+ WM831X_IRQ_AUXADC_DATA),
NULL, wm831x_auxadc_irq, 0,
"auxadc", wm831x);
if (ret < 0) {
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 838056c3493a..946698fd2dc6 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -614,8 +614,15 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
}
EXPORT_SYMBOL_GPL(wm831x_set_bits);
+static struct resource wm831x_io_parent = {
+ .start = 0,
+ .end = 0xffffffff,
+ .flags = IORESOURCE_IO,
+};
+
static struct resource wm831x_dcdc1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC1_CONTROL_1,
.end = WM831X_DC1_DVS_CONTROL,
.flags = IORESOURCE_IO,
@@ -637,6 +644,7 @@ static struct resource wm831x_dcdc1_resources[] = {
static struct resource wm831x_dcdc2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC2_CONTROL_1,
.end = WM831X_DC2_DVS_CONTROL,
.flags = IORESOURCE_IO,
@@ -657,6 +665,7 @@ static struct resource wm831x_dcdc2_resources[] = {
static struct resource wm831x_dcdc3_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC3_CONTROL_1,
.end = WM831X_DC3_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -671,6 +680,7 @@ static struct resource wm831x_dcdc3_resources[] = {
static struct resource wm831x_dcdc4_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC4_CONTROL,
.end = WM831X_DC4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -685,6 +695,7 @@ static struct resource wm831x_dcdc4_resources[] = {
static struct resource wm8320_dcdc4_buck_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC4_CONTROL,
.end = WM832X_DC4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -707,6 +718,7 @@ static struct resource wm831x_gpio_resources[] = {
static struct resource wm831x_isink1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_CURRENT_SINK_1,
.end = WM831X_CURRENT_SINK_1,
.flags = IORESOURCE_IO,
@@ -720,6 +732,7 @@ static struct resource wm831x_isink1_resources[] = {
static struct resource wm831x_isink2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_CURRENT_SINK_2,
.end = WM831X_CURRENT_SINK_2,
.flags = IORESOURCE_IO,
@@ -733,6 +746,7 @@ static struct resource wm831x_isink2_resources[] = {
static struct resource wm831x_ldo1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO1_CONTROL,
.end = WM831X_LDO1_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -747,6 +761,7 @@ static struct resource wm831x_ldo1_resources[] = {
static struct resource wm831x_ldo2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO2_CONTROL,
.end = WM831X_LDO2_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -761,6 +776,7 @@ static struct resource wm831x_ldo2_resources[] = {
static struct resource wm831x_ldo3_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO3_CONTROL,
.end = WM831X_LDO3_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -775,6 +791,7 @@ static struct resource wm831x_ldo3_resources[] = {
static struct resource wm831x_ldo4_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO4_CONTROL,
.end = WM831X_LDO4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -789,6 +806,7 @@ static struct resource wm831x_ldo4_resources[] = {
static struct resource wm831x_ldo5_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO5_CONTROL,
.end = WM831X_LDO5_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -803,6 +821,7 @@ static struct resource wm831x_ldo5_resources[] = {
static struct resource wm831x_ldo6_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO6_CONTROL,
.end = WM831X_LDO6_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -817,6 +836,7 @@ static struct resource wm831x_ldo6_resources[] = {
static struct resource wm831x_ldo7_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO7_CONTROL,
.end = WM831X_LDO7_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -831,6 +851,7 @@ static struct resource wm831x_ldo7_resources[] = {
static struct resource wm831x_ldo8_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO8_CONTROL,
.end = WM831X_LDO8_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -845,6 +866,7 @@ static struct resource wm831x_ldo8_resources[] = {
static struct resource wm831x_ldo9_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO9_CONTROL,
.end = WM831X_LDO9_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -859,6 +881,7 @@ static struct resource wm831x_ldo9_resources[] = {
static struct resource wm831x_ldo10_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO10_CONTROL,
.end = WM831X_LDO10_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -873,6 +896,7 @@ static struct resource wm831x_ldo10_resources[] = {
static struct resource wm831x_ldo11_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO11_ON_CONTROL,
.end = WM831X_LDO11_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -974,6 +998,7 @@ static struct resource wm831x_rtc_resources[] = {
static struct resource wm831x_status1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_STATUS_LED_1,
.end = WM831X_STATUS_LED_1,
.flags = IORESOURCE_IO,
@@ -982,6 +1007,7 @@ static struct resource wm831x_status1_resources[] = {
static struct resource wm831x_status2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_STATUS_LED_2,
.end = WM831X_STATUS_LED_2,
.flags = IORESOURCE_IO,
@@ -1787,27 +1813,27 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8310:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8310_devs, ARRAY_SIZE(wm8310_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8311:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (!pdata || !pdata->disable_touch)
mfd_add_devices(wm831x->dev, wm831x_num,
touch_devs, ARRAY_SIZE(touch_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8312:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (!pdata || !pdata->disable_touch)
mfd_add_devices(wm831x->dev, wm831x_num,
touch_devs, ARRAY_SIZE(touch_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8320:
@@ -1816,7 +1842,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8326:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
default:
@@ -1841,7 +1867,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
if (ret & WM831X_XTAL_ENA) {
ret = mfd_add_devices(wm831x->dev, wm831x_num,
rtc_devs, ARRAY_SIZE(rtc_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (ret != 0) {
dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
goto err_irq;
@@ -1854,7 +1880,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
/* Treat errors as non-critical */
ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
ARRAY_SIZE(backlight_devs), NULL,
- wm831x->irq_base);
+ 0);
if (ret < 0)
dev_err(wm831x->dev, "Failed to add backlight: %d\n",
ret);
@@ -1883,8 +1909,7 @@ void wm831x_device_exit(struct wm831x *wm831x)
{
wm831x_otp_exit(wm831x);
mfd_remove_devices(wm831x->dev);
- if (wm831x->irq_base)
- free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
+ free_irq(wm831x_irq(wm831x, WM831X_IRQ_AUXADC_DATA), wm831x);
wm831x_irq_exit(wm831x);
}
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index bec4d0539160..804e56ec99eb 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -18,6 +18,7 @@
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
@@ -328,7 +329,7 @@ static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data)
static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
int irq)
{
- return &wm831x_irqs[irq - wm831x->irq_base];
+ return &wm831x_irqs[irq];
}
static void wm831x_irq_lock(struct irq_data *data)
@@ -374,7 +375,7 @@ static void wm831x_irq_enable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
- data->irq);
+ data->hwirq);
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
@@ -383,7 +384,7 @@ static void wm831x_irq_disable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
- data->irq);
+ data->hwirq);
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
@@ -393,7 +394,7 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
int irq;
- irq = data->irq - wm831x->irq_base;
+ irq = data->hwirq;
if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
/* Ignore internal-only IRQs */
@@ -412,22 +413,25 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
* do the update here as we can be called with the bus lock
* held.
*/
+ wm831x->gpio_level_low[irq] = false;
+ wm831x->gpio_level_high[irq] = false;
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_EDGE_RISING:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_EDGE_FALLING:
wm831x->gpio_update[irq] = 0x10000;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_LEVEL_HIGH:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
- wm831x->gpio_level[irq] = true;
+ wm831x->gpio_level_high[irq] = true;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ wm831x->gpio_update[irq] = 0x10000;
+ wm831x->gpio_level_low[irq] = true;
break;
default:
return -EINVAL;
@@ -469,9 +473,11 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
* descriptors.
*/
if (primary & WM831X_TCHPD_INT)
- handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ WM831X_IRQ_TCHPD));
if (primary & WM831X_TCHDATA_INT)
- handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ WM831X_IRQ_TCHDATA));
primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
@@ -507,16 +513,29 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
if (*status & wm831x_irqs[i].mask)
- handle_nested_irq(wm831x->irq_base + i);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
/* Simulate an edge triggered IRQ by polling the input
* status. This is sucky but improves interoperability.
*/
if (primary == WM831X_GP_INT &&
- wm831x->gpio_level[i - WM831X_IRQ_GPIO_1]) {
+ wm831x->gpio_level_high[i - WM831X_IRQ_GPIO_1]) {
ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) {
- handle_nested_irq(wm831x->irq_base + i);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
+ ret = wm831x_reg_read(wm831x,
+ WM831X_GPIO_LEVEL);
+ }
+ }
+
+ if (primary == WM831X_GP_INT &&
+ wm831x->gpio_level_low[i - WM831X_IRQ_GPIO_1]) {
+ ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
+ while (!(ret & 1 << (i - WM831X_IRQ_GPIO_1))) {
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
ret = wm831x_reg_read(wm831x,
WM831X_GPIO_LEVEL);
}
@@ -527,10 +546,34 @@ out:
return IRQ_HANDLED;
}
+static int wm831x_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set it noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops wm831x_irq_domain_ops = {
+ .map = wm831x_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
int wm831x_irq_init(struct wm831x *wm831x, int irq)
{
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int i, cur_irq, ret;
+ struct irq_domain *domain;
+ int i, ret, irq_base;
mutex_init(&wm831x->irq_lock);
@@ -543,18 +586,33 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
}
/* Try to dynamically allocate IRQs if no base is specified */
- if (!pdata || !pdata->irq_base)
- wm831x->irq_base = -1;
+ if (pdata && pdata->irq_base) {
+ irq_base = irq_alloc_descs(pdata->irq_base, 0,
+ WM831X_NUM_IRQS, 0);
+ if (irq_base < 0) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
+ irq_base);
+ irq_base = 0;
+ }
+ } else {
+ irq_base = 0;
+ }
+
+ if (irq_base)
+ domain = irq_domain_add_legacy(wm831x->dev->of_node,
+ ARRAY_SIZE(wm831x_irqs),
+ irq_base, 0,
+ &wm831x_irq_domain_ops,
+ wm831x);
else
- wm831x->irq_base = pdata->irq_base;
+ domain = irq_domain_add_linear(wm831x->dev->of_node,
+ ARRAY_SIZE(wm831x_irqs),
+ &wm831x_irq_domain_ops,
+ wm831x);
- wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
- WM831X_NUM_IRQS, 0);
- if (wm831x->irq_base < 0) {
- dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
- wm831x->irq_base);
- wm831x->irq_base = 0;
- return 0;
+ if (!domain) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n");
+ return -EINVAL;
}
if (pdata && pdata->irq_cmos)
@@ -565,38 +623,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
WM831X_IRQ_OD, i);
- /* Try to flag /IRQ as a wake source; there are a number of
- * unconditional wake sources in the PMIC so this isn't
- * conditional but we don't actually care *too* much if it
- * fails.
- */
- ret = enable_irq_wake(irq);
- if (ret != 0) {
- dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
- ret);
- }
-
wm831x->irq = irq;
-
- /* Register them with genirq */
- for (cur_irq = wm831x->irq_base;
- cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, wm831x);
- irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
- }
+ wm831x->irq_domain = domain;
if (irq) {
+ /* Try to flag /IRQ as a wake source; there are a number of
+ * unconditional wake sources in the PMIC so this isn't
+ * conditional but we don't actually care *too* much if it
+ * fails.
+ */
+ ret = enable_irq_wake(irq);
+ if (ret != 0) {
+ dev_warn(wm831x->dev,
+ "Can't enable IRQ as wake source: %d\n",
+ ret);
+ }
+
ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"wm831x", wm831x);
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index dd1caaac55e4..8a9b11ca076a 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/mfd/wm8350/core.h>
@@ -74,7 +75,7 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
int bytes = num_regs * 2;
dev_dbg(wm8350->dev, "volatile read\n");
- ret = wm8350->read_dev(wm8350, reg, bytes, (char *)dest);
+ ret = regmap_raw_read(wm8350->regmap, reg, dest, bytes);
for (i = reg; i < reg + num_regs; i++) {
/* Cache is CPU endian */
@@ -96,9 +97,6 @@ static int wm8350_read(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *dest)
int ret = 0;
int bytes = num_regs * 2;
- if (wm8350->read_dev == NULL)
- return -ENODEV;
-
if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
dev_err(wm8350->dev, "invalid reg %x\n",
reg + num_regs - 1);
@@ -149,9 +147,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
int end = reg + num_regs;
int bytes = num_regs * 2;
- if (wm8350->write_dev == NULL)
- return -ENODEV;
-
if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
dev_err(wm8350->dev, "invalid reg %x\n",
reg + num_regs - 1);
@@ -182,7 +177,7 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
}
/* Actually write it out */
- return wm8350->write_dev(wm8350, reg, bytes, (char *)src);
+ return regmap_raw_write(wm8350->regmap, reg, src, bytes);
}
/*
@@ -515,9 +510,8 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
* a PMIC so the device many not be in a virgin state and we
* can't rely on the silicon values.
*/
- ret = wm8350->read_dev(wm8350, 0,
- sizeof(u16) * (WM8350_MAX_REGISTER + 1),
- wm8350->reg_cache);
+ ret = regmap_raw_read(wm8350->regmap, 0, wm8350->reg_cache,
+ sizeof(u16) * (WM8350_MAX_REGISTER + 1));
if (ret < 0) {
dev_err(wm8350->dev,
"failed to read initial cache values\n");
@@ -570,35 +564,30 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
struct wm8350_platform_data *pdata)
{
int ret;
- u16 id1, id2, mask_rev;
- u16 cust_id, mode, chip_rev;
+ unsigned int id1, id2, mask_rev;
+ unsigned int cust_id, mode, chip_rev;
dev_set_drvdata(wm8350->dev, wm8350);
/* get WM8350 revision and config mode */
- ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
+ ret = regmap_read(wm8350->regmap, WM8350_RESET_ID, &id1);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
goto err;
}
- ret = wm8350->read_dev(wm8350, WM8350_ID, sizeof(id2), &id2);
+ ret = regmap_read(wm8350->regmap, WM8350_ID, &id2);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
goto err;
}
- ret = wm8350->read_dev(wm8350, WM8350_REVISION, sizeof(mask_rev),
- &mask_rev);
+ ret = regmap_read(wm8350->regmap, WM8350_REVISION, &mask_rev);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read revision: %d\n", ret);
goto err;
}
- id1 = be16_to_cpu(id1);
- id2 = be16_to_cpu(id2);
- mask_rev = be16_to_cpu(mask_rev);
-
if (id1 != 0x6143) {
dev_err(wm8350->dev,
"Device with ID %x is not a WM8350\n", id1);
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index d955faaf27c4..a68aceb4e48c 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -15,47 +15,18 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/mfd/wm8350/core.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
-static int wm8350_i2c_read_device(struct wm8350 *wm8350, char reg,
- int bytes, void *dest)
-{
- int ret;
-
- ret = i2c_master_send(wm8350->i2c_client, &reg, 1);
- if (ret < 0)
- return ret;
- ret = i2c_master_recv(wm8350->i2c_client, dest, bytes);
- if (ret < 0)
- return ret;
- if (ret != bytes)
- return -EIO;
- return 0;
-}
-
-static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
- int bytes, void *src)
-{
- /* we add 1 byte for device register */
- u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
- int ret;
-
- if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
- return -EINVAL;
-
- msg[0] = reg;
- memcpy(&msg[1], src, bytes);
- ret = i2c_master_send(wm8350->i2c_client, msg, bytes + 1);
- if (ret < 0)
- return ret;
- if (ret != bytes + 1)
- return -EIO;
- return 0;
-}
+static const struct regmap_config wm8350_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+};
static int wm8350_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
@@ -67,20 +38,18 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
if (wm8350 == NULL)
return -ENOMEM;
+ wm8350->regmap = devm_regmap_init_i2c(i2c, &wm8350_regmap);
+ if (IS_ERR(wm8350->regmap)) {
+ ret = PTR_ERR(wm8350->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
i2c_set_clientdata(i2c, wm8350);
wm8350->dev = &i2c->dev;
- wm8350->i2c_client = i2c;
- wm8350->read_dev = wm8350_i2c_read_device;
- wm8350->write_dev = wm8350_i2c_write_device;
-
- ret = wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
- if (ret < 0)
- goto err;
-
- return ret;
-err:
- return ret;
+ return wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
}
static int wm8350_i2c_remove(struct i2c_client *i2c)
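For reference, the conversion above relies on the standard regmap I2C pattern; a minimal sketch, assuming the usual kernel headers (the wm_example_* names are illustrative and not part of this patch):

static const struct regmap_config wm_example_regmap = {
	.reg_bits = 8,		/* 8-bit register addresses */
	.val_bits = 16,		/* 16-bit register values */
};

static int wm_example_probe(struct i2c_client *i2c)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	/* Device-managed: the regmap is freed automatically on unbind */
	map = devm_regmap_init_i2c(i2c, &wm_example_regmap);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* regmap handles the bus transfer and value byte ordering */
	ret = regmap_read(map, 0x00, &val);
	if (ret != 0)
		return ret;

	dev_info(&i2c->dev, "register 0 reads 0x%x\n", val);
	return 0;
}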
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 1189a17f0f25..4b7d378551d5 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -23,136 +23,16 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-static struct {
- u16 readable; /* Mask of readable bits */
- u16 writable; /* Mask of writable bits */
- u16 vol; /* Mask of volatile bits */
- int is_codec; /* Register controlled by codec reset */
- u16 default_val; /* Value on reset */
-} reg_data[] = {
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x6172 }, /* R0 */
- { 0x7000, 0x0000, 0x8000, 0, 0x0000 }, /* R1 */
- { 0xFF17, 0xFF17, 0x0000, 0, 0x0000 }, /* R2 */
- { 0xEBF3, 0xEBF3, 0x0000, 1, 0x6000 }, /* R3 */
- { 0x3CF3, 0x3CF3, 0x0000, 1, 0x0000 }, /* R4 */
- { 0xF1F8, 0xF1F8, 0x0000, 1, 0x4050 }, /* R5 */
- { 0xFC1F, 0xFC1F, 0x0000, 1, 0x4000 }, /* R6 */
- { 0xDFDE, 0xDFDE, 0x0000, 1, 0x01C8 }, /* R7 */
- { 0xFCFC, 0xFCFC, 0x0000, 1, 0x0000 }, /* R8 */
- { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R9 */
- { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R10 */
- { 0x27F7, 0x27F7, 0x0000, 1, 0x0004 }, /* R11 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R12 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R13 */
- { 0x1FEF, 0x1FEF, 0x0000, 1, 0x0000 }, /* R14 */
- { 0x0163, 0x0163, 0x0000, 1, 0x0100 }, /* R15 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R16 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R17 */
- { 0x1FFF, 0x0FFF, 0x0000, 1, 0x0000 }, /* R18 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1000 }, /* R19 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R20 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R21 */
- { 0x0FDD, 0x0FDD, 0x0000, 1, 0x8000 }, /* R22 */
- { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0800 }, /* R23 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R24 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R25 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R26 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R27 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R28 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R29 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0066 }, /* R30 */
- { 0x0000, 0x0033, 0x0000, 1, 0x0022 }, /* R31 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R32 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R33 */
- { 0x0000, 0x0003, 0x0000, 1, 0x0003 }, /* R34 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0003 }, /* R35 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R36 */
- { 0x0000, 0x003F, 0x0000, 1, 0x0100 }, /* R37 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R38 */
- { 0x0000, 0x000F, 0x0000, 0, 0x0000 }, /* R39 */
- { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R40 */
- { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R41 */
- { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R42 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R43 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R44 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R45 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R46 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R47 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R48 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R49 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R50 */
- { 0x0000, 0x01B3, 0x0000, 1, 0x0180 }, /* R51 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R52 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R53 */
- { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R54 */
- { 0x0000, 0x0001, 0x0000, 1, 0x0000 }, /* R55 */
- { 0x0000, 0x003F, 0x0000, 1, 0x0000 }, /* R56 */
- { 0x0000, 0x004F, 0x0000, 1, 0x0000 }, /* R57 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R58 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R59 */
- { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0000 }, /* R60 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x0000 }, /* R61 */
- { 0x03FF, 0x03FF, 0x0000, 1, 0x0000 }, /* R62 */
- { 0x007F, 0x007F, 0x0000, 1, 0x0000 }, /* R63 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R64 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R65 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R66 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R67 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R68 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R69 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R70 */
- { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R71 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R72 */
- { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R73 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R74 */
- { 0x000E, 0x000E, 0x0000, 0, 0x0008 }, /* R75 */
- { 0xE00F, 0xE00F, 0x0000, 0, 0x0000 }, /* R76 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R77 */
- { 0x03C0, 0x03C0, 0x0000, 0, 0x02C0 }, /* R78 */
- { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R79 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x0000 }, /* R80 */
- { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R81 */
- { 0x2BFF, 0x0000, 0xffff, 0, 0x0000 }, /* R82 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R83 */
- { 0x80FF, 0x80FF, 0x0000, 0, 0x00ff }, /* R84 */
-};
-
-static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
+static bool wm8400_volatile(struct device *dev, unsigned int reg)
{
- int i, ret = 0;
-
- BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
- /* If there are any volatile reads then read back the entire block */
- for (i = reg; i < reg + num_regs; i++)
- if (reg_data[i].vol) {
- ret = regmap_bulk_read(wm8400->regmap, reg, dest,
- num_regs);
- return ret;
- }
-
- /* Otherwise use the cache */
- memcpy(dest, &wm8400->reg_cache[reg], num_regs * sizeof(u16));
-
- return 0;
-}
-
-static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
- u16 *src)
-{
- int ret, i;
-
- BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
- for (i = 0; i < num_regs; i++) {
- BUG_ON(!reg_data[reg + i].writable);
- wm8400->reg_cache[reg + i] = src[i];
- ret = regmap_write(wm8400->regmap, reg, src[i]);
- if (ret != 0)
- return ret;
+ switch (reg) {
+ case WM8400_INTERRUPT_STATUS_1:
+ case WM8400_INTERRUPT_LEVELS:
+ case WM8400_SHUTDOWN_REASON:
+ return true;
+ default:
+ return false;
}
-
- return 0;
}
/**
@@ -165,13 +45,12 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
*/
u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg)
{
- u16 val;
-
- mutex_lock(&wm8400->io_lock);
-
- wm8400_read(wm8400, reg, 1, &val);
+ unsigned int val;
+ int ret;
- mutex_unlock(&wm8400->io_lock);
+ ret = regmap_read(wm8400->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
return val;
}
@@ -179,63 +58,10 @@ EXPORT_SYMBOL_GPL(wm8400_reg_read);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
{
- int ret;
-
- mutex_lock(&wm8400->io_lock);
-
- ret = wm8400_read(wm8400, reg, count, data);
-
- mutex_unlock(&wm8400->io_lock);
-
- return ret;
+ return regmap_bulk_read(wm8400->regmap, reg, data, count);
}
EXPORT_SYMBOL_GPL(wm8400_block_read);
-/**
- * wm8400_set_bits - Bitmask write
- *
- * @wm8400: Pointer to wm8400 control structure
- * @reg: Register to access
- * @mask: Mask of bits to change
- * @val: Value to set for masked bits
- */
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val)
-{
- u16 tmp;
- int ret;
-
- mutex_lock(&wm8400->io_lock);
-
- ret = wm8400_read(wm8400, reg, 1, &tmp);
- tmp = (tmp & ~mask) | val;
- if (ret == 0)
- ret = wm8400_write(wm8400, reg, 1, &tmp);
-
- mutex_unlock(&wm8400->io_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm8400_set_bits);
-
-/**
- * wm8400_reset_codec_reg_cache - Reset cached codec registers to
- * their default values.
- */
-void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
-{
- int i;
-
- mutex_lock(&wm8400->io_lock);
-
- /* Reset all codec registers to their initial value */
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- if (reg_data[i].is_codec)
- wm8400->reg_cache[i] = reg_data[i].default_val;
-
- mutex_unlock(&wm8400->io_lock);
-}
-EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
-
static int wm8400_register_codec(struct wm8400 *wm8400)
{
struct mfd_cell cell = {
@@ -257,44 +83,24 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
static int wm8400_init(struct wm8400 *wm8400,
struct wm8400_platform_data *pdata)
{
- u16 reg;
- int ret, i;
-
- mutex_init(&wm8400->io_lock);
+ unsigned int reg;
+ int ret;
dev_set_drvdata(wm8400->dev, wm8400);
/* Check that this is actually a WM8400 */
- ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &i);
+ ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &reg);
if (ret != 0) {
dev_err(wm8400->dev, "Chip ID register read failed\n");
return -EIO;
}
- if (i != reg_data[WM8400_RESET_ID].default_val) {
- dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n", i);
+ if (reg != 0x6172) {
+ dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n",
+ reg);
return -ENODEV;
}
- /* We don't know what state the hardware is in and since this
- * is a PMIC we can't reset it safely so initialise the register
- * cache from the hardware.
- */
- ret = regmap_raw_read(wm8400->regmap, 0, wm8400->reg_cache,
- ARRAY_SIZE(wm8400->reg_cache));
- if (ret != 0) {
- dev_err(wm8400->dev, "Register cache read failed\n");
- return -EIO;
- }
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- wm8400->reg_cache[i] = be16_to_cpu(wm8400->reg_cache[i]);
-
- /* If the codec is in reset use hard coded values */
- if (!(wm8400->reg_cache[WM8400_POWER_MANAGEMENT_1] & WM8400_CODEC_ENA))
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- if (reg_data[i].is_codec)
- wm8400->reg_cache[i] = reg_data[i].default_val;
-
- ret = wm8400_read(wm8400, WM8400_ID, 1, &reg);
+ ret = regmap_read(wm8400->regmap, WM8400_ID, &reg);
if (ret != 0) {
dev_err(wm8400->dev, "ID register read failed: %d\n", ret);
return ret;
@@ -334,8 +140,22 @@ static const struct regmap_config wm8400_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = WM8400_REGISTER_COUNT - 1,
+
+ .volatile_reg = wm8400_volatile,
+
+ .cache_type = REGCACHE_RBTREE,
};
+/**
+ * wm8400_reset_codec_reg_cache - Reset cached codec registers to
+ * their default values.
+ */
+void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
+{
+ regmap_reinit_cache(wm8400->regmap, &wm8400_regmap_config);
+}
+EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
+
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8400_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 9d7ca1e978fa..1e321d349777 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -500,7 +500,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
ret);
goto err_enable;
}
- wm8994->revision = ret;
+ wm8994->revision = ret & WM8994_CHIP_REV_MASK;
+ wm8994->cust_id = (ret & WM8994_CUST_ID_MASK) >> WM8994_CUST_ID_SHIFT;
switch (wm8994->type) {
case WM8994:
@@ -553,8 +554,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
}
- dev_info(wm8994->dev, "%s revision %c\n", devname,
- 'A' + wm8994->revision);
+ dev_info(wm8994->dev, "%s revision %c CUST_ID %02x\n", devname,
+ 'A' + wm8994->revision, wm8994->cust_id);
switch (wm8994->type) {
case WM1811:
@@ -732,23 +733,7 @@ static struct i2c_driver wm8994_i2c_driver = {
.id_table = wm8994_i2c_id,
};
-static int __init wm8994_i2c_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&wm8994_i2c_driver);
- if (ret != 0)
- pr_err("Failed to register wm8994 I2C driver: %d\n", ret);
-
- return ret;
-}
-module_init(wm8994_i2c_init);
-
-static void __exit wm8994_i2c_exit(void)
-{
- i2c_del_driver(&wm8994_i2c_driver);
-}
-module_exit(wm8994_i2c_exit);
+module_i2c_driver(wm8994_i2c_driver);
MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
MODULE_LICENSE("GPL");
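module_i2c_driver() is shorthand for the registration boilerplate deleted above; roughly, the macro expands to something like this simplified sketch:

static int __init wm8994_i2c_driver_init(void)
{
	return i2c_add_driver(&wm8994_i2c_driver);
}
module_init(wm8994_i2c_driver_init);

static void __exit wm8994_i2c_driver_exit(void)
{
	i2c_del_driver(&wm8994_i2c_driver);
}
module_exit(wm8994_i2c_driver_exit);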
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index bfd25af6ecb1..52e9e2944940 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -1122,7 +1122,6 @@ static bool wm8994_volatile_register(struct device *dev, unsigned int reg)
case WM8994_RATE_STATUS:
case WM8958_MIC_DETECT_3:
case WM8994_DC_SERVO_4E:
- case WM8994_CHIP_REVISION:
case WM8994_INTERRUPT_STATUS_1:
case WM8994_INTERRUPT_STATUS_2:
return true;
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index d7a9aa14e5d5..042a8fe4efaa 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -142,10 +142,16 @@ static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_pwm_match[] = {
+ { .compatible = "stericsson,ab8500-pwm", },
+ {}
+};
+
static struct platform_driver ab8500_pwm_driver = {
.driver = {
.name = "ab8500-pwm",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_pwm_match,
},
.probe = ab8500_pwm_probe,
.remove = __devexit_p(ab8500_pwm_remove),
diff --git a/drivers/misc/c2port/Kconfig b/drivers/misc/c2port/Kconfig
index e46af9a5810d..33ee834e1b83 100644
--- a/drivers/misc/c2port/Kconfig
+++ b/drivers/misc/c2port/Kconfig
@@ -5,7 +5,7 @@
menuconfig C2PORT
tristate "Silicon Labs C2 port support (EXPERIMENTAL)"
depends on EXPERIMENTAL
- default no
+ default n
help
 	  This option enables support for the Silicon Labs C2 port used to
 	  program Silicon Labs microcontroller chips (and other 8051-compatible parts).
@@ -23,8 +23,8 @@ if C2PORT
config C2PORT_DURAMAR_2150
tristate "C2 port support for Eurotech's Duramar 2150 (EXPERIMENTAL)"
- depends on X86 && C2PORT
- default no
+ depends on X86
+ default n
help
 	  This option enables C2 support for the Eurotech Duramar 2150
 	  on-board microcontroller.
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 93936f1b75eb..23f5463d4cae 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
struct mei_cl *cl,
struct mei_io_list *cmpl_list)
{
- if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
+ if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control))) {
/* return the cancel routine */
list_del(&cb_pos->cb_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index c70333228337..783fcd7365bc 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
err = request_threaded_irq(pdev->irq,
NULL,
mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
+ IRQF_ONESHOT, mei_driver_name, dev);
else
err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
- goto unmap_memory;
+ goto disable_msi;
}
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@ release_irq:
mei_disable_interrupts(dev);
flush_scheduled_work();
free_irq(pdev->irq, dev);
+disable_msi:
pci_disable_msi(pdev);
-unmap_memory:
pci_iounmap(pdev, dev->mem_addr);
free_device:
kfree(dev);
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
@@ -1145,7 +1147,7 @@ static int mei_pci_resume(struct device *device)
err = request_threaded_irq(pdev->irq,
NULL,
mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
+ IRQF_ONESHOT, mei_driver_name, dev);
else
err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
@@ -1216,7 +1218,6 @@ module_init(mei_init_module);
*/
static void __exit mei_exit_module(void)
{
- misc_deregister(&mei_misc_device);
pci_unregister_driver(&mei_driver);
pr_debug("unloaded successfully.\n");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 6be5605707b4..e2ec0505eb5c 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = {
};
static const struct watchdog_info wd_info = {
.identity = INTEL_AMT_WATCHDOG_ID,
- .options = WDIOF_KEEPALIVEPING,
+ .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
};
static struct watchdog_device amt_wd_dev = {
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 17bbacb1b4b1..87b251ab6ec5 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -452,9 +452,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
if (msg->activate_gru_mq_desc_gpa !=
part_uv->activate_gru_mq_desc_gpa) {
- spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ spin_lock(&part_uv->flags_lock);
part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
- spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ spin_unlock(&part_uv->flags_lock);
part_uv->activate_gru_mq_desc_gpa =
msg->activate_gru_mq_desc_gpa;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dabec556ebb8..276d21ce6bc1 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -384,7 +384,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
md = mmc_blk_get(bdev->bd_disk);
if (!md) {
err = -EINVAL;
- goto cmd_done;
+ goto cmd_err;
}
card = md->queue.card;
@@ -483,6 +483,7 @@ cmd_rel_host:
cmd_done:
mmc_blk_put(md);
+cmd_err:
kfree(idata->buf);
kfree(idata);
return err;
@@ -553,7 +554,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
- unsigned int timeout_us;
struct scatterlist sg;
@@ -573,23 +573,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- data.timeout_ns = card->csd.tacc_ns * 100;
- data.timeout_clks = card->csd.tacc_clks * 100;
-
- timeout_us = data.timeout_ns / 1000;
- timeout_us += data.timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
-
- if (timeout_us > 100000) {
- data.timeout_ns = 100000000;
- data.timeout_clks = 0;
- }
-
data.blksz = 4;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
mrq.cmd = &cmd;
mrq.data = &data;
@@ -1283,7 +1272,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req;
+ struct request *req = rqc;
struct mmc_async_req *areq;
if (!rqc && !mq->mqrq_prev->req)
@@ -1291,6 +1280,16 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
do {
if (rqc) {
+ /*
+			 * When the 4KB native sector size is enabled, only
+			 * reads and writes that are a multiple of 8 blocks are allowed
+ */
+ if ((brq->data.blocks & 0x07) &&
+ (card->ext_csd.data_sector_size == 4096)) {
+ pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+ req->rq_disk->disk_name);
+ goto cmd_abort;
+ }
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
@@ -1538,7 +1537,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
"mmcblk%d%s", md->name_idx, subname ? subname : "");
- blk_queue_logical_block_size(md->queue.queue, 512);
+ if (mmc_card_mmc(card))
+ blk_queue_logical_block_size(md->queue.queue,
+ card->ext_csd.data_sector_size);
+ else
+ blk_queue_logical_block_size(md->queue.queue, 512);
+
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 996f8e36e23d..e360a979857d 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -96,7 +96,7 @@ static int mmc_queue_thread(void *d)
* on any queue on this host, and attempt to issue it. This may
* not be the queue we were asked to process.
*/
-static void mmc_request(struct request_queue *q)
+static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
@@ -171,12 +171,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
limit = *mmc_dev(host)->dma_mask;
mq->card = card;
- mq->queue = blk_init_queue(mmc_request, lock);
+ mq->queue = blk_init_queue(mmc_request_fn, lock);
if (!mq->queue)
return -ENOMEM;
- memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
- memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c60cee92a2b2..9b68933f27e7 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,6 +122,7 @@ static int mmc_bus_remove(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int mmc_bus_suspend(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
@@ -143,6 +144,7 @@ static int mmc_bus_resume(struct device *dev)
ret = drv->resume(card);
return ret;
}
+#endif
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 2c14be73254c..8f5dc08d6598 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -50,8 +50,8 @@ int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
goto egpioreq;
ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- cd->label, host);
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, cd->label, host);
if (ret < 0)
goto eirqreq;
@@ -73,6 +73,9 @@ void mmc_cd_gpio_free(struct mmc_host *host)
{
struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
+ if (!cd)
+ return;
+
free_irq(host->hotplug.irq, host);
gpio_free(cd->gpio);
kfree(cd);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ba821fe70bca..0b6141d29dbd 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -42,6 +42,7 @@
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;
+static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
@@ -1157,6 +1158,9 @@ static void mmc_power_up(struct mmc_host *host)
{
int bit;
+ if (host->ios.power_mode == MMC_POWER_ON)
+ return;
+
mmc_host_clk_hold(host);
/* If ocr is set, we use it */
@@ -1199,6 +1203,10 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
int err = 0;
+
+ if (host->ios.power_mode == MMC_POWER_OFF)
+ return;
+
mmc_host_clk_hold(host);
host->ios.clock = 0;
@@ -2005,7 +2013,6 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
- static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
@@ -2044,8 +2051,12 @@ void mmc_rescan(struct work_struct *work)
*/
mmc_bus_put(host);
- if (host->ops->get_cd && host->ops->get_cd(host) == 0)
+ if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
+ mmc_claim_host(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
goto out;
+ }
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
@@ -2063,7 +2074,8 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
- mmc_power_off(host);
+ host->f_init = max(freqs[0], host->f_min);
+ mmc_power_up(host);
mmc_detect_change(host, 0);
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 54df5adc0413..4f4489aa6bae 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -235,6 +235,36 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
return err;
}
+static void mmc_select_card_type(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+ unsigned int caps = host->caps, caps2 = host->caps2;
+ unsigned int hs_max_dtr = 0;
+
+ if (card_type & EXT_CSD_CARD_TYPE_26)
+ hs_max_dtr = MMC_HIGH_26_MAX_DTR;
+
+ if (caps & MMC_CAP_MMC_HIGHSPEED &&
+ card_type & EXT_CSD_CARD_TYPE_52)
+ hs_max_dtr = MMC_HIGH_52_MAX_DTR;
+
+ if ((caps & MMC_CAP_1_8V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
+ (caps & MMC_CAP_1_2V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
+ hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+
+ if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
+ (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
+ hs_max_dtr = MMC_HS200_MAX_DTR;
+
+ card->ext_csd.hs_max_dtr = hs_max_dtr;
+ card->ext_csd.card_type = card_type;
+}
+
/*
* Decode extended CSD.
*/
@@ -284,56 +314,9 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
+
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
- switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
- case EXT_CSD_CARD_TYPE_SDR_ALL:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
- break;
- case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
- break;
- case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
- break;
- case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
- break;
- case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
- break;
- case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
- break;
- case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- break;
- case EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 26000000;
- break;
- default:
- /* MMC v4 spec says this cannot happen */
- pr_warning("%s: card is mmc v4 but doesn't "
- "support any high-speed modes.\n",
- mmc_hostname(card->host));
- }
+ mmc_select_card_type(card);
card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.raw_erase_timeout_mult =
@@ -533,6 +516,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
} else {
card->ext_csd.data_tag_unit_size = 0;
}
+ } else {
+ card->ext_csd.data_sector_size = 512;
}
out:
@@ -556,14 +541,10 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
err = mmc_get_ext_csd(card, &bw_ext_csd);
if (err || bw_ext_csd == NULL) {
- if (bus_width != MMC_BUS_WIDTH_1)
- err = -EINVAL;
+ err = -EINVAL;
goto out;
}
- if (bus_width == MMC_BUS_WIDTH_1)
- goto out;
-
/* only compare read only fields */
err = !((card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -745,7 +726,7 @@ static int mmc_select_powerclass(struct mmc_card *card,
*/
static int mmc_select_hs200(struct mmc_card *card)
{
- int idx, err = 0;
+ int idx, err = -EINVAL;
struct mmc_host *host;
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_4,
@@ -761,10 +742,12 @@ static int mmc_select_hs200(struct mmc_card *card)
host = card->host;
if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
- host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
- if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
- err = mmc_set_signal_voltage(host,
- MMC_SIGNAL_VOLTAGE_180, 0);
+ host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
+
+ if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+ host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
/* If fails try again during next card power cycle */
if (err)
@@ -1347,7 +1330,7 @@ static int mmc_suspend(struct mmc_host *host)
if (!err)
mmc_card_set_sleep(host->card);
} else if (!mmc_host_is_spi(host))
- mmc_deselect_cards(host);
+ err = mmc_deselect_cards(host);
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_release_host(host);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c272c6868ecf..b2b43f624b9e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1075,16 +1075,18 @@ static void mmc_sd_detect(struct mmc_host *host)
*/
static int mmc_sd_suspend(struct mmc_host *host)
{
+ int err = 0;
+
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
if (!mmc_host_is_spi(host))
- mmc_deselect_cards(host);
+ err = mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_release_host(host);
- return 0;
+ return err;
}
/*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2c7c83f832d2..41c5fd8848f4 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card)
if (ret)
return ret;
+ if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+ pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+ mmc_hostname(card->host), ctrl);
+
+ /* set as 4-bit bus width */
+ ctrl &= ~SDIO_BUS_WIDTH_MASK;
ctrl |= SDIO_BUS_WIDTH_4BIT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -947,7 +953,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
if (!err && host->sdio_irqs)
- mmc_signal_sdio_irq(host);
+ wake_up_process(host->sdio_irq_thread);
mmc_release_host(host);
/*
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index f573e7f9f740..3d8ceb4084de 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -28,18 +28,20 @@
#include "sdio_ops.h"
-static int process_sdio_pending_irqs(struct mmc_card *card)
+static int process_sdio_pending_irqs(struct mmc_host *host)
{
+ struct mmc_card *card = host->card;
int i, ret, count;
unsigned char pending;
struct sdio_func *func;
/*
* Optimization, if there is only 1 function interrupt registered
- * call irq handler directly
+ * and we know an IRQ was signaled then call irq handler directly.
+ * Otherwise do the full probe.
*/
func = card->sdio_single_irq;
- if (func) {
+ if (func && host->sdio_irq_pending) {
func->irq_handler(func);
return 1;
}
@@ -116,7 +118,8 @@ static int sdio_irq_thread(void *_host)
ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
if (ret)
break;
- ret = process_sdio_pending_irqs(host->card);
+ ret = process_sdio_pending_irqs(host);
+ host->sdio_irq_pending = false;
mmc_release_host(host);
/*
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2bc06e7344db..aa131b32e3b2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -278,10 +278,13 @@ choice
Choose which driver to use for the Atmel MCI Silicon
config MMC_AT91
- tristate "AT91 SD/MMC Card Interface support"
+ tristate "AT91 SD/MMC Card Interface support (DEPRECATED)"
depends on ARCH_AT91
help
- This selects the AT91 MCI controller.
+ This selects the AT91 MCI controller. This driver will
+	  be removed soon (for more information have a look at
+ Documentation/feature-removal-schedule.txt). Please use
+ MMC_ATMEL_MCI.
If unsure, say N.
@@ -307,16 +310,6 @@ config MMC_ATMELMCI_DMA
If unsure, say N.
-config MMC_IMX
- tristate "Motorola i.MX Multimedia Card Interface support"
- depends on ARCH_MX1
- help
- This selects the Motorola i.MX Multimedia card Interface.
- If you have a i.MX platform with a Multimedia Card slot,
- say Y or M here.
-
- If unsure, say N.
-
config MMC_MSM
tristate "Qualcomm SDCC Controller Support"
depends on MMC && ARCH_MSM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3e7e26d08073..8922b06be925 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -4,7 +4,6 @@
obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
-obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 787aba1682bb..ab56f7db5315 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -140,4 +140,18 @@
#define atmci_writel(port,reg,value) \
__raw_writel((value), (port)->regs + reg)
+/*
+ * Convert sconfig's burst size into the Atmel MCI CHKSIZE encoding:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * This can be done by finding the most significant bit set.
+ */
+static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
+{
+ if (maxburst > 1)
+ return fls(maxburst) - 2;
+ else
+ return 0;
+}
+
#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
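As a quick sanity check of atmci_convert_chksize() above, assuming the usual fls() semantics (fls(1) = 1, fls(4) = 3, fls(8) = 4, fls(16) = 5):

	maxburst = 1  -> else branch           -> 0
	maxburst = 4  -> fls(4)  - 2 = 3 - 2   -> 1
	maxburst = 8  -> fls(8)  - 2 = 4 - 2   -> 2
	maxburst = 16 -> fls(16) - 2 = 5 - 2   -> 3

which matches the 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3 mapping described in the comment.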
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e94476beca18..f2c115e06438 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -45,19 +45,19 @@
#define ATMCI_DMA_THRESHOLD 16
enum {
- EVENT_CMD_COMPLETE = 0,
+ EVENT_CMD_RDY = 0,
EVENT_XFER_COMPLETE,
- EVENT_DATA_COMPLETE,
+ EVENT_NOTBUSY,
EVENT_DATA_ERROR,
};
enum atmel_mci_state {
STATE_IDLE = 0,
STATE_SENDING_CMD,
- STATE_SENDING_DATA,
- STATE_DATA_BUSY,
+ STATE_DATA_XFER,
+ STATE_WAITING_NOTBUSY,
STATE_SENDING_STOP,
- STATE_DATA_ERROR,
+ STATE_END_REQUEST,
};
enum atmci_xfer_dir {
@@ -78,6 +78,9 @@ struct atmel_mci_caps {
bool has_highspeed;
bool has_rwproof;
bool has_odd_clk_div;
+ bool has_bad_data_ordering;
+ bool need_reset_after_xfer;
+ bool need_blksz_mul_4;
};
struct atmel_mci_dma {
@@ -91,6 +94,11 @@ struct atmel_mci_dma {
* @regs: Pointer to MMIO registers.
* @sg: Scatterlist entry currently being processed by PIO or PDC code.
* @pio_offset: Offset into the current scatterlist entry.
+ * @buffer: Buffer used if we don't have the r/w proof capability. We
+ * don't have the time to switch pdc buffers so we have to use only
+ * one buffer for the full transaction.
+ * @buf_size: size of the buffer.
+ * @phys_buf_addr: buffer address needed for pdc.
* @cur_slot: The slot which is currently using the controller.
* @mrq: The request currently being processed on @cur_slot,
* or NULL if the controller is idle.
@@ -116,6 +124,7 @@ struct atmel_mci_dma {
* @queue: List of slots waiting for access to the controller.
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
+ * @timer: Software timeout timer, needed because the hardware data timeout error flag may never be raised.
* @mode_reg: Value of the MR register.
* @cfg_reg: Value of the CFG register.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
@@ -166,6 +175,9 @@ struct atmel_mci {
struct scatterlist *sg;
unsigned int pio_offset;
+ unsigned int *buffer;
+ unsigned int buf_size;
+ dma_addr_t buf_phys_addr;
struct atmel_mci_slot *cur_slot;
struct mmc_request *mrq;
@@ -189,6 +201,7 @@ struct atmel_mci {
bool need_clock_update;
bool need_reset;
+ struct timer_list timer;
u32 mode_reg;
u32 cfg_reg;
unsigned long bus_hz;
@@ -480,6 +493,32 @@ err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
+static inline unsigned int atmci_get_version(struct atmel_mci *host)
+{
+ return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
+}
+
+static void atmci_timeout_timer(unsigned long data)
+{
+ struct atmel_mci *host;
+
+ host = (struct atmel_mci *)data;
+
+ dev_dbg(&host->pdev->dev, "software timeout\n");
+
+ if (host->mrq->cmd->data) {
+ host->mrq->cmd->data->error = -ETIMEDOUT;
+ host->data = NULL;
+ } else {
+ host->mrq->cmd->error = -ETIMEDOUT;
+ host->cmd = NULL;
+ }
+ host->need_reset = 1;
+ host->state = STATE_END_REQUEST;
+ smp_wmb();
+ tasklet_schedule(&host->tasklet);
+}
+
static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
unsigned int ns)
{
@@ -591,6 +630,7 @@ static void atmci_send_command(struct atmel_mci *host,
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
+ dev_dbg(&host->pdev->dev, "send stop command\n");
atmci_send_command(host, data->stop, host->stop_cmdr);
atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
@@ -603,6 +643,7 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
u32 pointer_reg, counter_reg;
+ unsigned int buf_size;
if (dir == XFER_RECEIVE) {
pointer_reg = ATMEL_PDC_RPR;
@@ -617,8 +658,15 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
counter_reg += ATMEL_PDC_SCND_BUF_OFF;
}
- atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
- if (host->data_size <= sg_dma_len(host->sg)) {
+ if (!host->caps.has_rwproof) {
+ buf_size = host->buf_size;
+ atmci_writel(host, pointer_reg, host->buf_phys_addr);
+ } else {
+ buf_size = sg_dma_len(host->sg);
+ atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+ }
+
+ if (host->data_size <= buf_size) {
if (host->data_size & 0x3) {
/* If size is different from modulo 4, transfer bytes */
atmci_writel(host, counter_reg, host->data_size);
@@ -670,7 +718,20 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
*/
static void atmci_pdc_complete(struct atmel_mci *host)
{
+ int transfer_size = host->data->blocks * host->data->blksz;
+ int i;
+
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+
+ if ((!host->caps.has_rwproof)
+ && (host->data->flags & MMC_DATA_READ)) {
+ if (host->caps.has_bad_data_ordering)
+ for (i = 0; i < transfer_size; i++)
+ host->buffer[i] = swab32(host->buffer[i]);
+ sg_copy_from_buffer(host->data->sg, host->data->sg_len,
+ host->buffer, transfer_size);
+ }
+
atmci_pdc_cleanup(host);
/*
@@ -678,9 +739,10 @@ static void atmci_pdc_complete(struct atmel_mci *host)
* to send the stop command or waiting for NBUSY in this case.
*/
if (host->data) {
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
- atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
@@ -716,6 +778,8 @@ static void atmci_dma_complete(void *arg)
* to send the stop command or waiting for NBUSY in this case.
*/
if (data) {
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
@@ -791,6 +855,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
u32 iflags, tmp;
unsigned int sg_len;
enum dma_data_direction dir;
+ int i;
data->error = -EINPROGRESS;
@@ -806,7 +871,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
} else {
dir = DMA_TO_DEVICE;
- iflags |= ATMCI_ENDTX | ATMCI_TXBUFE;
+ iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
}
/* Set BLKLEN */
@@ -818,6 +883,16 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+
+ if ((!host->caps.has_rwproof)
+ && (host->data->flags & MMC_DATA_WRITE)) {
+ sg_copy_to_buffer(host->data->sg, host->data->sg_len,
+ host->buffer, host->data_size);
+ if (host->caps.has_bad_data_ordering)
+ for (i = 0; i < host->data_size; i++)
+ host->buffer[i] = swab32(host->buffer[i]);
+ }
+
if (host->data_size)
atmci_pdc_set_both_buf(host,
((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
@@ -835,6 +910,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
enum dma_data_direction direction;
enum dma_transfer_direction slave_dirn;
unsigned int sglen;
+ u32 maxburst;
u32 iflags;
data->error = -EINPROGRESS;
@@ -868,17 +944,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (!chan)
return -ENODEV;
- if (host->caps.has_dma)
- atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
-
if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+ maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
} else {
direction = DMA_TO_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+ maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
}
+ atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
+
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
@@ -931,6 +1008,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
static void atmci_stop_transfer(struct atmel_mci *host)
{
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -940,8 +1019,7 @@ static void atmci_stop_transfer(struct atmel_mci *host)
*/
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
- atmci_set_pending(host, EVENT_XFER_COMPLETE);
- atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}
static void atmci_stop_transfer_dma(struct atmel_mci *host)
@@ -953,6 +1031,8 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host)
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -977,9 +1057,12 @@ static void atmci_start_request(struct atmel_mci *host,
host->pending_events = 0;
host->completed_events = 0;
+ host->cmd_status = 0;
host->data_status = 0;
- if (host->need_reset) {
+ dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
+
+ if (host->need_reset || host->caps.need_reset_after_xfer) {
iflags = atmci_readl(host, ATMCI_IMR);
iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
@@ -994,7 +1077,7 @@ static void atmci_start_request(struct atmel_mci *host,
iflags = atmci_readl(host, ATMCI_IMR);
if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
- dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
+ dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
iflags);
if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
@@ -1043,6 +1126,8 @@ static void atmci_start_request(struct atmel_mci *host,
* prepared yet.)
*/
atmci_writel(host, ATMCI_IER, iflags);
+
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
}
static void atmci_queue_request(struct atmel_mci *host,
@@ -1057,6 +1142,7 @@ static void atmci_queue_request(struct atmel_mci *host,
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
+ dev_dbg(&host->pdev->dev, "queue request\n");
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
@@ -1069,6 +1155,7 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_data *data;
WARN_ON(slot->mrq);
+ dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
/*
* We may "know" the card is gone even though there's still an
@@ -1308,6 +1395,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
host->state = STATE_IDLE;
}
+ del_timer(&host->timer);
+
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
@@ -1330,21 +1419,13 @@ static void atmci_command_complete(struct atmel_mci *host,
cmd->error = -EILSEQ;
else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
cmd->error = -EIO;
- else
- cmd->error = 0;
-
- if (cmd->error) {
- dev_dbg(&host->pdev->dev,
- "command error: status=0x%08x\n", status);
-
- if (cmd->data) {
- host->stop_transfer(host);
- host->data = NULL;
- atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
- | ATMCI_TXRDY | ATMCI_RXRDY
- | ATMCI_DATA_ERROR_FLAGS);
+ else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
+ if (host->caps.need_blksz_mul_4) {
+ cmd->error = -EINVAL;
+ host->need_reset = 1;
}
- }
+ } else
+ cmd->error = 0;
}
static void atmci_detect_change(unsigned long data)
@@ -1407,23 +1488,21 @@ static void atmci_detect_change(unsigned long data)
break;
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
- if (!mrq->data)
- break;
- /* fall through */
- case STATE_SENDING_DATA:
+ if (mrq->data)
+ host->stop_transfer(host);
+ break;
+ case STATE_DATA_XFER:
mrq->data->error = -ENOMEDIUM;
host->stop_transfer(host);
break;
- case STATE_DATA_BUSY:
- case STATE_DATA_ERROR:
- if (mrq->data->error == -EINPROGRESS)
- mrq->data->error = -ENOMEDIUM;
- if (!mrq->stop)
- break;
- /* fall through */
+ case STATE_WAITING_NOTBUSY:
+ mrq->data->error = -ENOMEDIUM;
+ break;
case STATE_SENDING_STOP:
mrq->stop->error = -ENOMEDIUM;
break;
+ case STATE_END_REQUEST:
+ break;
}
atmci_request_end(host, mrq);
@@ -1451,7 +1530,6 @@ static void atmci_tasklet_func(unsigned long priv)
struct atmel_mci *host = (struct atmel_mci *)priv;
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
- struct mmc_command *cmd = host->cmd;
enum atmel_mci_state state = host->state;
enum atmel_mci_state prev_state;
u32 status;
@@ -1467,107 +1545,186 @@ static void atmci_tasklet_func(unsigned long priv)
do {
prev_state = state;
+ dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
switch (state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
+ /*
+			 * The command has been sent; we are waiting for command
+			 * ready. Three next states are then possible:
+			 * END_REQUEST by default, WAITING_NOTBUSY if the command
+			 * needs it, or DATA_XFER if there is data.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
- EVENT_CMD_COMPLETE))
+ EVENT_CMD_RDY))
break;
+ dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
host->cmd = NULL;
- atmci_set_completed(host, EVENT_CMD_COMPLETE);
+ atmci_set_completed(host, EVENT_CMD_RDY);
atmci_command_complete(host, mrq->cmd);
- if (!mrq->data || cmd->error) {
- atmci_request_end(host, host->mrq);
- goto unlock;
- }
+ if (mrq->data) {
+ dev_dbg(&host->pdev->dev,
+ "command with data transfer");
+ /*
+ * If there is a command error don't start
+ * data transfer.
+ */
+ if (mrq->cmd->error) {
+ host->stop_transfer(host);
+ host->data = NULL;
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
+ } else
+ state = STATE_DATA_XFER;
+ } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
+ dev_dbg(&host->pdev->dev,
+ "command response need waiting notbusy");
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ } else
+ state = STATE_END_REQUEST;
- prev_state = state = STATE_SENDING_DATA;
- /* fall through */
+ break;
- case STATE_SENDING_DATA:
+ case STATE_DATA_XFER:
if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
- host->stop_transfer(host);
- if (data->stop)
- atmci_send_stop_cmd(host, data);
- state = STATE_DATA_ERROR;
+ dev_dbg(&host->pdev->dev, "set completed data error\n");
+ atmci_set_completed(host, EVENT_DATA_ERROR);
+ state = STATE_END_REQUEST;
break;
}
+ /*
+			 * A data transfer is in progress. The event expected
+			 * to move to the next state depends on the data transfer
+			 * type (PDC or DMA). Once the transfer is done we can move
+			 * to the next step, which is WAITING_NOTBUSY in the write
+			 * case and directly SENDING_STOP in the read case.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_XFER_COMPLETE))
break;
+ dev_dbg(&host->pdev->dev,
+ "(%s) set completed xfer complete\n",
+ __func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
- prev_state = state = STATE_DATA_BUSY;
- /* fall through */
- case STATE_DATA_BUSY:
- if (!atmci_test_and_clear_pending(host,
- EVENT_DATA_COMPLETE))
- break;
-
- host->data = NULL;
- atmci_set_completed(host, EVENT_DATA_COMPLETE);
- status = host->data_status;
- if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
- if (status & ATMCI_DTOE) {
- dev_dbg(&host->pdev->dev,
- "data timeout error\n");
- data->error = -ETIMEDOUT;
- } else if (status & ATMCI_DCRCE) {
- dev_dbg(&host->pdev->dev,
- "data CRC error\n");
- data->error = -EILSEQ;
- } else {
- dev_dbg(&host->pdev->dev,
- "data FIFO error (status=%08x)\n",
- status);
- data->error = -EIO;
- }
+ if (host->data->flags & MMC_DATA_WRITE) {
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ } else if (host->mrq->stop) {
+ atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+ atmci_send_stop_cmd(host, data);
+ state = STATE_SENDING_STOP;
} else {
+ host->data = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
- atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
}
+ break;
- if (!data->stop) {
- atmci_request_end(host, host->mrq);
- goto unlock;
- }
+ case STATE_WAITING_NOTBUSY:
+ /*
+			 * We can be in this state for two reasons: a command
+			 * that requires waiting for the not-busy signal (stop
+			 * command included) or a write operation. In the latter
+			 * case, we need to send a stop command.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+ if (!atmci_test_and_clear_pending(host,
+ EVENT_NOTBUSY))
+ break;
- prev_state = state = STATE_SENDING_STOP;
- if (!data->error)
- atmci_send_stop_cmd(host, data);
- /* fall through */
+ dev_dbg(&host->pdev->dev, "set completed not busy\n");
+ atmci_set_completed(host, EVENT_NOTBUSY);
+
+ if (host->data) {
+ /*
+ * For some commands such as CMD53, even if
+ * there is data transfer, there is no stop
+ * command to send.
+ */
+ if (host->mrq->stop) {
+ atmci_writel(host, ATMCI_IER,
+ ATMCI_CMDRDY);
+ atmci_send_stop_cmd(host, data);
+ state = STATE_SENDING_STOP;
+ } else {
+ host->data = NULL;
+ data->bytes_xfered = data->blocks
+ * data->blksz;
+ data->error = 0;
+ state = STATE_END_REQUEST;
+ }
+ } else
+ state = STATE_END_REQUEST;
+ break;
case STATE_SENDING_STOP:
+ /*
+ * In this state, it is important to set host->data to
+ * NULL (which is tested in the waiting notbusy state)
+ * in order to go to the end request state instead of
+ * sending stop again.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
- EVENT_CMD_COMPLETE))
+ EVENT_CMD_RDY))
break;
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
host->cmd = NULL;
+ host->data = NULL;
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
atmci_command_complete(host, mrq->stop);
- atmci_request_end(host, host->mrq);
- goto unlock;
+ if (mrq->stop->error) {
+ host->stop_transfer(host);
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
+ } else {
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ }
+ break;
- case STATE_DATA_ERROR:
- if (!atmci_test_and_clear_pending(host,
- EVENT_XFER_COMPLETE))
- break;
+ case STATE_END_REQUEST:
+ atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ status = host->data_status;
+ if (unlikely(status)) {
+ host->stop_transfer(host);
+ host->data = NULL;
+ if (status & ATMCI_DTOE) {
+ data->error = -ETIMEDOUT;
+ } else if (status & ATMCI_DCRCE) {
+ data->error = -EILSEQ;
+ } else {
+ data->error = -EIO;
+ }
+ }
- state = STATE_DATA_BUSY;
+ atmci_request_end(host, host->mrq);
+ state = STATE_IDLE;
break;
}
} while (state != prev_state);
host->state = state;
-unlock:
spin_unlock(&host->lock);
}
@@ -1620,9 +1777,6 @@ static void atmci_read_data_pio(struct atmel_mci *host)
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
- smp_wmb();
- atmci_set_pending(host, EVENT_DATA_ERROR);
- tasklet_schedule(&host->tasklet);
return;
}
} while (status & ATMCI_RXRDY);
@@ -1691,9 +1845,6 @@ static void atmci_write_data_pio(struct atmel_mci *host)
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
- smp_wmb();
- atmci_set_pending(host, EVENT_DATA_ERROR);
- tasklet_schedule(&host->tasklet);
return;
}
} while (status & ATMCI_TXRDY);
@@ -1711,16 +1862,6 @@ done:
atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
-static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
-{
- atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
-
- host->cmd_status = status;
- smp_wmb();
- atmci_set_pending(host, EVENT_CMD_COMPLETE);
- tasklet_schedule(&host->tasklet);
-}
-
static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
{
int i;
@@ -1748,17 +1889,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
break;
if (pending & ATMCI_DATA_ERROR_FLAGS) {
+ dev_dbg(&host->pdev->dev, "IRQ: data error\n");
atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
- | ATMCI_RXRDY | ATMCI_TXRDY);
- pending &= atmci_readl(host, ATMCI_IMR);
+ | ATMCI_RXRDY | ATMCI_TXRDY
+ | ATMCI_ENDRX | ATMCI_ENDTX
+ | ATMCI_RXBUFF | ATMCI_TXBUFE);
host->data_status = status;
+ dev_dbg(&host->pdev->dev, "set pending data error\n");
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_TXBUFE) {
+ dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
/*
@@ -1774,6 +1919,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDTX) {
+ dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
if (host->data_size) {
@@ -1784,6 +1930,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
}
if (pending & ATMCI_RXBUFF) {
+ dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/*
@@ -1799,6 +1946,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDRX) {
+ dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
if (host->data_size) {
@@ -1808,23 +1956,44 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
}
}
+ /*
+		 * Early MCI IP revisions, mainly the ones with PDC, have
+		 * issues with the notbusy signal: it is never raised after
+		 * data transmission if no stop command has been sent.
+		 * The appropriate workaround is to use the BLKE signal.
+ */
+ if (pending & ATMCI_BLKE) {
+ dev_dbg(&host->pdev->dev, "IRQ: blke\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ atmci_set_pending(host, EVENT_NOTBUSY);
+ tasklet_schedule(&host->tasklet);
+ }
if (pending & ATMCI_NOTBUSY) {
- atmci_writel(host, ATMCI_IDR,
- ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY);
- if (!host->data_status)
- host->data_status = status;
+ dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
smp_wmb();
- atmci_set_pending(host, EVENT_DATA_COMPLETE);
+ dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
+
if (pending & ATMCI_RXRDY)
atmci_read_data_pio(host);
if (pending & ATMCI_TXRDY)
atmci_write_data_pio(host);
- if (pending & ATMCI_CMDRDY)
- atmci_cmd_interrupt(host, status);
+ if (pending & ATMCI_CMDRDY) {
+ dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
+ host->cmd_status = status;
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
+ atmci_set_pending(host, EVENT_CMD_RDY);
+ tasklet_schedule(&host->tasklet);
+ }
if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
atmci_sdio_interrupt(host, status);
@@ -1877,13 +2046,26 @@ static int __init atmci_init_slot(struct atmel_mci *host,
mmc->caps |= MMC_CAP_SDIO_IRQ;
if (host->caps.has_highspeed)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
- if (slot_data->bus_width >= 4)
+ /*
+	 * Without the read/write proof capability, it is strongly suggested
+	 * to use only a 1-bit data bus to prevent FIFO underruns and
+	 * overruns, which would corrupt the data.
+ */
+ if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- mmc->max_segs = 64;
- mmc->max_req_size = 32768 * 512;
- mmc->max_blk_size = 32768;
- mmc->max_blk_count = 512;
+ if (atmci_get_version(host) < 0x200) {
+ mmc->max_segs = 256;
+ mmc->max_blk_size = 4095;
+ mmc->max_blk_count = 256;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
+ } else {
+ mmc->max_segs = 64;
+ mmc->max_req_size = 32768 * 512;
+ mmc->max_blk_size = 32768;
+ mmc->max_blk_count = 512;
+ }
/* Assume card is present initially */
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
@@ -2007,11 +2189,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
}
}
-static inline unsigned int atmci_get_version(struct atmel_mci *host)
-{
- return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
-}
-
/*
* HSMCI (High Speed MCI) module is not fully compatible with MCI module.
* HSMCI provides DMA support and a new config register but no more supports
@@ -2032,6 +2209,9 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_highspeed = 0;
host->caps.has_rwproof = 0;
host->caps.has_odd_clk_div = 0;
+ host->caps.has_bad_data_ordering = 1;
+ host->caps.need_reset_after_xfer = 1;
+ host->caps.need_blksz_mul_4 = 1;
/* keep only major version number */
switch (version & 0xf00) {
@@ -2051,7 +2231,11 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_highspeed = 1;
case 0x200:
host->caps.has_rwproof = 1;
+ host->caps.need_blksz_mul_4 = 0;
case 0x100:
+ host->caps.has_bad_data_ordering = 0;
+ host->caps.need_reset_after_xfer = 0;
+ case 0x0:
break;
default:
host->caps.has_pdc = 0;
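The capability switch above relies on case fall-through: each newer IP revision inherits every capability granted by the cases below it, while the defaults set before the switch assume the oldest, most restricted IP. A stand-alone sketch of that cumulative logic for the revisions visible in this hunk (illustrative only, not part of the patch):

#include <stdio.h>

struct atmci_caps {			/* subset of the driver's capability flags */
	int has_rwproof;
	int need_blksz_mul_4;
	int has_bad_data_ordering;
	int need_reset_after_xfer;
};

/* Hypothetical helper mirroring the fall-through in atmci_get_cap(). */
static struct atmci_caps caps_for_version(unsigned int version)
{
	struct atmci_caps c = { 0, 1, 1, 1 };	/* defaults: assume the oldest IP */

	switch (version & 0xf00) {
	case 0x200:
		c.has_rwproof = 1;
		c.need_blksz_mul_4 = 0;
		/* fall through */
	case 0x100:
		c.has_bad_data_ordering = 0;
		c.need_reset_after_xfer = 0;
		/* fall through */
	case 0x0:
		break;
	}
	return c;
}

int main(void)
{
	struct atmci_caps v1 = caps_for_version(0x100);
	struct atmci_caps v2 = caps_for_version(0x210);

	printf("v1xx: rwproof=%d reset_after_xfer=%d\n",
	       v1.has_rwproof, v1.need_reset_after_xfer);
	printf("v2xx: rwproof=%d reset_after_xfer=%d\n",
	       v2.has_rwproof, v2.need_reset_after_xfer);
	return 0;
}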
@@ -2132,20 +2316,28 @@ static int __init atmci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
+ setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
/* We need at least one slot to succeed */
nr_slots = 0;
ret = -ENODEV;
if (pdata->slot[0].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[0],
0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
- if (!ret)
+ if (!ret) {
nr_slots++;
+ host->buf_size = host->slot[0]->mmc->max_req_size;
+ }
}
if (pdata->slot[1].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[1],
1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
- if (!ret)
+ if (!ret) {
nr_slots++;
+ if (host->slot[1]->mmc->max_req_size > host->buf_size)
+ host->buf_size =
+ host->slot[1]->mmc->max_req_size;
+ }
}
if (!nr_slots) {
@@ -2153,6 +2345,17 @@ static int __init atmci_probe(struct platform_device *pdev)
goto err_init_slot;
}
+ if (!host->caps.has_rwproof) {
+ host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
+ &host->buf_phys_addr,
+ GFP_KERNEL);
+ if (!host->buffer) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "buffer allocation failed\n");
+ goto err_init_slot;
+ }
+ }
+
dev_info(&pdev->dev,
"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
host->mapbase, irq, nr_slots);
@@ -2179,6 +2382,10 @@ static int __exit atmci_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
+ if (host->buffer)
+ dma_free_coherent(&pdev->dev, host->buf_size,
+ host->buffer, host->buf_phys_addr);
+
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
atmci_cleanup_slot(host->slot[i], i);
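The probe and remove changes above manage a single coherent bounce buffer for IPs without the read/write proof capability; it is sized from the largest mmc->max_req_size of the initialized slots, which in turn depends on the IP version. A stand-alone sketch of that sizing (illustrative only, not part of the patch):

#include <stdio.h>

/* Mirror the per-version transfer limits set in atmci_init_slot(). */
static unsigned int max_req_size(unsigned int version)
{
	if (version < 0x200)
		return 4095u * 256u;	/* max_blk_size * max_blk_count */
	return 32768u * 512u;
}

int main(void)
{
	unsigned int version = 0x100;	/* assumed old IP without rwproof */
	unsigned int slot_a = max_req_size(version);
	unsigned int slot_b = max_req_size(version);
	unsigned int buf_size = slot_a > slot_b ? slot_a : slot_b;

	/* The driver would dma_alloc_coherent() this many bytes. */
	printf("bounce buffer: %u bytes\n", buf_size);
	return 0;
}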
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index c1f3673ae1ef..7cf6c624bf73 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1533,4 +1533,5 @@ module_exit(davinci_mmcsd_exit);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
+MODULE_ALIAS("platform:davinci_mmc");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ab3fc4617107..1ca5e72ceb65 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -100,8 +100,6 @@ struct dw_mci_slot {
int last_detect_state;
};
-static struct workqueue_struct *dw_mci_card_workqueue;
-
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -420,6 +418,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
p->des3 = host->sg_dma;
p->des0 = IDMAC_DES0_ER;
+ mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
SDMMC_IDMAC_INT_TI);
@@ -617,14 +617,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
u32 div;
if (slot->clock != host->current_speed) {
- if (host->bus_hz % slot->clock)
+ div = host->bus_hz / slot->clock;
+ if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
/*
* move the + 1 after the divide to prevent
* over-clocking the card.
*/
- div = ((host->bus_hz / slot->clock) >> 1) + 1;
- else
- div = (host->bus_hz / slot->clock) >> 1;
+ div += 1;
+
+ div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -859,10 +860,10 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
int_mask = mci_readl(host, INTMASK);
if (enb) {
mci_writel(host, INTMASK,
- (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
+ (int_mask | SDMMC_INT_SDIO(slot->id)));
} else {
mci_writel(host, INTMASK,
- (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
+ (int_mask & ~SDMMC_INT_SDIO(slot->id)));
}
}
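The INTMASK change above drops a redundant shift: assuming SDMMC_INT_SDIO(id) already expands to a ready-made bit mask such as BIT(16 + id), shifting 1 by that mask again would address a nonsensical bit. A small illustration under that assumption (the macro value here is assumed for the demo):

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define SDMMC_INT_SDIO(n)	BIT(16 + (n))	/* assumed definition for this demo */

int main(void)
{
	unsigned int id = 1;
	unsigned int mask = SDMMC_INT_SDIO(id);

	/*
	 * The removed code computed int_mask | (1 << SDMMC_INT_SDIO(id)):
	 * for id = 1 that would be a shift by 131072 positions, which is
	 * meaningless for a 32-bit interrupt mask. Using the macro value
	 * directly sets the intended bit.
	 */
	printf("SDMMC_INT_SDIO(%u) = 0x%08x (bit %u)\n", id, mask, 16 + id);
	return 0;
}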
@@ -941,8 +942,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
mdelay(20);
if (cmd->data) {
- host->data = NULL;
dw_mci_stop_dma(host);
+ host->data = NULL;
}
}
}
@@ -1605,7 +1606,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
- queue_work(dw_mci_card_workqueue, &host->card_work);
+ queue_work(host->card_workqueue, &host->card_work);
}
/* Handle SDIO Interrupts */
@@ -1625,7 +1626,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
- set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
host->dma_ops->complete(host);
}
#endif
@@ -1727,7 +1727,8 @@ static void dw_mci_work_routine_card(struct work_struct *work)
#ifdef CONFIG_MMC_DW_IDMAC
ctrl = mci_readl(host, BMOD);
- ctrl |= 0x01; /* Software reset of DMA */
+ /* Software reset of DMA */
+ ctrl |= SDMMC_IDMAC_SWRESET;
mci_writel(host, BMOD, ctrl);
#endif
@@ -1844,7 +1845,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
* Card may have been plugged in prior to boot so we
* need to run the detect tasklet
*/
- queue_work(dw_mci_card_workqueue, &host->card_work);
+ queue_work(host->card_workqueue, &host->card_work);
return 0;
}
@@ -1952,10 +1953,6 @@ int dw_mci_probe(struct dw_mci *host)
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
-
- host->dma_ops = host->pdata->dma_ops;
- dw_mci_init_dma(host);
-
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values.
@@ -1983,10 +1980,11 @@ int dw_mci_probe(struct dw_mci *host)
}
/* Reset all blocks */
- if (!mci_wait_reset(&host->dev, host)) {
- ret = -ENODEV;
- goto err_dmaunmap;
- }
+ if (!mci_wait_reset(&host->dev, host))
+ return -ENODEV;
+
+ host->dma_ops = host->pdata->dma_ops;
+ dw_mci_init_dma(host);
/* Clear the interrupts for the host controller */
mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2021,9 +2019,9 @@ int dw_mci_probe(struct dw_mci *host)
mci_writel(host, CLKSRC, 0);
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
- dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
+ host->card_workqueue = alloc_workqueue("dw-mci-card",
WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
- if (!dw_mci_card_workqueue)
+ if (!host->card_workqueue)
goto err_dmaunmap;
INIT_WORK(&host->card_work, dw_mci_work_routine_card);
ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
@@ -2085,7 +2083,7 @@ err_init_slot:
free_irq(host->irq, host);
err_workqueue:
- destroy_workqueue(dw_mci_card_workqueue);
+ destroy_workqueue(host->card_workqueue);
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
@@ -2119,7 +2117,7 @@ void dw_mci_remove(struct dw_mci *host)
mci_writel(host, CLKSRC, 0);
free_irq(host->irq, host);
- destroy_workqueue(dw_mci_card_workqueue);
+ destroy_workqueue(host->card_workqueue);
dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
@@ -2172,14 +2170,14 @@ int dw_mci_resume(struct dw_mci *host)
if (host->vmmc)
regulator_enable(host->vmmc);
- if (host->dma_ops->init)
- host->dma_ops->init(host);
-
if (!mci_wait_reset(&host->dev, host)) {
ret = -ENODEV;
return ret;
}
+ if (host->dma_ops->init)
+ host->dma_ops->init(host);
+
/* Restore the old value at FIFOTH register */
mci_writel(host, FIFOTH, host->fifoth_val);
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
deleted file mode 100644
index ea0f3cedef21..000000000000
--- a/drivers/mmc/host/imxmmc.c
+++ /dev/null
@@ -1,1169 +0,0 @@
-/*
- * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
- *
- * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
- * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
- *
- * derived from pxamci.c by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/dma-mapping.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/sizes.h>
-#include <mach/mmc.h>
-#include <mach/imx-dma.h>
-
-#include "imxmmc.h"
-
-#define DRIVER_NAME "imx-mmc"
-
-#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
- INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
- INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
-
-struct imxmci_host {
- struct mmc_host *mmc;
- spinlock_t lock;
- struct resource *res;
- void __iomem *base;
- int irq;
- imx_dmach_t dma;
- volatile unsigned int imask;
- unsigned int power_mode;
- unsigned int present;
- struct imxmmc_platform_data *pdata;
-
- struct mmc_request *req;
- struct mmc_command *cmd;
- struct mmc_data *data;
-
- struct timer_list timer;
- struct tasklet_struct tasklet;
- unsigned int status_reg;
- unsigned long pending_events;
- /* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
- u16 *data_ptr;
- unsigned int data_cnt;
- atomic_t stuck_timeout;
-
- unsigned int dma_nents;
- unsigned int dma_size;
- unsigned int dma_dir;
- int dma_allocated;
-
- unsigned char actual_bus_width;
-
- int prev_cmd_code;
-
- struct clk *clk;
-};
-
-#define IMXMCI_PEND_IRQ_b 0
-#define IMXMCI_PEND_DMA_END_b 1
-#define IMXMCI_PEND_DMA_ERR_b 2
-#define IMXMCI_PEND_WAIT_RESP_b 3
-#define IMXMCI_PEND_DMA_DATA_b 4
-#define IMXMCI_PEND_CPU_DATA_b 5
-#define IMXMCI_PEND_CARD_XCHG_b 6
-#define IMXMCI_PEND_SET_INIT_b 7
-#define IMXMCI_PEND_STARTED_b 8
-
-#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
-#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
-#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b)
-#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
-#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
-#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
-#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
-#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
-#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
-
-static void imxmci_stop_clock(struct imxmci_host *host)
-{
- int i = 0;
- u16 reg;
-
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
- while (i < 0x1000) {
- if (!(i & 0x7f)) {
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_STOP_CLK,
- host->base + MMC_REG_STR_STP_CLK);
- }
-
- reg = readw(host->base + MMC_REG_STATUS);
- if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
- /* Check twice before cut */
- reg = readw(host->base + MMC_REG_STATUS);
- if (!(reg & STATUS_CARD_BUS_CLK_RUN))
- return;
- }
-
- i++;
- }
- dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
-}
-
-static int imxmci_start_clock(struct imxmci_host *host)
-{
- unsigned int trials = 0;
- unsigned int delay_limit = 128;
- unsigned long flags;
- u16 reg;
-
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
-
- clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
-
- /*
-	 * Command the clock to start; this usually succeeds in fewer
-	 * than 6 delay loops, but during card detection (low clock rate)
-	 * it takes up to 5000 delay loops and sometimes fails the first time
- */
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
-
- do {
- unsigned int delay = delay_limit;
-
- while (delay--) {
- reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN) {
- /* Check twice before cut */
- reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN)
- return 0;
- }
-
- if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
- return 0;
- }
-
- local_irq_save(flags);
- /*
-	 * Ensure that the request is not doubled under any circumstances.
-	 * It is possible that the clock-running state is missed because some
-	 * other IRQ or scheduling delays this function's execution and the
-	 * clock has already been stopped by other means (response processing, SDHC HW)
- */
- if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_START_CLK,
- host->base + MMC_REG_STR_STP_CLK);
- }
- local_irq_restore(flags);
-
- } while (++trials < 256);
-
- dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
-
- return -1;
-}
-
-static void imxmci_softreset(struct imxmci_host *host)
-{
- int i;
-
- /* reset sequence */
- writew(0x08, host->base + MMC_REG_STR_STP_CLK);
- writew(0x0D, host->base + MMC_REG_STR_STP_CLK);
-
- for (i = 0; i < 8; i++)
- writew(0x05, host->base + MMC_REG_STR_STP_CLK);
-
- writew(0xff, host->base + MMC_REG_RES_TO);
- writew(512, host->base + MMC_REG_BLK_LEN);
- writew(1, host->base + MMC_REG_NOB);
-}
-
-static int imxmci_busy_wait_for_status(struct imxmci_host *host,
- unsigned int *pstat, unsigned int stat_mask,
- int timeout, const char *where)
-{
- int loops = 0;
-
- while (!(*pstat & stat_mask)) {
- loops += 2;
- if (loops >= timeout) {
- dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
- where, *pstat, stat_mask);
- return -1;
- }
- udelay(2);
- *pstat |= readw(host->base + MMC_REG_STATUS);
- }
- if (!loops)
- return 0;
-
-	/* The busy-wait is expected here for clocks < 8 MHz due to SDHC hardware flaws */
- if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
- dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
- loops, where, *pstat, stat_mask);
- return loops;
-}
-
-static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
-{
- unsigned int nob = data->blocks;
- unsigned int blksz = data->blksz;
- unsigned int datasz = nob * blksz;
- int i;
-
- if (data->flags & MMC_DATA_STREAM)
- nob = 0xffff;
-
- host->data = data;
- data->bytes_xfered = 0;
-
- writew(nob, host->base + MMC_REG_NOB);
- writew(blksz, host->base + MMC_REG_BLK_LEN);
-
- /*
-	 * DMA cannot be used for small block sizes; we have to use CPU-driven transfers instead.
-	 * We are in big trouble for non-512-byte transfers anyway, according to the note in
-	 * paragraph 20.6.7 of the User Manual, but we need to be able to transfer the SCR at least.
-	 * The situation is even more complex in reality: the SDHC is not able to handle
-	 * partial FIFO fills and reads well, so the length has to be rounded up to a multiple
-	 * of the burst size. This is required for the SCR read at least.
- */
- if (datasz < 512) {
- host->dma_size = datasz;
- if (data->flags & MMC_DATA_READ) {
- host->dma_dir = DMA_FROM_DEVICE;
-
- /* Hack to enable read SCR */
- writew(1, host->base + MMC_REG_NOB);
- writew(512, host->base + MMC_REG_BLK_LEN);
- } else {
- host->dma_dir = DMA_TO_DEVICE;
- }
-
- /* Convert back to virtual address */
- host->data_ptr = (u16 *)sg_virt(data->sg);
- host->data_cnt = 0;
-
- clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
- set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
- return;
- }
-
- if (data->flags & MMC_DATA_READ) {
- host->dma_dir = DMA_FROM_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_READ);
-
- /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
- CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
- } else {
- host->dma_dir = DMA_TO_DEVICE;
-
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_WRITE);
-
- /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
- CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
- }
-
-#if 1 /* This code is there only for consistency checking and can be disabled in future */
- host->dma_size = 0;
- for (i = 0; i < host->dma_nents; i++)
- host->dma_size += data->sg[i].length;
-
- if (datasz > host->dma_size) {
- dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
- datasz, host->dma_size);
- }
-#endif
-
- host->dma_size = datasz;
-
- wmb();
-
- set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
- clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
- /* start DMA engine for read, write is delayed after initial response */
- if (host->dma_dir == DMA_FROM_DEVICE)
- imx_dma_enable(host->dma);
-}
-
-static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
-{
- unsigned long flags;
- u32 imask;
-
- WARN_ON(host->cmd != NULL);
- host->cmd = cmd;
-
-	/* Ensure that the clock is stopped, else command programming and start fail */
- imxmci_stop_clock(host);
-
- if (cmd->flags & MMC_RSP_BUSY)
- cmdat |= CMD_DAT_CONT_BUSY;
-
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_R1: /* short CRC, OPCODE */
- case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
- break;
- case MMC_RSP_R2: /* long 136 bit + CRC */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
- break;
- case MMC_RSP_R3: /* short */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
- break;
- default:
- break;
- }
-
- if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
- cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
-
- if (host->actual_bus_width == MMC_BUS_WIDTH_4)
- cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
-
- writew(cmd->opcode, host->base + MMC_REG_CMD);
- writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
- writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
- writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
-
- atomic_set(&host->stuck_timeout, 0);
- set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
-
-
- imask = IMXMCI_INT_MASK_DEFAULT;
- imask &= ~INT_MASK_END_CMD_RES;
- if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
- /* imask &= ~INT_MASK_BUF_READY; */
- imask &= ~INT_MASK_DATA_TRAN;
- if (cmdat & CMD_DAT_CONT_WRITE)
- imask &= ~INT_MASK_WRITE_OP_DONE;
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
- imask &= ~INT_MASK_BUF_READY;
- }
-
- spin_lock_irqsave(&host->lock, flags);
- host->imask = imask;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
- spin_unlock_irqrestore(&host->lock, flags);
-
- dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
- cmd->opcode, cmd->opcode, imask);
-
- imxmci_start_clock(host);
-}
-
-static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
-
- host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
- IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
-
- host->imask = IMXMCI_INT_MASK_DEFAULT;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
-
- spin_unlock_irqrestore(&host->lock, flags);
-
- if (req && req->cmd)
- host->prev_cmd_code = req->cmd->opcode;
-
- host->req = NULL;
- host->cmd = NULL;
- host->data = NULL;
- mmc_request_done(host->mmc, req);
-}
-
-static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_data *data = host->data;
- int data_error;
-
- if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
- imx_dma_disable(host->dma);
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
- host->dma_dir);
- }
-
- if (stat & STATUS_ERR_MASK) {
- dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
- if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
- data->error = -EILSEQ;
- else if (stat & STATUS_TIME_OUT_READ)
- data->error = -ETIMEDOUT;
- else
- data->error = -EIO;
- } else {
- data->bytes_xfered = host->dma_size;
- }
-
- data_error = data->error;
-
- host->data = NULL;
-
- return data_error;
-}
-
-static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_command *cmd = host->cmd;
- int i;
- u32 a, b, c;
- struct mmc_data *data = host->data;
-
- if (!cmd)
- return 0;
-
- host->cmd = NULL;
-
- if (stat & STATUS_TIME_OUT_RESP) {
- dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
- cmd->error = -ETIMEDOUT;
- } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
- dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
- cmd->error = -EILSEQ;
- }
-
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136) {
- for (i = 0; i < 4; i++) {
- a = readw(host->base + MMC_REG_RES_FIFO);
- b = readw(host->base + MMC_REG_RES_FIFO);
- cmd->resp[i] = a << 16 | b;
- }
- } else {
- a = readw(host->base + MMC_REG_RES_FIFO);
- b = readw(host->base + MMC_REG_RES_FIFO);
- c = readw(host->base + MMC_REG_RES_FIFO);
- cmd->resp[0] = a << 24 | b << 8 | c >> 8;
- }
- }
-
- dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
- cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
-
- if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
- if (host->req->data->flags & MMC_DATA_WRITE) {
-
- /* Wait for FIFO to be empty before starting DMA write */
-
- stat = readw(host->base + MMC_REG_STATUS);
- if (imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FE,
- 40, "imxmci_cmd_done DMA WR") < 0) {
- cmd->error = -EIO;
- imxmci_finish_data(host, stat);
- if (host->req)
- imxmci_finish_request(host, host->req);
- dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
- stat);
- return 0;
- }
-
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- imx_dma_enable(host->dma);
- }
- } else {
- struct mmc_request *req;
- imxmci_stop_clock(host);
- req = host->req;
-
- if (data)
- imxmci_finish_data(host, stat);
-
- if (req)
- imxmci_finish_request(host, req);
- else
- dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
- }
-
- return 1;
-}
-
-static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_data *data = host->data;
- int data_error;
-
- if (!data)
- return 0;
-
- data_error = imxmci_finish_data(host, stat);
-
- if (host->req->stop) {
- imxmci_stop_clock(host);
- imxmci_start_cmd(host, host->req->stop, 0);
- } else {
- struct mmc_request *req;
- req = host->req;
- if (req)
- imxmci_finish_request(host, req);
- else
- dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
- }
-
- return 1;
-}
-
-static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
-{
- int i;
- int burst_len;
- int trans_done = 0;
- unsigned int stat = *pstat;
-
- if (host->actual_bus_width != MMC_BUS_WIDTH_4)
- burst_len = 16;
- else
- burst_len = 64;
-
- /* This is unfortunately required */
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
- stat);
-
- udelay(20); /* required for clocks < 8MHz*/
-
- if (host->dma_dir == DMA_FROM_DEVICE) {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
- STATUS_TIME_OUT_READ,
- 50, "imxmci_cpu_driven_data read");
-
- while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
- !(stat & STATUS_TIME_OUT_READ) &&
- (host->data_cnt < 512)) {
-
- udelay(20); /* required for clocks < 8MHz*/
-
- for (i = burst_len; i >= 2 ; i -= 2) {
- u16 data;
- data = readw(host->base + MMC_REG_BUFFER_ACCESS);
- udelay(10); /* required for clocks < 8MHz*/
- if (host->data_cnt+2 <= host->dma_size) {
- *(host->data_ptr++) = data;
- } else {
- if (host->data_cnt < host->dma_size)
- *(u8 *)(host->data_ptr) = data;
- }
- host->data_cnt += 2;
- }
-
- stat = readw(host->base + MMC_REG_STATUS);
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
- host->data_cnt, burst_len, stat);
- }
-
- if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
- trans_done = 1;
-
- if (host->dma_size & 0x1ff)
- stat &= ~STATUS_CRC_READ_ERR;
-
- if (stat & STATUS_TIME_OUT_READ) {
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
- stat);
- trans_done = -1;
- }
-
- } else {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FE,
- 20, "imxmci_cpu_driven_data write");
-
- while ((stat & STATUS_APPL_BUFF_FE) &&
- (host->data_cnt < host->dma_size)) {
- if (burst_len >= host->dma_size - host->data_cnt) {
- burst_len = host->dma_size - host->data_cnt;
- host->data_cnt = host->dma_size;
- trans_done = 1;
- } else {
- host->data_cnt += burst_len;
- }
-
- for (i = burst_len; i > 0 ; i -= 2)
- writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);
-
- stat = readw(host->base + MMC_REG_STATUS);
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
- burst_len, stat);
- }
- }
-
- *pstat = stat;
-
- return trans_done;
-}
-
-static void imxmci_dma_irq(int dma, void *devid)
-{
- struct imxmci_host *host = devid;
- u32 stat = readw(host->base + MMC_REG_STATUS);
-
- atomic_set(&host->stuck_timeout, 0);
- host->status_reg = stat;
- set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
-}
-
-static irqreturn_t imxmci_irq(int irq, void *devid)
-{
- struct imxmci_host *host = devid;
- u32 stat = readw(host->base + MMC_REG_STATUS);
- int handled = 1;
-
- writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
- host->base + MMC_REG_INT_MASK);
-
- atomic_set(&host->stuck_timeout, 0);
- host->status_reg = stat;
- set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
- set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
-
- return IRQ_RETVAL(handled);
-}
-
-static void imxmci_tasklet_fnc(unsigned long data)
-{
- struct imxmci_host *host = (struct imxmci_host *)data;
- u32 stat;
- unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
- int timeout = 0;
-
- if (atomic_read(&host->stuck_timeout) > 4) {
- char *what;
- timeout = 1;
- stat = readw(host->base + MMC_REG_STATUS);
- host->status_reg = stat;
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- what = "RESP+DMA";
- else
- what = "RESP";
- else
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
- what = "DATA";
- else
- what = "DMA";
- else
- what = "???";
-
- dev_err(mmc_dev(host->mmc),
-			"%s TIMEOUT, hardware stuck STATUS = 0x%04x IMASK = 0x%04x\n",
- what, stat,
- readw(host->base + MMC_REG_INT_MASK));
- dev_err(mmc_dev(host->mmc),
- "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
- readw(host->base + MMC_REG_CMD_DAT_CONT),
- readw(host->base + MMC_REG_BLK_LEN),
- readw(host->base + MMC_REG_NOB),
- CCR(host->dma));
- dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
- host->cmd ? host->cmd->opcode : 0,
- host->prev_cmd_code,
- 1 << host->actual_bus_width, host->dma_size);
- }
-
- if (!host->present || timeout)
- host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
- STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
-
- if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
- clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
-
- stat = readw(host->base + MMC_REG_STATUS);
- /*
-		 * This is not required in theory, but there is a chance of missing some flag
-		 * that is cleared automatically by the mask write; the original Freescale code
-		 * keeps the stat from IRQ time, so we do too
- */
- stat |= host->status_reg;
-
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
- stat &= ~STATUS_CRC_READ_ERR;
-
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_END_CMD_RESP | STATUS_ERR_MASK,
- 20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
- }
-
- if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
- if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- imxmci_cmd_done(host, stat);
- if (host->data && (stat & STATUS_ERR_MASK))
- imxmci_data_done(host, stat);
- }
-
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
- stat |= readw(host->base + MMC_REG_STATUS);
- if (imxmci_cpu_driven_data(host, &stat)) {
- if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- imxmci_cmd_done(host, stat);
- atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
- &host->pending_events);
- imxmci_data_done(host, stat);
- }
- }
- }
-
- if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
- !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
-
- stat = readw(host->base + MMC_REG_STATUS);
- /* Same as above */
- stat |= host->status_reg;
-
- if (host->dma_dir == DMA_TO_DEVICE)
- data_dir_mask = STATUS_WRITE_OP_DONE;
- else
- data_dir_mask = STATUS_DATA_TRANS_DONE;
-
- if (stat & data_dir_mask) {
- clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
- imxmci_data_done(host, stat);
- }
- }
-
- if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
-
- if (host->cmd)
- imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
-
- if (host->data)
- imxmci_data_done(host, STATUS_TIME_OUT_READ |
- STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
-
- if (host->req)
- imxmci_finish_request(host, host->req);
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
-
- }
-}
-
-static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
-{
- struct imxmci_host *host = mmc_priv(mmc);
- unsigned int cmdat;
-
- WARN_ON(host->req != NULL);
-
- host->req = req;
-
- cmdat = 0;
-
- if (req->data) {
- imxmci_setup_data(host, req->data);
-
- cmdat |= CMD_DAT_CONT_DATA_ENABLE;
-
- if (req->data->flags & MMC_DATA_WRITE)
- cmdat |= CMD_DAT_CONT_WRITE;
-
- if (req->data->flags & MMC_DATA_STREAM)
- cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
- }
-
- imxmci_start_cmd(host, req->cmd, cmdat);
-}
-
-#define CLK_RATE 19200000
-
-static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct imxmci_host *host = mmc_priv(mmc);
- int prescaler;
-
- if (ios->bus_width == MMC_BUS_WIDTH_4) {
- host->actual_bus_width = MMC_BUS_WIDTH_4;
- imx_gpio_mode(PB11_PF_SD_DAT3);
- BLR(host->dma) = 0; /* burst 64 byte read/write */
- } else {
- host->actual_bus_width = MMC_BUS_WIDTH_1;
- imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
- BLR(host->dma) = 16; /* burst 16 byte read/write */
- }
-
- if (host->power_mode != ios->power_mode) {
- switch (ios->power_mode) {
- case MMC_POWER_OFF:
- break;
- case MMC_POWER_UP:
- set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
- break;
- case MMC_POWER_ON:
- break;
- }
- host->power_mode = ios->power_mode;
- }
-
- if (ios->clock) {
- unsigned int clk;
- u16 reg;
-
-		/* The prescaler is 5 for PERCLK2 equal to 96 MHz,
-		 * so that 96 MHz / 5 = 19.2 MHz
-		 */
- clk = clk_get_rate(host->clk);
- prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
- switch (prescaler) {
- case 0:
- case 1: prescaler = 0;
- break;
- case 2: prescaler = 1;
- break;
- case 3: prescaler = 2;
- break;
- case 4: prescaler = 4;
- break;
- default:
- case 5: prescaler = 5;
- break;
- }
-
- dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
- clk, prescaler);
-
- for (clk = 0; clk < 8; clk++) {
- int x;
- x = CLK_RATE / (1 << clk);
- if (x <= ios->clock)
- break;
- }
-
- /* enable controller */
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_ENABLE,
- host->base + MMC_REG_STR_STP_CLK);
-
- imxmci_stop_clock(host);
- writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
- /*
-		 * As far as we understand, the clock should not be started here, because that
-		 * would start the SDHC sequencer and send the last or a random command to the card
- */
- /* imxmci_start_clock(host); */
-
- dev_dbg(mmc_dev(host->mmc),
- "MMC_CLK_RATE: 0x%08x\n",
- readw(host->base + MMC_REG_CLK_RATE));
- } else {
- imxmci_stop_clock(host);
- }
-}
-
-static int imxmci_get_ro(struct mmc_host *mmc)
-{
- struct imxmci_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_ro)
- return !!host->pdata->get_ro(mmc_dev(mmc));
- /*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
- */
- return -ENOSYS;
-}
-
-
-static const struct mmc_host_ops imxmci_ops = {
- .request = imxmci_request,
- .set_ios = imxmci_set_ios,
- .get_ro = imxmci_get_ro,
-};
-
-static void imxmci_check_status(unsigned long data)
-{
- struct imxmci_host *host = (struct imxmci_host *)data;
-
- if (host->pdata && host->pdata->card_present &&
- host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
- host->present ^= 1;
- dev_info(mmc_dev(host->mmc), "card %s\n",
- host->present ? "inserted" : "removed");
-
- set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
- }
-
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
- test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
- atomic_inc(&host->stuck_timeout);
- if (atomic_read(&host->stuck_timeout) > 4)
- tasklet_schedule(&host->tasklet);
- } else {
- atomic_set(&host->stuck_timeout, 0);
-
- }
-
- mod_timer(&host->timer, jiffies + (HZ>>1));
-}
-
-static int __init imxmci_probe(struct platform_device *pdev)
-{
- struct mmc_host *mmc;
- struct imxmci_host *host = NULL;
- struct resource *r;
- int ret = 0, irq;
- u16 rev_no;
-
- pr_info("i.MX mmc driver\n");
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
- return -ENXIO;
-
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (!r)
- return -EBUSY;
-
- mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out;
- }
-
- mmc->ops = &imxmci_ops;
- mmc->f_min = 150000;
- mmc->f_max = CLK_RATE/2;
- mmc->ocr_avail = MMC_VDD_32_33;
- mmc->caps = MMC_CAP_4_BIT_DATA;
-
- /* MMC core transfer sizes tunable parameters */
- mmc->max_segs = 64;
- mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
- mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
- mmc->max_blk_size = 2048;
- mmc->max_blk_count = 65535;
-
- host = mmc_priv(mmc);
- host->base = ioremap(r->start, resource_size(r));
- if (!host->base) {
- ret = -ENOMEM;
- goto out;
- }
-
- host->mmc = mmc;
- host->dma_allocated = 0;
- host->pdata = pdev->dev.platform_data;
- if (!host->pdata)
- dev_warn(&pdev->dev, "No platform data provided!\n");
-
- spin_lock_init(&host->lock);
- host->res = r;
- host->irq = irq;
-
- host->clk = clk_get(&pdev->dev, "perclk2");
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto out;
- }
- clk_enable(host->clk);
-
- imx_gpio_mode(PB8_PF_SD_DAT0);
- imx_gpio_mode(PB9_PF_SD_DAT1);
- imx_gpio_mode(PB10_PF_SD_DAT2);
-	/* Configured as GPIO with pull-up to ensure right MMC card mode */
- /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
- imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
- /* imx_gpio_mode(PB11_PF_SD_DAT3); */
- imx_gpio_mode(PB12_PF_SD_CLK);
- imx_gpio_mode(PB13_PF_SD_CMD);
-
- imxmci_softreset(host);
-
- rev_no = readw(host->base + MMC_REG_REV_NO);
- if (rev_no != 0x390) {
- dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
- readw(host->base + MMC_REG_REV_NO));
- goto out;
- }
-
- /* recommended in data sheet */
- writew(0x2db4, host->base + MMC_REG_READ_TO);
-
- host->imask = IMXMCI_INT_MASK_DEFAULT;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
-
- host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
- if(host->dma < 0) {
- dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
- ret = -EBUSY;
- goto out;
- }
- host->dma_allocated = 1;
- imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
- RSSR(host->dma) = DMA_REQ_SDHC;
-
- tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
- host->status_reg=0;
- host->pending_events=0;
-
- ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
- if (ret)
- goto out;
-
- if (host->pdata && host->pdata->card_present)
- host->present = host->pdata->card_present(mmc_dev(mmc));
- else /* if there is no way to detect assume that card is present */
- host->present = 1;
-
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = imxmci_check_status;
- add_timer(&host->timer);
- mod_timer(&host->timer, jiffies + (HZ >> 1));
-
- platform_set_drvdata(pdev, mmc);
-
- mmc_add_host(mmc);
-
- return 0;
-
-out:
- if (host) {
- if (host->dma_allocated) {
- imx_dma_free(host->dma);
- host->dma_allocated = 0;
- }
- if (host->clk) {
- clk_disable(host->clk);
- clk_put(host->clk);
- }
- if (host->base)
- iounmap(host->base);
- }
- if (mmc)
- mmc_free_host(mmc);
- release_mem_region(r->start, resource_size(r));
- return ret;
-}
-
-static int __exit imxmci_remove(struct platform_device *pdev)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
-
- if (mmc) {
- struct imxmci_host *host = mmc_priv(mmc);
-
- tasklet_disable(&host->tasklet);
-
- del_timer_sync(&host->timer);
- mmc_remove_host(mmc);
-
- free_irq(host->irq, host);
- iounmap(host->base);
- if (host->dma_allocated) {
- imx_dma_free(host->dma);
- host->dma_allocated = 0;
- }
-
- tasklet_kill(&host->tasklet);
-
- clk_disable(host->clk);
- clk_put(host->clk);
-
- release_mem_region(host->res->start, resource_size(host->res));
-
- mmc_free_host(mmc);
- }
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
-
- return ret;
-}
-
-static int imxmci_resume(struct platform_device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- struct imxmci_host *host;
- int ret = 0;
-
- if (mmc) {
- host = mmc_priv(mmc);
- if (host)
- set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
- ret = mmc_resume_host(mmc);
- }
-
- return ret;
-}
-#else
-#define imxmci_suspend NULL
-#define imxmci_resume NULL
-#endif /* CONFIG_PM */
-
-static struct platform_driver imxmci_driver = {
- .remove = __exit_p(imxmci_remove),
- .suspend = imxmci_suspend,
- .resume = imxmci_resume,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-static int __init imxmci_init(void)
-{
- return platform_driver_probe(&imxmci_driver, imxmci_probe);
-}
-
-static void __exit imxmci_exit(void)
-{
- platform_driver_unregister(&imxmci_driver);
-}
-
-module_init(imxmci_init);
-module_exit(imxmci_exit);
-
-MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-mmc");
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
deleted file mode 100644
index 09d5d4ee3a77..000000000000
--- a/drivers/mmc/host/imxmmc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#define MMC_REG_STR_STP_CLK 0x00
-#define MMC_REG_STATUS 0x04
-#define MMC_REG_CLK_RATE 0x08
-#define MMC_REG_CMD_DAT_CONT 0x0C
-#define MMC_REG_RES_TO 0x10
-#define MMC_REG_READ_TO 0x14
-#define MMC_REG_BLK_LEN 0x18
-#define MMC_REG_NOB 0x1C
-#define MMC_REG_REV_NO 0x20
-#define MMC_REG_INT_MASK 0x24
-#define MMC_REG_CMD 0x28
-#define MMC_REG_ARGH 0x2C
-#define MMC_REG_ARGL 0x30
-#define MMC_REG_RES_FIFO 0x34
-#define MMC_REG_BUFFER_ACCESS 0x38
-
-#define STR_STP_CLK_IPG_CLK_GATE_DIS (1<<15)
-#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14)
-#define STR_STP_CLK_ENDIAN (1<<5)
-#define STR_STP_CLK_RESET (1<<3)
-#define STR_STP_CLK_ENABLE (1<<2)
-#define STR_STP_CLK_START_CLK (1<<1)
-#define STR_STP_CLK_STOP_CLK (1<<0)
-#define STATUS_CARD_PRESENCE (1<<15)
-#define STATUS_SDIO_INT_ACTIVE (1<<14)
-#define STATUS_END_CMD_RESP (1<<13)
-#define STATUS_WRITE_OP_DONE (1<<12)
-#define STATUS_DATA_TRANS_DONE (1<<11)
-#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10)
-#define STATUS_CARD_BUS_CLK_RUN (1<<8)
-#define STATUS_APPL_BUFF_FF (1<<7)
-#define STATUS_APPL_BUFF_FE (1<<6)
-#define STATUS_RESP_CRC_ERR (1<<5)
-#define STATUS_CRC_READ_ERR (1<<3)
-#define STATUS_CRC_WRITE_ERR (1<<2)
-#define STATUS_TIME_OUT_RESP (1<<1)
-#define STATUS_TIME_OUT_READ (1<<0)
-#define STATUS_ERR_MASK 0x2f
-#define CLK_RATE_PRESCALER(x) ((x) & 0x7)
-#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3)
-#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12)
-#define CMD_DAT_CONT_STOP_READWAIT (1<<11)
-#define CMD_DAT_CONT_START_READWAIT (1<<10)
-#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8)
-#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8)
-#define CMD_DAT_CONT_INIT (1<<7)
-#define CMD_DAT_CONT_BUSY (1<<6)
-#define CMD_DAT_CONT_STREAM_BLOCK (1<<5)
-#define CMD_DAT_CONT_WRITE (1<<4)
-#define CMD_DAT_CONT_DATA_ENABLE (1<<3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
-#define INT_MASK_AUTO_CARD_DETECT (1<<6)
-#define INT_MASK_DAT0_EN (1<<5)
-#define INT_MASK_SDIO (1<<4)
-#define INT_MASK_BUF_READY (1<<3)
-#define INT_MASK_END_CMD_RES (1<<2)
-#define INT_MASK_WRITE_OP_DONE (1<<1)
-#define INT_MASK_DATA_TRAN (1<<0)
-#define INT_ALL (0x7f)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b6f38421d541..50ff19a62368 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
@@ -25,6 +26,7 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -1207,21 +1209,77 @@ static const struct mmc_host_ops mmci_ops = {
.get_cd = mmci_get_cd,
};
+#ifdef CONFIG_OF
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+ struct mmci_platform_data *pdata)
+{
+ int bus_width = 0;
+
+ pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
+ pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
+
+ if (of_get_property(np, "cd-inverted", NULL))
+ pdata->cd_invert = true;
+ else
+ pdata->cd_invert = false;
+
+ of_property_read_u32(np, "max-frequency", &pdata->f_max);
+ if (!pdata->f_max)
+ pr_warn("%s has no 'max-frequency' property\n", np->full_name);
+
+ if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+ pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+ pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
+
+ of_property_read_u32(np, "bus-width", &bus_width);
+ switch (bus_width) {
+	case 0:
+		/* No bus-width supplied. */
+		break;
+	case 4:
+		pdata->capabilities |= MMC_CAP_4_BIT_DATA;
+		break;
+	case 8:
+		pdata->capabilities |= MMC_CAP_8_BIT_DATA;
+		break;
+	default:
+ pr_warn("%s: Unsupported bus width\n", np->full_name);
+ }
+}
+#else
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+ struct mmci_platform_data *pdata)
+{
+ return;
+}
+#endif
+
static int __devinit mmci_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
+ struct device_node *np = dev->dev.of_node;
struct variant_data *variant = id->data;
struct mmci_host *host;
struct mmc_host *mmc;
int ret;
- /* must have platform data */
+ /* Must have platform data or Device Tree. */
+ if (!plat && !np) {
+ dev_err(&dev->dev, "No plat data or DT found\n");
+ return -EINVAL;
+ }
+
if (!plat) {
- ret = -EINVAL;
- goto out;
+ plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
}
+ if (np)
+ mmci_dt_populate_generic_pdata(np, plat);
+
ret = amba_request_regions(dev, DRIVER_NAME);
if (ret)
goto out;
@@ -1367,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
writel(0, host->base + MMCIMASK1);
writel(0xfff, host->base + MMCICLEAR);
+ if (plat->gpio_cd == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_gpio_cd;
+ }
if (gpio_is_valid(plat->gpio_cd)) {
ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
if (ret == 0)
@@ -1390,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
if (ret >= 0)
host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
}
+ if (plat->gpio_wp == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_gpio_wp;
+ }
if (gpio_is_valid(plat->gpio_wp)) {
ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
if (ret == 0)
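The gpio_cd/gpio_wp checks added to mmci_probe() above exist because of_get_named_gpio() can return -EPROBE_DEFER when the GPIO controller has not been probed yet; that value must be propagated so the core retries the probe later instead of silently falling back to polling. A minimal stand-alone sketch of the decision (the numeric EPROBE_DEFER value is assumed only for the demo):

#include <stdio.h>

#define EPROBE_DEFER	517	/* kernel errno value, used here only for the demo */

/*
 * Sketch of the card-detect GPIO handling added to mmci_probe():
 * a negative "gpio" is either "no such GPIO" or "controller not ready yet".
 */
static int handle_cd_gpio(int gpio)
{
	if (gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* bail out; the core retries the probe later */
	if (gpio < 0)
		return 0;		/* no card-detect GPIO: fall back to polling */
	printf("requesting card-detect GPIO %d\n", gpio);
	return 0;
}

int main(void)
{
	printf("deferred controller -> %d\n", handle_cd_gpio(-EPROBE_DEFER));
	printf("gpio 42             -> %d\n", handle_cd_gpio(42));
	return 0;
}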
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index eeb8cd125b0c..3b9136c1a475 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
+#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/mmc/host.h>
@@ -51,6 +52,7 @@ struct mvsd_host {
struct device *dev;
struct resource *res;
int irq;
+ struct clk *clk;
int gpio_card_detect;
int gpio_write_protect;
};
@@ -770,6 +772,13 @@ static int __init mvsd_probe(struct platform_device *pdev)
} else
host->irq = irq;
+	/* Not all platforms can gate the clock, so it is not
+	 * an error if the clock does not exist. */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk)) {
+ clk_prepare_enable(host->clk);
+ }
+
if (mvsd_data->gpio_card_detect) {
ret = gpio_request(mvsd_data->gpio_card_detect,
DRIVER_NAME " cd");
@@ -854,6 +863,11 @@ static int __exit mvsd_remove(struct platform_device *pdev)
mvsd_power_down(host);
iounmap(host->base);
release_resource(host->res);
+
+ if (!IS_ERR(host->clk)) {
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+ }
mmc_free_host(mmc);
}
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b2058b432320..28ed52d58f7f 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -136,7 +136,8 @@ struct mxcmci_host {
u16 rev_no;
unsigned int cmdat;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
int clock;
@@ -672,7 +673,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
{
unsigned int divider;
int prescaler = 0;
- unsigned int clk_in = clk_get_rate(host->clk);
+ unsigned int clk_in = clk_get_rate(host->clk_per);
while (prescaler <= 0x800) {
for (divider = 1; divider <= 0xF; divider++) {
@@ -900,12 +901,20 @@ static int mxcmci_probe(struct platform_device *pdev)
host->res = r;
host->irq = irq;
- host->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
+ host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(host->clk_ipg)) {
+ ret = PTR_ERR(host->clk_ipg);
goto out_iounmap;
}
- clk_enable(host->clk);
+
+ host->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(host->clk_per)) {
+ ret = PTR_ERR(host->clk_per);
+ goto out_iounmap;
+ }
+
+ clk_prepare_enable(host->clk_per);
+ clk_prepare_enable(host->clk_ipg);
mxcmci_softreset(host);
@@ -917,8 +926,8 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_clk_put;
}
- mmc->f_min = clk_get_rate(host->clk) >> 16;
- mmc->f_max = clk_get_rate(host->clk) >> 1;
+ mmc->f_min = clk_get_rate(host->clk_per) >> 16;
+ mmc->f_max = clk_get_rate(host->clk_per) >> 1;
/* recommended in data sheet */
writew(0x2db4, host->base + MMC_REG_READ_TO);
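With the clock split into "ipg" and "per", the host frequency window is now derived from the per clock alone: f_min is that rate divided by 65536 (the largest divider) and f_max is half of it. A quick stand-alone check with an assumed 96 MHz per clock:

#include <stdio.h>

int main(void)
{
	unsigned long clk_per = 96000000;	/* assumed "per" clock rate */
	unsigned long f_min = clk_per >> 16;
	unsigned long f_max = clk_per >> 1;

	printf("f_min = %lu Hz, f_max = %lu Hz\n", f_min, f_max);
	return 0;
}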
@@ -967,8 +976,8 @@ out_free_dma:
if (host->dma)
dma_release_channel(host->dma);
out_clk_put:
- clk_disable(host->clk);
- clk_put(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
out_iounmap:
iounmap(host->base);
out_free:
@@ -999,8 +1008,8 @@ static int mxcmci_remove(struct platform_device *pdev)
if (host->dma)
dma_release_channel(host->dma);
- clk_disable(host->clk);
- clk_put(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
release_mem_region(host->res->start, resource_size(host->res));
@@ -1018,7 +1027,8 @@ static int mxcmci_suspend(struct device *dev)
if (mmc)
ret = mmc_suspend_host(mmc);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
return ret;
}
@@ -1029,7 +1039,8 @@ static int mxcmci_resume(struct device *dev)
struct mxcmci_host *host = mmc_priv(mmc);
int ret = 0;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk_per);
+ clk_prepare_enable(host->clk_ipg);
if (mmc)
ret = mmc_resume_host(mmc);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index bb03ddda481d..277161d279b8 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -23,6 +23,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -40,18 +43,15 @@
#include <linux/module.h>
#include <linux/fsl/mxs-dma.h>
#include <linux/pinctrl/consumer.h>
-
-#include <mach/mxs.h>
-#include <mach/common.h>
-#include <mach/mmc.h>
+#include <linux/stmp_device.h>
+#include <linux/mmc/mxs-mmc.h>
#define DRIVER_NAME "mxs-mmc"
/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
-#define SSP_VERSION_LATEST 4
-#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
+#define ssp_is_old(host) ((host)->devid == IMX23_MMC)
/* SSP registers */
#define HW_SSP_CTRL0 0x000
@@ -86,14 +86,14 @@
#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
-#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
+#define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070)
#define BP_SSP_TIMING_TIMEOUT (16)
#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
#define BP_SSP_TIMING_CLOCK_RATE (0)
#define BM_SSP_TIMING_CLOCK_RATE (0xff)
-#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
+#define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080)
#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
@@ -116,15 +116,13 @@
#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
#define BP_SSP_CTRL1_SSP_MODE (0)
#define BM_SSP_CTRL1_SSP_MODE (0xf)
-#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
-#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
-#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
-#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
-#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
+#define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100)
#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
-#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
-#define BP_SSP_VERSION_MAJOR (24)
#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
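The register-offset macros above now take the host as an argument instead of relying on a file-scope variable: on i.MX23 the SSP block places TIMING, CTRL1, SDRESPx and STATUS at lower offsets than on i.MX28, and each access now picks the offset from the probed device id. A stand-alone sketch of the lookup, mirroring the macros:

#include <stdio.h>

enum mxs_mmc_id { IMX23_MMC, IMX28_MMC };

struct mxs_mmc_host { enum mxs_mmc_id devid; };

#define ssp_is_old(host)	((host)->devid == IMX23_MMC)
#define HW_SSP_TIMING(h)	(ssp_is_old(h) ? 0x050 : 0x070)
#define HW_SSP_STATUS(h)	(ssp_is_old(h) ? 0x0c0 : 0x100)

int main(void)
{
	struct mxs_mmc_host mx23 = { IMX23_MMC }, mx28 = { IMX28_MMC };

	printf("TIMING: mx23=0x%03x mx28=0x%03x\n",
	       HW_SSP_TIMING(&mx23), HW_SSP_TIMING(&mx28));
	printf("STATUS: mx23=0x%03x mx28=0x%03x\n",
	       HW_SSP_STATUS(&mx23), HW_SSP_STATUS(&mx28));
	return 0;
}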
@@ -139,6 +137,11 @@
#define SSP_PIO_NUM 3
+enum mxs_mmc_id {
+ IMX23_MMC,
+ IMX28_MMC,
+};
+
struct mxs_mmc_host {
struct mmc_host *mmc;
struct mmc_request *mrq;
@@ -146,9 +149,7 @@ struct mxs_mmc_host {
struct mmc_data *data;
void __iomem *base;
- int irq;
- struct resource *res;
- struct resource *dma_res;
+ int dma_channel;
struct clk *clk;
unsigned int clk_rate;
@@ -158,32 +159,28 @@ struct mxs_mmc_host {
enum dma_transfer_direction slave_dirn;
u32 ssp_pio_words[SSP_PIO_NUM];
- unsigned int version;
+ enum mxs_mmc_id devid;
unsigned char bus_width;
spinlock_t lock;
int sdio_irq_en;
+ int wp_gpio;
};
static int mxs_mmc_get_ro(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
- struct mxs_mmc_platform_data *pdata =
- mmc_dev(host->mmc)->platform_data;
-
- if (!pdata)
- return -EFAULT;
- if (!gpio_is_valid(pdata->wp_gpio))
+ if (!gpio_is_valid(host->wp_gpio))
return -EINVAL;
- return gpio_get_value(pdata->wp_gpio);
+ return gpio_get_value(host->wp_gpio);
}
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
- return !(readl(host->base + HW_SSP_STATUS) &
+ return !(readl(host->base + HW_SSP_STATUS(host)) &
BM_SSP_STATUS_CARD_DETECT);
}
@@ -191,7 +188,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
{
u32 ctrl0, ctrl1;
- mxs_reset_block(host->base);
+ stmp_reset_block(host->base);
ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -207,7 +204,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
BF_SSP(2, TIMING_CLOCK_DIVIDE) |
BF_SSP(0, TIMING_CLOCK_RATE),
- host->base + HW_SSP_TIMING);
+ host->base + HW_SSP_TIMING(host));
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
@@ -215,7 +212,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
}
writel(ctrl0, host->base + HW_SSP_CTRL0);
- writel(ctrl1, host->base + HW_SSP_CTRL1);
+ writel(ctrl1, host->base + HW_SSP_CTRL1(host));
}
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -229,12 +226,12 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
if (mmc_resp_type(cmd) & MMC_RSP_136) {
- cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
- cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
- cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
- cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
+ cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
+ cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
+ cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
} else {
- cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
}
}
@@ -277,9 +274,9 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
spin_lock(&host->lock);
- stat = readl(host->base + HW_SSP_CTRL1);
+ stat = readl(host->base + HW_SSP_CTRL1(host));
writel(stat & MXS_MMC_IRQ_BITS,
- host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
mmc_signal_sdio_irq(host->mmc);
@@ -485,7 +482,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
blocks = 1;
/* xfer count, block size and count need to be set differently */
- if (ssp_is_old()) {
+ if (ssp_is_old(host)) {
ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
@@ -509,10 +506,10 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
/* set the timeout count */
timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
- val = readl(host->base + HW_SSP_TIMING);
+ val = readl(host->base + HW_SSP_TIMING(host));
val &= ~(BM_SSP_TIMING_TIMEOUT);
val |= BF_SSP(timeout, TIMING_TIMEOUT);
- writel(val, host->base + HW_SSP_TIMING);
+ writel(val, host->base + HW_SSP_TIMING(host));
/* pio */
host->ssp_pio_words[0] = ctrl0;
@@ -598,11 +595,11 @@ static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
- val = readl(host->base + HW_SSP_TIMING);
+ val = readl(host->base + HW_SSP_TIMING(host));
val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
- writel(val, host->base + HW_SSP_TIMING);
+ writel(val, host->base + HW_SSP_TIMING(host));
host->clk_rate = ssp_sck;
@@ -637,18 +634,19 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
if (enable) {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
- host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+ host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
- host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
- if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+ if (readl(host->base + HW_SSP_STATUS(host)) &
+ BM_SSP_STATUS_SDIO_IRQ)
mmc_signal_sdio_irq(host->mmc);
} else {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
- host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
- host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -669,7 +667,7 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
if (!mxs_dma_is_apbh(chan))
return false;
- if (chan->chan_id != host->dma_res->start)
+ if (chan->chan_id != host->dma_channel)
return false;
chan->private = &host->dma_data;
@@ -677,11 +675,34 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
return true;
}
+static struct platform_device_id mxs_mmc_ids[] = {
+ {
+ .name = "imx23-mmc",
+ .driver_data = IMX23_MMC,
+ }, {
+ .name = "imx28-mmc",
+ .driver_data = IMX28_MMC,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
+
+static const struct of_device_id mxs_mmc_dt_ids[] = {
+ { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
+ { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+
static int mxs_mmc_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_mmc_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
- struct resource *iores, *dmares, *r;
+ struct resource *iores, *dmares;
struct mxs_mmc_platform_data *pdata;
struct pinctrl *pinctrl;
int ret = 0, irq_err, irq_dma;
@@ -691,46 +712,51 @@ static int mxs_mmc_probe(struct platform_device *pdev)
dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
irq_err = platform_get_irq(pdev, 0);
irq_dma = platform_get_irq(pdev, 1);
- if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
+ if (!iores || irq_err < 0 || irq_dma < 0)
return -EINVAL;
- r = request_mem_region(iores->start, resource_size(iores), pdev->name);
- if (!r)
- return -EBUSY;
-
mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out_release_mem;
- }
+ if (!mmc)
+ return -ENOMEM;
host = mmc_priv(mmc);
- host->base = ioremap(r->start, resource_size(r));
+ host->base = devm_request_and_ioremap(&pdev->dev, iores);
if (!host->base) {
- ret = -ENOMEM;
+ ret = -EADDRNOTAVAIL;
goto out_mmc_free;
}
- /* only major verion does matter */
- host->version = readl(host->base + HW_SSP_VERSION) >>
- BP_SSP_VERSION_MAJOR;
+ if (np) {
+ host->devid = (enum mxs_mmc_id) of_id->data;
+ /*
+ * TODO: This is a temporary solution; switch to the generic DMA
+ * binding once the helpers are merged.
+ */
+ ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
+ &host->dma_channel);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to get dma channel\n");
+ goto out_mmc_free;
+ }
+ } else {
+ host->devid = pdev->id_entry->driver_data;
+ host->dma_channel = dmares->start;
+ }
host->mmc = mmc;
- host->res = r;
- host->dma_res = dmares;
- host->irq = irq_err;
host->sdio_irq_en = 0;
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
- goto out_iounmap;
+ goto out_mmc_free;
}
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto out_iounmap;
+ goto out_mmc_free;
}
clk_prepare_enable(host->clk);
@@ -752,11 +778,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
pdata = mmc_dev(host->mmc)->platform_data;
- if (pdata) {
+ if (!pdata) {
+ u32 bus_width = 0;
+ of_property_read_u32(np, "bus-width", &bus_width);
+ if (bus_width == 4)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ else if (bus_width == 8)
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ } else {
if (pdata->flags & SLOTF_8_BIT_CAPABLE)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
if (pdata->flags & SLOTF_4_BIT_CAPABLE)
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ host->wp_gpio = pdata->wp_gpio;
}
mmc->f_min = 400000;
@@ -765,13 +800,14 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->max_segs = 52;
mmc->max_blk_size = 1 << 0xf;
- mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
- mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
+ mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
+ mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
platform_set_drvdata(pdev, mmc);
- ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
+ ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+ DRIVER_NAME, host);
if (ret)
goto out_free_dma;
@@ -779,26 +815,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ret = mmc_add_host(mmc);
if (ret)
- goto out_free_irq;
+ goto out_free_dma;
dev_info(mmc_dev(host->mmc), "initialized\n");
return 0;
-out_free_irq:
- free_irq(host->irq, host);
out_free_dma:
if (host->dmach)
dma_release_channel(host->dmach);
out_clk_put:
clk_disable_unprepare(host->clk);
clk_put(host->clk);
-out_iounmap:
- iounmap(host->base);
out_mmc_free:
mmc_free_host(mmc);
-out_release_mem:
- release_mem_region(iores->start, resource_size(iores));
return ret;
}
@@ -806,12 +836,9 @@ static int mxs_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mxs_mmc_host *host = mmc_priv(mmc);
- struct resource *res = host->res;
mmc_remove_host(mmc);
- free_irq(host->irq, host);
-
platform_set_drvdata(pdev, NULL);
if (host->dmach)
@@ -820,12 +847,8 @@ static int mxs_mmc_remove(struct platform_device *pdev)
clk_disable_unprepare(host->clk);
clk_put(host->clk);
- iounmap(host->base);
-
mmc_free_host(mmc);
- release_mem_region(res->start, resource_size(res));
-
return 0;
}
@@ -865,12 +888,14 @@ static const struct dev_pm_ops mxs_mmc_pm_ops = {
static struct platform_driver mxs_mmc_driver = {
.probe = mxs_mmc_probe,
.remove = mxs_mmc_remove,
+ .id_table = mxs_mmc_ids,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &mxs_mmc_pm_ops,
#endif
+ .of_match_table = mxs_mmc_dt_ids,
},
};
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 887c0e598cf3..3e8dcf8d2e05 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -169,11 +169,11 @@ struct mmc_omap_host {
struct timer_list clk_timer;
spinlock_t clk_lock; /* for changing enabled state */
unsigned int fclk_enabled:1;
+ struct workqueue_struct *mmc_omap_wq;
struct omap_mmc_platform_data *pdata;
};
-static struct workqueue_struct *mmc_omap_wq;
static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
{
@@ -291,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
host->next_slot = new_slot;
host->mmc = new_slot->mmc;
spin_unlock_irqrestore(&host->slot_lock, flags);
- queue_work(mmc_omap_wq, &host->slot_release_work);
+ queue_work(host->mmc_omap_wq, &host->slot_release_work);
return;
}
@@ -459,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
}
host->stop_data = data;
- queue_work(mmc_omap_wq, &host->send_stop_work);
+ queue_work(host->mmc_omap_wq, &host->send_stop_work);
}
static void
@@ -639,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
OMAP_MMC_WRITE(host, IE, 0);
disable_irq(host->irq);
host->abort = 1;
- queue_work(mmc_omap_wq, &host->cmd_abort_work);
+ queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
}
spin_unlock_irqrestore(&host->slot_lock, flags);
}
@@ -828,7 +828,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
host->abort = 1;
OMAP_MMC_WRITE(host, IE, 0);
disable_irq_nosync(host->irq);
- queue_work(mmc_omap_wq, &host->cmd_abort_work);
+ queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
return IRQ_HANDLED;
}
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
.set_ios = mmc_omap_set_ios,
};
-static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
struct mmc_omap_slot *slot = NULL;
struct mmc_host *mmc;
@@ -1389,13 +1389,13 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
tasklet_kill(&slot->cover_tasklet);
del_timer_sync(&slot->cover_timer);
- flush_workqueue(mmc_omap_wq);
+ flush_workqueue(slot->host->mmc_omap_wq);
mmc_remove_host(mmc);
mmc_free_host(mmc);
}
-static int __init mmc_omap_probe(struct platform_device *pdev)
+static int __devinit mmc_omap_probe(struct platform_device *pdev)
{
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
@@ -1485,20 +1485,26 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
}
host->nr_slots = pdata->nr_slots;
+ host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
+ host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+ if (!host->mmc_omap_wq)
+ goto err_plat_cleanup;
+
for (i = 0; i < pdata->nr_slots; i++) {
ret = mmc_omap_new_slot(host, i);
if (ret < 0) {
while (--i >= 0)
mmc_omap_remove_slot(host->slots[i]);
- goto err_plat_cleanup;
+ goto err_destroy_wq;
}
}
- host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
-
return 0;
+err_destroy_wq:
+ destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
if (pdata->cleanup)
pdata->cleanup(&pdev->dev);
@@ -1518,7 +1524,7 @@ err_free_mem_region:
return ret;
}
-static int mmc_omap_remove(struct platform_device *pdev)
+static int __devexit mmc_omap_remove(struct platform_device *pdev)
{
struct mmc_omap_host *host = platform_get_drvdata(pdev);
int i;
@@ -1542,6 +1548,7 @@ static int mmc_omap_remove(struct platform_device *pdev)
iounmap(host->virt_base);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
+ destroy_workqueue(host->mmc_omap_wq);
kfree(host);
@@ -1599,7 +1606,8 @@ static int mmc_omap_resume(struct platform_device *pdev)
#endif
static struct platform_driver mmc_omap_driver = {
- .remove = mmc_omap_remove,
+ .probe = mmc_omap_probe,
+ .remove = __devexit_p(mmc_omap_remove),
.suspend = mmc_omap_suspend,
.resume = mmc_omap_resume,
.driver = {
@@ -1608,29 +1616,7 @@ static struct platform_driver mmc_omap_driver = {
},
};
-static int __init mmc_omap_init(void)
-{
- int ret;
-
- mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
- if (!mmc_omap_wq)
- return -ENOMEM;
-
- ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
- if (ret)
- destroy_workqueue(mmc_omap_wq);
- return ret;
-}
-
-static void __exit mmc_omap_exit(void)
-{
- platform_driver_unregister(&mmc_omap_driver);
- destroy_workqueue(mmc_omap_wq);
-}
-
-module_init(mmc_omap_init);
-module_exit(mmc_omap_exit);
-
+module_platform_driver(mmc_omap_driver);
MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 56d4499d4388..389a3eedfc24 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -91,6 +91,7 @@
#define MSBS (1 << 5)
#define BCE (1 << 1)
#define FOUR_BIT (1 << 1)
+#define DDR (1 << 19)
#define DW8 (1 << 5)
#define CC 0x1
#define TC 0x02
@@ -167,7 +168,6 @@ struct omap_hsmmc_host {
int use_dma, dma_ch;
int dma_line_tx, dma_line_rx;
int slot_id;
- int got_dbclk;
int response_busy;
int context_loss;
int vdd;
@@ -520,6 +520,10 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
u32 con;
con = OMAP_HSMMC_READ(host->base, CON);
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ con |= DDR; /* configure in DDR mode */
+ else
+ con &= ~DDR;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
@@ -796,11 +800,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
int dma_ch;
+ unsigned long flags;
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
host->req_in_progress = 0;
dma_ch = host->dma_ch;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
omap_hsmmc_disable_irq(host);
/* Do not complete the request if DMA is still in progress */
@@ -874,13 +879,14 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
int dma_ch;
+ unsigned long flags;
host->data->error = errno;
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
if (host->use_dma && dma_ch != -1) {
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
@@ -1082,7 +1088,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
/* Disable the clocks */
pm_runtime_put_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_disable(host->dbclk);
/* Turn the power off */
@@ -1093,7 +1099,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
vdd);
pm_runtime_get_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_enable(host->dbclk);
if (ret != 0)
@@ -1234,6 +1240,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
struct omap_hsmmc_host *host = cb_data;
struct mmc_data *data;
int dma_ch, req_in_progress;
+ unsigned long flags;
if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
@@ -1241,9 +1248,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
if (host->dma_ch < 0) {
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
return;
}
@@ -1253,7 +1260,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
/* Fire up the next transfer. */
omap_hsmmc_config_dma_params(host, data,
data->sg + host->dma_sg_idx);
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
return;
}
@@ -1264,7 +1271,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
req_in_progress = host->req_in_progress;
dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
omap_free_dma(dma_ch);
@@ -1766,7 +1773,7 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
pdata->slots[0].nonremovable = true;
pdata->slots[0].no_regulator_off_init = true;
}
- of_property_read_u32(np, "ti,bus-width", &bus_width);
+ of_property_read_u32(np, "bus-width", &bus_width);
if (bus_width == 4)
pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
else if (bus_width == 8)
@@ -1885,21 +1892,17 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_context_save(host);
- if (cpu_is_omap2430()) {
- host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
- /*
- * MMC can still work without debounce clock.
- */
- if (IS_ERR(host->dbclk))
- dev_warn(mmc_dev(host->mmc),
- "Failed to get debounce clock\n");
- else
- host->got_dbclk = 1;
-
- if (host->got_dbclk)
- if (clk_enable(host->dbclk) != 0)
- dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
- " clk failed\n");
+ host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
+ /*
+ * MMC can still work without debounce clock.
+ */
+ if (IS_ERR(host->dbclk)) {
+ dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
+ host->dbclk = NULL;
+ } else if (clk_enable(host->dbclk) != 0) {
+ dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
+ clk_put(host->dbclk);
+ host->dbclk = NULL;
}
/* Since we do only SG emulation, we can have as many segs
@@ -1969,7 +1972,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
NULL,
omap_hsmmc_detect,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc),
@@ -2019,7 +2022,7 @@ err_irq:
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
- if (host->got_dbclk) {
+ if (host->dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
@@ -2030,7 +2033,9 @@ err1:
err_alloc:
omap_hsmmc_gpio_free(pdata);
err:
- release_mem_region(res->start, resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -2052,7 +2057,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
- if (host->got_dbclk) {
+ if (host->dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
@@ -2110,7 +2115,7 @@ static int omap_hsmmc_suspend(struct device *dev)
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
}
- if (host->got_dbclk)
+ if (host->dbclk)
clk_disable(host->dbclk);
err:
pm_runtime_put_sync(host->dev);
@@ -2131,7 +2136,7 @@ static int omap_hsmmc_resume(struct device *dev)
pm_runtime_get_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_enable(host->dbclk);
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d190d04636a7..ebbe984e5d00 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -71,6 +71,9 @@ struct pltfm_imx_data {
enum imx_esdhc_type devtype;
struct pinctrl *pinctrl;
struct esdhc_platform_data boarddata;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
};
static struct platform_device_id imx_esdhc_devtype[] = {
@@ -404,7 +407,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
- if (of_get_property(np, "fsl,card-wired", NULL))
+ if (of_get_property(np, "non-removable", NULL))
boarddata->cd_type = ESDHC_CD_PERMANENT;
if (of_get_property(np, "fsl,cd-controller", NULL))
@@ -439,7 +442,6 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct esdhc_platform_data *boarddata;
- struct clk *clk;
int err;
struct pltfm_imx_data *imx_data;
@@ -460,14 +462,29 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
imx_data->devtype = pdev->id_entry->driver_data;
pltfm_host->priv = imx_data;
- clk = clk_get(mmc_dev(host->mmc), NULL);
- if (IS_ERR(clk)) {
- dev_err(mmc_dev(host->mmc), "clk err\n");
- err = PTR_ERR(clk);
+ imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imx_data->clk_ipg)) {
+ err = PTR_ERR(imx_data->clk_ipg);
goto err_clk_get;
}
- clk_prepare_enable(clk);
- pltfm_host->clk = clk;
+
+ imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(imx_data->clk_ahb)) {
+ err = PTR_ERR(imx_data->clk_ahb);
+ goto err_clk_get;
+ }
+
+ imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(imx_data->clk_per)) {
+ err = PTR_ERR(imx_data->clk_per);
+ goto err_clk_get;
+ }
+
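+ /* the per clock is handed to the sdhci-pltfm layer as pltfm_host->clk,
+ * i.e. (assumed here) the base clock its helpers use for rate
+ * calculations */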
+ pltfm_host->clk = imx_data->clk_per;
+
+ clk_prepare_enable(imx_data->clk_per);
+ clk_prepare_enable(imx_data->clk_ipg);
+ clk_prepare_enable(imx_data->clk_ahb);
imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(imx_data->pinctrl)) {
@@ -567,8 +584,9 @@ no_card_detect_irq:
no_card_detect_pin:
no_board_data:
pin_err:
- clk_disable_unprepare(pltfm_host->clk);
- clk_put(pltfm_host->clk);
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
err_clk_get:
kfree(imx_data);
err_imx_data:
@@ -594,8 +612,10 @@ static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
gpio_free(boarddata->cd_gpio);
}
- clk_disable_unprepare(pltfm_host->clk);
- clk_put(pltfm_host->clk);
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
+
kfree(imx_data);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index c5c2a48bdd94..d9a4ef4f1ed0 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -42,7 +42,8 @@ static struct sdhci_ops sdhci_pltfm_ops = {
#ifdef CONFIG_OF
static bool sdhci_of_wp_inverted(struct device_node *np)
{
- if (of_get_property(np, "sdhci,wp-inverted", NULL))
+ if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
+ of_get_property(np, "wp-inverted", NULL))
return true;
/* Old device trees don't have the wp-inverted property. */
@@ -59,13 +60,16 @@ void sdhci_get_of_property(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
const __be32 *clk;
+ u32 bus_width;
int size;
if (of_device_is_available(np)) {
if (of_get_property(np, "sdhci,auto-cmd12", NULL))
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
- if (of_get_property(np, "sdhci,1-bit-only", NULL))
+ if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
+ (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+ bus_width == 1))
host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
if (sdhci_of_wp_inverted(np))
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 55a164fcaa15..a50c205ea208 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
if (sc->ext_cd_irq &&
request_threaded_irq(sc->ext_cd_irq, NULL,
sdhci_s3c_gpio_card_detect_thread,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), sc) == 0) {
int status = gpio_get_value(sc->ext_cd_gpio);
if (pdata->ext_cd_gpio_invert)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 6dfa82e03c7e..423da8194cd8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
* Support of SDHCI platform devices for spear soc family
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* Inspired by sdhci-pltfm.c
*
@@ -75,8 +75,6 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
struct spear_sdhci *sdhci;
int ret;
- BUG_ON(pdev == NULL);
-
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
@@ -84,18 +82,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
goto err;
}
- if (!request_mem_region(iomem->start, resource_size(iomem),
- "spear-sdhci")) {
+ if (!devm_request_mem_region(&pdev->dev, iomem->start,
+ resource_size(iomem), "spear-sdhci")) {
ret = -EBUSY;
dev_dbg(&pdev->dev, "cannot request region\n");
goto err;
}
- sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
+ sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL);
if (!sdhci) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
- goto err_kzalloc;
+ goto err;
}
/* clk enable */
@@ -103,13 +101,13 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (IS_ERR(sdhci->clk)) {
ret = PTR_ERR(sdhci->clk);
dev_dbg(&pdev->dev, "Error getting clock\n");
- goto err_clk_get;
+ goto err;
}
ret = clk_enable(sdhci->clk);
if (ret) {
dev_dbg(&pdev->dev, "Error enabling clock\n");
- goto err_clk_enb;
+ goto put_clk;
}
/* overwrite platform_data */
@@ -124,7 +122,7 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (IS_ERR(host)) {
ret = PTR_ERR(host);
dev_dbg(&pdev->dev, "error allocating host\n");
- goto err_alloc_host;
+ goto disable_clk;
}
host->hw_name = "sdhci";
@@ -132,17 +130,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
- host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ host->ioaddr = devm_ioremap(&pdev->dev, iomem->start,
+ resource_size(iomem));
if (!host->ioaddr) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "failed to remap registers\n");
- goto err_ioremap;
+ goto free_host;
}
ret = sdhci_add_host(host);
if (ret) {
dev_dbg(&pdev->dev, "error adding host\n");
- goto err_add_host;
+ goto free_host;
}
platform_set_drvdata(pdev, host);
@@ -161,11 +160,12 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (sdhci->data->card_power_gpio >= 0) {
int val = 0;
- ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
+ ret = devm_gpio_request(&pdev->dev,
+ sdhci->data->card_power_gpio, "sdhci");
if (ret < 0) {
dev_dbg(&pdev->dev, "gpio request fail: %d\n",
sdhci->data->card_power_gpio);
- goto err_pgpio_request;
+ goto set_drvdata;
}
if (sdhci->data->power_always_enb)
@@ -177,60 +177,48 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (ret) {
dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
sdhci->data->card_power_gpio);
- goto err_pgpio_direction;
+ goto set_drvdata;
}
}
if (sdhci->data->card_int_gpio >= 0) {
- ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
+ ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio,
+ "sdhci");
if (ret < 0) {
dev_dbg(&pdev->dev, "gpio request fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_request;
+ goto set_drvdata;
}
ret = gpio_direction_input(sdhci->data->card_int_gpio);
if (ret) {
dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_direction;
+ goto set_drvdata;
}
- ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
+ ret = devm_request_irq(&pdev->dev,
+ gpio_to_irq(sdhci->data->card_int_gpio),
sdhci_gpio_irq, IRQF_TRIGGER_LOW,
mmc_hostname(host->mmc), pdev);
if (ret) {
dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_request_irq;
+ goto set_drvdata;
}
}
return 0;
-err_igpio_request_irq:
-err_igpio_direction:
- if (sdhci->data->card_int_gpio >= 0)
- gpio_free(sdhci->data->card_int_gpio);
-err_igpio_request:
-err_pgpio_direction:
- if (sdhci->data->card_power_gpio >= 0)
- gpio_free(sdhci->data->card_power_gpio);
-err_pgpio_request:
+set_drvdata:
platform_set_drvdata(pdev, NULL);
sdhci_remove_host(host, 1);
-err_add_host:
- iounmap(host->ioaddr);
-err_ioremap:
+free_host:
sdhci_free_host(host);
-err_alloc_host:
+disable_clk:
clk_disable(sdhci->clk);
-err_clk_enb:
+put_clk:
clk_put(sdhci->clk);
-err_clk_get:
- kfree(sdhci);
-err_kzalloc:
- release_mem_region(iomem->start, resource_size(iomem));
err:
dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
return ret;
@@ -239,35 +227,19 @@ err:
static int __devexit sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
- int dead;
+ int dead = 0;
u32 scratch;
- if (sdhci->data) {
- if (sdhci->data->card_int_gpio >= 0) {
- free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
- gpio_free(sdhci->data->card_int_gpio);
- }
-
- if (sdhci->data->card_power_gpio >= 0)
- gpio_free(sdhci->data->card_power_gpio);
- }
-
platform_set_drvdata(pdev, NULL);
- dead = 0;
scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
dead = 1;
sdhci_remove_host(host, dead);
- iounmap(host->ioaddr);
sdhci_free_host(host);
clk_disable(sdhci->clk);
clk_put(sdhci->clk);
- kfree(sdhci);
- if (iomem)
- release_mem_region(iomem->start, resource_size(iomem));
return 0;
}
@@ -317,5 +289,5 @@ static struct platform_driver sdhci_driver = {
module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index ff5a16991939..b38d8a78f6a0 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -32,8 +32,13 @@
#include "sdhci-pltfm.h"
+/* Tegra SDHOST controller vendor register definitions */
+#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
+#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
+
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
+#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
struct sdhci_tegra_soc_data {
struct sdhci_pltfm_data *pdata;
@@ -120,6 +125,25 @@ static irqreturn_t carddetect_irq(int irq, void *data)
return IRQ_HANDLED;
};
+static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+ if (!(mask & SDHCI_RESET_ALL))
+ return;
+
+ /* Erratum: Enable SDHCI spec v3.00 support */
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) {
+ u32 misc_ctrl;
+
+ misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
+ sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ }
+}
+
static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -148,6 +172,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.platform_8bit_width = tegra_sdhci_8bit,
+ .platform_reset_exit = tegra_sdhci_reset_exit,
};
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
@@ -178,6 +203,7 @@ static struct sdhci_pltfm_data sdhci_tegra30_pdata = {
static struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
+ .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
};
#endif
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ccefdebeff14..f4b8b4db3a9a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
}
if (count >= 0xF) {
- pr_warning("%s: Too large timeout requested for CMD%d!\n",
- mmc_hostname(host->mmc), cmd->opcode);
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
count = 0xE;
}
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5760c1a4b3f6..27143e042af5 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -128,7 +128,7 @@ config MTD_AFS_PARTS
config MTD_OF_PARTS
tristate "OpenFirmware partitioning information support"
- default Y
+ default y
depends on OF
help
This provides a partition parsing function which derives
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 608321ee056e..63d2a64331f7 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -4,7 +4,7 @@
* Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
* Mike Albon <malbon@openwrt.org>
* Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
- * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright © 2011-2012 Jonas Gorski <jonas.gorski@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -82,6 +82,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
int namelen = 0;
int i;
u32 computed_crc;
+ bool rootfs_first = false;
if (bcm63xx_detect_cfe(master))
return -EINVAL;
@@ -109,6 +110,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
char *boardid = &(buf->board_id[0]);
char *tagversion = &(buf->tag_version[0]);
+ sscanf(buf->flash_image_start, "%u", &rootfsaddr);
sscanf(buf->kernel_address, "%u", &kerneladdr);
sscanf(buf->kernel_length, "%u", &kernellen);
sscanf(buf->total_length, "%u", &totallen);
@@ -117,10 +119,19 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
tagversion, boardid);
kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
- rootfsaddr = kerneladdr + kernellen;
+ rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
spareaddr = roundup(totallen, master->erasesize) + cfelen;
sparelen = master->size - spareaddr - nvramlen;
- rootfslen = spareaddr - rootfsaddr;
+
+ if (rootfsaddr < kerneladdr) {
+ /* default Broadcom layout */
+ rootfslen = kerneladdr - rootfsaddr;
+ rootfs_first = true;
+ } else {
+ /* OpenWrt layout */
+ rootfsaddr = kerneladdr + kernellen;
+ rootfslen = spareaddr - rootfsaddr;
+ }
} else {
pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
buf->header_crc, computed_crc);
@@ -156,18 +167,26 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
curpart++;
if (kernellen > 0) {
- parts[curpart].name = "kernel";
- parts[curpart].offset = kerneladdr;
- parts[curpart].size = kernellen;
+ int kernelpart = curpart;
+
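+ /* with the stock Broadcom layout the rootfs precedes the kernel in
+ * flash, so shift the kernel entry down one slot to keep the
+ * partition table in address order */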
+ if (rootfslen > 0 && rootfs_first)
+ kernelpart++;
+ parts[kernelpart].name = "kernel";
+ parts[kernelpart].offset = kerneladdr;
+ parts[kernelpart].size = kernellen;
curpart++;
}
if (rootfslen > 0) {
- parts[curpart].name = "rootfs";
- parts[curpart].offset = rootfsaddr;
- parts[curpart].size = rootfslen;
- if (sparelen > 0)
- parts[curpart].size += sparelen;
+ int rootfspart = curpart;
+
+ if (kernellen > 0 && rootfs_first)
+ rootfspart--;
+ parts[rootfspart].name = "rootfs";
+ parts[rootfspart].offset = rootfsaddr;
+ parts[rootfspart].size = rootfslen;
+ if (sparelen > 0 && !rootfs_first)
+ parts[rootfspart].size += sparelen;
curpart++;
}
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d02592e6a0f0..22d0493a026f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -317,7 +317,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
- pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
+ pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
}
}
@@ -328,10 +328,23 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
- pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
+ pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
}
}
+static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+ * S29NS512P flash uses more than 8 bits to report the number of sectors,
+ * which is not permitted by CFI.
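+ * Here 0x020001ff encodes 0x1ff + 1 = 512 erase blocks of
+ * 0x0200 * 256 = 128 KiB each (the standard CFI erase-region layout).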
+ */
+ cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
+ pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
+}
+
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -362,6 +375,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index ddf9ec6d9168..4558e0f4d07f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -70,7 +70,7 @@ struct cmdline_mtd_partition {
/* mtdpart_setup() parses into here */
static struct cmdline_mtd_partition *partitions;
-/* the command line passed to mtdpart_setupd() */
+/* the command line passed to mtdpart_setup() */
static char *cmdline;
static int cmdline_parsed = 0;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index a4a80b742e65..681e2ee0f2d6 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -52,8 +52,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
while (pages) {
page = page_read(mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -112,8 +110,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
len = len - cpylen;
page = page_read(dev->blkdev->bd_inode->i_mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -148,8 +144,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
len = len - cpylen;
page = page_read(mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -271,7 +265,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd._erase = block2mtd_erase;
dev->mtd._write = block2mtd_write;
- dev->mtd._writev = mtd_writev;
dev->mtd._sync = block2mtd_sync;
dev->mtd._read = block2mtd_read;
dev->mtd.priv = dev;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 8272c02668d6..f70854d728fe 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -227,7 +227,7 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
u8 data8, *dst8;
doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
- cdr = len & 0x3;
+ cdr = len & 0x1;
len4 = len - cdr;
if (first)
@@ -383,7 +383,7 @@ static void doc_set_device_id(struct docg3 *docg3, int id)
* leveling counters are stored. To access this last area of 4 bytes, a special
* mode must be input to the flash ASIC.
*
- * Returns 0 if no error occured, -EIO else.
+ * Returns 0 if no error occurred, -EIO otherwise.
*/
static int doc_set_extra_page_mode(struct docg3 *docg3)
{
@@ -681,7 +681,7 @@ out:
* - one read of 512 bytes at offset 0
* - one read of 512 bytes at offset 512 + 16
*
- * Returns 0 if successful, -EIO if a read error occured.
+ * Returns 0 if successful, -EIO if a read error occurred.
*/
static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
int page, int offset)
@@ -732,12 +732,24 @@ err:
* @len: the number of bytes to be read (must be a multiple of 4)
* @buf: the buffer to be filled in (or NULL to discard the bytes)
* @first: 1 if first time read, DOC_READADDRESS should be set
+ * @last_odd: 1 if last read ended up on an odd byte
+ *
+ * Reads bytes from a prepared page. There is a trick here: if the last read
+ * ended on an odd offset in the 1024-byte double page, i.e. between the two
+ * planes, the first byte must be read on its own. If a 16-bit word read were
+ * used instead, it would return the byte of plane 2 as both the low and the
+ * high byte, corrupting the read.
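+ * (Callers pass the parity of the bytes already consumed, e.g. skip % 2
+ * and (skip + nbdata) % 2 at the call sites below.)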
*
*/
static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
- int first)
+ int first, int last_odd)
{
- doc_read_data_area(docg3, buf, len, first);
+ if (last_odd && len > 0) {
+ doc_read_data_area(docg3, buf, 1, first);
+ doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
+ } else {
+ doc_read_data_area(docg3, buf, len, first);
+ }
doc_delay(docg3, 2);
return len;
}
@@ -839,7 +851,7 @@ static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
*
* Reads flash memory OOB area of pages.
*
- * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
+ * Returns 0 if read successful, or -EIO, -EINVAL if an error occurred
*/
static int doc_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
@@ -850,6 +862,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
u8 *buf = ops->datbuf;
size_t len, ooblen, nbdata, nboob;
u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+ int max_bitflips = 0;
if (buf)
len = ops->len;
@@ -876,7 +889,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
ret = 0;
skip = from % DOC_LAYOUT_PAGE_SIZE;
mutex_lock(&docg3->cascade->lock);
- while (!ret && (len > 0 || ooblen > 0)) {
+ while (ret >= 0 && (len > 0 || ooblen > 0)) {
calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
docg3->reliable);
nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
@@ -887,20 +900,20 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
+ ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
if (ret < skip)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
if (ret < nbdata)
goto err_in_read;
doc_read_page_getbytes(docg3,
DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
- NULL, 0);
- ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+ NULL, 0, (skip + nbdata) % 2);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
if (ret < nboob)
goto err_in_read;
doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
- NULL, 0);
+ NULL, 0, nboob % 2);
doc_get_bch_hw_ecc(docg3, hwecc);
eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
@@ -936,7 +949,8 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
}
if (ret > 0) {
mtd->ecc_stats.corrected += ret;
- ret = -EUCLEAN;
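+ /* report the worst-case bitflip count instead of -EUCLEAN;
+ * mtd_read() in mtdcore now compares it against
+ * mtd->bitflip_threshold */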
+ max_bitflips = max(max_bitflips, ret);
+ ret = max_bitflips;
}
}
@@ -971,7 +985,7 @@ err_in_read:
* Reads flash memory pages. This function does not read the OOB chunk, but only
* the page data.
*
- * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured
+ * Returns 0 if read successful, or -EIO, -EINVAL if an error occurred
*/
static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
@@ -1004,7 +1018,7 @@ static int doc_reload_bbt(struct docg3 *docg3)
DOC_LAYOUT_PAGE_SIZE);
if (!ret)
doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
- buf, 1);
+ buf, 1, 0);
buf += DOC_LAYOUT_PAGE_SIZE;
}
doc_read_page_finish(docg3);
@@ -1064,10 +1078,10 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
ret = doc_reset_seq(docg3);
if (!ret)
ret = doc_read_page_prepare(docg3, block0, block1, page,
- ofs + DOC_LAYOUT_WEAR_OFFSET);
+ ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
if (!ret)
ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
- buf, 1);
+ buf, 1, 0);
doc_read_page_finish(docg3);
if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
@@ -1109,7 +1123,7 @@ static int doc_get_op_status(struct docg3 *docg3)
* Wait for the chip to be ready again after erase or write operation, and check
* erase/write status.
*
- * Returns 0 if erase successfull, -EIO if erase/write issue, -ETIMEOUT if
+ * Returns 0 if erase successful, -EIO if erase/write issue, -ETIMEOUT if
* timeout
*/
static int doc_write_erase_wait_status(struct docg3 *docg3)
@@ -1186,7 +1200,7 @@ static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
* Erase a bunch of contiguous blocks, by pairs, as a "mtd" page of 1024 is
* split into 2 pages of 512 bytes on 2 contiguous blocks.
*
- * Returns 0 if erase successful, -EINVAL if adressing error, -EIO if erase
+ * Returns 0 if erase successful, -EINVAL if addressing error, -EIO if erase
* issue
*/
static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
@@ -1397,7 +1411,7 @@ static int doc_backup_oob(struct docg3 *docg3, loff_t to,
* Or provide data without OOB, and then an all-zeroed OOB will be used (ECC will
* still be filled in if asked for).
*
- * Returns 0 is successfull, EINVAL if length is not 14 bytes
+ * Returns 0 if successful, -EINVAL if length is not 14 bytes
*/
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
struct mtd_oob_ops *ops)
@@ -1945,7 +1959,7 @@ static void doc_release_device(struct mtd_info *mtd)
* docg3_resume - Awakens docg3 floor
* @pdev: platform device
*
- * Returns 0 (always successfull)
+ * Returns 0 (always successful)
*/
static int docg3_resume(struct platform_device *pdev)
{
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 1924d247c1cb..5d0d68c3fe27 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -639,12 +639,16 @@ static const struct spi_device_id m25p_ids[] = {
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ /* Everspin */
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
+
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* Macronix */
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
@@ -728,6 +732,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 797d43cd3550..67960362681e 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -990,9 +990,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
goto err_clk;
}
- ret = clk_enable(dev->clk);
+ ret = clk_prepare_enable(dev->clk);
if (ret)
- goto err_clk_enable;
+ goto err_clk_prepare_enable;
ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
if (ret) {
@@ -1020,8 +1020,8 @@ err_bank_setup:
free_irq(irq, dev);
platform_set_drvdata(pdev, NULL);
err_irq:
- clk_disable(dev->clk);
-err_clk_enable:
+ clk_disable_unprepare(dev->clk);
+err_clk_prepare_enable:
clk_put(dev->clk);
err_clk:
iounmap(dev->io_base);
@@ -1074,7 +1074,7 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
free_irq(irq, dev);
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
iounmap(dev->io_base);
kfree(dev);
@@ -1091,7 +1091,7 @@ int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
struct spear_smi *dev = platform_get_drvdata(pdev);
if (dev && dev->clk)
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
return 0;
}
@@ -1102,7 +1102,7 @@ int spear_smi_resume(struct platform_device *pdev)
int ret = -EPERM;
if (dev && dev->clk)
- ret = clk_enable(dev->clk);
+ ret = clk_prepare_enable(dev->clk);
if (!ret)
spear_smi_hw_init(dev);
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index dbfe17baf046..45abed67f1ef 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = {
static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
{
- int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
+ int qinfo_lines = ARRAY_SIZE(qinfo_array);
int i;
int bankwidth = map_bankwidth(map) * 8;
int major, minor;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8af67cfd671a..5ba2458e799a 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -224,7 +224,7 @@ config MTD_CK804XROM
config MTD_SCB2_FLASH
tristate "BIOS flash chip on Intel SCB2 boards"
- depends on X86 && MTD_JEDECPROBE
+ depends on X86 && MTD_JEDECPROBE && PCI
help
Support for treating the BIOS flash chip on Intel SCB2 boards
as an MTD device - with this you can reprogram your BIOS.
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 92e1f41634c7..93f03175c82d 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -260,18 +260,7 @@ static struct pci_driver vr_nor_pci_driver = {
.id_table = vr_nor_pci_ids,
};
-static int __init vr_nor_mtd_init(void)
-{
- return pci_register_driver(&vr_nor_pci_driver);
-}
-
-static void __exit vr_nor_mtd_exit(void)
-{
- pci_unregister_driver(&vr_nor_pci_driver);
-}
-
-module_init(vr_nor_mtd_init);
-module_exit(vr_nor_mtd_exit);
+module_pci_driver(vr_nor_pci_driver);
MODULE_AUTHOR("Andy Lowe");
MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index b5401e355745..c03456f17004 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -19,9 +19,9 @@
#include <linux/mtd/cfi.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
+#include <linux/of.h>
#include <lantiq_soc.h>
-#include <lantiq_platform.h>
/*
* The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,8 +44,9 @@ struct ltq_mtd {
struct map_info *map;
};
-static char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
+static const char ltq_map_name[] = "ltq_nor";
+static const char *ltq_probe_types[] __devinitconst = {
+ "cmdlinepart", "ofpart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
@@ -108,42 +109,38 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static int __init
+static int __devinit
ltq_mtd_probe(struct platform_device *pdev)
{
- struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
+ struct mtd_part_parser_data ppdata;
struct ltq_mtd *ltq_mtd;
- struct resource *res;
struct cfi_private *cfi;
int err;
+ if (of_machine_is_compatible("lantiq,falcon") &&
+ (ltq_boot_select() != BS_FLASH)) {
+ dev_err(&pdev->dev, "invalid bootstrap options\n");
+ return -ENODEV;
+ }
+
ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to get memory resource");
+ dev_err(&pdev->dev, "failed to get memory resource\n");
err = -ENOENT;
goto err_out;
}
- res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
- resource_size(ltq_mtd->res), dev_name(&pdev->dev));
- if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to request mem resource");
- err = -EBUSY;
- goto err_out;
- }
-
ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
- ltq_mtd->map->phys = res->start;
- ltq_mtd->map->size = resource_size(res);
- ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
- ltq_mtd->map->phys, ltq_mtd->map->size);
+ ltq_mtd->map->phys = ltq_mtd->res->start;
+ ltq_mtd->map->size = resource_size(ltq_mtd->res);
+ ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
if (!ltq_mtd->map->virt) {
- dev_err(&pdev->dev, "failed to ioremap!\n");
- err = -ENOMEM;
- goto err_free;
+ dev_err(&pdev->dev, "failed to remap mem resource\n");
+ err = -EBUSY;
+ goto err_out;
}
ltq_mtd->map->name = ltq_map_name;
@@ -169,9 +166,9 @@ ltq_mtd_probe(struct platform_device *pdev)
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
- err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
- ltq_mtd_data->parts,
- ltq_mtd_data->nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
+ &ppdata, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
@@ -204,32 +201,23 @@ ltq_mtd_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ltq_mtd_match[] = {
+ { .compatible = "lantiq,nor" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
static struct platform_driver ltq_mtd_driver = {
+ .probe = ltq_mtd_probe,
.remove = __devexit_p(ltq_mtd_remove),
.driver = {
- .name = "ltq_nor",
+ .name = "ltq-nor",
.owner = THIS_MODULE,
+ .of_match_table = ltq_mtd_match,
},
};
-static int __init
-init_ltq_mtd(void)
-{
- int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
-
- if (ret)
- pr_err("ltq_nor: error registering platform driver");
- return ret;
-}
-
-static void __exit
-exit_ltq_mtd(void)
-{
- platform_driver_unregister(&ltq_mtd_driver);
-}
-
-module_init(init_ltq_mtd);
-module_exit(exit_ltq_mtd);
+module_platform_driver(ltq_mtd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1d005a3e9b41..f14ce0af763f 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -352,18 +352,7 @@ static struct pci_driver mtd_pci_driver = {
.id_table = mtd_pci_ids,
};
-static int __init mtd_pci_maps_init(void)
-{
- return pci_register_driver(&mtd_pci_driver);
-}
-
-static void __exit mtd_pci_maps_exit(void)
-{
- pci_unregister_driver(&mtd_pci_driver);
-}
-
-module_init(mtd_pci_maps_init);
-module_exit(mtd_pci_maps_exit);
+module_pci_driver(mtd_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 934a72c80078..9dcbc684abdb 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -234,20 +234,7 @@ static struct pci_driver scb2_flash_driver = {
.remove = __devexit_p(scb2_flash_remove),
};
-static int __init
-scb2_flash_init(void)
-{
- return pci_register_driver(&scb2_flash_driver);
-}
-
-static void __exit
-scb2_flash_exit(void)
-{
- pci_unregister_driver(&scb2_flash_driver);
-}
-
-module_init(scb2_flash_init);
-module_exit(scb2_flash_exit);
+module_pci_driver(scb2_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 71b0ba797912..e7534c82f93a 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -59,7 +59,7 @@ static struct mtd_partition bigflash_parts[] = {
}
};
-static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
+static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
#define init_sbc82xx_one_flash(map, br, or) \
do { \
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c837507dfb1c..575730744fdb 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -250,6 +250,43 @@ static ssize_t mtd_name_show(struct device *dev,
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int bitflip_threshold;
+ int retval;
+
+ retval = kstrtouint(buf, 0, &bitflip_threshold);
+ if (retval)
+ return retval;
+
+ mtd->bitflip_threshold = bitflip_threshold;
+ return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+ mtd_bitflip_threshold_show,
+ mtd_bitflip_threshold_store);
+
static struct attribute *mtd_attrs[] = {
&dev_attr_type.attr,
&dev_attr_flags.attr,
@@ -260,6 +297,8 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_oobsize.attr,
&dev_attr_numeraseregions.attr,
&dev_attr_name.attr,
+ &dev_attr_ecc_strength.attr,
+ &dev_attr_bitflip_threshold.attr,
NULL,
};
@@ -322,6 +361,10 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->index = i;
mtd->usecount = 0;
+ /* default value if not set by driver */
+ if (mtd->bitflip_threshold == 0)
+ mtd->bitflip_threshold = mtd->ecc_strength;
+
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
@@ -757,12 +800,24 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf)
{
+ int ret_code;
*retlen = 0;
if (from < 0 || from > mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- return mtd->_read(mtd, from, len, retlen, buf);
+
+ /*
+ * In the absence of an error, drivers return a non-negative integer
+ * representing the maximum number of bitflips that were corrected on
+ * any one ecc region (if applicable; zero otherwise).
+ */
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);
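With this change a driver's _read() hook reports the maximum number of bitflips corrected in any single ECC region, and mtd_read() converts that count into -EUCLEAN only once it reaches the per-device bitflip_threshold (settable through the new sysfs attribute above). A minimal caller sketch, assuming a hypothetical demo_read_page() helper:

#include <linux/mtd/mtd.h>

/* Read one page and decide whether the containing block needs scrubbing. */
static int demo_read_page(struct mtd_info *mtd, loff_t ofs, u_char *buf)
{
	size_t retlen;
	int err = mtd_read(mtd, ofs, mtd->writesize, &retlen, buf);

	if (err == -EUCLEAN) {
		/*
		 * Data in buf is valid, but corrected bitflips reached
		 * mtd->bitflip_threshold: schedule the block for scrubbing.
		 */
		return 0;
	}
	return err;	/* 0 on a clean read, -EBADMSG etc. on failure */
}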
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index ae36d7e1e913..551e316e4454 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt)
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
- const char *s2, unsigned long l2)
+ enum kmsg_dump_reason reason)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
- unsigned long s1_start, s2_start;
- unsigned long l1_cpy, l2_cpy;
- char *dst;
-
- if (reason != KMSG_DUMP_OOPS &&
- reason != KMSG_DUMP_PANIC)
- return;
/* Only dump oopses if dump_oops is set */
if (reason == KMSG_DUMP_OOPS && !dump_oops)
return;
- dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
- l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
- l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
-
- s2_start = l2 - l2_cpy;
- s1_start = l1 - l1_cpy;
-
- memcpy(dst, s1 + s1_start, l1_cpy);
- memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+ kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+ record_size - MTDOOPS_HEADER_SIZE, NULL);
/* Panics must be written immediately */
if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
return;
}
+ cxt->dump.max_reason = KMSG_DUMP_OOPS;
cxt->dump.dump = mtdoops_do_dump;
err = kmsg_dump_register(&cxt->dump);
if (err) {
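The mtdoops conversion above relies on the reworked kmsg_dump interface: the dumper no longer receives two (buffer, length) fragments, it pulls the newest messages itself with kmsg_dump_get_buffer(), and max_reason tells the core which events to deliver. A hedged sketch of a minimal dumper on that interface (buffer size is an assumption):

#include <linux/module.h>
#include <linux/kmsg_dump.h>

static char demo_buf[4096];		/* assumed record size */

static void demo_do_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t len;

	/* Copy as many of the newest kernel messages as fit in demo_buf. */
	kmsg_dump_get_buffer(dumper, true, demo_buf, sizeof(demo_buf), &len);
	/* ... write demo_buf[0..len) to persistent storage ... */
}

static struct kmsg_dumper demo_dumper = {
	.dump		= demo_do_dump,
	.max_reason	= KMSG_DUMP_OOPS,	/* oopses and anything more severe */
};

static int __init demo_init(void)
{
	return kmsg_dump_register(&demo_dumper);
}
module_init(demo_init);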
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 9651c06de0a9..d518e4db8a0b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -67,12 +67,12 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
stats = part->master->ecc_stats;
res = part->master->_read(part->master, from + part->offset, len,
retlen, buf);
- if (unlikely(res)) {
- if (mtd_is_bitflip(res))
- mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
- if (mtd_is_eccerr(res))
- mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
- }
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->master->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->master->ecc_stats.corrected - stats.corrected;
return res;
}
@@ -517,6 +517,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.ecclayout = master->ecclayout;
slave->mtd.ecc_strength = master->ecc_strength;
+ slave->mtd.bitflip_threshold = master->bitflip_threshold;
+
if (master->_block_isbad) {
uint64_t offs = 0;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7d17cecad69d..31bb7e5b504a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -115,6 +115,46 @@ config MTD_NAND_OMAP2
Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
platforms.
+config MTD_NAND_OMAP_BCH
+ depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
+ bool "Enable support for hardware BCH error correction"
+ default n
+ select BCH
+ select BCH_CONST_PARAMS
+ help
+ Support for hardware BCH error correction.
+
+choice
+ prompt "BCH error correction capability"
+ depends on MTD_NAND_OMAP_BCH
+
+config MTD_NAND_OMAP_BCH8
+ bool "8 bits / 512 bytes (recommended)"
+ help
+ Support correcting up to 8 bitflips per 512-byte block.
+ This will use 13 bytes of spare area per 512 bytes of page data.
+ This is the recommended mode, as 4-bit mode does not work
+ on some OMAP3 revisions, due to a hardware bug.
+
+config MTD_NAND_OMAP_BCH4
+ bool "4 bits / 512 bytes"
+ help
+ Support correcting up to 4 bitflips per 512-byte block.
+ This will use 7 bytes of spare area per 512 bytes of page data.
+ Note that this mode does not work on some OMAP3 revisions, due to a
+ hardware bug. Please check your OMAP datasheet before selecting this
+ mode.
+
+endchoice
+
+if MTD_NAND_OMAP_BCH
+config BCH_CONST_M
+ default 13
+config BCH_CONST_T
+ default 4 if MTD_NAND_OMAP_BCH4
+ default 8 if MTD_NAND_OMAP_BCH8
+endif
+
config MTD_NAND_IDS
tristate
@@ -440,7 +480,7 @@ config MTD_NAND_NANDSIM
config MTD_NAND_GPMI_NAND
bool "GPMI NAND Flash Controller driver"
- depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
+ depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
help
Enables NAND Flash support for IMX23 or IMX28.
The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 4f20e1d8bef1..60a0dfdb0808 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -414,7 +414,7 @@ static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
}
err = 0;
if (corrected)
- err = -EUCLEAN;
+ err = 1; /* return max_bitflips per ecc step */
if (uncorrected)
err = -EBADMSG;
out:
@@ -446,7 +446,7 @@ static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
}
err = 0;
if (corrected)
- err = -EUCLEAN;
+ err = 1; /* return max_bitflips per ecc step */
if (uncorrected)
err = -EBADMSG;
return err;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2165576a1c67..97ac6712bb19 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -324,9 +324,10 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
* mtd: mtd info structure
* chip: nand chip info structure
* buf: buffer to store read data
+ * oob_required: caller expects OOB data read to chip->oob_poi
*/
-static int atmel_nand_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -335,6 +336,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
uint8_t *oob = chip->oob_poi;
uint8_t *ecc_pos;
int stat;
+ unsigned int max_bitflips = 0;
/*
* Errata: ALE is incorrectly wired up to the ECC controller
@@ -371,10 +373,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
/* check if there's an error */
stat = chip->ecc.correct(mtd, p, oob, NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
/* get back to oob start (end of page) */
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -382,7 +386,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
/* read the oob */
chip->read_buf(mtd, oob, mtd->oobsize);
- return 0;
+ return max_bitflips;
}
/*
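All of the ecc.read_page() conversions in this series follow the same rule: keep updating ecc_stats as before, track the worst per-step correction count, and return it so the MTD core can compare it against bitflip_threshold. A generic sketch of that loop (the driver name and the placeholder ECC buffers are illustrative, not taken from any driver here):

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int demo_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			  uint8_t *buf, int oob_required, int page)
{
	unsigned int max_bitflips = 0;
	uint8_t ecc_code[8], ecc_calc[8];	/* placeholders for the sketch */
	int step, stat;

	for (step = 0; step < chip->ecc.steps; step++) {
		/* ... read the data and ECC bytes for this step ... */
		stat = chip->ecc.correct(mtd, buf + step * chip->ecc.size,
					 ecc_code, ecc_calc);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* 0 for a clean page, otherwise the worst per-step correction count. */
	return max_bitflips;
}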
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 73abbc3e093e..9f609d2dcf62 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -508,8 +508,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev)
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
- this->options = NAND_NO_AUTOINCR;
-
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
index a930666d0687..5914bb32e001 100644
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -22,9 +22,9 @@
/* ---- Private Function Prototypes -------------------------------------- */
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page);
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf);
+ struct nand_chip *chip, const uint8_t *buf, int oob_required);
/* ---- Private Variables ------------------------------------------------ */
@@ -103,11 +103,12 @@ static struct nand_ecclayout nand_hw_eccoob_4096 = {
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+* @oob_required: caller expects OOB data read to chip->oob_poi
*
***************************************************************************/
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t * buf,
- int page)
+ int oob_required, int page)
{
int sectorIdx = 0;
int eccsize = chip->ecc.size;
@@ -116,6 +117,7 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
uint8_t eccCalc[NAND_ECC_NUM_BYTES];
int sectorOobSize = mtd->oobsize / eccsteps;
int stat;
+ unsigned int max_bitflips = 0;
for (sectorIdx = 0; sectorIdx < eccsteps;
sectorIdx++, datap += eccsize) {
@@ -177,9 +179,10 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
}
#endif
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
- return 0;
+ return max_bitflips;
}
/****************************************************************************
@@ -188,10 +191,11 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+* @oob_required: must write chip->oob_poi to OOB
*
***************************************************************************/
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
int sectorIdx = 0;
int eccsize = chip->ecc.size;
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 6908cdde3065..c855e7cd337b 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -341,7 +341,7 @@ static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
* for MLC parts which may have permanently stuck bits.
*/
struct nand_chip *chip = mtd->priv;
- int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
+ int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
if (ret < 0)
return -EFAULT;
else {
@@ -476,12 +476,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
this->badblock_pattern = &largepage_bbt;
}
- /*
- * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
- * conservative guess, given 13 ecc bytes and using bch alg.
- * (Assume Galois field order m=15 to allow a margin of error.)
- */
- this->ecc.strength = 6;
+ this->ecc.strength = 8;
#endif
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index d7b86b925de5..3f1c18599cbd 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -558,7 +558,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
}
static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -567,7 +567,7 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
}
static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2a96e1a12062..f3f6cfedd69e 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int cafe_device_ready(struct mtd_info *mtd)
{
struct cafe_priv *cafe = mtd->priv;
- int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
cafe_writel(cafe, irqs, NAND_IRQ);
@@ -364,25 +364,27 @@ static int cafe_nand_write_oob(struct mtd_info *mtd,
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 1;
+ return 0;
}
/**
* cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
*
* The hw generator calculates the error syndrome automatically. Therefor
* we need a special oob layout and handling.
*/
static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct cafe_priv *cafe = mtd->priv;
+ unsigned int max_bitflips = 0;
cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
cafe_readl(cafe, NAND_ECC_RESULT),
@@ -449,10 +451,11 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
} else {
dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
mtd->ecc_stats.corrected += n;
+ max_bitflips = max_t(unsigned int, max_bitflips, n);
}
}
- return 0;
+ return max_bitflips;
}
static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -518,7 +521,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
struct cafe_priv *cafe = mtd->priv;
@@ -530,16 +534,17 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
}
static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int page, int cached, int raw)
+ const uint8_t *buf, int oob_required, int page,
+ int cached, int raw)
{
int status;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf);
+ chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
else
- chip->ecc.write_page(mtd, chip, buf);
+ chip->ecc.write_page(mtd, chip, buf, oob_required);
/*
* Cached progamming disabled for now, Not sure if its worth the
@@ -685,7 +690,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
- cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+ cafe->nand.options = NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -888,17 +893,7 @@ static struct pci_driver cafe_nand_pci_driver = {
.resume = cafe_nand_resume,
};
-static int __init cafe_nand_init(void)
-{
- return pci_register_driver(&cafe_nand_pci_driver);
-}
-
-static void __exit cafe_nand_exit(void)
-{
- pci_unregister_driver(&cafe_nand_pci_driver);
-}
-module_init(cafe_nand_init);
-module_exit(cafe_nand_exit);
+module_pci_driver(cafe_nand_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 821c34c62500..adb6c3ef37fb 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -240,7 +240,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
/* Enable the following for a flash based bad block table */
this->bbt_options = NAND_BBT_USE_FLASH;
- this->options = NAND_NO_AUTOINCR;
/* Scan to find existence of the device */
if (nand_scan(new_mtd, 1)) {
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index a9e57d686297..0650aafa0dd2 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -924,9 +924,10 @@ bool is_erased(uint8_t *buf, int len)
#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
- uint32_t irq_status)
+ uint32_t irq_status, unsigned int *max_bitflips)
{
bool check_erased_page = false;
+ unsigned int bitflips = 0;
if (irq_status & INTR_STATUS__ECC_ERR) {
/* read the ECC errors. we'll ignore them for now */
@@ -965,6 +966,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
/* correct the ECC error */
buf[offset] ^= err_correction_value;
denali->mtd.ecc_stats.corrected++;
+ bitflips++;
}
} else {
/* if the error is not correctable, need to
@@ -984,6 +986,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
clear_interrupts(denali);
denali_set_intr_modes(denali, true);
}
+ *max_bitflips = bitflips;
return check_erased_page;
}
@@ -1084,7 +1087,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
* by write_page above.
* */
static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
/* for regular page writes, we let HW handle all the ECC
* data written to the device. */
@@ -1096,7 +1099,7 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* write_page() function above.
*/
static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
/* for raw page writes, we want to disable ECC and simply write
whatever data is in the buffer. */
@@ -1110,17 +1113,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
}
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
read_oob_data(mtd, chip->oob_poi, page);
- return 0; /* notify NAND core to send command to
- NAND device. */
+ return 0;
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
+ unsigned int max_bitflips;
struct denali_nand_info *denali = mtd_to_denali(mtd);
dma_addr_t addr = denali->buf.dma_buf;
@@ -1153,7 +1156,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
memcpy(buf, denali->buf.buf, mtd->writesize);
- check_erased_page = handle_ecc(denali, buf, irq_status);
+ check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
denali_enable_dma(denali, false);
if (check_erased_page) {
@@ -1167,11 +1170,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
denali->mtd.ecc_stats.failed++;
}
}
- return 0;
+ return max_bitflips;
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1702,17 +1705,4 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
-static int __devinit denali_init(void)
-{
- printk(KERN_INFO "Spectra MTD driver\n");
- return pci_register_driver(&denali_pci_driver);
-}
-
-/* Free memory */
-static void __devexit denali_exit(void)
-{
- pci_unregister_driver(&denali_pci_driver);
-}
-
-module_init(denali_init);
-module_exit(denali_exit);
+module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index b08202664543..a225e49a5623 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -720,6 +720,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
uint16_t status, edc_err, *buf16;
+ int bits_corrected = 0;
dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
@@ -772,7 +773,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
/* If bitflips are reported, attempt to correct with ecc */
if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
- int bits_corrected = correct_data(mtd, buf, page);
+ bits_corrected = correct_data(mtd, buf, page);
if (bits_corrected == -EBADMSG)
mtd->ecc_stats.failed++;
else
@@ -781,24 +782,24 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
}
writew(0, docptr + DOC_DATAEND);
- return 0;
+ return bits_corrected;
}
static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
return read_page(mtd, nand, buf, page, false);
}
static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
return read_page(mtd, nand, buf, page, true);
}
static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
- int page, int sndcmd)
+ int page)
{
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
@@ -952,13 +953,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
}
static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
return write_page(mtd, nand, buf, false);
}
static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
return write_page(mtd, nand, buf, true);
}
@@ -1002,7 +1003,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
return -ENOMEM;
read_page_prologue(mtd, g4_addr);
- status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
+ status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
if (status)
goto exit;
@@ -1079,7 +1080,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* write first page of block */
write_page_prologue(mtd, g4_addr);
- docg4_write_page(mtd, nand, buf);
+ docg4_write_page(mtd, nand, buf, 1);
ret = pageprog(mtd);
if (!ret)
mtd->ecc_stats.badblocks++;
@@ -1192,8 +1193,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->ecc.prepad = 8;
nand->ecc.bytes = 8;
nand->ecc.strength = DOCG4_T;
- nand->options =
- NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
+ nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
nand->controller = &nand->hwcontrol;
spin_lock_init(&nand->controller->lock);
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 80b5264f0a32..784293806110 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,6 +75,7 @@ struct fsl_elbc_fcm_ctrl {
unsigned int use_mdr; /* Non zero if the MDR is to be set */
unsigned int oob; /* Non zero if operating on OOB data */
unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
};
/* These map to the positions used by the FCM hardware ECC generator */
@@ -253,6 +254,8 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
if (chip->ecc.mode != NAND_ECC_HW)
return 0;
+ elbc_fcm_ctrl->max_bitflips = 0;
+
if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
uint32_t lteccr = in_be32(&lbc->lteccr);
/*
@@ -262,11 +265,16 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
* bits 28-31 are uncorrectable errors, marked elsewhere.
* for small page nand only 1 bit is used.
* if the ELBC doesn't have the lteccr register it reads 0
+ * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+ * count the number of sub-pages with bitflips and update
+ * ecc_stats.corrected accordingly.
*/
if (lteccr & 0x000F000F)
out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
- if (lteccr & 0x000F0000)
+ if (lteccr & 0x000F0000) {
mtd->ecc_stats.corrected++;
+ elbc_fcm_ctrl->max_bitflips = 1;
+ }
}
return 0;
@@ -738,26 +746,28 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
return 0;
}
-static int fsl_elbc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf,
- int page)
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
fsl_elbc_read_buf(mtd, buf, mtd->writesize);
- fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
- return 0;
+ return elbc_fcm_ctrl->max_bitflips;
}
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static void fsl_elbc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf)
+static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -795,7 +805,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->bbt_md = &bbt_mirror_descr;
/* set up nand options */
- chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->options = NAND_NO_READRDY;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->controller = &elbc_fcm_ctrl->controller;
@@ -814,11 +824,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->ecc.size = 512;
chip->ecc.bytes = 3;
chip->ecc.strength = 1;
- /*
- * FIXME: can hardware ecc correct 4 bitflips if page size is
- * 2k? Then does hardware report number of corrections for this
- * case? If so, ecc_stats reporting needs to be fixed as well.
- */
} else {
/* otherwise fall back to default software ECC */
chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index c30ac7b83d28..9602c1b7e27e 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -63,6 +63,7 @@ struct fsl_ifc_nand_ctrl {
unsigned int oob; /* Non zero if operating on OOB data */
unsigned int eccread; /* Non zero for a full-page ECC read */
unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
};
static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
@@ -262,6 +263,8 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+ nctrl->max_bitflips = 0;
+
if (nctrl->eccread) {
int errors;
int bufnum = nctrl->page & priv->bufnum_mask;
@@ -290,6 +293,9 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
}
mtd->ecc_stats.corrected += errors;
+ nctrl->max_bitflips = max_t(unsigned int,
+ nctrl->max_bitflips,
+ errors);
}
nctrl->eccread = 0;
@@ -375,21 +381,31 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
return;
- /* READID must read all 8 possible bytes */
case NAND_CMD_READID:
+ case NAND_CMD_PARAM: {
+ int timing = IFC_FIR_OP_RB;
+ if (command == NAND_CMD_PARAM)
+ timing = IFC_FIR_OP_RBCD;
+
out_be32(&ifc->ifc_nand.nand_fir0,
(IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+ (timing << IFC_NAND_FIR0_OP2_SHIFT));
out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
- /* 8 bytes for manuf, device and exts */
- out_be32(&ifc->ifc_nand.nand_fbcr, 8);
- ifc_nand_ctrl->read_bytes = 8;
+ command << IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.row3, column);
+
+ /*
+ * although currently it's 8 bytes for READID, we always read
+ * the maximum 256 bytes(for PARAM)
+ */
+ out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+ ifc_nand_ctrl->read_bytes = 256;
set_addr(mtd, 0, 0, 0);
fsl_ifc_run_command(mtd);
return;
+ }
/* ERASE1 stores the block and page address */
case NAND_CMD_ERASE1:
@@ -682,15 +698,16 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
return nand_fsr | NAND_STATUS_WP;
}
-static int fsl_ifc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int page)
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
struct fsl_ifc_mtd *priv = chip->priv;
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
fsl_ifc_read_buf(mtd, buf, mtd->writesize);
- fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
@@ -698,15 +715,14 @@ static int fsl_ifc_read_page(struct mtd_info *mtd,
if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
mtd->ecc_stats.failed++;
- return 0;
+ return nctrl->max_bitflips;
}
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static void fsl_ifc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf)
+static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
fsl_ifc_write_buf(mtd, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -789,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
out_be32(&ifc->ifc_nand.ncfgr, 0x0);
/* set up nand options */
- chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->options = NAND_NO_READRDY;
chip->bbt_options = NAND_BBT_USE_FLASH;
@@ -811,6 +827,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* Hardware generates ECC per 512 Bytes */
chip->ecc.size = 512;
chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
switch (csor & CSOR_NAND_PGS_MASK) {
case CSOR_NAND_PGS_512:
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 1b8330e1155a..38d26240d8b1 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -692,6 +692,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
* @page: page number to read
*
* This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -701,7 +702,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
* max of 8 bits)
*/
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
@@ -720,6 +721,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
*/
uint16_t ecc_oob[7];
uint8_t *oob = (uint8_t *)&ecc_oob[0];
+ unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
@@ -748,13 +750,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/*
@@ -994,9 +998,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
return PTR_ERR(host->clk);
}
- ret = clk_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
if (ret)
- goto err_clk_enable;
+ goto err_clk_prepare_enable;
/*
* This device ID is actually a common AMBA ID as used on the
@@ -1176,8 +1180,8 @@ err_req_write_chnl:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
err_req_read_chnl:
- clk_disable(host->clk);
-err_clk_enable:
+ clk_disable_unprepare(host->clk);
+err_clk_prepare_enable:
clk_put(host->clk);
return ret;
}
@@ -1198,7 +1202,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
dma_release_channel(host->write_dma_chan);
dma_release_channel(host->read_dma_chan);
}
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
}
@@ -1210,7 +1214,7 @@ static int fsmc_nand_suspend(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
if (host)
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
return 0;
}
@@ -1218,7 +1222,7 @@ static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
if (host) {
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
fsmc_nand_setup(host->regs_va, host->bank,
host->nand.options & NAND_BUSWIDTH_16,
host->dev_timings);
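The fsmc changes swap clk_enable()/clk_disable() for clk_prepare_enable()/clk_disable_unprepare(), the required pairing once the driver runs on the common clock framework: prepare may sleep, enable must stay atomic, and the combined helpers keep the two balanced. A minimal probe-time sketch under that assumption (error labels omitted for brevity):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* prepare (may sleep) and enable (atomic) in one balanced call */
	ret = clk_prepare_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... device setup; clk_disable_unprepare() on the teardown path ... */
	return 0;
}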
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index 4effb8c579db..a0924515c396 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -51,15 +51,26 @@
#define BP_BCH_FLASH0LAYOUT0_ECC0 12
#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
-#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \
- (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
+ (GPMI_IS_MX6Q(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
+ : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & BM_BCH_FLASH0LAYOUT0_ECC0) \
+ )
#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
-#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \
- (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
- & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
+ (GPMI_IS_MX6Q(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ )
#define HW_BCH_FLASH0LAYOUT1 0x00000090
@@ -72,13 +83,24 @@
#define BP_BCH_FLASH0LAYOUT1_ECCN 12
#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
-#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \
- (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
+ (GPMI_IS_MX6Q(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
+ : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & BM_BCH_FLASH0LAYOUT1_ECCN) \
+ )
#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
-#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \
- (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
- & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
+ (GPMI_IS_MX6Q(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ )
#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index e8ea7107932e..a1f43329ad43 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -21,7 +21,6 @@
#include <linux/mtd/gpmi-nand.h>
#include <linux/delay.h>
#include <linux/clk.h>
-#include <mach/mxs.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
@@ -37,6 +36,8 @@ struct timing_threshod timing_default_threshold = {
.max_dll_delay_in_ns = 16,
};
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
/*
* Clear the bit and poll it cleared. This is usually called with
* a reset address and mask being either SFTRST(bit 31) or CLKGATE
@@ -47,7 +48,7 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
int timeout = 0x400;
/* clear the bit */
- __mxs_clrl(mask, addr);
+ writel(mask, addr + MXS_CLR_ADDR);
/*
* SFTRST needs 3 GPMI clocks to settle, the reference manual
@@ -92,11 +93,11 @@ static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
goto error;
/* clear CLKGATE */
- __mxs_clrl(MODULE_CLKGATE, reset_addr);
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
if (!just_enable) {
/* set SFTRST to reset the block */
- __mxs_setl(MODULE_SFTRST, reset_addr);
+ writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
udelay(1);
/* poll CLKGATE becoming set */
@@ -223,13 +224,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
/* Configure layout 0. */
writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
- | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
- | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
+ | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT0);
writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
- | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
- | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
+ | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT1);
/* Set *all* chip selects to use layout 0. */
@@ -255,11 +256,12 @@ static unsigned int ns_to_cycles(unsigned int time,
return max(k, min);
}
+#define DEF_MIN_PROP_DELAY 5
+#define DEF_MAX_PROP_DELAY 9
/* Apply timing to current hardware conditions. */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
struct gpmi_nfc_hardware_timing *hw)
{
- struct gpmi_nand_platform_data *pdata = this->pdata;
struct timing_threshod *nfc = &timing_default_threshold;
struct nand_chip *nand = &this->nand;
struct nand_timing target = this->timing;
@@ -276,8 +278,8 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
int ideal_sample_delay_in_ns;
unsigned int sample_delay_factor;
int tEYE;
- unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
- unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
+ unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
+ unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
/*
* If there are multiple chips, we need to relax the timings to allow
@@ -803,7 +805,8 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
if (GPMI_IS_MX23(this)) {
mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
- } else if (GPMI_IS_MX28(this)) {
+ } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+ /* MX28 shares the same R/B register as MX6Q. */
mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
reg = readl(r->gpmi_regs + HW_GPMI_STAT);
} else
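Dropping <mach/mxs.h> means gpmi-lib can no longer call __mxs_setl()/__mxs_clrl(); instead it writes the mask to the SET (+0x4) and CLR (+0x8) aliases that every MXS-style register block provides, which updates the bits without a read-modify-write. A small sketch of the idiom, using the offsets defined above (the helper names are made up):

#include <linux/io.h>
#include <linux/types.h>

#define MXS_SET_ADDR	0x4
#define MXS_CLR_ADDR	0x8

/* Set bits in an MXS-style register via its SET alias. */
static void demo_set_bits(void __iomem *reg, u32 mask)
{
	writel(mask, reg + MXS_SET_ADDR);
}

/* Clear bits via the CLR alias. */
static void demo_clr_bits(void __iomem *reg, u32 mask)
{
	writel(mask, reg + MXS_CLR_ADDR);
}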
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index b68e04310bd8..a6cad5caba78 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,6 +25,8 @@
#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "gpmi-nand.h"
/* add our owner bbt descriptor */
@@ -387,7 +389,7 @@ static void release_bch_irq(struct gpmi_nand_data *this)
static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
{
struct gpmi_nand_data *this = param;
- struct resource *r = this->private;
+ int dma_channel = (int)this->private;
if (!mxs_dma_is_apbh(chan))
return false;
@@ -399,7 +401,7 @@ static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
* for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
* (These eight channels share the same IRQ!)
*/
- if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
+ if (dma_channel == chan->chan_id) {
chan->private = &this->dma_data;
return true;
}
@@ -419,57 +421,45 @@ static void release_dma_channels(struct gpmi_nand_data *this)
static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
- struct gpmi_nand_platform_data *pdata = this->pdata;
- struct resources *res = &this->resources;
- struct resource *r, *r_dma;
- unsigned int i;
+ struct resource *r_dma;
+ struct device_node *dn;
+ int dma_channel;
+ unsigned int ret;
+ struct dma_chan *dma_chan;
+ dma_cap_mask_t mask;
+
+ /* DMA channel; we only use the first one. */
+ dn = pdev->dev.of_node;
+ ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
+ if (ret) {
+ pr_err("unable to get DMA channel from dt.\n");
+ goto acquire_err;
+ }
+ this->private = (void *)dma_channel;
- r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- GPMI_NAND_DMA_CHANNELS_RES_NAME);
+ /* gpmi dma interrupt */
r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
GPMI_NAND_DMA_INTERRUPT_RES_NAME);
- if (!r || !r_dma) {
+ if (!r_dma) {
pr_err("Can't get resource for DMA\n");
- return -ENXIO;
+ goto acquire_err;
}
+ this->dma_data.chan_irq = r_dma->start;
- /* used in gpmi_dma_filter() */
- this->private = r;
+ /* request dma channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- for (i = r->start; i <= r->end; i++) {
- struct dma_chan *dma_chan;
- dma_cap_mask_t mask;
-
- if (i - r->start >= pdata->max_chip_count)
- break;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- /* get the DMA interrupt */
- if (r_dma->start == r_dma->end) {
- /* only register the first. */
- if (i == r->start)
- this->dma_data.chan_irq = r_dma->start;
- else
- this->dma_data.chan_irq = NO_IRQ;
- } else
- this->dma_data.chan_irq = r_dma->start + (i - r->start);
-
- dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
- if (!dma_chan)
- goto acquire_err;
-
- /* fill the first empty item */
- this->dma_chans[i - r->start] = dma_chan;
+ dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+ if (!dma_chan) {
+ pr_err("dma_request_channel failed.\n");
+ goto acquire_err;
}
- res->dma_low_channel = r->start;
- res->dma_high_channel = i;
+ this->dma_chans[0] = dma_chan;
return 0;
acquire_err:
- pr_err("Can't acquire DMA channel %u\n", i);
release_dma_channels(this);
return -EINVAL;
}
@@ -851,7 +841,7 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
}
static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct gpmi_nand_data *this = chip->priv;
struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -917,17 +907,20 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
mtd->ecc_stats.corrected += corrected;
}
- /*
- * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
- * details about our policy for delivering the OOB.
- *
- * We fill the caller's buffer with set bits, and then copy the block
- * mark to th caller's buffer. Note that, if block mark swapping was
- * necessary, it has already been done, so we can rely on the first
- * byte of the auxiliary buffer to contain the block mark.
- */
- memset(chip->oob_poi, ~0, mtd->oobsize);
- chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+ if (oob_required) {
+ /*
+ * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+ * for details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the
+ * block mark to the caller's buffer. Note that, if block mark
+ * swapping was necessary, it has already been done, so we can
+ * rely on the first byte of the auxiliary buffer to contain
+ * the block mark.
+ */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+ }
read_page_swap_end(this, buf, mtd->writesize,
this->payload_virt, this->payload_phys,
@@ -937,8 +930,8 @@ exit_nfc:
return ret;
}
-static void gpmi_ecc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
struct gpmi_nand_data *this = chip->priv;
struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1077,7 +1070,7 @@ exit_auxiliary:
* this driver.
*/
static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
struct gpmi_nand_data *this = chip->priv;
@@ -1100,11 +1093,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
chip->oob_poi[0] = chip->read_byte(mtd);
}
- /*
- * Return true, indicating that the next call to this function must send
- * a command.
- */
- return true;
+ return 0;
}
static int
@@ -1318,7 +1307,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- chip->ecc.write_page_raw(mtd, chip, buffer);
+ chip->ecc.write_page_raw(mtd, chip, buffer, 0);
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
/* Wait for the write to finish. */
@@ -1444,6 +1433,10 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
if (ret)
return ret;
+ /* Adjust the ECC strength according to the chip. */
+ this->nand.ecc.strength = this->bch_geometry.ecc_strength;
+ this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
+
/* NAND boot init, depends on the gpmi_set_geometry(). */
return nand_boot_init(this);
}
@@ -1471,9 +1464,9 @@ void gpmi_nfc_exit(struct gpmi_nand_data *this)
static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
{
- struct gpmi_nand_platform_data *pdata = this->pdata;
struct mtd_info *mtd = &this->mtd;
struct nand_chip *chip = &this->nand;
+ struct mtd_part_parser_data ppdata = {};
int ret;
/* init current chip */
@@ -1502,6 +1495,7 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
chip->options |= NAND_NO_SUBPAGE_WRITE;
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 1;
+ chip->ecc.strength = 8;
chip->ecc.layout = &gpmi_hw_ecclayout;
/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
@@ -1511,14 +1505,14 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- ret = nand_scan(mtd, pdata->max_chip_count);
+ ret = nand_scan(mtd, 1);
if (ret) {
pr_err("Chip scan failed\n");
goto err_out;
}
- ret = mtd_device_parse_register(mtd, NULL, NULL,
- pdata->partitions, pdata->partition_count);
+ ppdata.of_node = this->pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (ret)
goto err_out;
return 0;
@@ -1528,12 +1522,41 @@ err_out:
return ret;
}
+static const struct platform_device_id gpmi_ids[] = {
+ { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
+ { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
+ { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
+ {},
+};
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+ {
+ .compatible = "fsl,imx23-gpmi-nand",
+ .data = (void *)&gpmi_ids[IS_MX23]
+ }, {
+ .compatible = "fsl,imx28-gpmi-nand",
+ .data = (void *)&gpmi_ids[IS_MX28]
+ }, {
+ .compatible = "fsl,imx6q-gpmi-nand",
+ .data = (void *)&gpmi_ids[IS_MX6Q]
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
static int __devinit gpmi_nand_probe(struct platform_device *pdev)
{
- struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
struct gpmi_nand_data *this;
+ const struct of_device_id *of_id;
int ret;
+ of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+ if (of_id) {
+ pdev->id_entry = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+ return -ENOMEM;
+ }
+
this = kzalloc(sizeof(*this), GFP_KERNEL);
if (!this) {
pr_err("Failed to allocate per-device memory\n");
@@ -1543,13 +1566,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, this);
this->pdev = pdev;
this->dev = &pdev->dev;
- this->pdata = pdata;
-
- if (pdata->platform_init) {
- ret = pdata->platform_init();
- if (ret)
- goto platform_init_error;
- }
ret = acquire_resources(this);
if (ret)
@@ -1567,7 +1583,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
exit_nfc_init:
release_resources(this);
-platform_init_error:
exit_acquire_resources:
platform_set_drvdata(pdev, NULL);
kfree(this);
@@ -1585,19 +1600,10 @@ static int __exit gpmi_nand_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id gpmi_ids[] = {
- {
- .name = "imx23-gpmi-nand",
- .driver_data = IS_MX23,
- }, {
- .name = "imx28-gpmi-nand",
- .driver_data = IS_MX28,
- }, {},
-};
-
static struct platform_driver gpmi_nand_driver = {
.driver = {
.name = "gpmi-nand",
+ .of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
.remove = __exit_p(gpmi_nand_remove),
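The device-tree probe path above keeps the legacy platform_device_id checks working by looking up the matching of_device_id and pointing pdev->id_entry at the platform ID its .data references. A reduced sketch of that lookup (the "vendor,demo" names are hypothetical):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct platform_device_id demo_ids[] = {
	{ .name = "demo-v1", .driver_data = 0 },
	{ },
};

static const struct of_device_id demo_of_table[] = {
	{ .compatible = "vendor,demo-v1", .data = &demo_ids[0] },
	{ },
};

static int demo_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;

	of_id = of_match_device(demo_of_table, &pdev->dev);
	if (!of_id)
		return -ENODEV;

	/* Reuse the legacy id_entry so driver_data checks also work on DT. */
	pdev->id_entry = of_id->data;
	return 0;
}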
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index ec6180d4ff8f..ce5daa160920 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -266,8 +266,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
#define STATUS_UNCORRECTABLE 0xfe
/* Use the platform_id to distinguish different Archs. */
-#define IS_MX23 0x1
-#define IS_MX28 0x2
+#define IS_MX23 0x0
+#define IS_MX28 0x1
+#define IS_MX6Q 0x2
#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
+#define GPMI_IS_MX6Q(x) ((x)->pdev->id_entry->driver_data == IS_MX6Q)
#endif
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 9bf5ce5fa22d..50166e93ba96 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -124,7 +124,6 @@ static int __init h1910_init(void)
/* 15 us command delay time */
this->chip_delay = 50;
this->ecc.mode = NAND_ECC_SOFT;
- this->options = NAND_NO_AUTOINCR;
/* Scan to find existence of the device */
if (nand_scan(h1910_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index e4147e8acb7c..a6fa884ae49b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,11 +332,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
chip->ecc.size = 512;
chip->ecc.bytes = 9;
- chip->ecc.strength = 2;
- /*
- * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
- * conservative guess, given 9 ecc bytes and reed-solomon alg.
- */
+ chip->ecc.strength = 4;
if (pdata)
chip->ecc.layout = pdata->ecc_layout;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index c240cf1af961..c259c24d7986 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -734,7 +734,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
chip->write_buf = mpc5121_nfc_write_buf;
chip->verify_buf = mpc5121_nfc_verify_buf;
chip->select_chip = mpc5121_nfc_select_chip;
- chip->options = NAND_NO_AUTOINCR;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index cc0678a967c1..6acc790c2fbb 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,6 +32,8 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#include <asm/mach/flash.h>
#include <mach/mxc_nand.h>
@@ -140,13 +142,47 @@
#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+ void (*preset)(struct mtd_info *);
+ void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_page)(struct mtd_info *, unsigned int);
+ void (*send_read_id)(struct mxc_nand_host *);
+ uint16_t (*get_dev_status)(struct mxc_nand_host *);
+ int (*check_int)(struct mxc_nand_host *);
+ void (*irq_control)(struct mxc_nand_host *, int);
+ u32 (*get_ecc_status)(struct mxc_nand_host *);
+ struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+ void (*select_chip)(struct mtd_info *mtd, int chip);
+ int (*correct_data)(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc);
+
+ /*
+ * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+ * (CONFIG1:INT_MSK is set). To handle this the driver uses
+ * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+ */
+ int irqpending_quirk;
+ int needs_ip;
+
+ size_t regs_offset;
+ size_t spare0_offset;
+ size_t axi_offset;
+
+ int spare_len;
+ int eccbytes;
+ int eccsize;
+};
+
struct mxc_nand_host {
struct mtd_info mtd;
struct nand_chip nand;
struct device *dev;
- void *spare0;
- void *main_area0;
+ void __iomem *spare0;
+ void __iomem *main_area0;
void __iomem *base;
void __iomem *regs;
@@ -163,16 +199,9 @@ struct mxc_nand_host {
uint8_t *data_buf;
unsigned int buf_start;
- int spare_len;
-
- void (*preset)(struct mtd_info *);
- void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
- void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
- void (*send_page)(struct mtd_info *, unsigned int);
- void (*send_read_id)(struct mxc_nand_host *);
- uint16_t (*get_dev_status)(struct mxc_nand_host *);
- int (*check_int)(struct mxc_nand_host *);
- void (*irq_control)(struct mxc_nand_host *, int);
+
+ const struct mxc_nand_devtype_data *devtype_data;
+ struct mxc_nand_platform_data pdata;
};
/* OOB placement block for use with hardware ecc generation */
@@ -242,20 +271,26 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
-static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
{
- struct mxc_nand_host *host = dev_id;
-
- if (!host->check_int(host))
- return IRQ_NONE;
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
- host->irq_control(host, 0);
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = __raw_readl(s++);
+}
- complete(&host->op_completion);
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ u32 __iomem *t = trg;
+ const u32 *s = src;
- return IRQ_HANDLED;
+ for (i = 0; i < (size >> 2); i++)
+ __raw_writel(*s++, t++);
}
static int check_int_v3(struct mxc_nand_host *host)
@@ -280,26 +315,12 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
if (!(tmp & NFC_V1_V2_CONFIG2_INT))
return 0;
- if (!cpu_is_mx21())
+ if (!host->devtype_data->irqpending_quirk)
writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
return 1;
}
-/*
- * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
- * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
- * driver can enable/disable the irq line rather than simply masking the
- * interrupts.
- */
-static void irq_control_mx21(struct mxc_nand_host *host, int activate)
-{
- if (activate)
- enable_irq(host->irq);
- else
- disable_irq_nosync(host->irq);
-}
-
static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
{
uint16_t tmp;
@@ -328,6 +349,47 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate)
writel(tmp, NFC_V3_CONFIG2);
}
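+/*
+ * With the i.MX21 irqpending quirk the driver enables/disables the irq line
+ * itself; otherwise it defers to the version-specific irq_control callback.
+ */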
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+ if (host->devtype_data->irqpending_quirk) {
+ if (activate)
+ enable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+ } else {
+ host->devtype_data->irq_control(host, activate);
+ }
+}
+
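+/* Read the raw ECC status result register of the respective controller version */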
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+ return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+ return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+ return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+ struct mxc_nand_host *host = dev_id;
+
+ if (!host->devtype_data->check_int(host))
+ return IRQ_NONE;
+
+ irq_control(host, 0);
+
+ complete(&host->op_completion);
+
+ return IRQ_HANDLED;
+}
+
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of config2 register.
*/
@@ -336,14 +398,14 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
int max_retries = 8000;
if (useirq) {
- if (!host->check_int(host)) {
+ if (!host->devtype_data->check_int(host)) {
INIT_COMPLETION(host->op_completion);
- host->irq_control(host, 1);
+ irq_control(host, 1);
wait_for_completion(&host->op_completion);
}
} else {
while (max_retries-- > 0) {
- if (host->check_int(host))
+ if (host->devtype_data->check_int(host))
break;
udelay(1);
@@ -374,7 +436,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
- if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+ if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
int max_retries = 100;
/* Reset completion is indicated by NFC_CONFIG2 */
/* being set to 0 */
@@ -433,13 +495,27 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
wait_op_done(host, false);
}
-static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+}
+
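+/*
+ * On v1 controllers pages larger than 512 bytes are transferred through four
+ * internal buffers, smaller pages through a single one.
+ */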
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
int bufs, i;
- if (nfc_is_v1() && mtd->writesize > 512)
+ if (mtd->writesize > 512)
bufs = 4;
else
bufs = 1;
@@ -463,7 +539,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
wait_op_done(host, true);
- memcpy(host->data_buf, host->main_area0, 16);
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
}
/* Request the NANDFC to perform a read of the NAND device ID. */
@@ -479,7 +555,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
/* Wait for operation to complete */
wait_op_done(host, true);
- memcpy(host->data_buf, host->main_area0, 16);
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
if (this->options & NAND_BUSWIDTH_16) {
/* compress the ID info */
@@ -555,7 +631,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
* additional correction. 2-Bit errors cannot be corrected by
* HW ECC, so we need to return failure
*/
- uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
+ uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
@@ -580,10 +656,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
no_subpages = mtd->writesize >> 9;
- if (nfc_is_v21())
- ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
- else
- ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+ ecc_stat = host->devtype_data->get_ecc_status(host);
do {
err = ecc_stat & ecc_bit_mask;
@@ -616,7 +689,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
/* Check for status request */
if (host->status_request)
- return host->get_dev_status(host) & 0xFF;
+ return host->devtype_data->get_dev_status(host) & 0xFF;
ret = *(uint8_t *)(host->data_buf + host->buf_start);
host->buf_start++;
@@ -682,7 +755,28 @@ static int mxc_nand_verify_buf(struct mtd_info *mtd,
/* This function is used by upper layer for select and
* deselect of the NAND chip */
-static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable_unprepare(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
+
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_prepare_enable(host->clk);
+ host->clk_act = 1;
+ }
+}
+
+static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
@@ -702,10 +796,8 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
host->clk_act = 1;
}
- if (nfc_is_v21()) {
- host->active_cs = chip;
- writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
- }
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
}
/*
@@ -718,23 +810,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
u16 i, j;
u16 n = mtd->writesize >> 9;
u8 *d = host->data_buf + mtd->writesize;
- u8 *s = host->spare0;
- u16 t = host->spare_len;
+ u8 __iomem *s = host->spare0;
+ u16 t = host->devtype_data->spare_len;
j = (mtd->oobsize / n >> 1) << 1;
if (bfrom) {
for (i = 0; i < n - 1; i++)
- memcpy(d + i * j, s + i * t, j);
+ memcpy32_fromio(d + i * j, s + i * t, j);
/* the last section */
- memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
+ memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
} else {
for (i = 0; i < n - 1; i++)
- memcpy(&s[i * t], &d[i * j], j);
+ memcpy32_toio(&s[i * t], &d[i * j], j);
/* the last section */
- memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+ memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
}
}
@@ -751,34 +843,44 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
* perform a read/write buf operation, the saved column
* address is used to index into the full page.
*/
- host->send_addr(host, 0, page_addr == -1);
+ host->devtype_data->send_addr(host, 0, page_addr == -1);
if (mtd->writesize > 512)
/* another col addr cycle for 2k page */
- host->send_addr(host, 0, false);
+ host->devtype_data->send_addr(host, 0, false);
}
/* Write out page address, if necessary */
if (page_addr != -1) {
/* paddr_0 - p_addr_7 */
- host->send_addr(host, (page_addr & 0xff), false);
+ host->devtype_data->send_addr(host, (page_addr & 0xff), false);
if (mtd->writesize > 512) {
if (mtd->size >= 0x10000000) {
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, false);
- host->send_addr(host, (page_addr >> 16) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
} else
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, false);
- host->send_addr(host, (page_addr >> 16) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
} else
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
}
}
}
@@ -800,7 +902,35 @@ static int get_eccsize(struct mtd_info *mtd)
return 8;
}
-static void preset_v1_v2(struct mtd_info *mtd)
+static void preset_v1(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t config1 = 0;
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ host->eccsize = 1;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v2(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
@@ -809,13 +939,12 @@ static void preset_v1_v2(struct mtd_info *mtd)
if (nand_chip->ecc.mode == NAND_ECC_HW)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
- if (nfc_is_v21())
- config1 |= NFC_V2_CONFIG1_FP_INT;
+ config1 |= NFC_V2_CONFIG1_FP_INT;
- if (!cpu_is_mx21())
+ if (!host->devtype_data->irqpending_quirk)
config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
- if (nfc_is_v21() && mtd->writesize) {
+ if (mtd->writesize) {
uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
host->eccsize = get_eccsize(mtd);
@@ -834,20 +963,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
writew(0x2, NFC_V1_V2_CONFIG);
/* Blocks to be unlocked */
- if (nfc_is_v21()) {
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
- } else if (nfc_is_v1()) {
- writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
- } else
- BUG();
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
/* Unlock Block Command for given address range */
writew(0x4, NFC_V1_V2_WRPROT);
@@ -937,15 +1060,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
case NAND_CMD_RESET:
- host->preset(mtd);
- host->send_cmd(host, command, false);
+ host->devtype_data->preset(mtd);
+ host->devtype_data->send_cmd(host, command, false);
break;
case NAND_CMD_STATUS:
host->buf_start = 0;
host->status_request = true;
- host->send_cmd(host, command, true);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -958,15 +1081,17 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
command = NAND_CMD_READ0; /* only READ0 is valid */
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
if (mtd->writesize > 512)
- host->send_cmd(host, NAND_CMD_READSTART, true);
+ host->devtype_data->send_cmd(host,
+ NAND_CMD_READSTART, true);
- host->send_page(mtd, NFC_OUTPUT);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
- memcpy(host->data_buf, host->main_area0, mtd->writesize);
+ memcpy32_fromio(host->data_buf, host->main_area0,
+ mtd->writesize);
copy_spare(mtd, true);
break;
@@ -977,28 +1102,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->buf_start = column;
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_PAGEPROG:
- memcpy(host->main_area0, host->data_buf, mtd->writesize);
+ memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
copy_spare(mtd, false);
- host->send_page(mtd, NFC_INPUT);
- host->send_cmd(host, command, true);
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_READID:
- host->send_cmd(host, command, true);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
- host->send_read_id(host);
+ host->devtype_data->send_read_id(host);
host->buf_start = column;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -1032,15 +1157,191 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = mirror_pattern,
};
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+ .preset = preset_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v1,
+ .irqpending_quirk = 1,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+ .preset = preset_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v1,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .axi_offset = 0,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+ .preset = preset_v2,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v2,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v2,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_4k,
+ .select_chip = mxc_nand_select_chip_v2,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0x1e00,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0,
+ .spare_len = 64,
+ .eccbytes = 9,
+ .eccsize = 0,
+};
+
+/* v3: i.MX51, i.MX53 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+ .preset = preset_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+};
+
+#ifdef CONFIG_OF_MTD
+static const struct of_device_id mxcnd_dt_ids[] = {
+ {
+ .compatible = "fsl,imx21-nand",
+ .data = &imx21_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx27-nand",
+ .data = &imx27_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx25-nand",
+ .data = &imx25_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx51-nand",
+ .data = &imx51_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+
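+/*
+ * Fill host->pdata and host->devtype_data from the devicetree node (ECC mode,
+ * on-flash BBT, bus width, compatible match). Returns 1 when there is no DT
+ * node so the caller can fall back to platform data, or a negative error code.
+ */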
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ struct device_node *np = host->dev->of_node;
+ struct mxc_nand_platform_data *pdata = &host->pdata;
+ const struct of_device_id *of_id =
+ of_match_device(mxcnd_dt_ids, host->dev);
+ int buswidth;
+
+ if (!np)
+ return 1;
+
+ if (of_get_nand_ecc_mode(np) >= 0)
+ pdata->hw_ecc = 1;
+
+ pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ buswidth = of_get_nand_bus_width(np);
+ if (buswidth < 0)
+ return buswidth;
+
+ pdata->width = buswidth / 8;
+
+ host->devtype_data = of_id->data;
+
+ return 0;
+}
+#else
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ return 1;
+}
+#endif
+
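+/*
+ * Non-DT probe path: copy the platform data and select the devtype data
+ * based on the detected NFC version.
+ */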
+static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
+{
+ struct mxc_nand_platform_data *pdata = host->dev->platform_data;
+
+ if (!pdata)
+ return -ENODEV;
+
+ host->pdata = *pdata;
+
+ if (nfc_is_v1()) {
+ if (cpu_is_mx21())
+ host->devtype_data = &imx21_nand_devtype_data;
+ else
+ host->devtype_data = &imx27_nand_devtype_data;
+ } else if (nfc_is_v21()) {
+ host->devtype_data = &imx25_nand_devtype_data;
+ } else if (nfc_is_v3_2()) {
+ host->devtype_data = &imx51_nand_devtype_data;
+ } else
+ BUG();
+
+ return 0;
+}
+
static int __init mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
- struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
int err = 0;
- struct nand_ecclayout *oob_smallpage, *oob_largepage;
/* Allocate memory for MTD device structure and private data */
host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
@@ -1065,7 +1366,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->priv = host;
this->dev_ready = mxc_nand_dev_ready;
this->cmdfunc = mxc_nand_command;
- this->select_chip = mxc_nand_select_chip;
this->read_byte = mxc_nand_read_byte;
this->read_word = mxc_nand_read_word;
this->write_buf = mxc_nand_write_buf;
@@ -1078,7 +1378,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
goto eclk;
}
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->clk_act = 1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1095,36 +1395,26 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->main_area0 = host->base;
- if (nfc_is_v1() || nfc_is_v21()) {
- host->preset = preset_v1_v2;
- host->send_cmd = send_cmd_v1_v2;
- host->send_addr = send_addr_v1_v2;
- host->send_page = send_page_v1_v2;
- host->send_read_id = send_read_id_v1_v2;
- host->get_dev_status = get_dev_status_v1_v2;
- host->check_int = check_int_v1_v2;
- if (cpu_is_mx21())
- host->irq_control = irq_control_mx21;
- else
- host->irq_control = irq_control_v1_v2;
- }
+ err = mxcnd_probe_dt(host);
+ if (err > 0)
+ err = mxcnd_probe_pdata(host);
+ if (err < 0)
+ goto eirq;
- if (nfc_is_v21()) {
- host->regs = host->base + 0x1e00;
- host->spare0 = host->base + 0x1000;
- host->spare_len = 64;
- oob_smallpage = &nandv2_hw_eccoob_smallpage;
- oob_largepage = &nandv2_hw_eccoob_largepage;
- this->ecc.bytes = 9;
- } else if (nfc_is_v1()) {
- host->regs = host->base + 0xe00;
- host->spare0 = host->base + 0x800;
- host->spare_len = 16;
- oob_smallpage = &nandv1_hw_eccoob_smallpage;
- oob_largepage = &nandv1_hw_eccoob_largepage;
- this->ecc.bytes = 3;
- host->eccsize = 1;
- } else if (nfc_is_v3_2()) {
+ if (host->devtype_data->regs_offset)
+ host->regs = host->base + host->devtype_data->regs_offset;
+ host->spare0 = host->base + host->devtype_data->spare0_offset;
+ if (host->devtype_data->axi_offset)
+ host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+ this->ecc.bytes = host->devtype_data->eccbytes;
+ host->eccsize = host->devtype_data->eccsize;
+
+ this->select_chip = host->devtype_data->select_chip;
+ this->ecc.size = 512;
+ this->ecc.layout = host->devtype_data->ecclayout_512;
+
+ if (host->devtype_data->needs_ip) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
err = -ENODEV;
@@ -1135,42 +1425,22 @@ static int __init mxcnd_probe(struct platform_device *pdev)
err = -ENOMEM;
goto eirq;
}
- host->regs_axi = host->base + 0x1e00;
- host->spare0 = host->base + 0x1000;
- host->spare_len = 64;
- host->preset = preset_v3;
- host->send_cmd = send_cmd_v3;
- host->send_addr = send_addr_v3;
- host->send_page = send_page_v3;
- host->send_read_id = send_read_id_v3;
- host->check_int = check_int_v3;
- host->get_dev_status = get_dev_status_v3;
- host->irq_control = irq_control_v3;
- oob_smallpage = &nandv2_hw_eccoob_smallpage;
- oob_largepage = &nandv2_hw_eccoob_largepage;
- } else
- BUG();
-
- this->ecc.size = 512;
- this->ecc.layout = oob_smallpage;
+ }
- if (pdata->hw_ecc) {
+ if (host->pdata.hw_ecc) {
this->ecc.calculate = mxc_nand_calculate_ecc;
this->ecc.hwctl = mxc_nand_enable_hwecc;
- if (nfc_is_v1())
- this->ecc.correct = mxc_nand_correct_data_v1;
- else
- this->ecc.correct = mxc_nand_correct_data_v2_v3;
+ this->ecc.correct = host->devtype_data->correct_data;
this->ecc.mode = NAND_ECC_HW;
} else {
this->ecc.mode = NAND_ECC_SOFT;
}
- /* NAND bus width determines access funtions used by upper layer */
- if (pdata->width == 2)
+ /* NAND bus width determines access functions used by upper layer */
+ if (host->pdata.width == 2)
this->options |= NAND_BUSWIDTH_16;
- if (pdata->flash_bbt) {
+ if (host->pdata.flash_bbt) {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
/* update flash based bbt */
@@ -1182,28 +1452,25 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
/*
- * mask the interrupt. For i.MX21 explicitely call
- * irq_control_v1_v2 to use the mask bit. We can't call
- * disable_irq_nosync() for an interrupt we do not own yet.
+ * Use host->devtype_data->irq_control() here instead of irq_control()
+ * because we must not disable_irq_nosync without having requested the
+ * irq.
*/
- if (cpu_is_mx21())
- irq_control_v1_v2(host, 0);
- else
- host->irq_control(host, 0);
+ host->devtype_data->irq_control(host, 0);
err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
if (err)
goto eirq;
- host->irq_control(host, 0);
-
/*
- * Now that the interrupt is disabled make sure the interrupt
- * mask bit is cleared on i.MX21. Otherwise we can't read
- * the interrupt status bit on this machine.
+ * Now that we "own" the interrupt make sure the interrupt mask bit is
+ * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+ * on this machine.
*/
- if (cpu_is_mx21())
- irq_control_v1_v2(host, 1);
+ if (host->devtype_data->irqpending_quirk) {
+ disable_irq_nosync(host->irq);
+ host->devtype_data->irq_control(host, 1);
+ }
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
@@ -1212,18 +1479,12 @@ static int __init mxcnd_probe(struct platform_device *pdev)
}
/* Call preset again, with correct writesize this time */
- host->preset(mtd);
+ host->devtype_data->preset(mtd);
if (mtd->writesize == 2048)
- this->ecc.layout = oob_largepage;
- if (nfc_is_v21() && mtd->writesize == 4096)
- this->ecc.layout = &nandv2_hw_eccoob_4k;
-
- /* second phase scan */
- if (nand_scan_tail(mtd)) {
- err = -ENXIO;
- goto escan;
- }
+ this->ecc.layout = host->devtype_data->ecclayout_2k;
+ else if (mtd->writesize == 4096)
+ this->ecc.layout = host->devtype_data->ecclayout_4k;
if (this->ecc.mode == NAND_ECC_HW) {
if (nfc_is_v1())
@@ -1232,9 +1493,19 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
}
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ err = -ENXIO;
+ goto escan;
+ }
+
/* Register the partitions */
- mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
- pdata->nr_parts);
+ mtd_device_parse_register(mtd, part_probes,
+ &(struct mtd_part_parser_data){
+ .of_node = pdev->dev.of_node,
+ },
+ host->pdata.parts,
+ host->pdata.nr_parts);
platform_set_drvdata(pdev, host);
@@ -1275,6 +1546,8 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
static struct platform_driver mxcnd_driver = {
.driver = {
.name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mxcnd_dt_ids),
},
.remove = __devexit_p(mxcnd_remove),
};
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 47b19c0bb070..a11253a0fcab 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1066,15 +1066,17 @@ EXPORT_SYMBOL(nand_lock);
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
chip->read_buf(mtd, buf, mtd->writesize);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
@@ -1083,13 +1085,14 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* We need a special oob layout and handling even when OOB isn't used.
*/
static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1126,10 +1129,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*/
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1138,8 +1142,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
- chip->ecc.read_page_raw(mtd, chip, buf, page);
+ chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1154,12 +1159,14 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int stat;
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
@@ -1180,6 +1187,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
int index = 0;
+ unsigned int max_bitflips = 0;
/* Column address within the page aligned to ECC size (256bytes) */
start_step = data_offs / chip->ecc.size;
@@ -1244,12 +1252,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
stat = chip->ecc.correct(mtd, p,
&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
@@ -1257,12 +1267,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Not for syndrome calculating ECC controllers which need a special oob layout.
*/
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1271,6 +1282,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1289,12 +1301,14 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int stat;
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
@@ -1302,6 +1316,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Hardware ECC for large page chips, require OOB to be read first. For this
@@ -1311,7 +1326,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* the data area, by overwriting the NAND manufacturer bad block markings.
*/
static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1320,6 +1335,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
uint8_t *ecc_calc = chip->buffers->ecccalc;
+ unsigned int max_bitflips = 0;
/* Read the OOB area first */
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1337,12 +1353,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
@@ -1350,19 +1368,21 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
@@ -1379,10 +1399,12 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, oob, eccbytes);
stat = chip->ecc.correct(mtd, p, oob, NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
oob += eccbytes;
@@ -1397,7 +1419,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
if (i)
chip->read_buf(mtd, oob, i);
- return 0;
+ return max_bitflips;
}
/**
@@ -1459,11 +1481,9 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int chipnr, page, realpage, col, bytes, aligned;
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
struct nand_chip *chip = mtd->priv;
struct mtd_ecc_stats stats;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
- int sndcmd = 1;
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
@@ -1471,6 +1491,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
mtd->oobavail : mtd->oobsize;
uint8_t *bufpoi, *oob, *buf;
+ unsigned int max_bitflips = 0;
stats = mtd->ecc_stats;
@@ -1484,6 +1505,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf = ops->datbuf;
oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
while (1) {
bytes = min(mtd->writesize - col, readlen);
@@ -1493,21 +1515,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (realpage != chip->pagebuf || oob) {
bufpoi = aligned ? buf : chip->buffers->databuf;
- if (likely(sndcmd)) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- /* Now read the page into the buffer */
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
if (unlikely(ops->mode == MTD_OPS_RAW))
- ret = chip->ecc.read_page_raw(mtd, chip,
- bufpoi, page);
+ ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+ oob_required,
+ page);
else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
ret = chip->ecc.read_subpage(mtd, chip,
col, bytes, bufpoi);
else
ret = chip->ecc.read_page(mtd, chip, bufpoi,
- page);
+ oob_required, page);
if (ret < 0) {
if (!aligned)
/* Invalidate page cache */
@@ -1515,22 +1538,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
break;
}
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
/* Transfer not aligned data */
if (!aligned) {
if (!NAND_SUBPAGE_READ(chip) && !oob &&
!(mtd->ecc_stats.failed - stats.failed) &&
- (ops->mode != MTD_OPS_RAW))
+ (ops->mode != MTD_OPS_RAW)) {
chip->pagebuf = realpage;
- else
+ chip->pagebuf_bitflips = ret;
+ } else {
/* Invalidate page cache */
chip->pagebuf = -1;
+ }
memcpy(buf, chip->buffers->databuf + col, bytes);
}
buf += bytes;
if (unlikely(oob)) {
-
int toread = min(oobreadlen, max_oobsize);
if (toread) {
@@ -1541,13 +1567,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
}
if (!(chip->options & NAND_NO_READRDY)) {
- /*
- * Apply delay or wait for ready/busy pin. Do
- * this before the AUTOINCR check, so no
- * problems arise if a chip which does auto
- * increment is marked as NOAUTOINCR by the
- * board driver.
- */
+ /* Apply delay or wait for ready/busy pin */
if (!chip->dev_ready)
udelay(chip->chip_delay);
else
@@ -1556,6 +1576,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
} else {
memcpy(buf, chip->buffers->databuf + col, bytes);
buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagebuf_bitflips);
}
readlen -= bytes;
@@ -1575,26 +1597,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
}
-
- /*
- * Check, if the chip supports auto page increment or if we
- * have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
- sndcmd = 1;
}
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
- if (ret)
+ if (ret < 0)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
/**
@@ -1630,17 +1645,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @page: page number to read
- * @sndcmd: flag whether to issue read command or not
*/
static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
- if (sndcmd) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return sndcmd;
+ return 0;
}
/**
@@ -1649,10 +1660,9 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @page: page number to read
- * @sndcmd: flag whether to issue read command or not
*/
static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
uint8_t *buf = chip->oob_poi;
int length = mtd->oobsize;
@@ -1679,7 +1689,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
if (length > 0)
chip->read_buf(mtd, bufpoi, length);
- return 1;
+ return 0;
}
/**
@@ -1775,13 +1785,13 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int page, realpage, chipnr, sndcmd = 1;
+ int page, realpage, chipnr;
struct nand_chip *chip = mtd->priv;
struct mtd_ecc_stats stats;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
int readlen = ops->ooblen;
int len;
uint8_t *buf = ops->oobbuf;
+ int ret = 0;
pr_debug("%s: from = 0x%08Lx, len = %i\n",
__func__, (unsigned long long)from, readlen);
@@ -1817,20 +1827,18 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
while (1) {
if (ops->mode == MTD_OPS_RAW)
- sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
+ ret = chip->ecc.read_oob_raw(mtd, chip, page);
else
- sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+ ret = chip->ecc.read_oob(mtd, chip, page);
+
+ if (ret < 0)
+ break;
len = min(len, readlen);
buf = nand_transfer_oob(chip, buf, ops, len);
if (!(chip->options & NAND_NO_READRDY)) {
- /*
- * Apply delay or wait for ready/busy pin. Do this
- * before the AUTOINCR check, so no problems arise if a
- * chip which does auto increment is marked as
- * NOAUTOINCR by the board driver.
- */
+ /* Apply delay or wait for ready/busy pin */
if (!chip->dev_ready)
udelay(chip->chip_delay);
else
@@ -1851,16 +1859,12 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
}
-
- /*
- * Check, if the chip supports auto page increment or if we
- * have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
- sndcmd = 1;
}
- ops->oobretlen = ops->ooblen;
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
@@ -1919,14 +1923,16 @@ out:
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
}
/**
@@ -1934,12 +1940,13 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1973,9 +1980,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*/
static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1991,7 +1999,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < chip->ecc.total; i++)
chip->oob_poi[eccpos[i]] = ecc_calc[i];
- chip->ecc.write_page_raw(mtd, chip, buf);
+ chip->ecc.write_page_raw(mtd, chip, buf, 1);
}
/**
@@ -1999,9 +2007,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*/
static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2027,12 +2036,14 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
static void nand_write_page_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2071,21 +2082,23 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
* @mtd: MTD device structure
* @chip: NAND chip descriptor
* @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
* @cached: cached programming
* @raw: use _raw version of write_page
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int page, int cached, int raw)
+ const uint8_t *buf, int oob_required, int page,
+ int cached, int raw)
{
int status;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf);
+ chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
else
- chip->ecc.write_page(mtd, chip, buf);
+ chip->ecc.write_page(mtd, chip, buf, oob_required);
/*
* Cached programming disabled for now. Not sure if it's worth the
@@ -2118,6 +2131,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->verify_buf(mtd, buf, mtd->writesize))
return -EIO;
+
+ /* Make sure the next page prog is preceded by a status read */
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
#endif
return 0;
}
@@ -2202,6 +2218,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
int ret, subpage;
+ int oob_required = oob ? 1 : 0;
ops->retlen = 0;
if (!writelen)
@@ -2264,8 +2281,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
memset(chip->oob_poi, 0xff, mtd->oobsize);
}
- ret = chip->write_page(mtd, chip, wbuf, page, cached,
- (ops->mode == MTD_OPS_RAW));
+ ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
+ cached, (ops->mode == MTD_OPS_RAW));
if (ret)
break;
@@ -2898,8 +2915,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
*busw = NAND_BUSWIDTH_16;
chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= (NAND_NO_READRDY |
- NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+ chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
pr_info("ONFI flash detected\n");
return 1;
@@ -3076,11 +3092,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
ident_done:
- /*
- * Set chip as a default. Board drivers can override it, if necessary.
- */
- chip->options |= NAND_NO_AUTOINCR;
-
/* Try to identify manufacturer */
for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
if (nand_manuf_ids[maf_idx].id == *maf_id)
@@ -3154,10 +3165,11 @@ ident_done:
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- pr_info("NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
- nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? chip->onfi_params.model : type->name);
+ pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
+ " page size: %d, OOB size: %d\n",
+ *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
+ chip->onfi_version ? chip->onfi_params.model : type->name,
+ mtd->writesize, mtd->oobsize);
return type;
}
@@ -3329,8 +3341,13 @@ int nand_scan_tail(struct mtd_info *mtd)
if (!chip->ecc.write_oob)
chip->ecc.write_oob = nand_write_oob_syndrome;
- if (mtd->writesize >= chip->ecc.size)
+ if (mtd->writesize >= chip->ecc.size) {
+ if (!chip->ecc.strength) {
+ pr_warn("Driver must set ecc.strength when using hardware ECC\n");
+ BUG();
+ }
break;
+ }
pr_warn("%d byte HW ECC not possible on "
"%d byte page size, fallback to SW ECC\n",
chip->ecc.size, mtd->writesize);
@@ -3385,7 +3402,7 @@ int nand_scan_tail(struct mtd_info *mtd)
BUG();
}
chip->ecc.strength =
- chip->ecc.bytes*8 / fls(8*chip->ecc.size);
+ chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
break;
case NAND_ECC_NONE:
@@ -3483,7 +3500,14 @@ int nand_scan_tail(struct mtd_info *mtd)
/* propagate ecc info to mtd_info */
mtd->ecclayout = chip->ecc.layout;
- mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
+ mtd->ecc_strength = chip->ecc.strength;
+ /*
+ * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
+ * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+ * properly set.
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = mtd->ecc_strength;
/* Check, if we should skip the bad block table scan */
if (chip->options & NAND_SKIP_BBTSCAN)
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 20a112f591fe..30d1319ff065 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
buf += mtd->oobsize + mtd->writesize;
len -= mtd->writesize;
+ offs += mtd->writesize;
}
return 0;
}
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index af4fe8ca7b5e..621b70b7a159 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = {
* These are the new chips with large page size. The pagesize and the
* erasesize is determined from the extended id bytes
*/
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
+#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
/* 512 Megabit */
@@ -157,9 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = {
* writes possible, but not implemented now
*/
{"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
- NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
- BBT_AUTO_REFRESH
- },
+ NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
{NULL,}
};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 261f478f8cc3..cf0cd3146817 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -28,7 +28,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -268,7 +268,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -547,12 +546,6 @@ static char *get_partition_name(int i)
return kstrdup(buf, GFP_KERNEL);
}
-static uint64_t divide(uint64_t n, uint32_t d)
-{
- do_div(n, d);
- return n;
-}
-
/*
* Initialize the nandsim structure.
*
@@ -581,7 +574,7 @@ static int init_nandsim(struct mtd_info *mtd)
ns->geom.oobsz = mtd->oobsize;
ns->geom.secsz = mtd->erasesize;
ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
@@ -594,7 +587,7 @@ static int init_nandsim(struct mtd_info *mtd)
ns->options |= OPT_PAGE256;
}
else if (ns->geom.pgsz == 512) {
- ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
+ ns->options |= OPT_PAGE512;
if (ns->busw == 8)
ns->options |= OPT_PAGE512_8BIT;
} else if (ns->geom.pgsz == 2048) {
@@ -663,8 +656,6 @@ static int init_nandsim(struct mtd_info *mtd)
for (i = 0; nand_flash_ids[i].name != NULL; i++) {
if (second_id_byte != nand_flash_ids[i].id)
continue;
- if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
- ns->options |= OPT_AUTOINCR;
}
if (ns->busw == 16)
@@ -924,7 +915,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
if (!rptwear)
return 0;
- wear_eb_count = divide(mtd->size, mtd->erasesize);
+ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
NS_ERR("Too many erase blocks for wear reporting\n");
@@ -1936,20 +1927,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
if (ns->regs.count == ns->regs.num) {
NS_DBG("read_byte: all bytes were read\n");
- /*
- * The OPT_AUTOINCR allows to read next consecutive pages without
- * new read operation cycle.
- */
- if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
- ns->regs.count = 0;
- if (ns->regs.row + 1 < ns->geom.pgnum)
- ns->regs.row += 1;
- NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
- do_state_action(ns, ACTION_CPY);
- }
- else if (NS_STATE(ns->nxstate) == STATE_READY)
+ if (NS_STATE(ns->nxstate) == STATE_READY)
switch_state(ns);
-
}
return outb;
@@ -2203,14 +2182,7 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
ns->regs.count += len;
if (ns->regs.count == ns->regs.num) {
- if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
- ns->regs.count = 0;
- if (ns->regs.row + 1 < ns->geom.pgnum)
- ns->regs.row += 1;
- NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
- do_state_action(ns, ACTION_CPY);
- }
- else if (NS_STATE(ns->nxstate) == STATE_READY)
+ if (NS_STATE(ns->nxstate) == STATE_READY)
switch_state(ns);
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c2b0bba9d8b3..d7f681d0c9b9 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -21,6 +21,10 @@
#include <linux/io.h>
#include <linux/slab.h>
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+#include <linux/bch.h>
+#endif
+
#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
@@ -127,6 +131,11 @@ struct omap_nand_info {
} iomode;
u_char *buf;
int buf_len;
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ struct bch_control *bch;
+ struct nand_ecclayout ecclayout;
+#endif
};
/**
@@ -402,7 +411,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ goto out_copy_unmap;
init_completion(&info->comp);
@@ -421,6 +430,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
return 0;
+out_copy_unmap:
+ dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -879,7 +890,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
unsigned long timeo = jiffies;
- int status = NAND_STATUS_FAIL, state = this->state;
+ int status, state = this->state;
if (state == FL_ERASING)
timeo += (HZ * 400) / 1000;
@@ -894,6 +905,8 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
break;
cond_resched();
}
+
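+ /* read back the final status once the wait loop has finished */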
+ status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
return status;
}
@@ -925,6 +938,226 @@ static int omap_dev_ready(struct mtd_info *mtd)
return 1;
}
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+
+/**
+ * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+{
+ int nerrors;
+ unsigned int dev_width;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ /*
+ * Program GPMC to perform correction on one 512-byte sector at a time.
+ * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
+ * gives a slight (5%) performance gain (but requires additional code).
+ */
+ (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
+}
+
+/**
+ * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_correct_data_bch - Decode received data and correct errors
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ */
+static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ int i, count;
+ /* cannot correct more than 8 errors */
+ unsigned int errloc[8];
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+
+ count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
+ errloc);
+ if (count > 0) {
+ /* correct errors */
+ for (i = 0; i < count; i++) {
+ /* correct data only, not ecc bytes */
+ if (errloc[i] < 8*512)
+ data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
+ pr_debug("corrected bitflip %u\n", errloc[i]);
+ }
+ } else if (count < 0) {
+ pr_err("ecc unrecoverable error\n");
+ }
+ return count;
+}
+
+/**
+ * omap3_free_bch - Release BCH ecc resources
+ * @mtd: MTD device structure
+ */
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ if (info->bch) {
+ free_bch(info->bch);
+ info->bch = NULL;
+ }
+}
+
+/**
+ * omap3_init_bch - Initialize BCH ECC
+ * @mtd: MTD device structure
+ * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
+ */
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+ int ret, max_errors;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+#ifdef CONFIG_MTD_NAND_OMAP_BCH8
+ const int hw_errors = 8;
+#else
+ const int hw_errors = 4;
+#endif
+ info->bch = NULL;
+
+ max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+ if (max_errors != hw_errors) {
+ pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
+ max_errors, hw_errors);
+ goto fail;
+ }
+
+ /* initialize GPMC BCH engine */
+ ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
+ if (ret)
+ goto fail;
+
+ /* software bch library is only used to detect and locate errors */
+ info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
+ if (!info->bch)
+ goto fail;
+
+ info->nand.ecc.size = 512;
+ info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
+ info->nand.ecc.correct = omap3_correct_data_bch;
+ info->nand.ecc.mode = NAND_ECC_HW;
+
+ /*
+ * The number of corrected errors in an ecc block that will trigger
+ * block scrubbing defaults to the ecc strength (4 or 8).
+ * Set mtd->bitflip_threshold here to define a custom threshold.
+ */
+
+ if (max_errors == 8) {
+ info->nand.ecc.strength = 8;
+ info->nand.ecc.bytes = 13;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+ } else {
+ info->nand.ecc.strength = 4;
+ info->nand.ecc.bytes = 7;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+ }
+
+ pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
+ return 0;
+fail:
+ omap3_free_bch(mtd);
+ return -1;
+}
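The ecc.bytes values chosen above (7 for 4-bit, 13 for 8-bit correction) follow from the Galois field used by the decoder: with GF(2^13) (the m = 13 passed to init_bch()), t-bit correction needs 13*t parity bits per 512-byte step, rounded up to whole bytes. A small standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const int m = 13;                       /* GF(2^13), cf. init_bch(13, ...) */
	int t;

	for (t = 4; t <= 8; t += 4) {
		int ecc_bits  = m * t;
		int ecc_bytes = (ecc_bits + 7) / 8; /* round up to whole bytes */

		printf("t = %d -> %d ECC bytes per 512-byte step\n", t, ecc_bytes);
	}
	return 0;                               /* prints 7 and 13 */
}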
+
+/**
+ * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
+ * @mtd: MTD device structure
+ */
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+ int i, steps;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_ecclayout *layout = &info->ecclayout;
+
+ /* build oob layout */
+ steps = mtd->writesize/info->nand.ecc.size;
+ layout->eccbytes = steps*info->nand.ecc.bytes;
+
+ /* do not bother creating special oob layouts for small page devices */
+ if (mtd->oobsize < 64) {
+ pr_err("BCH ecc is not supported on small page devices\n");
+ goto fail;
+ }
+
+ /* reserve 2 bytes for bad block marker */
+ if (layout->eccbytes+2 > mtd->oobsize) {
+ pr_err("no oob layout available for oobsize %d eccbytes %u\n",
+ mtd->oobsize, layout->eccbytes);
+ goto fail;
+ }
+
+ /* put ecc bytes at oob tail */
+ for (i = 0; i < layout->eccbytes; i++)
+ layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+ info->nand.ecc.layout = layout;
+
+ if (!(info->nand.options & NAND_BUSWIDTH_16))
+ info->nand.badblock_pattern = &bb_descrip_flashbased;
+ return 0;
+fail:
+ omap3_free_bch(mtd);
+ return -1;
+}
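To make the OOB layout built above concrete: for a hypothetical device with 2 KiB pages, a 64-byte OOB area and 8-bit BCH (13 ECC bytes per 512-byte step), the ECC occupies the tail of the OOB and the free region starts after the bad-block marker. A standalone sketch of the same arithmetic:

#include <stdio.h>

int main(void)
{
	/* hypothetical device: 2 KiB pages, 64-byte OOB, 8-bit BCH (13 B/step) */
	int writesize = 2048, oobsize = 64, ecc_size = 512, ecc_bytes = 13;

	int steps    = writesize / ecc_size;      /* 4  */
	int eccbytes = steps * ecc_bytes;         /* 52 */
	int eccpos0  = oobsize - eccbytes;        /* 12: ECC sits at the OOB tail */
	int free_off = 2;                         /* skip the bad block marker */
	int free_len = oobsize - 2 - eccbytes;    /* 10 bytes left for free OOB */

	printf("steps=%d eccbytes=%d eccpos=%d..%d oobfree=%d+%d\n",
	       steps, eccbytes, eccpos0, oobsize - 1, free_off, free_len);
	return 0;
}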
+
+#else
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+ pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
+ return -1;
+}
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+ return -1;
+}
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+}
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+
static int __devinit omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
@@ -1063,6 +1296,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.ecc.hwctl = omap_enable_hwecc;
info->nand.ecc.correct = omap_correct_data;
info->nand.ecc.mode = NAND_ECC_HW;
+ } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+ err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
+ if (err) {
+ err = -EINVAL;
+ goto out_release_mem_region;
+ }
}
/* DIP switches on some boards change between 8 and 16 bit
@@ -1094,6 +1334,14 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
(offset + omap_oobinfo.eccbytes);
info->nand.ecc.layout = &omap_oobinfo;
+ } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+ /* build OOB layout for BCH ECC correction */
+ err = omap3_init_bch_tail(&info->mtd);
+ if (err) {
+ err = -EINVAL;
+ goto out_release_mem_region;
+ }
}
/* second phase scan */
@@ -1122,6 +1370,7 @@ static int omap_nand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
+ omap3_free_bch(&info->mtd);
platform_set_drvdata(pdev, NULL);
if (info->dma_ch != -1)
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 0f50ef38b87b..513dc88a05ca 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -17,6 +17,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
@@ -79,6 +81,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct nand_chip *nc;
struct orion_nand_data *board;
struct resource *res;
+ struct clk *clk;
void __iomem *io_base;
int ret = 0;
u32 val = 0;
@@ -155,6 +158,14 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mtd);
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
if (nand_scan(mtd, 1)) {
ret = -ENXIO;
goto no_dev;
@@ -184,6 +195,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nc = mtd->priv;
+ struct clk *clk;
nand_release(mtd);
@@ -191,6 +203,12 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
kfree(nc);
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
+
return 0;
}
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 974dbf8251c9..1440e51cedcc 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,6 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
chip->ecc.mode = NAND_ECC_SOFT;
/* Enable the following for a flash based bad block table */
- chip->options = NAND_NO_AUTOINCR;
chip->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 6404e6e81b10..1bcb52040422 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -23,14 +23,18 @@ struct plat_nand_data {
void __iomem *io_base;
};
+static const char *part_probe_types[] = { "cmdlinepart", NULL };
+
/*
* Probe for the NAND device.
*/
static int __devinit plat_nand_probe(struct platform_device *pdev)
{
struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct mtd_part_parser_data ppdata;
struct plat_nand_data *data;
struct resource *res;
+ const char **part_types;
int err = 0;
if (pdata->chip.nr_chips < 1) {
@@ -75,6 +79,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
data->chip.select_chip = pdata->ctrl.select_chip;
data->chip.write_buf = pdata->ctrl.write_buf;
data->chip.read_buf = pdata->ctrl.read_buf;
+ data->chip.read_byte = pdata->ctrl.read_byte;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
data->chip.bbt_options |= pdata->chip.bbt_options;
@@ -98,8 +103,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
goto out;
}
- err = mtd_device_parse_register(&data->mtd,
- pdata->chip.part_probe_types, NULL,
+ part_types = pdata->chip.part_probe_types ? : part_probe_types;
+
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
pdata->chip.partitions,
pdata->chip.nr_partitions);
@@ -140,12 +147,19 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id plat_nand_match[] = {
+ { .compatible = "gen_nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
static struct platform_driver plat_nand_driver = {
- .probe = plat_nand_probe,
- .remove = __devexit_p(plat_nand_remove),
- .driver = {
- .name = "gen_nand",
- .owner = THIS_MODULE,
+ .probe = plat_nand_probe,
+ .remove = __devexit_p(plat_nand_remove),
+ .driver = {
+ .name = "gen_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = plat_nand_match,
},
};
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index def50caa6f84..252aaefcacfa 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -682,14 +682,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
}
static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
@@ -1004,7 +1005,6 @@ KEEP_CONFIG:
chip->ecc.size = host->page_size;
chip->ecc.strength = 1;
- chip->options = NAND_NO_AUTOINCR;
chip->options |= NAND_NO_READRDY;
if (host->reg_ndcr & NDCR_DWIDTH_M)
chip->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index c2040187c813..8cb627751c9c 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -539,14 +539,11 @@ exit:
* nand_read_oob_syndrome assumes we can send column address - we can't
*/
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
- if (sndcmd) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return sndcmd;
+ return 0;
}
/*
@@ -1104,18 +1101,7 @@ static struct pci_driver r852_pci_driver = {
.driver.pm = &r852_pm_ops,
};
-static __init int r852_module_init(void)
-{
- return pci_register_driver(&r852_pci_driver);
-}
-
-static void __exit r852_module_exit(void)
-{
- pci_unregister_driver(&r852_pci_driver);
-}
-
-module_init(r852_module_init);
-module_exit(r852_module_exit);
+module_pci_driver(r852_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e9b2b260de3a..aa9b8a5e0b8f 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -344,7 +344,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
}
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -359,14 +359,14 @@ static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (flctl->hwecc_cant_correct[i])
mtd->ecc_stats.failed++;
else
- mtd->ecc_stats.corrected += 0;
+ mtd->ecc_stats.corrected += 0; /* FIXME */
}
return 0;
}
static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -881,8 +881,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
flctl->hwecc = pdata->has_hwecc;
flctl->holden = pdata->use_holden;
- nand->options = NAND_NO_AUTOINCR;
-
/* Set address of hardware control function */
/* 20 us command delay time */
nand->chip_delay = 20;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 774c3c266713..082bcdcd6bcf 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -94,17 +94,16 @@ static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
{NULL,}
};
-#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
static struct nand_flash_dev nand_xd_flash_ids[] = {
{"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
{"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
{"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
{"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
- {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
- {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
- {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
- {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
+ {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
+ {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
+ {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
+ {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
{NULL,}
};
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3ce12ef359e..7153e0d27101 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1201,7 +1201,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
}
/**
@@ -1333,7 +1334,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
}
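The onenand changes above switch from returning -EUCLEAN directly to returning the maximum number of corrected bitflips per ECC step, leaving the -EUCLEAN decision to the caller against a per-device threshold. A standalone sketch of that caller-side policy, with an illustrative errno value and hypothetical names (this is an assumption about the convention the series targets, not a copy of the MTD core code):

#include <stdio.h>

#define EUCLEAN 117   /* illustrative errno value only */

/* hypothetical caller-side policy: escalate to -EUCLEAN at the threshold */
static int map_bitflips(int max_bitflips, int bitflip_threshold)
{
	if (max_bitflips < 0)
		return max_bitflips;            /* hard error, pass through */
	return max_bitflips >= bitflip_threshold ? -EUCLEAN : 0;
}

int main(void)
{
	/* OneNAND corrects a single bit per step, so 1 is the most it reports */
	printf("%d\n", map_bitflips(1, 1));     /* -117: time to scrub */
	printf("%d\n", map_bitflips(0, 1));     /*    0: clean read     */
	return 0;
}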
/**
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 4dcc752a0c0b..738ee8dc16cd 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -52,12 +52,4 @@ config MTD_UBI_GLUEBI
work on top of UBI. Do not enable this unless you use legacy
software.
-config MTD_UBI_DEBUG
- bool "UBI debugging"
- depends on SYSFS
- select DEBUG_FS
- select KALLSYMS
- help
- This option enables UBI debugging.
-
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index c9302a5452b0..a0803ac74712 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -1,7 +1,6 @@
obj-$(CONFIG_MTD_UBI) += ubi.o
-ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
-ubi-y += misc.o
+ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
+ubi-y += misc.o debug.o
-ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/attach.c
index 12c43b44f815..bd27cbbb4066 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/attach.c
@@ -19,21 +19,21 @@
*/
/*
- * UBI scanning sub-system.
+ * UBI attaching sub-system.
*
- * This sub-system is responsible for scanning the flash media, checking UBI
- * headers and providing complete information about the UBI flash image.
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
*
- * The scanning information is represented by a &struct ubi_scan_info' object.
- * Information about found volumes is represented by &struct ubi_scan_volume
+ * The attaching information is represented by a &struct ubi_attach_info
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
* objects which are kept in volume RB-tree with root at the @volumes field.
* The RB-tree is indexed by the volume ID.
*
- * Scanned logical eraseblocks are represented by &struct ubi_scan_leb objects.
- * These objects are kept in per-volume RB-trees with the root at the
- * corresponding &struct ubi_scan_volume object. To put it differently, we keep
- * an RB-tree of per-volume objects and each of these objects is the root of
- * RB-tree of per-eraseblock objects.
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
*
* Corrupted physical eraseblocks are put to the @corr list, free physical
* eraseblocks are put to the @free list and the physical eraseblock to be
@@ -51,28 +51,29 @@
*
* 1. Corruptions caused by power cuts. These are expected corruptions and UBI
* tries to handle them gracefully, without printing too many warnings and
- * error messages. The idea is that we do not lose important data in these case
- * - we may lose only the data which was being written to the media just before
- * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to
- * handle such data losses (e.g., by using the FS journal).
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data which were being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
*
* When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
* the reason is a power cut, UBI puts this PEB to the @erase list, and all
* PEBs in the @erase list are scheduled for erasure later.
*
* 2. Unexpected corruptions which are not caused by power cuts. During
- * scanning, such PEBs are put to the @corr list and UBI preserves them.
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
* Obviously, this lessens the amount of available PEBs, and if at some point
* UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
* about such PEBs every time the MTD device is attached.
*
* However, it is difficult to reliably distinguish between these types of
- * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2
- * if the VID header is corrupted and the data area does not contain all 0xFFs,
- * and there were no bit-flips or integrity errors while reading the data area.
- * Otherwise UBI assumes corruption type 1. So the decision criteria are as
- * follows.
- * o If the data area contains only 0xFFs, there is no data, and it is safe
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area. Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ * o If the data area contains only 0xFFs, there are no data, and it is safe
* to just erase this PEB - this is corruption type 1.
* o If the data area has bit-flips or data integrity errors (ECC errors on
* NAND), it is probably a PEB which was being erased when power cut
@@ -88,11 +89,7 @@
#include <linux/random.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
-#else
-#define paranoid_check_si(ubi, si) 0
-#endif
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* Temporary variables used during scanning */
static struct ubi_ec_hdr *ech;
@@ -100,13 +97,18 @@ static struct ubi_vid_hdr *vidh;
/**
* add_to_list - add physical eraseblock to a list.
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
* @ec: erase counter of the physical eraseblock
* @to_head: if not zero, add to the head of the list
* @list: the list to add to
*
- * This function adds physical eraseblock @pnum to free, erase, or alien lists.
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
* If @to_head is not zero, PEB will be added to the head of the list, which
* basically means it will be processed first later. E.g., we add corrupted
* PEBs (corrupted due to power cuts) to the head of the erase list to make
@@ -114,65 +116,68 @@ static struct ubi_vid_hdr *vidh;
* returns zero in case of success and a negative error code in case of
* failure.
*/
-static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head,
- struct list_head *list)
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+ int lnum, int ec, int to_head, struct list_head *list)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
- if (list == &si->free) {
+ if (list == &ai->free) {
dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
- } else if (list == &si->erase) {
+ } else if (list == &ai->erase) {
dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
- } else if (list == &si->alien) {
+ } else if (list == &ai->alien) {
dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
- si->alien_peb_count += 1;
+ ai->alien_peb_count += 1;
} else
BUG();
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- seb->pnum = pnum;
- seb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->ec = ec;
if (to_head)
- list_add(&seb->u.list, list);
+ list_add(&aeb->u.list, list);
else
- list_add_tail(&seb->u.list, list);
+ list_add_tail(&aeb->u.list, list);
return 0;
}
/**
* add_corrupted - add a corrupted physical eraseblock.
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to add
* @ec: erase counter of the physical eraseblock
*
- * This function adds corrupted physical eraseblock @pnum to the 'corr' list.
- * The corruption was presumably not caused by a power cut. Returns zero in
- * case of success and a negative error code in case of failure.
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
*/
-static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- si->corr_peb_count += 1;
- seb->pnum = pnum;
- seb->ec = ec;
- list_add(&seb->u.list, &si->corr);
+ ai->corr_peb_count += 1;
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->corr);
return 0;
}
/**
* validate_vid_hdr - check volume identifier header.
* @vid_hdr: the volume identifier header to check
- * @sv: information about the volume this logical eraseblock belongs to
+ * @av: information about the volume this logical eraseblock belongs to
* @pnum: physical eraseblock number the VID header came from
*
* This function checks that data stored in @vid_hdr is consistent. Returns
@@ -184,15 +189,15 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec)
* headers of the same volume.
*/
static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
- const struct ubi_scan_volume *sv, int pnum)
+ const struct ubi_ainf_volume *av, int pnum)
{
int vol_type = vid_hdr->vol_type;
int vol_id = be32_to_cpu(vid_hdr->vol_id);
int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
int data_pad = be32_to_cpu(vid_hdr->data_pad);
- if (sv->leb_count != 0) {
- int sv_vol_type;
+ if (av->leb_count != 0) {
+ int av_vol_type;
/*
* This is not the first logical eraseblock belonging to this
@@ -200,28 +205,28 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
* to the data in previous logical eraseblock headers.
*/
- if (vol_id != sv->vol_id) {
- dbg_err("inconsistent vol_id");
+ if (vol_id != av->vol_id) {
+ ubi_err("inconsistent vol_id");
goto bad;
}
- if (sv->vol_type == UBI_STATIC_VOLUME)
- sv_vol_type = UBI_VID_STATIC;
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av_vol_type = UBI_VID_STATIC;
else
- sv_vol_type = UBI_VID_DYNAMIC;
+ av_vol_type = UBI_VID_DYNAMIC;
- if (vol_type != sv_vol_type) {
- dbg_err("inconsistent vol_type");
+ if (vol_type != av_vol_type) {
+ ubi_err("inconsistent vol_type");
goto bad;
}
- if (used_ebs != sv->used_ebs) {
- dbg_err("inconsistent used_ebs");
+ if (used_ebs != av->used_ebs) {
+ ubi_err("inconsistent used_ebs");
goto bad;
}
- if (data_pad != sv->data_pad) {
- dbg_err("inconsistent data_pad");
+ if (data_pad != av->data_pad) {
+ ubi_err("inconsistent data_pad");
goto bad;
}
}
@@ -230,74 +235,74 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
bad:
ubi_err("inconsistent VID header at PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_sv(sv);
+ ubi_dump_vid_hdr(vid_hdr);
+ ubi_dump_av(av);
return -EINVAL;
}
/**
- * add_volume - add volume to the scanning information.
- * @si: scanning information
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
* @vol_id: ID of the volume to add
* @pnum: physical eraseblock number
* @vid_hdr: volume identifier header
*
* If the volume corresponding to the @vid_hdr logical eraseblock is already
- * present in the scanning information, this function does nothing. Otherwise
- * it adds corresponding volume to the scanning information. Returns a pointer
- * to the scanning volume object in case of success and a negative error code
- * in case of failure.
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
*/
-static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
- int pnum,
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+ int vol_id, int pnum,
const struct ubi_vid_hdr *vid_hdr)
{
- struct ubi_scan_volume *sv;
- struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
parent = *p;
- sv = rb_entry(parent, struct ubi_scan_volume, rb);
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
- if (vol_id == sv->vol_id)
- return sv;
+ if (vol_id == av->vol_id)
+ return av;
- if (vol_id > sv->vol_id)
+ if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
/* The volume is absent - add it */
- sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL);
- if (!sv)
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
return ERR_PTR(-ENOMEM);
- sv->highest_lnum = sv->leb_count = 0;
- sv->vol_id = vol_id;
- sv->root = RB_ROOT;
- sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
- sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
- sv->compat = vid_hdr->compat;
- sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->root = RB_ROOT;
+ av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ av->compat = vid_hdr->compat;
+ av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
- if (vol_id > si->highest_vol_id)
- si->highest_vol_id = vol_id;
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
- rb_link_node(&sv->rb, parent, p);
- rb_insert_color(&sv->rb, &si->volumes);
- si->vols_found += 1;
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
dbg_bld("added volume %d", vol_id);
- return sv;
+ return av;
}
/**
* compare_lebs - find out which logical eraseblock is newer.
* @ubi: UBI device description object
- * @seb: first logical eraseblock to compare
+ * @aeb: first logical eraseblock to compare
* @pnum: physical eraseblock number of the second logical eraseblock to
* compare
* @vid_hdr: volume identifier header of the second logical eraseblock
@@ -306,7 +311,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* case of success this function returns a positive value, in case of failure, a
* negative error code is returned. The success return codes use the following
* bits:
- * o bit 0 is cleared: the first PEB (described by @seb) is newer than the
+ * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
* second PEB (described by @pnum and @vid_hdr);
* o bit 0 is set: the second PEB is newer;
* o bit 1 is cleared: no bit-flips were detected in the newer LEB;
@@ -314,7 +319,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* o bit 2 is cleared: the older LEB is not corrupted;
* o bit 2 is set: the older LEB is corrupted.
*/
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
+static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
void *buf;
@@ -323,7 +328,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
struct ubi_vid_hdr *vh = NULL;
unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
- if (sqnum2 == seb->sqnum) {
+ if (sqnum2 == aeb->sqnum) {
/*
* This must be a really ancient UBI image which has been
* created before sequence numbers support has been added. At
@@ -337,7 +342,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
}
/* Obviously the LEB with lower sequence counter is older */
- second_is_newer = !!(sqnum2 > seb->sqnum);
+ second_is_newer = (sqnum2 > aeb->sqnum);
/*
* Now we know which copy is newer. If the copy flag of the PEB with
@@ -356,7 +361,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
return 1;
}
} else {
- if (!seb->copy_flag) {
+ if (!aeb->copy_flag) {
/* It is not a copy, so it is newer */
dbg_bld("first PEB %d is newer, copy_flag is unset",
pnum);
@@ -367,13 +372,13 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
if (!vh)
return -ENOMEM;
- pnum = seb->pnum;
+ pnum = aeb->pnum;
err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
if (err) {
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else {
- dbg_err("VID of PEB %d header is bad, but it "
+ ubi_err("VID of PEB %d header is bad, but it "
"was OK earlier, err %d", pnum, err);
if (err > 0)
err = -EIO;
@@ -429,9 +434,9 @@ out_free_vidh:
}
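The bit-encoded success value of compare_lebs() documented above is easy to misread; a trivial standalone sketch of how a caller picks it apart (the example value is made up):

#include <stdio.h>

int main(void)
{
	int res = 0x5;   /* example value: bits 0 and 2 set */

	/* bit meanings as documented for compare_lebs() above */
	printf("second PEB is newer:     %d\n", !!(res & 1));
	printf("newer LEB has bit-flips: %d\n", !!(res & 2));
	printf("older LEB is corrupted:  %d\n", !!(res & 4));
	return 0;
}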
/**
- * ubi_scan_add_used - add physical eraseblock to the scanning information.
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: the physical eraseblock number
* @ec: erase counter
* @vid_hdr: the volume identifier header
@@ -444,14 +449,13 @@ out_free_vidh:
* to be picked, while the older one has to be dropped. This function returns
* zero in case of success and a negative error code in case of failure.
*/
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
- int bitflips)
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
int err, vol_id, lnum;
unsigned long long sqnum;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
struct rb_node **p, *parent = NULL;
vol_id = be32_to_cpu(vid_hdr->vol_id);
@@ -461,25 +465,25 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
pnum, vol_id, lnum, ec, sqnum, bitflips);
- sv = add_volume(si, vol_id, pnum, vid_hdr);
- if (IS_ERR(sv))
- return PTR_ERR(sv);
+ av = add_volume(ai, vol_id, pnum, vid_hdr);
+ if (IS_ERR(av))
+ return PTR_ERR(av);
- if (si->max_sqnum < sqnum)
- si->max_sqnum = sqnum;
+ if (ai->max_sqnum < sqnum)
+ ai->max_sqnum = sqnum;
/*
* Walk the RB-tree of logical eraseblocks of volume @vol_id to look
* if this is the first instance of this logical eraseblock or not.
*/
- p = &sv->root.rb_node;
+ p = &av->root.rb_node;
while (*p) {
int cmp_res;
parent = *p;
- seb = rb_entry(parent, struct ubi_scan_leb, u.rb);
- if (lnum != seb->lnum) {
- if (lnum < seb->lnum)
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (lnum != aeb->lnum) {
+ if (lnum < aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -491,8 +495,8 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* logical eraseblock present.
*/
- dbg_bld("this LEB already exists: PEB %d, sqnum %llu, "
- "EC %d", seb->pnum, seb->sqnum, seb->ec);
+ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+ aeb->pnum, aeb->sqnum, aeb->ec);
/*
* Make sure that the logical eraseblocks have different
@@ -507,11 +511,11 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* images, but refuse attaching old images with duplicated
* logical eraseblocks because there was an unclean reboot.
*/
- if (seb->sqnum == sqnum && sqnum != 0) {
+ if (aeb->sqnum == sqnum && sqnum != 0) {
ubi_err("two LEBs with same sequence number %llu",
sqnum);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_vid_hdr(vid_hdr);
return -EINVAL;
}
@@ -519,7 +523,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* Now we have to drop the older one and preserve the newer
* one.
*/
- cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr);
+ cmp_res = compare_lebs(ubi, aeb, pnum, vid_hdr);
if (cmp_res < 0)
return cmp_res;
@@ -528,23 +532,26 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* This logical eraseblock is newer than the one
* found earlier.
*/
- err = validate_vid_hdr(vid_hdr, sv, pnum);
+ err = validate_vid_hdr(vid_hdr, av, pnum);
if (err)
return err;
- err = add_to_list(si, seb->pnum, seb->ec, cmp_res & 4,
- &si->erase);
+ err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+ aeb->lnum, aeb->ec, cmp_res & 4,
+ &ai->erase);
if (err)
return err;
- seb->ec = ec;
- seb->pnum = pnum;
- seb->scrub = ((cmp_res & 2) || bitflips);
- seb->copy_flag = vid_hdr->copy_flag;
- seb->sqnum = sqnum;
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = ((cmp_res & 2) || bitflips);
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
- if (sv->highest_lnum == lnum)
- sv->last_data_size =
+ if (av->highest_lnum == lnum)
+ av->last_data_size =
be32_to_cpu(vid_hdr->data_size);
return 0;
@@ -553,92 +560,64 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
* This logical eraseblock is older than the one found
* previously.
*/
- return add_to_list(si, pnum, ec, cmp_res & 4,
- &si->erase);
+ return add_to_list(ai, pnum, vol_id, lnum, ec,
+ cmp_res & 4, &ai->erase);
}
}
/*
* We've met this logical eraseblock for the first time, add it to the
- * scanning information.
+ * attaching information.
*/
- err = validate_vid_hdr(vid_hdr, sv, pnum);
+ err = validate_vid_hdr(vid_hdr, av, pnum);
if (err)
return err;
- seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL);
- if (!seb)
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
return -ENOMEM;
- seb->ec = ec;
- seb->pnum = pnum;
- seb->lnum = lnum;
- seb->scrub = bitflips;
- seb->copy_flag = vid_hdr->copy_flag;
- seb->sqnum = sqnum;
-
- if (sv->highest_lnum <= lnum) {
- sv->highest_lnum = lnum;
- sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = bitflips;
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum <= lnum) {
+ av->highest_lnum = lnum;
+ av->last_data_size = be32_to_cpu(vid_hdr->data_size);
}
- sv->leb_count += 1;
- rb_link_node(&seb->u.rb, parent, p);
- rb_insert_color(&seb->u.rb, &sv->root);
+ av->leb_count += 1;
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
return 0;
}
/**
- * ubi_scan_find_sv - find volume in the scanning information.
- * @si: scanning information
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
* @vol_id: the requested volume ID
*
* This function returns a pointer to the volume description or %NULL if there
- * are no data about this volume in the scanning information.
- */
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
- int vol_id)
-{
- struct ubi_scan_volume *sv;
- struct rb_node *p = si->volumes.rb_node;
-
- while (p) {
- sv = rb_entry(p, struct ubi_scan_volume, rb);
-
- if (vol_id == sv->vol_id)
- return sv;
-
- if (vol_id > sv->vol_id)
- p = p->rb_left;
- else
- p = p->rb_right;
- }
-
- return NULL;
-}
-
-/**
- * ubi_scan_find_seb - find LEB in the volume scanning information.
- * @sv: a pointer to the volume scanning information
- * @lnum: the requested logical eraseblock
- *
- * This function returns a pointer to the scanning logical eraseblock or %NULL
- * if there are no data about it in the scanning volume information.
+ * are no data about this volume in the attaching information.
*/
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
- int lnum)
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id)
{
- struct ubi_scan_leb *seb;
- struct rb_node *p = sv->root.rb_node;
+ struct ubi_ainf_volume *av;
+ struct rb_node *p = ai->volumes.rb_node;
while (p) {
- seb = rb_entry(p, struct ubi_scan_leb, u.rb);
+ av = rb_entry(p, struct ubi_ainf_volume, rb);
- if (lnum == seb->lnum)
- return seb;
+ if (vol_id == av->vol_id)
+ return av;
- if (lnum > seb->lnum)
+ if (vol_id > av->vol_id)
p = p->rb_left;
else
p = p->rb_right;
@@ -648,34 +627,34 @@ struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
}
/**
- * ubi_scan_rm_volume - delete scanning information about a volume.
- * @si: scanning information
- * @sv: the volume scanning information to delete
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
*/
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
struct rb_node *rb;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
- dbg_bld("remove scanning information about volume %d", sv->vol_id);
+ dbg_bld("remove attaching information about volume %d", av->vol_id);
- while ((rb = rb_first(&sv->root))) {
- seb = rb_entry(rb, struct ubi_scan_leb, u.rb);
- rb_erase(&seb->u.rb, &sv->root);
- list_add_tail(&seb->u.list, &si->erase);
+ while ((rb = rb_first(&av->root))) {
+ aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, &ai->erase);
}
- rb_erase(&sv->rb, &si->volumes);
- kfree(sv);
- si->vols_found -= 1;
+ rb_erase(&av->rb, &ai->volumes);
+ kfree(av);
+ ai->vols_found -= 1;
}
/**
- * ubi_scan_erase_peb - erase a physical eraseblock.
+ * early_erase_peb - erase a physical eraseblock.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: physical eraseblock number to erase;
- * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown)
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
*
* This function erases physical eraseblock 'pnum', and writes the erase
* counter header to it. This function should only be used on UBI device
@@ -683,8 +662,8 @@ void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
- int pnum, int ec)
+static int early_erase_peb(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai, int pnum, int ec)
{
int err;
struct ubi_ec_hdr *ec_hdr;
@@ -716,9 +695,9 @@ out_free:
}
/**
- * ubi_scan_get_free_peb - get a free physical eraseblock.
+ * ubi_early_get_peb - get a free physical eraseblock.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns a free physical eraseblock. It is supposed to be
* called on the UBI initialization stages when the wear-leveling sub-system is
@@ -726,20 +705,20 @@ out_free:
* the lists, writes the EC header if it is needed, and removes it from the
* list.
*
- * This function returns scanning physical eraseblock information in case of
- * success and an error code in case of failure.
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error code in case of failure.
*/
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
{
int err = 0;
- struct ubi_scan_leb *seb, *tmp_seb;
+ struct ubi_ainf_peb *aeb, *tmp_aeb;
- if (!list_empty(&si->free)) {
- seb = list_entry(si->free.next, struct ubi_scan_leb, u.list);
- list_del(&seb->u.list);
- dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec);
- return seb;
+ if (!list_empty(&ai->free)) {
+ aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+ list_del(&aeb->u.list);
+ dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
}
/*
@@ -748,18 +727,18 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
* so forth. We don't want to take care about bad eraseblocks here -
* they'll be handled later.
*/
- list_for_each_entry_safe(seb, tmp_seb, &si->erase, u.list) {
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1);
+ err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
if (err)
continue;
- seb->ec += 1;
- list_del(&seb->u.list);
- dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec);
- return seb;
+ aeb->ec += 1;
+ list_del(&aeb->u.list);
+ dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
}
ubi_err("no free eraseblocks");
@@ -814,7 +793,7 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
ubi_err("PEB %d contains corrupted VID header, and the data does not "
"contain all 0xFF, this may be a non-UBI PEB or a severe VID "
"header corruption which requires manual inspection", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dump_vid_hdr(vid_hdr);
dbg_msg("hexdump of PEB %d offset %d, length %d",
pnum, ubi->leb_start, ubi->leb_size);
ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
@@ -827,16 +806,18 @@ out_unlock:
}
/**
- * process_eb - read, check UBI headers, and add them to scanning information.
+ * scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @pnum: the physical eraseblock number
*
- * This function returns a zero if the physical eraseblock was successfully
- * handled and a negative error code in case of failure.
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
*/
-static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum)
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int pnum)
{
long long uninitialized_var(ec);
int err, bitflips = 0, vol_id, ec_err = 0;
@@ -848,12 +829,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
if (err < 0)
return err;
else if (err) {
- /*
- * FIXME: this is actually duty of the I/O sub-system to
- * initialize this, but MTD does not provide enough
- * information.
- */
- si->bad_peb_count += 1;
+ ai->bad_peb_count += 1;
return 0;
}
@@ -867,13 +843,13 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
bitflips = 1;
break;
case UBI_IO_FF:
- si->empty_peb_count += 1;
- return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 0,
- &si->erase);
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 0, &ai->erase);
case UBI_IO_FF_BITFLIPS:
- si->empty_peb_count += 1;
- return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 1,
- &si->erase);
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 1, &ai->erase);
case UBI_IO_BAD_HDR_EBADMSG:
case UBI_IO_BAD_HDR:
/*
@@ -882,7 +858,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
* moved and EC be re-created.
*/
ec_err = err;
- ec = UBI_SCAN_UNKNOWN_EC;
+ ec = UBI_UNKNOWN;
bitflips = 1;
break;
default:
@@ -911,7 +887,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
*/
ubi_err("erase counter overflow, max is %d",
UBI_MAX_ERASECOUNTER);
- ubi_dbg_dump_ec_hdr(ech);
+ ubi_dump_ec_hdr(ech);
return -EINVAL;
}
@@ -933,7 +909,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
ubi->image_seq != image_seq) {
ubi_err("bad image sequence number %d in PEB %d, "
"expected %d", image_seq, pnum, ubi->image_seq);
- ubi_dbg_dump_ec_hdr(ech);
+ ubi_dump_ec_hdr(ech);
return -EINVAL;
}
}
@@ -957,7 +933,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
* PEB, but it is not marked as bad yet. This may also
* be a result of power cut during erasure.
*/
- si->maybe_bad_peb_count += 1;
+ ai->maybe_bad_peb_count += 1;
case UBI_IO_BAD_HDR:
if (ec_err)
/*
@@ -984,23 +960,27 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
return err;
else if (!err)
/* This corruption is caused by a power cut */
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
else
/* This is an unexpected corruption */
- err = add_corrupted(si, pnum, ec);
+ err = add_corrupted(ai, pnum, ec);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF_BITFLIPS:
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ ec, 1, &ai->erase);
if (err)
return err;
goto adjust_mean_ec;
case UBI_IO_FF:
if (ec_err)
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
else
- err = add_to_list(si, pnum, ec, 0, &si->free);
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 0, &ai->free);
if (err)
return err;
goto adjust_mean_ec;
@@ -1019,7 +999,8 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
case UBI_COMPAT_DELETE:
ubi_msg("\"delete\" compatible internal volume %d:%d"
" found, will remove it", vol_id, lnum);
- err = add_to_list(si, pnum, ec, 1, &si->erase);
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 1, &ai->erase);
if (err)
return err;
return 0;
@@ -1034,7 +1015,8 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
case UBI_COMPAT_PRESERVE:
ubi_msg("\"preserve\" compatible internal volume %d:%d"
" found", vol_id, lnum);
- err = add_to_list(si, pnum, ec, 0, &si->alien);
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 0, &ai->alien);
if (err)
return err;
return 0;
@@ -1049,40 +1031,40 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
if (ec_err)
ubi_warn("valid VID header but corrupted EC header at PEB %d",
pnum);
- err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips);
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
if (err)
return err;
adjust_mean_ec:
if (!ec_err) {
- si->ec_sum += ec;
- si->ec_count += 1;
- if (ec > si->max_ec)
- si->max_ec = ec;
- if (ec < si->min_ec)
- si->min_ec = ec;
+ ai->ec_sum += ec;
+ ai->ec_count += 1;
+ if (ec > ai->max_ec)
+ ai->max_ec = ec;
+ if (ec < ai->min_ec)
+ ai->min_ec = ec;
}
return 0;
}
/**
- * check_what_we_have - check what PEB were found by scanning.
+ * late_analysis - analyze the overall situation with PEBs.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
- * This is a helper function which takes a look what PEBs were found by
- * scanning, and decides whether the flash is empty and should be formatted and
- * whether there are too many corrupted PEBs and we should not attach this
- * MTD device. Returns zero if we should proceed with attaching the MTD device,
- * and %-EINVAL if we should not.
+ * This is a helper function which takes a look at what PEBs we have after we
+ * gather information about all of them ("ai" is complete). It decides whether
+ * the flash is empty and should be formatted or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
*/
-static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
int max_corr, peb_count;
- peb_count = ubi->peb_count - si->bad_peb_count - si->alien_peb_count;
+ peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
max_corr = peb_count / 20 ?: 8;
/*
@@ -1090,25 +1072,25 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
* unclean reboots. However, many of them may indicate some problems
* with the flash HW or driver.
*/
- if (si->corr_peb_count) {
+ if (ai->corr_peb_count) {
ubi_err("%d PEBs are corrupted and preserved",
- si->corr_peb_count);
+ ai->corr_peb_count);
printk(KERN_ERR "Corrupted PEBs are:");
- list_for_each_entry(seb, &si->corr, u.list)
- printk(KERN_CONT " %d", seb->pnum);
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ printk(KERN_CONT " %d", aeb->pnum);
printk(KERN_CONT "\n");
/*
* If too many PEBs are corrupted, we refuse attaching,
* otherwise, only print a warning.
*/
- if (si->corr_peb_count >= max_corr) {
+ if (ai->corr_peb_count >= max_corr) {
ubi_err("too many corrupted PEBs, refusing");
return -EINVAL;
}
}
- if (si->empty_peb_count + si->maybe_bad_peb_count == peb_count) {
+ if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
/*
* All PEBs are empty, or almost all - a couple PEBs look like
* they may be bad PEBs which were not marked as bad yet.
@@ -1124,8 +1106,8 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
* 2. Flash contains non-UBI data and we do not want to format
* it and destroy possibly important information.
*/
- if (si->maybe_bad_peb_count <= 2) {
- si->is_empty = 1;
+ if (ai->maybe_bad_peb_count <= 2) {
+ ai->is_empty = 1;
ubi_msg("empty MTD device detected");
get_random_bytes(&ubi->image_seq,
sizeof(ubi->image_seq));
@@ -1141,40 +1123,41 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si)
}
/**
- * ubi_scan - scan an MTD device.
+ * scan_all - scan entire MTD device.
* @ubi: UBI device description object
*
* This function does full scanning of an MTD device and returns complete
- * information about it. In case of failure, an error code is returned.
+ * information about it in the form of a "struct ubi_attach_info" object. In case
+ * of failure, an error code is returned.
*/
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
+static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
{
int err, pnum;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb;
- struct ubi_scan_info *si;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+ struct ubi_attach_info *ai;
- si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL);
- if (!si)
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&si->corr);
- INIT_LIST_HEAD(&si->free);
- INIT_LIST_HEAD(&si->erase);
- INIT_LIST_HEAD(&si->alien);
- si->volumes = RB_ROOT;
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
err = -ENOMEM;
- si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab",
- sizeof(struct ubi_scan_leb),
- 0, 0, NULL);
- if (!si->scan_leb_slab)
- goto out_si;
+ ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache)
+ goto out_ai;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out_si;
+ goto out_ai;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
@@ -1184,7 +1167,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = process_eb(ubi, si, pnum);
+ err = scan_peb(ubi, ai, pnum);
if (err < 0)
goto out_vidh;
}
@@ -1192,10 +1175,10 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
dbg_msg("scanning is finished");
/* Calculate mean erase counter */
- if (si->ec_count)
- si->mean_ec = div_u64(si->ec_sum, si->ec_count);
+ if (ai->ec_count)
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
- err = check_what_we_have(ubi, si);
+ err = late_analysis(ubi, ai);
if (err)
goto out_vidh;
@@ -1203,55 +1186,102 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
* In case of unknown erase counter we use the mean erase counter
* value.
*/
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
}
- list_for_each_entry(seb, &si->free, u.list) {
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
}
- list_for_each_entry(seb, &si->corr, u.list)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- list_for_each_entry(seb, &si->erase, u.list)
- if (seb->ec == UBI_SCAN_UNKNOWN_EC)
- seb->ec = si->mean_ec;
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
- err = paranoid_check_si(ubi, si);
+ err = self_check_ai(ubi, ai);
if (err)
goto out_vidh;
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
- return si;
+ return ai;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
-out_si:
- ubi_scan_destroy_si(si);
+out_ai:
+ ubi_destroy_ai(ai);
return ERR_PTR(err);
}
/**
- * destroy_sv - free the scanning volume information
- * @sv: scanning volume information
- * @si: scanning information
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
*
- * This function destroys the volume RB-tree (@sv->root) and the scanning
- * volume information.
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
*/
-static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
+int ubi_attach(struct ubi_device *ubi)
{
- struct ubi_scan_leb *seb;
- struct rb_node *this = sv->root.rb_node;
+ int err;
+ struct ubi_attach_info *ai;
+
+ ai = scan_all(ubi);
+ if (IS_ERR(ai))
+ return PTR_ERR(ai);
+
+ ubi->bad_peb_count = ai->bad_peb_count;
+ ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+ ubi->corr_peb_count = ai->corr_peb_count;
+ ubi->max_ec = ai->max_ec;
+ ubi->mean_ec = ai->mean_ec;
+ ubi_msg("max. sequence number: %llu", ai->max_sqnum);
+
+ err = ubi_read_volume_table(ubi, ai);
+ if (err)
+ goto out_ai;
+
+ err = ubi_wl_init(ubi, ai);
+ if (err)
+ goto out_vtbl;
+
+ err = ubi_eba_init(ubi, ai);
+ if (err)
+ goto out_wl;
+
+ ubi_destroy_ai(ai);
+ return 0;
+
+out_wl:
+ ubi_wl_close(ubi);
+out_vtbl:
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+out_ai:
+ ubi_destroy_ai(ai);
+ return err;
+}
+
+/**
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *this = av->root.rb_node;
while (this) {
if (this->rb_left)
@@ -1259,224 +1289,222 @@ static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
else if (this->rb_right)
this = this->rb_right;
else {
- seb = rb_entry(this, struct ubi_scan_leb, u.rb);
+ aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
this = rb_parent(this);
if (this) {
- if (this->rb_left == &seb->u.rb)
+ if (this->rb_left == &aeb->u.rb)
this->rb_left = NULL;
else
this->rb_right = NULL;
}
- kmem_cache_free(si->scan_leb_slab, seb);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
}
}
- kfree(sv);
+ kfree(av);
}
/**
- * ubi_scan_destroy_si - destroy scanning information.
- * @si: scanning information
+ * ubi_destroy_ai - destroy attaching information.
+ * @ai: attaching information
*/
-void ubi_scan_destroy_si(struct ubi_scan_info *si)
+void ubi_destroy_ai(struct ubi_attach_info *ai)
{
- struct ubi_scan_leb *seb, *seb_tmp;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_peb *aeb, *aeb_tmp;
+ struct ubi_ainf_volume *av;
struct rb_node *rb;
- list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
}
- list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
}
- list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
}
- list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) {
- list_del(&seb->u.list);
- kmem_cache_free(si->scan_leb_slab, seb);
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
}
/* Destroy the volume RB-tree */
- rb = si->volumes.rb_node;
+ rb = ai->volumes.rb_node;
while (rb) {
if (rb->rb_left)
rb = rb->rb_left;
else if (rb->rb_right)
rb = rb->rb_right;
else {
- sv = rb_entry(rb, struct ubi_scan_volume, rb);
+ av = rb_entry(rb, struct ubi_ainf_volume, rb);
rb = rb_parent(rb);
if (rb) {
- if (rb->rb_left == &sv->rb)
+ if (rb->rb_left == &av->rb)
rb->rb_left = NULL;
else
rb->rb_right = NULL;
}
- destroy_sv(si, sv);
+ destroy_av(ai, av);
}
}
- if (si->scan_leb_slab)
- kmem_cache_destroy(si->scan_leb_slab);
+ if (ai->aeb_slab_cache)
+ kmem_cache_destroy(ai->aeb_slab_cache);
- kfree(si);
+ kfree(ai);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_si - check the scanning information.
+ * self_check_ai - check the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
- * This function returns zero if the scanning information is all right, and a
+ * This function returns zero if the attaching information is all right, and a
* negative error code if not or if an error occurred.
*/
-static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int pnum, err, vols_found = 0;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *last_seb;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *last_aeb;
uint8_t *buf;
if (!ubi->dbg->chk_gen)
return 0;
/*
- * At first, check that scanning information is OK.
+ * At first, check that attaching information is OK.
*/
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
int leb_count = 0;
cond_resched();
vols_found += 1;
- if (si->is_empty) {
+ if (ai->is_empty) {
ubi_err("bad is_empty flag");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id < 0 || sv->highest_lnum < 0 ||
- sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 ||
- sv->data_pad < 0 || sv->last_data_size < 0) {
+ if (av->vol_id < 0 || av->highest_lnum < 0 ||
+ av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+ av->data_pad < 0 || av->last_data_size < 0) {
ubi_err("negative values");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id >= UBI_MAX_VOLUMES &&
- sv->vol_id < UBI_INTERNAL_VOL_START) {
+ if (av->vol_id >= UBI_MAX_VOLUMES &&
+ av->vol_id < UBI_INTERNAL_VOL_START) {
ubi_err("bad vol_id");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->vol_id > si->highest_vol_id) {
+ if (av->vol_id > ai->highest_vol_id) {
ubi_err("highest_vol_id is %d, but vol_id %d is there",
- si->highest_vol_id, sv->vol_id);
+ ai->highest_vol_id, av->vol_id);
goto out;
}
- if (sv->vol_type != UBI_DYNAMIC_VOLUME &&
- sv->vol_type != UBI_STATIC_VOLUME) {
+ if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+ av->vol_type != UBI_STATIC_VOLUME) {
ubi_err("bad vol_type");
- goto bad_sv;
+ goto bad_av;
}
- if (sv->data_pad > ubi->leb_size / 2) {
+ if (av->data_pad > ubi->leb_size / 2) {
ubi_err("bad data_pad");
- goto bad_sv;
+ goto bad_av;
}
- last_seb = NULL;
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
- last_seb = seb;
+ last_aeb = aeb;
leb_count += 1;
- if (seb->pnum < 0 || seb->ec < 0) {
+ if (aeb->pnum < 0 || aeb->ec < 0) {
ubi_err("negative values");
- goto bad_seb;
+ goto bad_aeb;
}
- if (seb->ec < si->min_ec) {
- ubi_err("bad si->min_ec (%d), %d found",
- si->min_ec, seb->ec);
- goto bad_seb;
+ if (aeb->ec < ai->min_ec) {
+ ubi_err("bad ai->min_ec (%d), %d found",
+ ai->min_ec, aeb->ec);
+ goto bad_aeb;
}
- if (seb->ec > si->max_ec) {
- ubi_err("bad si->max_ec (%d), %d found",
- si->max_ec, seb->ec);
- goto bad_seb;
+ if (aeb->ec > ai->max_ec) {
+ ubi_err("bad ai->max_ec (%d), %d found",
+ ai->max_ec, aeb->ec);
+ goto bad_aeb;
}
- if (seb->pnum >= ubi->peb_count) {
+ if (aeb->pnum >= ubi->peb_count) {
ubi_err("too high PEB number %d, total PEBs %d",
- seb->pnum, ubi->peb_count);
- goto bad_seb;
+ aeb->pnum, ubi->peb_count);
+ goto bad_aeb;
}
- if (sv->vol_type == UBI_STATIC_VOLUME) {
- if (seb->lnum >= sv->used_ebs) {
+ if (av->vol_type == UBI_STATIC_VOLUME) {
+ if (aeb->lnum >= av->used_ebs) {
ubi_err("bad lnum or used_ebs");
- goto bad_seb;
+ goto bad_aeb;
}
} else {
- if (sv->used_ebs != 0) {
+ if (av->used_ebs != 0) {
ubi_err("non-zero used_ebs");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (seb->lnum > sv->highest_lnum) {
+ if (aeb->lnum > av->highest_lnum) {
ubi_err("incorrect highest_lnum or lnum");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (sv->leb_count != leb_count) {
+ if (av->leb_count != leb_count) {
ubi_err("bad leb_count, %d objects in the tree",
leb_count);
- goto bad_sv;
+ goto bad_av;
}
- if (!last_seb)
+ if (!last_aeb)
continue;
- seb = last_seb;
+ aeb = last_aeb;
- if (seb->lnum != sv->highest_lnum) {
+ if (aeb->lnum != av->highest_lnum) {
ubi_err("bad highest_lnum");
- goto bad_seb;
+ goto bad_aeb;
}
}
- if (vols_found != si->vols_found) {
- ubi_err("bad si->vols_found %d, should be %d",
- si->vols_found, vols_found);
+ if (vols_found != ai->vols_found) {
+ ubi_err("bad ai->vols_found %d, should be %d",
+ ai->vols_found, vols_found);
goto out;
}
- /* Check that scanning information is correct */
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- last_seb = NULL;
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ /* Check that attaching information is correct */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
int vol_type;
cond_resched();
- last_seb = seb;
+ last_aeb = aeb;
- err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1);
+ err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err("VID header is not OK (%d)", err);
if (err > 0)
@@ -1486,52 +1514,52 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
- if (sv->vol_type != vol_type) {
+ if (av->vol_type != vol_type) {
ubi_err("bad vol_type");
goto bad_vid_hdr;
}
- if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
- ubi_err("bad sqnum %llu", seb->sqnum);
+ if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+ ubi_err("bad sqnum %llu", aeb->sqnum);
goto bad_vid_hdr;
}
- if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
- ubi_err("bad vol_id %d", sv->vol_id);
+ if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+ ubi_err("bad vol_id %d", av->vol_id);
goto bad_vid_hdr;
}
- if (sv->compat != vidh->compat) {
+ if (av->compat != vidh->compat) {
ubi_err("bad compat %d", vidh->compat);
goto bad_vid_hdr;
}
- if (seb->lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad lnum %d", seb->lnum);
+ if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad lnum %d", aeb->lnum);
goto bad_vid_hdr;
}
- if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
- ubi_err("bad used_ebs %d", sv->used_ebs);
+ if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+ ubi_err("bad used_ebs %d", av->used_ebs);
goto bad_vid_hdr;
}
- if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
- ubi_err("bad data_pad %d", sv->data_pad);
+ if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+ ubi_err("bad data_pad %d", av->data_pad);
goto bad_vid_hdr;
}
}
- if (!last_seb)
+ if (!last_aeb)
continue;
- if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
- ubi_err("bad highest_lnum %d", sv->highest_lnum);
+ if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err("bad highest_lnum %d", av->highest_lnum);
goto bad_vid_hdr;
}
- if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
- ubi_err("bad last_data_size %d", sv->last_data_size);
+ if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+ ubi_err("bad last_data_size %d", av->last_data_size);
goto bad_vid_hdr;
}
}
@@ -1553,21 +1581,21 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
buf[pnum] = 1;
}
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb)
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb)
- buf[seb->pnum] = 1;
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->free, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->free, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->corr, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->erase, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ buf[aeb->pnum] = 1;
- list_for_each_entry(seb, &si->alien, u.list)
- buf[seb->pnum] = 1;
+ list_for_each_entry(aeb, &ai->alien, u.list)
+ buf[aeb->pnum] = 1;
err = 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++)
@@ -1581,25 +1609,23 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
goto out;
return 0;
-bad_seb:
- ubi_err("bad scanning information about LEB %d", seb->lnum);
- ubi_dbg_dump_seb(seb, 0);
- ubi_dbg_dump_sv(sv);
+bad_aeb:
+ ubi_err("bad attaching information about LEB %d", aeb->lnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_av(av);
goto out;
-bad_sv:
- ubi_err("bad scanning information about volume %d", sv->vol_id);
- ubi_dbg_dump_sv(sv);
+bad_av:
+ ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
goto out;
bad_vid_hdr:
- ubi_err("bad scanning information about volume %d", sv->vol_id);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vid_hdr(vidh);
+ ubi_err("bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ ubi_dump_vid_hdr(vidh);
out:
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
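
The new ubi_attach() above strings the attach steps together with the usual kernel goto-unwind idiom: each step that can fail jumps to a label that tears down exactly what was set up before it, and the scratch attaching information is freed on both the success and the failure paths. A minimal user-space sketch of that pattern, with hypothetical step/undo helpers standing in for the volume-table, WL and EBA initialisation:

    #include <stdlib.h>

    struct info { int dummy; };              /* stands in for ubi_attach_info */

    static struct info *scan(void)       { return calloc(1, sizeof(struct info)); }
    static int step_vtbl(struct info *i) { (void)i; return 0; }
    static int step_wl(struct info *i)   { (void)i; return 0; }
    static int step_eba(struct info *i)  { (void)i; return 0; }
    static void undo_wl(void)            { }
    static void undo_vtbl(void)          { }

    static int attach(void)
    {
            struct info *info;
            int err;

            info = scan();
            if (!info)
                    return -1;

            err = step_vtbl(info);
            if (err)
                    goto out_info;
            err = step_wl(info);
            if (err)
                    goto out_vtbl;
            err = step_eba(info);
            if (err)
                    goto out_wl;

            free(info);             /* success: scratch info is no longer needed */
            return 0;

    out_wl:                         /* unwind in reverse order of construction */
            undo_wl();
    out_vtbl:
            undo_vtbl();
    out_info:
            free(info);
            return err;
    }

    int main(void) { return attach(); }
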
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 0fde9fc7d2e5..2c5ed5ca9c33 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,10 +27,6 @@
* module load parameters or the kernel boot parameters. If MTD devices were
not specified, UBI does not attach any MTD device, but it is possible to do
* later using the "UBI control device".
- *
- * At the moment we only attach UBI devices by scanning, which will become a
- * bottleneck when flashes reach certain large size. Then one may improve UBI
- * and add other methods, although it does not seem to be easy to do.
*/
#include <linux/err.h>
@@ -554,10 +550,10 @@ static void uif_close(struct ubi_device *ubi)
}
/**
- * free_internal_volumes - free internal volumes.
+ * ubi_free_internal_volumes - free internal volumes.
* @ubi: UBI device description object
*/
-static void free_internal_volumes(struct ubi_device *ubi)
+void ubi_free_internal_volumes(struct ubi_device *ubi)
{
int i;
@@ -569,59 +565,6 @@ static void free_internal_volumes(struct ubi_device *ubi)
}
/**
- * attach_by_scanning - attach an MTD device using scanning method.
- * @ubi: UBI device descriptor
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- *
- * Note, currently this is the only method to attach UBI devices. Hopefully in
- * the future we'll have more scalable attaching methods and avoid full media
- * scanning. But even in this case scanning will be needed as a fall-back
- * attaching method if there are some on-flash table corruptions.
- */
-static int attach_by_scanning(struct ubi_device *ubi)
-{
- int err;
- struct ubi_scan_info *si;
-
- si = ubi_scan(ubi);
- if (IS_ERR(si))
- return PTR_ERR(si);
-
- ubi->bad_peb_count = si->bad_peb_count;
- ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
- ubi->corr_peb_count = si->corr_peb_count;
- ubi->max_ec = si->max_ec;
- ubi->mean_ec = si->mean_ec;
- ubi_msg("max. sequence number: %llu", si->max_sqnum);
-
- err = ubi_read_volume_table(ubi, si);
- if (err)
- goto out_si;
-
- err = ubi_wl_init_scan(ubi, si);
- if (err)
- goto out_vtbl;
-
- err = ubi_eba_init_scan(ubi, si);
- if (err)
- goto out_wl;
-
- ubi_scan_destroy_si(si);
- return 0;
-
-out_wl:
- ubi_wl_close(ubi);
-out_vtbl:
- free_internal_volumes(ubi);
- vfree(ubi->vtbl);
-out_si:
- ubi_scan_destroy_si(si);
- return err;
-}
-
-/**
* io_init - initialize I/O sub-system for a given UBI device.
* @ubi: UBI device description object
*
@@ -790,11 +733,11 @@ static int io_init(struct ubi_device *ubi)
ubi_msg("data offset: %d", ubi->leb_start);
/*
- * Note, ideally, we have to initialize ubi->bad_peb_count here. But
+ * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
* over all physical eraseblocks and invoke mtd->block_is_bad() for
- * each physical eraseblock. So, we skip ubi->bad_peb_count
- * uninitialized and initialize it after scanning.
+ * each physical eraseblock. So, we leave @ubi->bad_peb_count
+ * uninitialized so far.
*/
return 0;
@@ -805,7 +748,7 @@ static int io_init(struct ubi_device *ubi)
* @ubi: UBI device description object
* @vol_id: ID of the volume to re-size
*
- * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
* the volume table to the largest possible size. See comments in ubi-header.h
* for more description of the flag. Returns zero in case of success and a
* negative error code in case of failure.
@@ -881,7 +824,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
- dbg_err("mtd%d is already attached to ubi%d",
+ ubi_err("mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -907,7 +850,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- dbg_err("only %d UBI devices may be created",
+ ubi_err("only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
@@ -917,7 +860,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- dbg_err("ubi%d already exists", ubi_num);
+ ubi_err("ubi%d already exists", ubi_num);
return -EEXIST;
}
}
@@ -937,7 +880,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
spin_lock_init(&ubi->volumes_lock);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
- dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb));
+ dbg_msg("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
err = io_init(ubi);
@@ -953,9 +896,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (err)
goto out_free;
- err = attach_by_scanning(ubi);
+ err = ubi_attach(ubi);
if (err) {
- dbg_err("failed to attach by scanning, error %d", err);
+ ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
goto out_debugging;
}
@@ -1020,7 +963,7 @@ out_uif:
uif_close(ubi);
out_detach:
ubi_wl_close(ubi);
- free_internal_volumes(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
out_debugging:
ubi_debugging_exit_dev(ubi);
@@ -1092,7 +1035,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
ubi_wl_close(ubi);
- free_internal_volumes(ubi);
+ ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
ubi_debugging_exit_dev(ubi);
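
The build.c hunks above switch a number of dbg_err() calls to ubi_err(), so these complaints are now printed in every build rather than only when UBI debugging is compiled in. The real ubi_err() definition lives in ubi.h and is not part of this patch; the snippet below is only a hedged sketch of the printk-wrapper idiom such a macro typically follows, using a made-up example_err() name:

    #include <linux/kernel.h>
    #include <linux/errno.h>

    /* Hypothetical error macro in the ubi_err() style: fixed prefix, current
     * function name, then the caller-supplied format. Not the ubi.h definition. */
    #define example_err(fmt, ...) \
            pr_err("example error: %s: " fmt "\n", __func__, ##__VA_ARGS__)

    static int example_check_limit(int requested, int max)
    {
            if (requested > max) {
                    example_err("only %d devices may be created", max);
                    return -ENFILE;
            }
            return 0;
    }
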
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index ad76592fb2f4..acec85deb6af 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -63,7 +63,7 @@ static int get_exclusive(struct ubi_volume_desc *desc)
users = vol->readers + vol->writers + vol->exclusive;
ubi_assert(users > 0);
if (users > 1) {
- dbg_err("%d users for volume %d", users, vol->vol_id);
+ ubi_err("%d users for volume %d", users, vol->vol_id);
err = -EBUSY;
} else {
vol->readers = vol->writers = 0;
@@ -159,7 +159,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
if (vol->updating) {
/* Update is in progress, seeking is prohibited */
- dbg_err("updating");
+ ubi_err("updating");
return -EBUSY;
}
@@ -178,7 +178,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
}
if (new_offset < 0 || new_offset > vol->used_bytes) {
- dbg_err("bad seek %lld", new_offset);
+ ubi_err("bad seek %lld", new_offset);
return -EINVAL;
}
@@ -216,11 +216,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
count, *offp, vol->vol_id);
if (vol->updating) {
- dbg_err("updating");
+ ubi_err("updating");
return -EBUSY;
}
if (vol->upd_marker) {
- dbg_err("damaged volume, update marker is set");
+ ubi_err("damaged volume, update marker is set");
return -EBADF;
}
if (*offp == vol->used_bytes || count == 0)
@@ -300,7 +300,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
if (off & (ubi->min_io_size - 1)) {
- dbg_err("unaligned position");
+ ubi_err("unaligned position");
return -EINVAL;
}
@@ -309,7 +309,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
/* We can write only in fractions of the minimum I/O unit */
if (count & (ubi->min_io_size - 1)) {
- dbg_err("unaligned write length");
+ ubi_err("unaligned write length");
return -EINVAL;
}
@@ -334,8 +334,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
break;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
if (err)
break;
@@ -477,9 +476,6 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
req.bytes < 0 || req.lnum >= vol->usable_leb_size)
break;
- if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
- req.dtype != UBI_UNKNOWN)
- break;
err = get_exclusive(desc);
if (err < 0)
@@ -518,7 +514,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
if (err)
break;
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
break;
}
@@ -532,7 +528,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
err = -EFAULT;
break;
}
- err = ubi_leb_map(desc, req.lnum, req.dtype);
+ err = ubi_leb_map(desc, req.lnum);
break;
}
@@ -647,8 +643,8 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
return 0;
bad:
- dbg_err("bad volume creation request");
- ubi_dbg_dump_mkvol_req(req);
+ ubi_err("bad volume creation request");
+ ubi_dump_mkvol_req(req);
return err;
}
@@ -713,12 +709,12 @@ static int rename_volumes(struct ubi_device *ubi,
for (i = 0; i < req->count - 1; i++) {
for (n = i + 1; n < req->count; n++) {
if (req->ents[i].vol_id == req->ents[n].vol_id) {
- dbg_err("duplicated volume id %d",
+ ubi_err("duplicated volume id %d",
req->ents[i].vol_id);
return -EINVAL;
}
if (!strcmp(req->ents[i].name, req->ents[n].name)) {
- dbg_err("duplicated volume name \"%s\"",
+ ubi_err("duplicated volume name \"%s\"",
req->ents[i].name);
return -EINVAL;
}
@@ -741,7 +737,7 @@ static int rename_volumes(struct ubi_device *ubi,
re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
if (IS_ERR(re->desc)) {
err = PTR_ERR(re->desc);
- dbg_err("cannot open volume %d, error %d", vol_id, err);
+ ubi_err("cannot open volume %d, error %d", vol_id, err);
kfree(re);
goto out_free;
}
@@ -800,7 +796,7 @@ static int rename_volumes(struct ubi_device *ubi,
continue;
/* The volume exists but busy, or an error occurred */
- dbg_err("cannot open volume \"%s\", error %d",
+ ubi_err("cannot open volume \"%s\", error %d",
re->new_name, err);
goto out_free;
}
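
With the dtype argument gone, the cdev ioctl paths above call ubi_leb_map() and ubi_eba_write_leb() with only the volume, LEB number, buffer, offset and length. For an in-kernel user of the UBI interface, a call site would look roughly like the hedged sketch below (hypothetical helper name; assumes the post-patch ubi_leb_map()/ubi_leb_write() prototypes):

    #include <linux/err.h>
    #include <linux/mtd/ubi.h>

    /* Hypothetical helper: map LEB 0 of a volume and write one buffer to it. */
    static int example_write_leb0(int ubi_num, int vol_id,
                                  const void *buf, int len)
    {
            struct ubi_volume_desc *desc;
            int err;

            desc = ubi_open_volume(ubi_num, vol_id, UBI_READWRITE);
            if (IS_ERR(desc))
                    return PTR_ERR(desc);

            err = ubi_leb_map(desc, 0);     /* note: no data-type hint any more */
            if (err)
                    goto out;

            err = ubi_leb_write(desc, 0, buf, 0, len);
    out:
            ubi_close_volume(desc);
            return err;
    }
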
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 61af9bb560ab..7c1380305219 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -18,24 +18,49 @@
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
-/*
- * Here we keep all the UBI debugging stuff which should normally be disabled
- * and compiled-out, but it is extremely helpful when hunting bugs or doing big
- * changes.
- */
-
-#ifdef CONFIG_MTD_UBI_DEBUG
-
#include "ubi.h"
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+
+/**
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ int err;
+ size_t read;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return;
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, "
+ "read %zd bytes", err, len, pnum, offset, read);
+ goto out;
+ }
+
+ ubi_msg("dumping %d bytes of data from PEB %d, offset %d",
+ len, pnum, offset);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+ vfree(buf);
+ return;
+}
+
/**
- * ubi_dbg_dump_ec_hdr - dump an erase counter header.
+ * ubi_dump_ec_hdr - dump an erase counter header.
* @ec_hdr: the erase counter header to dump
*/
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
printk(KERN_DEBUG "Erase counter header dump:\n");
printk(KERN_DEBUG "\tmagic %#08x\n",
@@ -57,10 +82,10 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
}
/**
- * ubi_dbg_dump_vid_hdr - dump a volume identifier header.
+ * ubi_dump_vid_hdr - dump a volume identifier header.
* @vid_hdr: the volume identifier header to dump
*/
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
printk(KERN_DEBUG "Volume identifier header dump:\n");
printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
@@ -82,10 +107,10 @@ void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
}
/**
- * ubi_dbg_dump_vol_info- dump volume information.
+ * ubi_dump_vol_info - dump volume information.
* @vol: UBI volume description object
*/
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
+void ubi_dump_vol_info(const struct ubi_volume *vol)
{
printk(KERN_DEBUG "Volume information dump:\n");
printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id);
@@ -112,11 +137,11 @@ void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
}
/**
- * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
* @r: the object to dump
* @idx: volume table index
*/
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
int name_len = be16_to_cpu(r->name_len);
@@ -146,44 +171,44 @@ void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
}
/**
- * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object.
- * @sv: the object to dump
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
*/
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv)
+void ubi_dump_av(const struct ubi_ainf_volume *av)
{
- printk(KERN_DEBUG "Volume scanning information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", sv->vol_id);
- printk(KERN_DEBUG "\thighest_lnum %d\n", sv->highest_lnum);
- printk(KERN_DEBUG "\tleb_count %d\n", sv->leb_count);
- printk(KERN_DEBUG "\tcompat %d\n", sv->compat);
- printk(KERN_DEBUG "\tvol_type %d\n", sv->vol_type);
- printk(KERN_DEBUG "\tused_ebs %d\n", sv->used_ebs);
- printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size);
- printk(KERN_DEBUG "\tdata_pad %d\n", sv->data_pad);
+ printk(KERN_DEBUG "Volume attaching information dump:\n");
+ printk(KERN_DEBUG "\tvol_id %d\n", av->vol_id);
+ printk(KERN_DEBUG "\thighest_lnum %d\n", av->highest_lnum);
+ printk(KERN_DEBUG "\tleb_count %d\n", av->leb_count);
+ printk(KERN_DEBUG "\tcompat %d\n", av->compat);
+ printk(KERN_DEBUG "\tvol_type %d\n", av->vol_type);
+ printk(KERN_DEBUG "\tused_ebs %d\n", av->used_ebs);
+ printk(KERN_DEBUG "\tlast_data_size %d\n", av->last_data_size);
+ printk(KERN_DEBUG "\tdata_pad %d\n", av->data_pad);
}
/**
- * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object.
- * @seb: the object to dump
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
* @type: object type: 0 - not corrupted, 1 - corrupted
*/
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type)
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
{
- printk(KERN_DEBUG "eraseblock scanning information dump:\n");
- printk(KERN_DEBUG "\tec %d\n", seb->ec);
- printk(KERN_DEBUG "\tpnum %d\n", seb->pnum);
+ printk(KERN_DEBUG "eraseblock attaching information dump:\n");
+ printk(KERN_DEBUG "\tec %d\n", aeb->ec);
+ printk(KERN_DEBUG "\tpnum %d\n", aeb->pnum);
if (type == 0) {
- printk(KERN_DEBUG "\tlnum %d\n", seb->lnum);
- printk(KERN_DEBUG "\tscrub %d\n", seb->scrub);
- printk(KERN_DEBUG "\tsqnum %llu\n", seb->sqnum);
+ printk(KERN_DEBUG "\tlnum %d\n", aeb->lnum);
+ printk(KERN_DEBUG "\tscrub %d\n", aeb->scrub);
+ printk(KERN_DEBUG "\tsqnum %llu\n", aeb->sqnum);
}
}
/**
- * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
* @req: the object to dump
*/
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
{
char nm[17];
@@ -200,38 +225,6 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
}
/**
- * ubi_dbg_dump_flash - dump a region of flash.
- * @ubi: UBI device description object
- * @pnum: the physical eraseblock number to dump
- * @offset: the starting offset within the physical eraseblock to dump
- * @len: the length of the region to dump
- */
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
-{
- int err;
- size_t read;
- void *buf;
- loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
-
- buf = vmalloc(len);
- if (!buf)
- return;
- err = mtd_read(ubi->mtd, addr, len, &read, buf);
- if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
- goto out;
- }
-
- dbg_msg("dumping %d bytes of data from PEB %d, offset %d",
- len, pnum, offset);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
-out:
- vfree(buf);
- return;
-}
-
-/**
* ubi_debugging_init_dev - initialize debugging for an UBI device.
* @ubi: UBI device description object
*
@@ -271,6 +264,9 @@ static struct dentry *dfs_rootdir;
*/
int ubi_debugfs_init(void)
{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -288,7 +284,8 @@ int ubi_debugfs_init(void)
*/
void ubi_debugfs_exit(void)
{
- debugfs_remove(dfs_rootdir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
}
/* Read an UBI debugfs file */
@@ -410,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
struct dentry *dent;
struct ubi_debug_info *d = ubi->dbg;
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
ubi->ubi_num);
if (n == UBI_DFS_DIR_LEN) {
@@ -477,7 +477,6 @@ out:
*/
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
- debugfs_remove_recursive(ubi->dbg->dfs_dir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg->dfs_dir);
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
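
debug.c is now always built, and the debugfs entry points above simply return early when debugfs is not configured. IS_ENABLED() lets the compiler see and type-check the debugfs calls in both configurations while the dead branch is discarded. A small hedged sketch of the same guard, with a hypothetical "example" directory instead of the UBI ones:

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *example_dfs_dir;

    /* Guarded in the same way as ubi_debugfs_init()/ubi_debugfs_exit() above. */
    static int example_debugfs_init(void)
    {
            if (!IS_ENABLED(CONFIG_DEBUG_FS))
                    return 0;       /* compiled, type-checked, then optimised out */

            example_dfs_dir = debugfs_create_dir("example", NULL);
            if (IS_ERR_OR_NULL(example_dfs_dir))
                    return example_dfs_dir ? PTR_ERR(example_dfs_dir) : -ENODEV;
            return 0;
    }

    static void example_debugfs_exit(void)
    {
            if (IS_ENABLED(CONFIG_DEBUG_FS))
                    debugfs_remove(example_dfs_dir);
    }
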
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index ead2cd16ba75..d5d2645b51a7 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -21,21 +21,20 @@
#ifndef __UBI_DEBUG_H__
#define __UBI_DEBUG_H__
-#ifdef CONFIG_MTD_UBI_DEBUG
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
#include <linux/random.h>
#define ubi_assert(expr) do { \
if (unlikely(!(expr))) { \
printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
__func__, __LINE__, current->pid); \
- ubi_dbg_dump_stack(); \
+ dump_stack(); \
} \
} while (0)
-#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
-
-#define ubi_dbg_dump_stack() dump_stack()
-
#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
print_hex_dump(l, ps, pt, r, g, b, len, a)
@@ -58,17 +57,13 @@
/* Initialization and build messages */
#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
-void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
-void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
-void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
-void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
-void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
-void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
-void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
-void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
- int offset, int len);
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
int ubi_debugging_init_dev(struct ubi_device *ubi);
void ubi_debugging_exit_dev(struct ubi_device *ubi);
int ubi_debugfs_init(void);
@@ -167,73 +162,4 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
return 0;
}
-#else
-
-/* Use "if (0)" to make compiler check arguments even if debugging is off */
-#define ubi_assert(expr) do { \
- if (0) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
- __func__, __LINE__, current->pid); \
- } \
-} while (0)
-
-#define dbg_err(fmt, ...) do { \
- if (0) \
- ubi_err(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define ubi_dbg_msg(fmt, ...) do { \
- if (0) \
- printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
-} while (0)
-
-#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_gen(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_eba(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_wl(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_io(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-#define dbg_bld(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
-
-static inline void ubi_dbg_dump_stack(void) { return; }
-static inline void
-ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) { return; }
-static inline void
-ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) { return; }
-static inline void
-ubi_dbg_dump_vol_info(const struct ubi_volume *vol) { return; }
-static inline void
-ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) { return; }
-static inline void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) { return; }
-static inline void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb,
- int type) { return; }
-static inline void
-ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) { return; }
-static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
- int pnum, int offset, int len) { return; }
-static inline void
-ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
- int g, const void *b, size_t len, bool a) { return; }
-static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
- int pnum, int offset,
- int len) { return 0; }
-static inline int ubi_dbg_check_write(struct ubi_device *ubi,
- const void *buf, int pnum,
- int offset, int len) { return 0; }
-
-static inline int ubi_debugging_init_dev(struct ubi_device *ubi) { return 0; }
-static inline void ubi_debugging_exit_dev(struct ubi_device *ubi) { return; }
-static inline int ubi_debugfs_init(void) { return 0; }
-static inline void ubi_debugfs_exit(void) { return; }
-static inline int ubi_debugfs_init_dev(struct ubi_device *ubi) { return 0; }
-static inline void ubi_debugfs_exit_dev(struct ubi_device *ubi) { return; }
-
-static inline int
-ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi) { return 0; }
-static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) { return 0; }
-static inline int
-ubi_dbg_is_write_failure(const struct ubi_device *ubi) { return 0; }
-static inline int
-ubi_dbg_is_erase_failure(const struct ubi_device *ubi) { return 0; }
-
-#endif /* !CONFIG_MTD_UBI_DEBUG */
#endif /* !__UBI_DEBUG_H__ */
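
The removed "#else" half of debug.h used the "if (0)" trick so that calls to the disabled debug macros still had their format strings and arguments checked by the compiler while no code was emitted. That trick is no longer needed here, since the messages are now always built, but the idiom is worth noting on its own; a small user-space sketch (DEBUG_ON is a hypothetical stand-in for CONFIG_MTD_UBI_DEBUG):

    #include <stdio.h>

    #define DEBUG_ON 0      /* hypothetical stand-in for CONFIG_MTD_UBI_DEBUG */

    #if DEBUG_ON
    #define dbg_msg(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)
    #else
    /* "if (0)": no code is generated, but the arguments are still type-checked,
     * so a broken format string breaks the build even with debugging off. */
    #define dbg_msg(fmt, ...) do {                          \
            if (0)                                          \
                    printf(fmt "\n", ##__VA_ARGS__);        \
    } while (0)
    #endif

    int main(void)
    {
            dbg_msg("value is %d", 42);     /* expands to nothing at run time */
            return 0;
    }
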
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 2455d620d96b..b703ac7729cf 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -341,7 +341,7 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
- err = ubi_wl_put_peb(ubi, pnum, 0);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
leb_write_unlock(ubi, vol_id, lnum);
@@ -507,7 +507,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
return -ENOMEM;
retry:
- new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
+ new_pnum = ubi_wl_get_peb(ubi);
if (new_pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
return new_pnum;
@@ -550,7 +550,7 @@ retry:
ubi_free_vid_hdr(ubi, vid_hdr);
vol->eba_tbl[lnum] = new_pnum;
- ubi_wl_put_peb(ubi, pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg("data was successfully recovered");
return 0;
@@ -558,7 +558,7 @@ retry:
out_unlock:
mutex_unlock(&ubi->buf_mutex);
out_put:
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -568,7 +568,7 @@ write_error:
* get another one.
*/
ubi_warn("failed to write to PEB %d", new_pnum);
- ubi_wl_put_peb(ubi, new_pnum, 1);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -585,7 +585,6 @@ write_error:
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
* @vol. Returns zero in case of success and a negative error code in case
@@ -593,7 +592,7 @@ write_error:
* written to the flash media, but may be some garbage.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype)
+ const void *buf, int offset, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -641,7 +640,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -687,7 +686,7 @@ write_error:
* eraseblock, so just put it and request a new one. We assume that if
* this physical eraseblock went bad, the erase code will handle that.
*/
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -707,7 +706,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
@@ -724,8 +722,7 @@ write_error:
* code in case of failure.
*/
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs)
+ int lnum, const void *buf, int len, int used_ebs)
{
int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -763,7 +760,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
@@ -807,7 +804,7 @@ write_error:
return err;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
@@ -827,7 +824,6 @@ write_error:
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
- * @dtype: data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -839,7 +835,7 @@ write_error:
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype)
+ int lnum, const void *buf, int len)
{
int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
@@ -856,7 +852,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -881,7 +877,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
vid_hdr->data_crc = cpu_to_be32(crc);
retry:
- pnum = ubi_wl_get_peb(ubi, dtype);
+ pnum = ubi_wl_get_peb(ubi);
if (pnum < 0) {
err = pnum;
goto out_leb_unlock;
@@ -905,7 +901,7 @@ retry:
}
if (vol->eba_tbl[lnum] >= 0) {
- err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
if (err)
goto out_leb_unlock;
}
@@ -930,7 +926,7 @@ write_error:
goto out_leb_unlock;
}
- err = ubi_wl_put_peb(ubi, pnum, 1);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
goto out_leb_unlock;
@@ -1171,7 +1167,7 @@ out_unlock_leb:
* print_rsvd_warning - warn about not having enough reserved PEBs.
* @ubi: UBI device description object
*
- * This is a helper function for 'ubi_eba_init_scan()' which is called when UBI
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
* cannot reserve enough PEBs for bad block handling. This function makes a
* decision whether we have to print a warning or not. The algorithm is as
* follows:
@@ -1186,13 +1182,13 @@ out_unlock_leb:
* reported by real users.
*/
static void print_rsvd_warning(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
/*
* The 1 << 18 (256KiB) number is picked randomly, just a reasonably
* large number to distinguish between newly flashed and used images.
*/
- if (si->max_sqnum > (1 << 18)) {
+ if (ai->max_sqnum > (1 << 18)) {
int min = ubi->beb_rsvd_level / 10;
if (!min)
@@ -1209,19 +1205,19 @@ static void print_rsvd_warning(struct ubi_device *ubi,
}
/**
- * ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, j, err, num_volumes;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct rb_node *rb;
dbg_eba("initialize EBA sub-system");
@@ -1230,7 +1226,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
- ubi->global_sqnum = si->max_sqnum + 1;
+ ubi->global_sqnum = ai->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
for (i = 0; i < num_volumes; i++) {
@@ -1250,18 +1246,18 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
for (j = 0; j < vol->reserved_pebs; j++)
vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
- sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
- if (!sv)
+ av = ubi_find_av(ai, idx2vol_id(ubi, i));
+ if (!av)
continue;
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- if (seb->lnum >= vol->reserved_pebs)
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ if (aeb->lnum >= vol->reserved_pebs)
/*
* This may happen in case of an unclean reboot
* during re-size.
*/
- ubi_scan_move_to_list(sv, seb, &si->erase);
- vol->eba_tbl[seb->lnum] = seb->pnum;
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
}
}
@@ -1283,7 +1279,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (ubi->avail_pebs < ubi->beb_rsvd_level) {
/* Not enough free physical eraseblocks */
ubi->beb_rsvd_pebs = ubi->avail_pebs;
- print_rsvd_warning(ubi, si);
+ print_rsvd_warning(ubi, ai);
} else
ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
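
Throughout eba.c above, ubi_wl_get_peb() loses its dtype argument and ubi_wl_put_peb() now also takes the volume ID and LEB number, but the write paths keep their bounded-retry shape: on a write failure the PEB is handed back for (torture) erasure and a fresh one is requested, up to UBI_IO_RETRIES times. A self-contained sketch of that retry loop with hypothetical get/put/write helpers:

    #include <stdio.h>

    #define IO_RETRIES 3                    /* stands in for UBI_IO_RETRIES */

    static int next_peb;
    static int get_peb(void) { return next_peb++; }  /* ubi_wl_get_peb() stand-in */
    static void put_peb_for_erase(int pnum) { (void)pnum; }
    static int write_peb(int pnum, const void *buf, int len)
    {
            (void)buf; (void)len;
            return pnum == 0 ? -5 : 0;      /* pretend the first PEB fails */
    }

    static int write_with_retry(const void *buf, int len)
    {
            int pnum, err, tries = 0;

    retry:
            pnum = get_peb();
            if (pnum < 0)
                    return pnum;

            err = write_peb(pnum, buf, len);
            if (!err)
                    return 0;

            /* Give the bad PEB back for erasure/torture and try a fresh one. */
            put_peb_for_erase(pnum);
            if (++tries > IO_RETRIES)
                    return err;
            goto retry;
    }

    int main(void)
    {
            char data[4] = "abc";
            return write_with_retry(data, sizeof(data)) ? 1 : 0;
    }
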
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 90b98822d9a4..4e44bee4c564 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -227,7 +227,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to_write > total_written)
to_write = total_written;
- err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
+ err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
break;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 43f1a0011a55..a8d523794b52 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -91,21 +91,15 @@
#include <linux/slab.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr);
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr);
-#else
-#define paranoid_check_not_bad(ubi, pnum) 0
-#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
-#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr) 0
-#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
-#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
-#endif
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
/**
* ubi_io_read - read data from a physical eraseblock.
@@ -142,7 +136,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
ubi_assert(len > 0);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
return err;
@@ -189,16 +183,16 @@ retry:
}
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d%s while reading %d bytes from PEB "
- "%d:%d, read only %zd bytes, retry",
- err, errstr, len, pnum, offset, read);
+ ubi_warn("error %d%s while reading %d bytes from PEB "
+ "%d:%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset, read);
yield();
goto retry;
}
ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, errstr, len, pnum, offset, read);
- ubi_dbg_dump_stack();
+ dump_stack();
/*
* The driver should never return -EBADMSG if it failed to read
@@ -257,14 +251,12 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return -EROFS;
}
- /* The below has to be compiled out if paranoid checks are disabled */
-
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err)
return err;
/* The area we are writing to has to contain all 0xFF bytes */
- err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
if (err)
return err;
@@ -273,18 +265,18 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
* We write to the data area of the physical eraseblock. Make
* sure it has valid EC and VID headers.
*/
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
- err = paranoid_check_peb_vid_hdr(ubi, pnum);
+ err = self_check_peb_vid_hdr(ubi, pnum);
if (err)
return err;
}
if (ubi_dbg_is_write_failure(ubi)) {
- dbg_err("cannot write %d bytes to PEB %d:%d "
+ ubi_err("cannot write %d bytes to PEB %d:%d "
"(emulated)", len, pnum, offset);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EIO;
}
@@ -293,13 +285,13 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
if (err) {
ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
"%zd bytes", err, len, pnum, offset, written);
- ubi_dbg_dump_stack();
- ubi_dbg_dump_flash(ubi, pnum, offset, len);
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
} else
ubi_assert(written == len);
if (!err) {
- err = ubi_dbg_check_write(ubi, buf, pnum, offset, len);
+ err = self_check_write(ubi, buf, pnum, offset, len);
if (err)
return err;
@@ -310,7 +302,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
offset += len;
len = ubi->peb_size - offset;
if (len)
- err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
}
return err;
@@ -364,13 +356,13 @@ retry:
err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error %d while erasing PEB %d, retry",
- err, pnum);
+ ubi_warn("error %d while erasing PEB %d, retry",
+ err, pnum);
yield();
goto retry;
}
ubi_err("cannot erase PEB %d, error %d", pnum, err);
- ubi_dbg_dump_stack();
+ dump_stack();
return err;
}
@@ -383,21 +375,21 @@ retry:
if (ei.state == MTD_ERASE_FAILED) {
if (retries++ < UBI_IO_RETRIES) {
- dbg_io("error while erasing PEB %d, retry", pnum);
+ ubi_warn("error while erasing PEB %d, retry", pnum);
yield();
goto retry;
}
ubi_err("cannot erase PEB %d", pnum);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EIO;
}
- err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
return err;
if (ubi_dbg_is_erase_failure(ubi)) {
- dbg_err("cannot erase PEB %d (emulated)", pnum);
+ ubi_err("cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
@@ -521,8 +513,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* It is important to first invalidate the EC header, and then the VID
* header. Otherwise a power cut may lead to valid EC header and
* invalid VID header, in which case UBI will treat this PEB as
- * corrupted and will try to preserve it, and print scary warnings (see
- * the header comment in scan.c for more information).
+ * corrupted and will try to preserve it, and print scary warnings.
*/
addr = (loff_t)pnum * ubi->peb_size;
err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
@@ -563,7 +554,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
*/
ubi_err("cannot invalidate PEB %d, write returned %d read returned %d",
pnum, err, err1);
- ubi_dbg_dump_flash(ubi, pnum, 0, ubi->peb_size);
+ ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
return -EIO;
}
@@ -589,7 +580,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_not_bad(ubi, pnum);
+ err = self_check_not_bad(ubi, pnum);
if (err != 0)
return err;
@@ -721,8 +712,8 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad EC header");
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return 1;
}
@@ -803,7 +794,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (verbose) {
ubi_warn("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_EC_HDR_MAGIC);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_dump_ec_hdr(ec_hdr);
}
dbg_bld("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_EC_HDR_MAGIC);
@@ -817,7 +808,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (verbose) {
ubi_warn("bad EC header CRC at PEB %d, calculated "
"%#08x, read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_ec_hdr(ec_hdr);
+ ubi_dump_ec_hdr(ec_hdr);
}
dbg_bld("bad EC header CRC at PEB %d, calculated "
"%#08x, read %#08x", pnum, crc, hdr_crc);
@@ -874,7 +865,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
ec_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
return err;
@@ -905,40 +896,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
- dbg_err("bad copy_flag");
+ ubi_err("bad copy_flag");
goto bad;
}
if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
data_pad < 0) {
- dbg_err("negative values");
+ ubi_err("negative values");
goto bad;
}
if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
- dbg_err("bad vol_id");
+ ubi_err("bad vol_id");
goto bad;
}
if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
compat != UBI_COMPAT_REJECT) {
- dbg_err("bad compat");
+ ubi_err("bad compat");
goto bad;
}
if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
- dbg_err("bad vol_type");
+ ubi_err("bad vol_type");
goto bad;
}
if (data_pad >= ubi->leb_size / 2) {
- dbg_err("bad data_pad");
+ ubi_err("bad data_pad");
goto bad;
}
@@ -950,45 +941,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
* mapped logical eraseblocks.
*/
if (used_ebs == 0) {
- dbg_err("zero used_ebs");
+ ubi_err("zero used_ebs");
goto bad;
}
if (data_size == 0) {
- dbg_err("zero data_size");
+ ubi_err("zero data_size");
goto bad;
}
if (lnum < used_ebs - 1) {
if (data_size != usable_leb_size) {
- dbg_err("bad data_size");
+ ubi_err("bad data_size");
goto bad;
}
} else if (lnum == used_ebs - 1) {
if (data_size == 0) {
- dbg_err("bad data_size at last LEB");
+ ubi_err("bad data_size at last LEB");
goto bad;
}
} else {
- dbg_err("too high lnum");
+ ubi_err("too high lnum");
goto bad;
}
} else {
if (copy_flag == 0) {
if (data_crc != 0) {
- dbg_err("non-zero data CRC");
+ ubi_err("non-zero data CRC");
goto bad;
}
if (data_size != 0) {
- dbg_err("non-zero data_size");
+ ubi_err("non-zero data_size");
goto bad;
}
} else {
if (data_size == 0) {
- dbg_err("zero data_size of copy");
+ ubi_err("zero data_size of copy");
goto bad;
}
}
if (used_ebs != 0) {
- dbg_err("bad used_ebs");
+ ubi_err("bad used_ebs");
goto bad;
}
}
@@ -997,8 +988,8 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
bad:
ubi_err("bad VID header");
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return 1;
}
@@ -1054,7 +1045,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (verbose) {
ubi_warn("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_VID_HDR_MAGIC);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dump_vid_hdr(vid_hdr);
}
dbg_bld("bad magic number at PEB %d: %08x instead of "
"%08x", pnum, magic, UBI_VID_HDR_MAGIC);
@@ -1068,7 +1059,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (verbose) {
ubi_warn("bad CRC at PEB %d, calculated %#08x, "
"read %#08x", pnum, crc, hdr_crc);
- ubi_dbg_dump_vid_hdr(vid_hdr);
+ ubi_dump_vid_hdr(vid_hdr);
}
dbg_bld("bad CRC at PEB %d, calculated %#08x, "
"read %#08x", pnum, crc, hdr_crc);
@@ -1112,7 +1103,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
dbg_io("write VID header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- err = paranoid_check_peb_ec_hdr(ubi, pnum);
+ err = self_check_peb_ec_hdr(ubi, pnum);
if (err)
return err;
@@ -1121,7 +1112,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
vid_hdr->hdr_crc = cpu_to_be32(crc);
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
return err;
@@ -1131,17 +1122,15 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_not_bad - ensure that a physical eraseblock is not bad.
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
* @ubi: UBI device description object
* @pnum: physical eraseblock number to check
*
* This function returns zero if the physical eraseblock is good, %-EINVAL if
* it is bad and a negative error code if an error occurred.
*/
-static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
@@ -1152,13 +1141,13 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
if (!err)
return err;
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ dump_stack();
return err > 0 ? -EINVAL : err;
}
/**
- * paranoid_check_ec_hdr - check if an erase counter header is all right.
+ * self_check_ec_hdr - check if an erase counter header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the erase counter header belongs to
* @ec_hdr: the erase counter header to check
@@ -1166,8 +1155,8 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
* This function returns zero if the erase counter header contains valid
* values, and %-EINVAL if not.
*/
-static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_ec_hdr *ec_hdr)
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
{
int err;
uint32_t magic;
@@ -1184,27 +1173,27 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
err = validate_ec_hdr(ubi, ec_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return 0;
fail:
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_peb_ec_hdr - check erase counter header.
+ * self_check_peb_ec_hdr - check erase counter header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
 * This function returns zero if the erase counter header is all right and
* a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
@@ -1225,14 +1214,14 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_ec_hdr(ec_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
err = -EINVAL;
goto exit;
}
- err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
exit:
kfree(ec_hdr);
@@ -1240,7 +1229,7 @@ exit:
}
/**
- * paranoid_check_vid_hdr - check that a volume identifier header is all right.
+ * self_check_vid_hdr - check that a volume identifier header is all right.
* @ubi: UBI device description object
* @pnum: physical eraseblock number the volume identifier header belongs to
* @vid_hdr: the volume identifier header to check
@@ -1248,8 +1237,8 @@ exit:
* This function returns zero if the volume identifier header is all right, and
* %-EINVAL if not.
*/
-static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
- const struct ubi_vid_hdr *vid_hdr)
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
{
int err;
uint32_t magic;
@@ -1266,29 +1255,29 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
err = validate_vid_hdr(ubi, vid_hdr);
if (err) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
goto fail;
}
return err;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_peb_vid_hdr - check volume identifier header.
+ * self_check_peb_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the volume identifier header is all right,
* and a negative error code if not or if an error occurred.
*/
-static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
int err;
uint32_t crc, hdr_crc;
@@ -1313,14 +1302,14 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
if (hdr_crc != crc) {
ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
"read %#08x", pnum, crc, hdr_crc);
- ubi_err("paranoid check failed for PEB %d", pnum);
- ubi_dbg_dump_vid_hdr(vid_hdr);
- ubi_dbg_dump_stack();
+ ubi_err("self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
err = -EINVAL;
goto exit;
}
- err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
exit:
ubi_free_vid_hdr(ubi, vid_hdr);
@@ -1328,7 +1317,7 @@ exit:
}
/**
- * ubi_dbg_check_write - make sure write succeeded.
+ * self_check_write - make sure write succeeded.
* @ubi: UBI device description object
* @buf: buffer with data which were written
* @pnum: physical eraseblock number the data were written to
@@ -1339,8 +1328,8 @@ exit:
* the original data buffer - the data have to match. Returns zero if the data
* match and a negative error code if not or in case of failure.
*/
-int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
- int offset, int len)
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
{
int err, i;
size_t read;
@@ -1368,7 +1357,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
if (c == c1)
continue;
- ubi_err("paranoid check failed for PEB %d:%d, len %d",
+ ubi_err("self-check failed for PEB %d:%d, len %d",
pnum, offset, len);
ubi_msg("data differ at position %d", i);
dump_len = max_t(int, 128, len - i);
@@ -1380,7 +1369,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
i, i + dump_len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
buf1 + i, dump_len, 1);
- ubi_dbg_dump_stack();
+ dump_stack();
err = -EINVAL;
goto out_free;
}
@@ -1394,7 +1383,7 @@ out_free:
}
/**
- * ubi_dbg_check_all_ff - check that a region of flash is empty.
+ * ubi_self_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @offset: the starting offset within the physical eraseblock to check
@@ -1404,7 +1393,7 @@ out_free:
* @offset of the physical eraseblock @pnum, and a negative error code if not
* or if an error occurred.
*/
-int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
size_t read;
int err;
@@ -1438,14 +1427,12 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
fail:
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_msg("hex dump of the %d-%d region", offset, offset + len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
err = -EINVAL;
error:
- ubi_dbg_dump_stack();
+ dump_stack();
vfree(buf);
return err;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 9fdb35367fe0..3aac1acceeb4 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -221,7 +221,7 @@ out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
- dbg_err("cannot open device %d, volume %d, error %d",
+ ubi_err("cannot open device %d, volume %d, error %d",
ubi_num, vol_id, err);
return ERR_PTR(err);
}
@@ -426,11 +426,9 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* @buf: data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function writes @len bytes of data from @buf to offset @offset of
- * logical eraseblock @lnum. The @dtype argument describes expected lifetime of
- * the data.
+ * logical eraseblock @lnum.
*
 * This function takes care of physical eraseblock write failures. If a write
 * to the physical eraseblock fails, the logical eraseblock is
@@ -447,7 +445,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_read);
* returns immediately with %-EBADF code.
*/
int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int offset, int len, int dtype)
+ int offset, int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -466,17 +464,13 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
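For illustration only, not part of this patch: a minimal sketch of how a hypothetical in-kernel caller would be updated for the new ubi_leb_write() prototype, which no longer takes a dtype argument. The function and variable names here are invented for the example; it assumes <linux/mtd/ubi.h> is included.

	static int example_write_leb(struct ubi_volume_desc *desc, int lnum,
				     const void *buf, int len)
	{
		/* Before this patch: ubi_leb_write(desc, lnum, buf, 0, len, UBI_UNKNOWN); */
		return ubi_leb_write(desc, lnum, buf, 0, len);
	}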
@@ -486,7 +480,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
* @lnum: logical eraseblock number to change
* @buf: data to write
* @len: how many bytes to write
- * @dtype: expected data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
@@ -497,7 +490,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_write);
* code in case of failure.
*/
int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int len, int dtype)
+ int len)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -515,17 +508,13 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (len == 0)
return 0;
- return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
@@ -562,7 +551,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (err)
return err;
- return ubi_wl_flush(ubi);
+ return ubi_wl_flush(ubi, vol->vol_id, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_erase);
@@ -626,7 +615,6 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
- * @dtype: expected data type
*
* This function maps an un-mapped logical eraseblock @lnum to a physical
* eraseblock. This means, that after a successful invocation of this
@@ -639,7 +627,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
* eraseblock is already mapped, and other negative error codes in case of
* other failures.
*/
-int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
@@ -652,17 +640,13 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
if (lnum < 0 || lnum >= vol->reserved_pebs)
return -EINVAL;
- if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
- dtype != UBI_UNKNOWN)
- return -EINVAL;
-
if (vol->upd_marker)
return -EBADF;
if (vol->eba_tbl[lnum] >= 0)
return -EBADMSG;
- return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(ubi_leb_map);
@@ -720,6 +704,33 @@ int ubi_sync(int ubi_num)
}
EXPORT_SYMBOL_GPL(ubi_sync);
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending works for a particular volume id / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+ struct ubi_device *ubi;
+ int err = 0;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ err = ubi_wl_flush(ubi, vol_id, lnum);
+ ubi_put_device(ubi);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
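For illustration only, not part of this patch: a minimal sketch of calling the new ubi_flush() export, using %UBI_ALL as the LEB wildcard to flush everything pending for one volume. The wrapper name is invented for the example.

	static int example_flush_volume(int ubi_num, int vol_id)
	{
		/* Wait for all pending work queued for this volume, any LEB */
		return ubi_flush(ubi_num, vol_id, UBI_ALL);
	}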
+
BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
/**
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
deleted file mode 100644
index d48aef15ab5d..000000000000
--- a/drivers/mtd/ubi/scan.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Author: Artem Bityutskiy (Битюцкий Артём)
- */
-
-#ifndef __UBI_SCAN_H__
-#define __UBI_SCAN_H__
-
-/* The erase counter value for this physical eraseblock is unknown */
-#define UBI_SCAN_UNKNOWN_EC (-1)
-
-/**
- * struct ubi_scan_leb - scanning information about a physical eraseblock.
- * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown)
- * @pnum: physical eraseblock number
- * @lnum: logical eraseblock number
- * @scrub: if this physical eraseblock needs scrubbing
- * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
- * @sqnum: sequence number
- * @u: unions RB-tree or @list links
- * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects
- * @u.list: link in one of the eraseblock lists
- *
- * One object of this type is allocated for each physical eraseblock during
- * scanning.
- */
-struct ubi_scan_leb {
- int ec;
- int pnum;
- int lnum;
- unsigned int scrub:1;
- unsigned int copy_flag:1;
- unsigned long long sqnum;
- union {
- struct rb_node rb;
- struct list_head list;
- } u;
-};
-
-/**
- * struct ubi_scan_volume - scanning information about a volume.
- * @vol_id: volume ID
- * @highest_lnum: highest logical eraseblock number in this volume
- * @leb_count: number of logical eraseblocks in this volume
- * @vol_type: volume type
- * @used_ebs: number of used logical eraseblocks in this volume (only for
- * static volumes)
- * @last_data_size: amount of data in the last logical eraseblock of this
- * volume (always equivalent to the usable logical eraseblock
- * size in case of dynamic volumes)
- * @data_pad: how many bytes at the end of logical eraseblocks of this volume
- * are not used (due to volume alignment)
- * @compat: compatibility flags of this volume
- * @rb: link in the volume RB-tree
- * @root: root of the RB-tree containing all the eraseblock belonging to this
- * volume (&struct ubi_scan_leb objects)
- *
- * One object of this type is allocated for each volume during scanning.
- */
-struct ubi_scan_volume {
- int vol_id;
- int highest_lnum;
- int leb_count;
- int vol_type;
- int used_ebs;
- int last_data_size;
- int data_pad;
- int compat;
- struct rb_node rb;
- struct rb_root root;
-};
-
-/**
- * struct ubi_scan_info - UBI scanning information.
- * @volumes: root of the volume RB-tree
- * @corr: list of corrupted physical eraseblocks
- * @free: list of free physical eraseblocks
- * @erase: list of physical eraseblocks which have to be erased
- * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
- * those belonging to "preserve"-compatible internal volumes)
- * @corr_peb_count: count of PEBs in the @corr list
- * @empty_peb_count: count of PEBs which are presumably empty (contain only
- * 0xFF bytes)
- * @alien_peb_count: count of PEBs in the @alien list
- * @bad_peb_count: count of bad physical eraseblocks
- * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
- * as bad yet, but which look like bad
- * @vols_found: number of volumes found during scanning
- * @highest_vol_id: highest volume ID
- * @is_empty: flag indicating whether the MTD device is empty or not
- * @min_ec: lowest erase counter value
- * @max_ec: highest erase counter value
- * @max_sqnum: highest sequence number value
- * @mean_ec: mean erase counter value
- * @ec_sum: a temporary variable used when calculating @mean_ec
- * @ec_count: a temporary variable used when calculating @mean_ec
- * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects
- *
- * This data structure contains the result of scanning and may be used by other
- * UBI sub-systems to build final UBI data structures, further error-recovery
- * and so on.
- */
-struct ubi_scan_info {
- struct rb_root volumes;
- struct list_head corr;
- struct list_head free;
- struct list_head erase;
- struct list_head alien;
- int corr_peb_count;
- int empty_peb_count;
- int alien_peb_count;
- int bad_peb_count;
- int maybe_bad_peb_count;
- int vols_found;
- int highest_vol_id;
- int is_empty;
- int min_ec;
- int max_ec;
- unsigned long long max_sqnum;
- int mean_ec;
- uint64_t ec_sum;
- int ec_count;
- struct kmem_cache *scan_leb_slab;
-};
-
-struct ubi_device;
-struct ubi_vid_hdr;
-
-/*
- * ubi_scan_move_to_list - move a PEB from the volume tree to a list.
- *
- * @sv: volume scanning information
- * @seb: scanning eraseblock information
- * @list: the list to move to
- */
-static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
- struct ubi_scan_leb *seb,
- struct list_head *list)
-{
- rb_erase(&seb->u.rb, &sv->root);
- list_add_tail(&seb->u.list, list);
-}
-
-int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
- int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
- int bitflips);
-struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
- int vol_id);
-struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
- int lnum);
-void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
-struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
- struct ubi_scan_info *si);
-int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
- int pnum, int ec);
-struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
-void ubi_scan_destroy_si(struct ubi_scan_info *si);
-
-#endif /* !__UBI_SCAN_H__ */
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 6fb8ec2174a5..468ffbc0eabd 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -149,10 +149,10 @@ enum {
* The @image_seq field is used to validate a UBI image that has been prepared
* for a UBI device. The @image_seq value can be any value, but it must be the
* same on all eraseblocks. UBI will ensure that all new erase counter headers
- * also contain this value, and will check the value when scanning at start-up.
+ * also contain this value, and will check the value when attaching the flash.
* One way to make use of @image_seq is to increase its value by one every time
* an image is flashed over an existing image, then, if the flashing does not
- * complete, UBI will detect the error when scanning.
+ * complete, UBI will detect the error when attaching the media.
*/
struct ubi_ec_hdr {
__be32 magic;
@@ -298,8 +298,8 @@ struct ubi_vid_hdr {
#define UBI_INT_VOL_COUNT 1
/*
- * Starting ID of internal volumes. There is reserved room for 4096 internal
- * volumes.
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
*/
#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
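For illustration only: the value quoted in the comment above can be checked directly, since 4096 == 0x1000 and 0x7FFFFFFF - 0x1000 == 0x7FFFEFFF.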
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index b162790790a9..a1a81c9ea8ce 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -43,7 +43,6 @@
#include <asm/pgtable.h>
#include "ubi-media.h"
-#include "scan.h"
/* Maximum number of supported UBI devices */
#define UBI_MAX_DEVICES 32
@@ -66,7 +65,10 @@
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
-/* This marker in the EBA table means that the LEB is um-mapped */
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
#define UBI_LEB_UNMAPPED -1
/*
@@ -82,6 +84,9 @@
*/
#define UBI_PROT_QUEUE_LEN 10
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
/*
* Error codes returned by the I/O sub-system.
*
@@ -222,8 +227,6 @@ struct ubi_volume_desc;
* @upd_ebs: how many eraseblocks are expected to be updated
 * @ch_lnum: LEB number which is being changed by the atomic LEB change
* operation
- * @ch_dtype: data persistency type which is being changing by the atomic LEB
- * change operation
* @upd_bytes: how many bytes are expected to be received for volume update or
* atomic LEB change
* @upd_received: how many bytes were already received for volume update or
@@ -270,7 +273,6 @@ struct ubi_volume {
int upd_ebs;
int ch_lnum;
- int ch_dtype;
long long upd_bytes;
long long upd_received;
void *upd_buf;
@@ -477,6 +479,124 @@ struct ubi_device {
struct ubi_debug_info *dbg;
};
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: unions RB-tree or @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+ int ec;
+ int pnum;
+ int vol_id;
+ int lnum;
+ unsigned int scrub:1;
+ unsigned int copy_flag:1;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock
+ * size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ * volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
+ * as bad yet, but which look bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, further
+ * error-recovery and so on.
+ */
+struct ubi_attach_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+ int bad_peb_count;
+ int maybe_bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+ struct kmem_cache *aeb_slab_cache;
+};
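For illustration only, not part of this patch: a minimal sketch of walking a populated &struct ubi_attach_info with the ubi_rb_for_each_entry() helper declared further down in ubi.h. The function name and the pr_debug() output are invented for the example.

	static void example_dump_attach_info(struct ubi_attach_info *ai)
	{
		struct ubi_ainf_volume *av;
		struct ubi_ainf_peb *aeb;
		struct rb_node *rb1, *rb2;

		ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
			ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
				pr_debug("vol %d: LEB %d -> PEB %d (EC %d)\n",
					 av->vol_id, aeb->lnum, aeb->pnum, aeb->ec);
		}
	}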
+
#include "debug.h"
extern struct kmem_cache *ubi_wl_entry_slab;
@@ -487,12 +607,23 @@ extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
+/* scan.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
+
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
struct ubi_vtbl_record *vtbl_rec);
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
struct list_head *rename_list);
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* vmt.c */
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
@@ -525,22 +656,22 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
void *buf, int offset, int len, int check);
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
- const void *buf, int offset, int len, int dtype);
+ const void *buf, int offset, int len);
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype,
- int used_ebs);
+ int lnum, const void *buf, int len, int used_ebs);
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
- int lnum, const void *buf, int len, int dtype);
+ int lnum, const void *buf, int len);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
-int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
/* wl.c */
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture);
-int ubi_wl_flush(struct ubi_device *ubi);
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
void ubi_wl_close(struct ubi_device *ubi);
int ubi_thread(void *u);
@@ -573,6 +704,7 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_notify_all(struct ubi_device *ubi, int ntype,
struct notifier_block *nb);
int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
/* kapi.c */
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
@@ -593,6 +725,21 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
rb = rb_next(rb), \
pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+ struct ubi_ainf_peb *aeb,
+ struct list_head *list)
+{
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, list);
+}
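For illustration only, not part of this patch: a typical use of the helper above during attach would be moving a PEB that has to be re-erased out of its per-volume tree and onto the erase list, roughly:

	ubi_move_aeb_to_list(av, aeb, &ai->erase);

where 'av', 'aeb' and 'ai' are attach-time objects as described in the structures above.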
+
/**
* ubi_zalloc_vid_hdr - allocate a volume identifier header object.
* @ubi: UBI device description object
@@ -667,7 +814,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
- ubi_dbg_dump_stack();
+ dump_stack();
}
}
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 425bf5a3edd4..9f2ebd8750e7 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -147,7 +147,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
}
if (bytes == 0) {
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
@@ -186,14 +186,12 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_gen("start changing LEB %d:%d, %u bytes",
vol->vol_id, req->lnum, req->bytes);
if (req->bytes == 0)
- return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
- req->dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
vol->upd_bytes = req->bytes;
vol->upd_received = 0;
vol->changing_leb = 1;
vol->ch_lnum = req->lnum;
- vol->ch_dtype = req->dtype;
vol->upd_buf = vmalloc(req->bytes);
if (!vol->upd_buf)
@@ -246,8 +244,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return 0;
}
- err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
} else {
/*
* When writing static volume, and this is the last logical
@@ -259,8 +256,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
- err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
- UBI_UNKNOWN, used_ebs);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
}
return err;
@@ -365,7 +361,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
if (err)
return err;
/* The update is finished, clear the update marker */
@@ -421,7 +417,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
len - vol->upd_bytes);
len = ubi_calc_data_len(ubi, vol->upd_buf, len);
err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
- vol->upd_buf, len, UBI_UNKNOWN);
+ vol->upd_buf, len);
if (err)
return err;
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 863835f4aefe..0669cff8ac3c 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -29,11 +29,7 @@
#include <linux/export.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_volumes(struct ubi_device *ubi);
-#else
-#define paranoid_check_volumes(ubi) 0
-#endif
+static int self_check_volumes(struct ubi_device *ubi);
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -227,7 +223,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
if (vol_id == UBI_VOL_NUM_AUTO) {
- dbg_err("out of volume IDs");
+ ubi_err("out of volume IDs");
err = -ENFILE;
goto out_unlock;
}
@@ -241,7 +237,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that this volume does not exist */
err = -EEXIST;
if (ubi->volumes[vol_id]) {
- dbg_err("volume %d already exists", vol_id);
+ ubi_err("volume %d already exists", vol_id);
goto out_unlock;
}
@@ -250,7 +246,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
- dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
+ ubi_err("volume \"%s\" exists (ID %d)", req->name, i);
goto out_unlock;
}
@@ -261,9 +257,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
+ ubi_err("not enough PEBs, only %d available", ubi->avail_pebs);
if (ubi->corr_peb_count)
- dbg_err("%d PEBs are corrupted and not used",
+ ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
err = -ENOSPC;
goto out_unlock;
@@ -284,7 +280,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
* Finish all pending erases because there may be some LEBs belonging
* to the same volume ID.
*/
- err = ubi_wl_flush(ubi);
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
if (err)
goto out_acc;
@@ -360,8 +356,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while creating volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_sysfs:
@@ -461,8 +456,8 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
spin_unlock(&ubi->volumes_lock);
ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
- if (!no_vtbl && paranoid_check_volumes(ubi))
- dbg_err("check failed while removing volume %d", vol_id);
+ if (!no_vtbl)
+ self_check_volumes(ubi);
return err;
@@ -500,7 +495,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
- dbg_err("too small size %d, %d LEBs contain data",
+ ubi_err("too small size %d, %d LEBs contain data",
reserved_pebs, vol->used_ebs);
return -EINVAL;
}
@@ -529,10 +524,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (pebs > 0) {
spin_lock(&ubi->volumes_lock);
if (pebs > ubi->avail_pebs) {
- dbg_err("not enough PEBs: requested %d, available %d",
+ ubi_err("not enough PEBs: requested %d, available %d",
pebs, ubi->avail_pebs);
if (ubi->corr_peb_count)
- dbg_err("%d PEBs are corrupted and not used",
+ ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
@@ -588,8 +583,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
}
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while re-sizing volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_acc:
@@ -638,8 +632,8 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
}
}
- if (!err && paranoid_check_volumes(ubi))
- ;
+ if (!err)
+ self_check_volumes(ubi);
return err;
}
@@ -686,8 +680,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
return err;
}
- if (paranoid_check_volumes(ubi))
- dbg_err("check failed while adding volume %d", vol_id);
+ self_check_volumes(ubi);
return err;
out_cdev:
@@ -712,16 +705,14 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
volume_sysfs_close(vol);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_volume - check volume information.
+ * self_check_volume - check volume information.
* @ubi: UBI device description object
* @vol_id: volume ID
*
 * Returns zero if the volume is all right and a negative error code if not.
*/
-static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
@@ -771,7 +762,7 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
}
if (vol->upd_marker && vol->corrupted) {
- dbg_err("update marker and corrupted simultaneously");
+ ubi_err("update marker and corrupted simultaneously");
goto fail;
}
@@ -853,22 +844,22 @@ static int paranoid_check_volume(struct ubi_device *ubi, int vol_id)
return 0;
fail:
- ubi_err("paranoid check failed for volume %d", vol_id);
+ ubi_err("self-check failed for volume %d", vol_id);
if (vol)
- ubi_dbg_dump_vol_info(vol);
- ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ ubi_dump_vol_info(vol);
+ ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
dump_stack();
spin_unlock(&ubi->volumes_lock);
return -EINVAL;
}
/**
- * paranoid_check_volumes - check information about all volumes.
+ * self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
 * Returns zero if volumes are all right and a negative error code if not.
*/
-static int paranoid_check_volumes(struct ubi_device *ubi)
+static int self_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
@@ -876,11 +867,10 @@ static int paranoid_check_volumes(struct ubi_device *ubi)
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
- err = paranoid_check_volume(ubi, i);
+ err = self_check_volume(ubi, i);
if (err)
break;
}
return err;
}
-#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 17cec0c01544..437bc193e170 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -37,16 +37,15 @@
* LEB 1. This scheme guarantees recoverability from unclean reboots.
*
* In this UBI implementation the on-flash volume table does not contain any
- * information about how many data static volumes contain. This information may
- * be found from the scanning data.
+ * information about how much data static volumes contain.
*
* But it would still be beneficial to store this information in the volume
* table. For example, suppose we have a static volume X, and all its physical
* eraseblocks became bad for some reasons. Suppose we are attaching the
- * corresponding MTD device, the scanning has found no logical eraseblocks
+ * corresponding MTD device, for some reason we find no logical eraseblocks
* corresponding to the volume X. According to the volume table volume X does
* exist. So we don't know whether it is just empty or all its physical
- * eraseblocks went bad. So we cannot alarm the user about this corruption.
+ * eraseblocks went bad. So we cannot alarm the user properly.
*
* The volume table also stores so-called "update marker", which is used for
* volume updates. Before updating the volume, the update marker is set, and
@@ -62,11 +61,7 @@
#include <asm/div64.h>
#include "ubi.h"
-#ifdef CONFIG_MTD_UBI_DEBUG
-static void paranoid_vtbl_check(const struct ubi_device *ubi);
-#else
-#define paranoid_vtbl_check(ubi)
-#endif
+static void self_vtbl_check(const struct ubi_device *ubi);
/* Empty volume table record */
static struct ubi_vtbl_record empty_vtbl_record;
@@ -106,12 +101,12 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
if (err)
return err;
}
- paranoid_vtbl_check(ubi);
+ self_vtbl_check(ubi);
return 0;
}
@@ -158,7 +153,7 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
return err;
err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
- ubi->vtbl_size, UBI_LONGTERM);
+ ubi->vtbl_size);
if (err)
return err;
}
@@ -197,7 +192,7 @@ static int vtbl_check(const struct ubi_device *ubi,
if (be32_to_cpu(vtbl[i].crc) != crc) {
ubi_err("bad CRC at record %u: %#08x, not %#08x",
i, crc, be32_to_cpu(vtbl[i].crc));
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return 1;
}
@@ -229,7 +224,7 @@ static int vtbl_check(const struct ubi_device *ubi,
n = ubi->leb_size % alignment;
if (data_pad != n) {
- dbg_err("bad data_pad, has to be %d", n);
+ ubi_err("bad data_pad, has to be %d", n);
err = 6;
goto bad;
}
@@ -245,7 +240,7 @@ static int vtbl_check(const struct ubi_device *ubi,
}
if (reserved_pebs > ubi->good_peb_count) {
- dbg_err("too large reserved_pebs %d, good PEBs %d",
+ ubi_err("too large reserved_pebs %d, good PEBs %d",
reserved_pebs, ubi->good_peb_count);
err = 9;
goto bad;
@@ -277,8 +272,8 @@ static int vtbl_check(const struct ubi_device *ubi,
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
ubi_err("volumes %d and %d have the same name"
" \"%s\"", i, n, vtbl[i].name);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
- ubi_dbg_dump_vtbl_record(&vtbl[n], n);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
return -EINVAL;
}
}
@@ -288,26 +283,26 @@ static int vtbl_check(const struct ubi_device *ubi,
bad:
ubi_err("volume table check failed: record %d, error %d", i, err);
- ubi_dbg_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[i], i);
return -EINVAL;
}
/**
* create_vtbl - create a copy of volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @copy: number of the volume table copy
* @vtbl: contents of the volume table
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
int copy, void *vtbl)
{
int err, tries = 0;
struct ubi_vid_hdr *vid_hdr;
- struct ubi_scan_leb *new_seb;
+ struct ubi_ainf_peb *new_aeb;
ubi_msg("create volume table (copy #%d)", copy + 1);
@@ -316,9 +311,9 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
return -ENOMEM;
retry:
- new_seb = ubi_scan_get_free_peb(ubi, si);
- if (IS_ERR(new_seb)) {
- err = PTR_ERR(new_seb);
+ new_aeb = ubi_early_get_peb(ubi, ai);
+ if (IS_ERR(new_aeb)) {
+ err = PTR_ERR(new_aeb);
goto out_free;
}
@@ -328,25 +323,24 @@ retry:
vid_hdr->data_size = vid_hdr->used_ebs =
vid_hdr->data_pad = cpu_to_be32(0);
vid_hdr->lnum = cpu_to_be32(copy);
- vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+ vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
/* The EC header is already there, write the VID header */
- err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
if (err)
goto write_error;
/* Write the layout volume contents */
- err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size);
+ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
if (err)
goto write_error;
/*
- * And add it to the scanning information. Don't delete the old version
- * of this LEB as it will be deleted and freed in 'ubi_scan_add_used()'.
+ * And add it to the attaching information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
- err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
- vid_hdr, 0);
- kfree(new_seb);
+ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+ kfree(new_aeb);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -356,10 +350,10 @@ write_error:
* Probably this physical eraseblock went bad, try to pick
* another one.
*/
- list_add(&new_seb->u.list, &si->erase);
+ list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
- kfree(new_seb);
+ kfree(new_aeb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -369,20 +363,20 @@ out_free:
/**
* process_lvol - process the layout volume.
* @ubi: UBI device description object
- * @si: scanning information
- * @sv: layout volume scanning information
+ * @ai: attaching information
+ * @av: layout volume attaching information
*
* This function is responsible for reading the layout volume, ensuring it is
* not corrupted, and recovering from corruptions if needed. Returns volume
* table in case of success and a negative error code in case of failure.
*/
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si,
- struct ubi_scan_volume *sv)
+ struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av)
{
int err;
struct rb_node *rb;
- struct ubi_scan_leb *seb;
+ struct ubi_ainf_peb *aeb;
struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
@@ -414,14 +408,14 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
dbg_gen("check layout volume");
/* Read both LEB 0 and LEB 1 into memory */
- ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = vzalloc(ubi->vtbl_size);
- if (!leb[seb->lnum]) {
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+ if (!leb[aeb->lnum]) {
err = -ENOMEM;
goto out_free;
}
- err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
+ err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
ubi->vtbl_size);
if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
/*
@@ -429,12 +423,12 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
* uncorrectable ECC error, but we have our own CRC and
* the data will be checked later. If the data is OK,
* the PEB will be scrubbed (because we set
- * seb->scrub). If the data is not OK, the contents of
+ * aeb->scrub). If the data is not OK, the contents of
* the PEB will be recovered from the second copy, and
- * seb->scrub will be cleared in
- * 'ubi_scan_add_used()'.
+ * aeb->scrub will be cleared in
+ * 'ubi_add_to_av()'.
*/
- seb->scrub = 1;
+ aeb->scrub = 1;
else if (err)
goto out_free;
}
@@ -453,7 +447,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
ubi->vtbl_size);
if (leb_corrupted[1]) {
ubi_warn("volume table copy #2 is corrupted");
- err = create_vtbl(ubi, si, 1, leb[0]);
+ err = create_vtbl(ubi, ai, 1, leb[0]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -476,7 +470,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
}
ubi_warn("volume table copy #1 is corrupted");
- err = create_vtbl(ubi, si, 0, leb[1]);
+ err = create_vtbl(ubi, ai, 0, leb[1]);
if (err)
goto out_free;
ubi_msg("volume table was restored");
@@ -494,13 +488,13 @@ out_free:
/**
* create_empty_lvol - create empty layout volume.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns volume table contents in case of success and a
* negative error code in case of failure.
*/
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
- struct ubi_scan_info *si)
+ struct ubi_attach_info *ai)
{
int i;
struct ubi_vtbl_record *vtbl;
@@ -515,7 +509,7 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
int err;
- err = create_vtbl(ubi, si, i, vtbl);
+ err = create_vtbl(ubi, ai, i, vtbl);
if (err) {
vfree(vtbl);
return ERR_PTR(err);
@@ -528,18 +522,19 @@ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
/**
* init_volumes - initialize volume information for existing volumes.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
* @vtbl: volume table
*
* This function allocates volume description objects for existing volumes.
* Returns zero in case of success and a negative error code in case of
* failure.
*/
-static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
+static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
const struct ubi_vtbl_record *vtbl)
{
int i, reserved_pebs = 0;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
for (i = 0; i < ubi->vtbl_slots; i++) {
@@ -595,8 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/* Static volumes only */
- sv = ubi_scan_find_sv(si, i);
- if (!sv) {
+ av = ubi_find_av(ai, i);
+ if (!av) {
/*
* No eraseblocks belonging to this volume found. We
* don't actually know whether this static volume is
@@ -608,22 +603,22 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
continue;
}
- if (sv->leb_count != sv->used_ebs) {
+ if (av->leb_count != av->used_ebs) {
/*
* We found a static volume which misses several
* eraseblocks. Treat it as corrupted.
*/
ubi_warn("static volume %d misses %d LEBs - corrupted",
- sv->vol_id, sv->used_ebs - sv->leb_count);
+ av->vol_id, av->used_ebs - av->leb_count);
vol->corrupted = 1;
continue;
}
- vol->used_ebs = sv->used_ebs;
+ vol->used_ebs = av->used_ebs;
vol->used_bytes =
(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
- vol->used_bytes += sv->last_data_size;
- vol->last_eb_bytes = sv->last_data_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
}
/* And add the layout volume */
@@ -664,105 +659,104 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
/**
- * check_sv - check volume scanning information.
+ * check_av - check volume attaching information.
* @vol: UBI volume description object
- * @sv: volume scanning information
+ * @av: volume attaching information
*
- * This function returns zero if the volume scanning information is consistent
+ * This function returns zero if the volume attaching information is consistent
 * with the data read from the volume table, and %-EINVAL if not.
*/
-static int check_sv(const struct ubi_volume *vol,
- const struct ubi_scan_volume *sv)
+static int check_av(const struct ubi_volume *vol,
+ const struct ubi_ainf_volume *av)
{
int err;
- if (sv->highest_lnum >= vol->reserved_pebs) {
+ if (av->highest_lnum >= vol->reserved_pebs) {
err = 1;
goto bad;
}
- if (sv->leb_count > vol->reserved_pebs) {
+ if (av->leb_count > vol->reserved_pebs) {
err = 2;
goto bad;
}
- if (sv->vol_type != vol->vol_type) {
+ if (av->vol_type != vol->vol_type) {
err = 3;
goto bad;
}
- if (sv->used_ebs > vol->reserved_pebs) {
+ if (av->used_ebs > vol->reserved_pebs) {
err = 4;
goto bad;
}
- if (sv->data_pad != vol->data_pad) {
+ if (av->data_pad != vol->data_pad) {
err = 5;
goto bad;
}
return 0;
bad:
- ubi_err("bad scanning information, error %d", err);
- ubi_dbg_dump_sv(sv);
- ubi_dbg_dump_vol_info(vol);
+ ubi_err("bad attaching information, error %d", err);
+ ubi_dump_av(av);
+ ubi_dump_vol_info(vol);
return -EINVAL;
}
/**
- * check_scanning_info - check that scanning information.
+ * check_attaching_info - check the attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* Even though we protect on-flash data by CRC checksums, we still don't trust
- * the media. This function ensures that scanning information is consistent to
- * the information read from the volume table. Returns zero if the scanning
+ * the media. This function ensures that attaching information is consistent
+ * with the information read from the volume table. Returns zero if the attaching
* information is OK and %-EINVAL if it is not.
*/
-static int check_scanning_info(const struct ubi_device *ubi,
- struct ubi_scan_info *si)
+static int check_attaching_info(const struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
{
int err, i;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
struct ubi_volume *vol;
- if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
- ubi_err("scanning found %d volumes, maximum is %d + %d",
- si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err("found %d volumes while attaching, maximum is %d + %d",
+ ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
return -EINVAL;
}
- if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
- si->highest_vol_id < UBI_INTERNAL_VOL_START) {
- ubi_err("too large volume ID %d found by scanning",
- si->highest_vol_id);
+ if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err("too large volume ID %d found", ai->highest_vol_id);
return -EINVAL;
}
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
cond_resched();
- sv = ubi_scan_find_sv(si, i);
+ av = ubi_find_av(ai, i);
vol = ubi->volumes[i];
if (!vol) {
- if (sv)
- ubi_scan_rm_volume(si, sv);
+ if (av)
+ ubi_remove_av(ai, av);
continue;
}
if (vol->reserved_pebs == 0) {
ubi_assert(i < ubi->vtbl_slots);
- if (!sv)
+ if (!av)
continue;
/*
- * During scanning we found a volume which does not
+ * During attaching we found a volume which does not
* exist according to the information in the volume
* table. This must have happened due to an unclean
* reboot while the volume was being removed. Discard
* these eraseblocks.
*/
- ubi_msg("finish volume %d removal", sv->vol_id);
- ubi_scan_rm_volume(si, sv);
- } else if (sv) {
- err = check_sv(vol, sv);
+ ubi_msg("finish volume %d removal", av->vol_id);
+ ubi_remove_av(ai, av);
+ } else if (av) {
+ err = check_av(vol, av);
if (err)
return err;
}
@@ -774,16 +768,16 @@ static int check_scanning_info(const struct ubi_device *ubi,
/**
* ubi_read_volume_table - read the volume table.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function reads volume table, checks it, recover from errors if needed,
* or creates it if needed. Returns zero in case of success and a negative
* error code in case of failure.
*/
-int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int i, err;
- struct ubi_scan_volume *sv;
+ struct ubi_ainf_volume *av;
empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
@@ -798,8 +792,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (!sv) {
+ av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+ if (!av) {
/*
* No logical eraseblocks belonging to the layout volume were
* found. This could mean that the flash is just empty. In
@@ -808,8 +802,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* But if flash is not empty this must be a corruption or the
* MTD device just contains garbage.
*/
- if (si->is_empty) {
- ubi->vtbl = create_empty_lvol(ubi, si);
+ if (ai->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, ai);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
} else {
@@ -817,14 +811,14 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
return -EINVAL;
}
} else {
- if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
/* This must not happen with proper UBI images */
- dbg_err("too many LEBs (%d) in layout volume",
- sv->leb_count);
+ ubi_err("too many LEBs (%d) in layout volume",
+ av->leb_count);
return -EINVAL;
}
- ubi->vtbl = process_lvol(ubi, si, sv);
+ ubi->vtbl = process_lvol(ubi, ai, av);
if (IS_ERR(ubi->vtbl))
return PTR_ERR(ubi->vtbl);
}
@@ -835,15 +829,15 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
* The layout volume is OK, initialize the corresponding in-RAM data
* structures.
*/
- err = init_volumes(ubi, si, ubi->vtbl);
+ err = init_volumes(ubi, ai, ubi->vtbl);
if (err)
goto out_free;
/*
- * Make sure that the scanning information is consistent to the
+ * Make sure that the attaching information is consistent with the
* information stored in the volume table.
*/
- err = check_scanning_info(ubi, si);
+ err = check_attaching_info(ubi, ai);
if (err)
goto out_free;
@@ -858,21 +852,17 @@ out_free:
return err;
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_vtbl_check - check volume table.
+ * self_vtbl_check - check volume table.
* @ubi: UBI device description object
*/
-static void paranoid_vtbl_check(const struct ubi_device *ubi)
+static void self_vtbl_check(const struct ubi_device *ubi)
{
if (!ubi->dbg->chk_gen)
return;
if (vtbl_check(ubi, ubi->vtbl)) {
- ubi_err("paranoid check failed");
+ ubi_err("self-check failed");
BUG();
}
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7c1a9bf8ac86..b6be644e7b85 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -41,12 +41,6 @@
* physical eraseblocks with low erase counter to free physical eraseblocks
* with high erase counter.
*
- * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
- * an "optimal" physical eraseblock. For example, when it is known that the
- * physical eraseblock will be "put" soon because it contains short-term data,
- * the WL sub-system may pick a free physical eraseblock with low erase
- * counter, and so forth.
- *
* If the WL sub-system fails to erase a physical eraseblock, it marks it as
* bad.
*
@@ -70,8 +64,7 @@
* to the user; instead, we first want to let users fill them up with data;
*
* o there is a chance that the user will put the physical eraseblock very
- * soon, so it makes sense not to move it for some time, but wait; this is
- * especially important in case of "short term" physical eraseblocks.
+ * soon, so it makes sense not to move it for some time, but wait.
*
* Physical eraseblocks stay protected only for limited time. But the "time" is
* measured in erase cycles in this case. This is implemented with help of the
@@ -147,6 +140,8 @@
* @list: a link in the list of pending works
* @func: worker function
* @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
* @torture: if the physical eraseblock has to be tortured
*
* The @func pointer points to the worker function. If the @cancel argument is
@@ -159,21 +154,16 @@ struct ubi_work {
int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
/* The below fields are only relevant to erasure works */
struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
int torture;
};
-#ifdef CONFIG_MTD_UBI_DEBUG
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
- struct ubi_wl_entry *e,
- struct rb_root *root);
-static int paranoid_check_in_pq(const struct ubi_device *ubi,
- struct ubi_wl_entry *e);
-#else
-#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(ubi, e, root)
-#define paranoid_check_in_pq(ubi, e) 0
-#endif
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
@@ -383,19 +373,15 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
/**
* ubi_wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
- * @dtype: type of data which will be stored in this physical eraseblock
*
* This function returns a physical eraseblock in case of success and a
* negative error code in case of failure. Might sleep.
*/
-int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+int ubi_wl_get_peb(struct ubi_device *ubi)
{
int err;
struct ubi_wl_entry *e, *first, *last;
- ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
- dtype == UBI_UNKNOWN);
-
retry:
spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
@@ -413,45 +399,15 @@ retry:
goto retry;
}
- switch (dtype) {
- case UBI_LONGTERM:
- /*
- * For long term data we pick a physical eraseblock with high
- * erase counter. But the highest erase counter we can pick is
- * bounded by the the lowest erase counter plus
- * %WL_FREE_MAX_DIFF.
- */
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- break;
- case UBI_UNKNOWN:
- /*
- * For unknown data we pick a physical eraseblock with medium
- * erase counter. But we by no means can pick a physical
- * eraseblock with erase counter greater or equivalent than the
- * lowest erase counter plus %WL_FREE_MAX_DIFF/2.
- */
- first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
- u.rb);
- last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
-
- if (last->ec - first->ec < WL_FREE_MAX_DIFF)
- e = rb_entry(ubi->free.rb_node,
- struct ubi_wl_entry, u.rb);
- else
- e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
- break;
- case UBI_SHORTTERM:
- /*
- * For short term data we pick a physical eraseblock with the
- * lowest erase counter as we expect it will be erased soon.
- */
- e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
- break;
- default:
- BUG();
- }
+ first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF)
+ e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb);
+ else
+ e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
- paranoid_check_in_wl_tree(ubi, e, &ubi->free);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
/*
* Move the physical eraseblock to the protection queue where it will
@@ -462,8 +418,8 @@ retry:
prot_queue_add(ubi, e);
spin_unlock(&ubi->wl_lock);
- err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
- ubi->peb_size - ubi->vid_hdr_aloffset);
+ err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
return err;
@@ -488,7 +444,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
if (!e)
return -ENODEV;
- if (paranoid_check_in_pq(ubi, e))
+ if (self_check_in_pq(ubi, e))
return -ENODEV;
list_del(&e->u.list);
@@ -514,7 +470,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
- err = paranoid_check_ec(ubi, e->pnum, e->ec);
+ err = self_check_ec(ubi, e->pnum, e->ec);
if (err)
return -EINVAL;
@@ -627,13 +583,15 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
* @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a %-ENOMEM in case of
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int torture)
+ int vol_id, int lnum, int torture)
{
struct ubi_work *wl_wrk;
@@ -646,6 +604,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
wl_wrk->func = &erase_worker;
wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
schedule_ubi_work(ubi, wl_wrk);
@@ -714,7 +674,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
e1->ec, e2->ec);
goto out_cancel;
}
- paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
@@ -723,12 +683,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
+ self_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
+ self_check_in_wl_tree(ubi, e2, &ubi->free);
rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
@@ -846,7 +806,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e1, 0);
+ err = schedule_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e1);
if (e2)
@@ -861,7 +821,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
e2->pnum, vol_id, lnum);
- err = schedule_erase(ubi, e2, 0);
+ err = schedule_erase(ubi, e2, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -900,7 +860,7 @@ out_not_moved:
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
- err = schedule_erase(ubi, e2, torture);
+ err = schedule_erase(ubi, e2, vol_id, lnum, torture);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
@@ -1019,6 +979,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
{
struct ubi_wl_entry *e = wl_wrk->e;
int pnum = e->pnum, err, need;
+ int vol_id = wl_wrk->vol_id;
+ int lnum = wl_wrk->lnum;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1027,7 +989,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
return 0;
}
- dbg_wl("erase PEB %d EC %d", pnum, e->ec);
+ dbg_wl("erase PEB %d EC %d LEB %d:%d",
+ pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
@@ -1057,7 +1020,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, 0);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
if (err1) {
err = err1;
goto out_ro;
@@ -1125,6 +1088,8 @@ out_ro:
/**
* ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
*
@@ -1133,7 +1098,8 @@ out_ro:
* occurred to this @pnum and it has to be tested. This function returns zero
* in case of success, and a negative error code in case of failure.
*/
-int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture)
{
int err;
struct ubi_wl_entry *e;
@@ -1175,13 +1141,13 @@ retry:
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
} else if (in_wl_tree(e, &ubi->erroneous)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
+ self_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
ubi->erroneous_peb_count -= 1;
ubi_assert(ubi->erroneous_peb_count >= 0);
@@ -1199,7 +1165,7 @@ retry:
}
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e, torture);
+ err = schedule_erase(ubi, e, vol_id, lnum, torture);
if (err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->used);
@@ -1248,7 +1214,7 @@ retry:
}
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(ubi, e, &ubi->used);
+ self_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
@@ -1275,23 +1241,54 @@ retry:
/**
* ubi_wl_flush - flush all pending works.
* @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
*
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
*/
-int ubi_wl_flush(struct ubi_device *ubi)
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
- int err;
+ int err = 0;
+ int found = 1;
/*
* Erase while the pending works queue is not empty, but not more than
* the number of currently pending works.
*/
- dbg_wl("flush (%d pending works)", ubi->works_count);
- while (ubi->works_count) {
- err = do_work(ubi);
- if (err)
- return err;
+ dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+ vol_id, lnum, ubi->works_count);
+
+ while (found) {
+ struct ubi_work *wrk;
+ found = 0;
+
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry(wrk, &ubi->works, list) {
+ if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+ (lnum == UBI_ALL || wrk->lnum == lnum)) {
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ err = wrk->func(ubi, wrk, 0);
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
+ spin_lock(&ubi->wl_lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
}
/*
@@ -1301,18 +1298,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
- /*
- * And in case last was the WL worker and it canceled the LEB
- * movement, flush again.
- */
- while (ubi->works_count) {
- dbg_wl("flush more (%d pending works)", ubi->works_count);
- err = do_work(ubi);
- if (err)
- return err;
- }
-
- return 0;
+ return err;
}
/**
@@ -1421,26 +1407,26 @@ static void cancel_pending(struct ubi_device *ubi)
}
/**
- * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
- * @si: scanning information
+ * @ai: attaching information
*
* This function returns zero in case of success, and a negative error code in
* case of failure.
*/
-int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int err, i;
struct rb_node *rb1, *rb2;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *seb, *tmp;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp;
struct ubi_wl_entry *e;
ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
- ubi->max_ec = si->max_ec;
+ ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
@@ -1454,48 +1440,48 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
INIT_LIST_HEAD(&ubi->pq[i]);
ubi->pq_head = 0;
- list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, 0)) {
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
- list_for_each_entry(seb, &si->free, u.list) {
+ list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
wl_tree_add(e, &ubi->free);
ubi->lookuptbl[e->pnum] = e;
}
- ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
- ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
- e->pnum = seb->pnum;
- e->ec = seb->ec;
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
- if (!seb->scrub) {
+ if (!aeb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
wl_tree_add(e, &ubi->used);
@@ -1567,10 +1553,8 @@ void ubi_wl_close(struct ubi_device *ubi)
kfree(ubi->lookuptbl);
}
-#ifdef CONFIG_MTD_UBI_DEBUG
-
/**
- * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
* @ec: the erase counter to check
@@ -1579,7 +1563,7 @@ void ubi_wl_close(struct ubi_device *ubi)
* is equivalent to @ec, and a negative error code if not or if an error
* occurred.
*/
-static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
int err;
long long read_ec;
@@ -1601,9 +1585,9 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
read_ec = be64_to_cpu(ec_hdr->ec);
if (ec != read_ec) {
- ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_err("self-check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
- ubi_dbg_dump_stack();
+ dump_stack();
err = 1;
} else
err = 0;
@@ -1614,7 +1598,7 @@ out_free:
}
/**
- * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
* @root: the root of the tree
@@ -1622,9 +1606,8 @@ out_free:
* This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
* is not.
*/
-static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
- struct ubi_wl_entry *e,
- struct rb_root *root)
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root)
{
if (!ubi->dbg->chk_gen)
return 0;
@@ -1632,22 +1615,22 @@ static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
if (in_wl_tree(e, root))
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
+ ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
e->pnum, e->ec, root);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
/**
- * paranoid_check_in_pq - check if wear-leveling entry is in the protection
+ * self_check_in_pq - check if wear-leveling entry is in the protection
* queue.
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
*
* This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
*/
-static int paranoid_check_in_pq(const struct ubi_device *ubi,
- struct ubi_wl_entry *e)
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
@@ -1660,10 +1643,8 @@ static int paranoid_check_in_pq(const struct ubi_device *ubi,
if (p == e)
return 0;
- ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
+ ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
- ubi_dbg_dump_stack();
+ dump_stack();
return -EINVAL;
}
-
-#endif /* CONFIG_MTD_UBI_DEBUG */
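
The reworked ubi_wl_flush() above no longer drains the whole work queue; it walks
ubi->works and executes only the entries whose volume ID and LEB number match the
caller, with %UBI_ALL acting as a wildcard for either field. A minimal sketch of
that matching predicate, using the vol_id/lnum fields this patch adds to struct
ubi_work (the helper itself is hypothetical -- the driver open-codes the test
inside the loop):

static bool wl_work_matches(const struct ubi_work *wrk, int vol_id, int lnum)
{
	return (vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
	       (lnum == UBI_ALL || wrk->lnum == lnum);
}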
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3680aa251dea..2cf084eb9d52 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
#include "bonding.h"
#include "bond_alb.h"
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee8cf9e8a3b..2ee76993f052 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/pkt_sched.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
return next;
}
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;
- skb->queue_mapping = bond_queue_mapping(skb);
+ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+ sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+ skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -3226,6 +3227,12 @@ static int bond_master_netdev_event(unsigned long event,
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
+ case NETDEV_UNREGISTER:
+ bond_remove_proc_entry(event_bond);
+ break;
+ case NETDEV_REGISTER:
+ bond_create_proc_entry(event_bond);
+ break;
default:
break;
}
@@ -4171,7 +4178,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
/*
* Save the original txq to restore before passing to the driver
*/
- bond_queue_mapping(skb) = skb->queue_mapping;
+ qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
@@ -4410,8 +4417,6 @@ static void bond_uninit(struct net_device *bond_dev)
bond_work_cancel_all(bond);
- bond_remove_proc_entry(bond);
-
bond_debug_unregister(bond);
__hw_addr_flush(&bond->mc_list);
@@ -4813,7 +4818,6 @@ static int bond_init(struct net_device *bond_dev)
bond_set_lockdep_class(bond_dev);
- bond_create_proc_entry(bond);
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
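
The bond_main.c hunks above stop stashing the transmit queue chosen by the stack
in skb->cb, which qdisc layers are free to overwrite, and instead use a dedicated
bond_queue_mapping slot in qdisc_skb_cb (hence the new pkt_sched.h include). The
save/restore pairing, in sketch form rather than verbatim driver code:

	/* bond_select_queue(): remember the queue picked by the networking core */
	qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;

	/* bond_dev_queue_xmit(): put it back before the slave's ndo_start_xmit */
	skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;

The BUILD_BUG_ON() in the hunk only asserts that the two fields have the same
width, so the assignment cannot silently truncate.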
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index ad284baafe87..3cea38d37344 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -150,14 +150,25 @@ static void bond_info_show_master(struct seq_file *seq)
}
}
+static const char *bond_slave_link_status(s8 link)
+{
+ static const char * const status[] = {
+ [BOND_LINK_UP] = "up",
+ [BOND_LINK_FAIL] = "going down",
+ [BOND_LINK_DOWN] = "down",
+ [BOND_LINK_BACK] = "going back",
+ };
+
+ return status[link];
+}
+
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
struct bonding *bond = seq->private;
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
- seq_printf(seq, "MII Status: %s\n",
- (slave->link == BOND_LINK_UP) ? "up" : "down");
+ seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
if (slave->speed == SPEED_UNKNOWN)
seq_printf(seq, "Speed: %s\n", "Unknown");
else
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f045320..485bedb8278c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
}
}
- pr_info("%s: Unable to set %.*s as primary slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ strncpy(bond->params.primary, ifname, IFNAMSIZ);
+ bond->params.primary[IFNAMSIZ - 1] = 0;
+
+ pr_info("%s: Recording %s as primary, "
+ "but it has not been enslaved to %s yet.\n",
+ bond->dev->name, ifname, bond->dev->name);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 1520814c77c7..4a27adb7ae67 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -693,8 +693,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
*/
memcpy(rx_buf, (u8 *)piggy_desc,
CFHSI_DESC_SHORT_SZ);
- /* Mark no embedded frame here */
- piggy_desc->offset = 0;
if (desc_pld_len == -EPROTO)
goto out_of_sync;
}
@@ -737,6 +735,8 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Extract any payload in piggyback descriptor. */
if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
goto out_of_sync;
+ /* Mark no embedded frame after extracting it */
+ piggy_desc->offset = 0;
}
}
@@ -1178,6 +1178,7 @@ int cfhsi_probe(struct platform_device *pdev)
dev_err(&ndev->dev, "%s: Registration error: %d.\n",
__func__, res);
free_netdev(ndev);
+ return -ENODEV;
}
/* Add CAIF HSI device to list. */
spin_lock(&cfhsi_list_lock);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 536bda072a16..86cd532c78f9 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
priv->write_reg(priv, &priv->regs->control,
CONTROL_ENABLE_AR);
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
- CAN_CTRLMODE_LOOPBACK)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
/* loopback + silent mode : useful for hot self-test */
priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
*
* We iterate from priv->tx_echo to priv->tx_next and check if the
* packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
*/
static void c_can_do_tx(struct net_device *dev)
{
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
msg_obj_no = get_tx_echo_msg_obj(priv);
val = c_can_read_reg32(priv, &priv->regs->txrqst1);
- if (!(val & (1 << msg_obj_no))) {
+ if (!(val & (1 << (msg_obj_no - 1)))) {
can_get_echo_skb(dev,
msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
stats->tx_bytes += priv->read_reg(priv,
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
c_can_inval_msg_object(dev, 0, msg_obj_no);
+ } else {
+ break;
}
}
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct c_can_priv *priv = netdev_priv(dev);
- irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ irqstatus = priv->irqstatus;
if (!irqstatus)
goto end;
@@ -1028,12 +1030,11 @@ end:
static irqreturn_t c_can_isr(int irq, void *dev_id)
{
- u16 irqstatus;
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
- irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
- if (!irqstatus)
+ priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ if (!priv->irqstatus)
return IRQ_NONE;
/* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
goto exit_irq_fail;
}
+ napi_enable(&priv->napi);
+
/* start the c_can controller */
c_can_start(dev);
- napi_enable(&priv->napi);
netif_start_queue(dev);
return 0;
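
Two details in the c_can hunks are easy to miss: message objects are numbered from
1 while the TXRQST register bits are numbered from 0, which is why the pending test
becomes 1 << (msg_obj_no - 1), and the interrupt register is now read exactly once
in the hard IRQ handler and cached in priv->irqstatus so the NAPI poll routine acts
on the status that actually raised the interrupt. A hypothetical helper spelling
out the bit mapping (not part of the patch):

static bool c_can_tx_obj_pending(struct c_can_priv *priv, int msg_obj_no)
{
	u32 val = c_can_read_reg32(priv, &priv->regs->txrqst1);

	/* message object N is reported in bit N-1 of TXRQST */
	return val & (1 << (msg_obj_no - 1));
}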
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b7fbef3d09a..5f32d34af507 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -76,6 +76,7 @@ struct c_can_priv {
unsigned int tx_next;
unsigned int tx_echo;
void *priv; /* for board-specific data */
+ u16 irqstatus;
};
struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 53115eee8075..688371cda37a 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
struct cc770_platform_data *pdata = pdev->dev.platform_data;
priv->can.clock.freq = pdata->osc_freq;
- if (priv->cpu_interface | CPUIF_DSC)
+ if (priv->cpu_interface & CPUIF_DSC)
priv->can.clock.freq /= 2;
priv->clkout = pdata->cor;
priv->bus_config = pdata->bcr;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 38c0690df5c8..81d474102378 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -939,12 +939,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
return PTR_ERR(pinctrl);
if (pdev->dev.of_node) {
- const u32 *clock_freq_p;
+ const __be32 *clock_freq_p;
clock_freq_p = of_get_property(pdev->dev.of_node,
"clock-frequency", NULL);
if (clock_freq_p)
- clock_freq = *clock_freq_p;
+ clock_freq = be32_to_cpup(clock_freq_p);
}
if (!clock_freq) {
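
Device-tree property values are stored big-endian, so dereferencing the raw pointer
returned by of_get_property() only happens to give the right answer on big-endian
CPUs; the flexcan hunk adds the missing be32_to_cpup() conversion. Assuming nothing
else needs the raw pointer, the same lookup can also be written with the generic
helper, which does the conversion internally and leaves the output untouched when
the property is absent:

	u32 clock_freq = 0;

	of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clock_freq);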
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index b60d6c5f29a0..03df9a8f2bbf 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -75,7 +75,7 @@ config CAN_KVASER_PCI
tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
depends on PCI
---help---
- This driver is for the the PCIcanx and PCIcan cards (1, 2 or
+ This driver is for the PCIcanx and PCIcan cards (1, 2 or
4 channel) from Kvaser (http://www.kvaser.com).
config CAN_PLX_PCI
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index ec03b401620a..9c755db6b16d 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1131,7 +1131,6 @@ static irqreturn_t
e100rxtx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = netdev_priv(dev);
unsigned long irqbits;
/*
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a2747b..bab0158f1cc3 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
- for (i = 0; i < numdummies && !err; i++)
+ for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
+ cond_resched();
+ }
if (err < 0)
__rtnl_link_unregister(&dummy_link_ops);
rtnl_unlock();
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 1234a14b2b73..b15366635147 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2549,8 +2549,7 @@ typhoon_init(void)
static void __exit
typhoon_cleanup(void)
{
- if (typhoon_fw)
- release_firmware(typhoon_fw);
+ release_firmware(typhoon_fw);
pci_unregister_driver(&typhoon_driver);
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 9cc15701101b..1f78b63d5efe 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
if ((phy_data & BMSR_LSTATUS) == 0) {
/* link down */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
hw->hibernate = true;
if (atl1c_reset_mac(hw) != 0)
if (netif_msg_hw(adapter))
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 46b8b7d81633..d09c6b583d17 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dma_unmap_single(bp->sdev->dma_dev, mapping,
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
- skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
if (skb == NULL)
return -ENOMEM;
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
DMA_TO_DEVICE);
- bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ac7b74488531..1fa4927a45b1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -5372,7 +5372,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
int k, last;
if (skb == NULL) {
- j++;
+ j = NEXT_TX_BD(j);
continue;
}
@@ -5384,8 +5384,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf->skb = NULL;
last = tx_buf->nr_frags;
- j++;
- for (k = 0; k < last; k++, j++) {
+ j = NEXT_TX_BD(j);
+ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2f354c..7de824184979 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -747,21 +747,6 @@ struct bnx2x_fastpath {
#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
-#define BNX2X_IP_CSUM_ERR(cqe) \
- (!((cqe)->fast_path_cqe.status_flags & \
- ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
- ((cqe)->fast_path_cqe.type_error_flags & \
- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
- (!((cqe)->fast_path_cqe.status_flags & \
- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
- ((cqe)->fast_path_cqe.type_error_flags & \
- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
- (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
(((le16_to_cpu(flags) & \
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743bf4bde..8098eea9704d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -190,7 +190,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
return 0;
}
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp)
+{
+ /* Do nothing if no IP/L4 csum validation was done */
+
+ if (cqe->fast_path_cqe.status_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+ return;
+
+ /* If both IP/L4 validation were done, check if an error was found. */
+
+ if (cqe->fast_path_cqe.type_error_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+ fp->eth_q_stats.hw_csum_err++;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
@@ -806,13 +825,9 @@ reuse_rx:
skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM) {
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ bnx2x_csum_validate(skb, cqe, fp);
- if (likely(BNX2X_RX_CSUM_OK(cqe)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- fp->eth_q_stats.hw_csum_err++;
- }
skb_record_rx_queue(skb, fp->rx_queue);
@@ -2501,8 +2516,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* we split the first BD into headers and data BDs
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
*/
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata,
@@ -3156,7 +3169,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata->tx_bd_prod += nbd;
- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
netif_tx_stop_queue(txq);
/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3165,7 +3178,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
smp_mb();
fp->eth_q_stats.driver_xoff++;
- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
netif_tx_wake_queue(txq);
}
txdata->tx_pkt++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a3fb7215cd89..6e7d5c0843b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -40,6 +40,7 @@
#define I2C_BSC0 0
#define I2C_BSC1 1
#define I2C_WA_RETRY_CNT 3
+#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1)
#define MCPR_IMC_COMMAND_READ_OP 1
#define MCPR_IMC_COMMAND_WRITE_OP 2
@@ -7659,6 +7660,28 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
+static void bnx2x_warpcore_power_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ u32 pin_cfg;
+ struct bnx2x *bp = params->bp;
+
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+ PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+ power, pin_cfg);
+ /* Low ==> corresponding SFP+ module is powered
+ * high ==> the SFP+ module is powered down
+ */
+ bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
u16 addr, u8 byte_cnt,
@@ -7678,6 +7701,12 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* 4 byte aligned address */
addr32 = addr & (~0x3);
do {
+ if (cnt == I2C_WA_PWR_ITER) {
+ bnx2x_warpcore_power_module(params, phy, 0);
+ /* Note that 100us are not enough here */
+ usleep_range(1000,1000);
+ bnx2x_warpcore_power_module(params, phy, 1);
+ }
rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -8200,29 +8229,6 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
}
-static void bnx2x_warpcore_power_module(struct link_params *params,
- struct bnx2x_phy *phy,
- u8 power)
-{
- u32 pin_cfg;
- struct bnx2x *bp = params->bp;
-
- pin_cfg = (REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
- PORT_HW_CFG_E3_PWR_DIS_MASK) >>
- PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
- if (pin_cfg == PIN_CFG_NA)
- return;
- DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
- power, pin_cfg);
- /* Low ==> corresponding SFP+ module is powered
- * high ==> the SFP+ module is powered down
- */
- bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -9748,7 +9754,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
msleep(1);
- if (!(CHIP_IS_E1(bp)))
+ if (!(CHIP_IS_E1x(bp)))
port = BP_PATH(bp);
else
port = params->port;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c95e7b5e2b85..2c89d17cbb29 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -534,7 +534,8 @@ int cnic_unregister_driver(int ulp_type)
}
if (atomic_read(&ulp_ops->ref_count) != 0)
- netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+ pr_warn("%s: Failed waiting for ref count to go to zero\n",
+ __func__);
return 0;
out_unlock:
@@ -1053,12 +1054,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo = &udev->cnic_uinfo;
- uinfo->mem[0].addr = dev->netdev->base_addr;
+ uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
uinfo->mem[0].internal_addr = dev->regview;
- uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+ TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1070,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d55df3290174..e47ff8be1d7b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
}
- if (tg3_flag(tp, 5755_PLUS))
+ if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -15883,8 +15884,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
if (dev) {
struct tg3 *tp = netdev_priv(dev);
- if (tp->fw)
- release_firmware(tp->fw);
+ release_firmware(tp->fw);
tg3_reset_task_cancel(tp);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 25c4e7f2a099..67cd2ed0306a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3520,9 +3520,7 @@ static void __exit
bnad_module_exit(void)
{
pci_unregister_driver(&bnad_pci_driver);
-
- if (bfi_fw)
- release_firmware(bfi_fw);
+ release_firmware(bfi_fw);
}
module_init(bnad_module_init);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8d06ea381741..921c2082af4c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -122,15 +122,15 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
goto done;
if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
- dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
- "permitted to execute this cmd (opcode %d)\n",
- opcode);
+ dev_warn(&adapter->pdev->dev,
+ "opcode %d-%d is not permitted\n",
+ opcode, subsystem);
} else {
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
CQE_STATUS_EXTD_MASK;
- dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
- "status %d, extd-status %d\n",
- opcode, compl_status, extd_status);
+ dev_err(&adapter->pdev->dev,
+ "opcode %d-%d failed:status %d-%d\n",
+ opcode, subsystem, compl_status, extd_status);
}
}
done:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 9625bf420c16..b3f3fc3d1323 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1566,7 +1566,7 @@ struct be_hw_stats_v1 {
u32 rsvd0[BE_TXP_SW_SZ];
struct be_erx_stats_v1 erx;
struct be_pmem_stats pmem;
- u32 rsvd1[3];
+ u32 rsvd1[18];
};
struct be_cmd_req_get_stats_v1 {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd308d78a..501dfa9c88ec 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
if (copied) {
+ int gso_segs = skb_shinfo(skb)->gso_segs;
+
/* record the sent skb in the sent_skb table */
BUG_ON(txo->sent_skb_list[start]);
txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(txo, wrb_cnt, copied,
- skb_shinfo(skb)->gso_segs, stopped);
+ be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
} else {
txq->head = start;
dev_kfree_skb_any(skb);
@@ -3236,7 +3237,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- netif_set_gso_max_size(netdev, 65535);
+ netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
netdev->netdev_ops = &be_netdev_ops;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 8f2cf8c09e2d..ff7f4c5115a1 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -207,7 +207,8 @@ struct fec_enet_private {
struct net_device *netdev;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -1065,7 +1066,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000);
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
@@ -1618,12 +1619,20 @@ fec_probe(struct platform_device *pdev)
goto failed_pin;
}
- fep->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fep->clk)) {
- ret = PTR_ERR(fep->clk);
+ fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fep->clk_ipg)) {
+ ret = PTR_ERR(fep->clk_ipg);
goto failed_clk;
}
- clk_prepare_enable(fep->clk);
+
+ fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(fep->clk_ahb)) {
+ ret = PTR_ERR(fep->clk_ahb);
+ goto failed_clk;
+ }
+
+ clk_prepare_enable(fep->clk_ahb);
+ clk_prepare_enable(fep->clk_ipg);
ret = fec_enet_init(ndev);
if (ret)
@@ -1646,8 +1655,8 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
- clk_disable_unprepare(fep->clk);
- clk_put(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1680,8 +1689,8 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
- clk_disable_unprepare(fep->clk);
- clk_put(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);
free_netdev(ndev);
@@ -1705,7 +1714,8 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable_unprepare(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
return 0;
}
@@ -1716,7 +1726,8 @@ fec_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- clk_prepare_enable(fep->clk);
+ clk_prepare_enable(fep->clk_ahb);
+ clk_prepare_enable(fep->clk_ipg);
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
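
The fec.c changes split the single clock into separate "ipg" and "ahb" clocks and
switch to devm_clk_get(), so the error and remove paths only have to disable and
unprepare the clocks; no explicit clk_put() is needed. The resulting probe-time
pattern, sketched with error handling reduced to plain returns rather than the
driver's goto labels:

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg))
		return PTR_ERR(fep->clk_ipg);

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb))
		return PTR_ERR(fep->clk_ahb);

	clk_prepare_enable(fep->clk_ahb);
	clk_prepare_enable(fep->clk_ipg);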
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 97f947b3d94a..2933d08b036e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
length = status & BCOM_FEC_RX_BD_LEN_MASK;
skb_put(rskb, length - 4); /* length without CRC32 */
rskb->protocol = eth_type_trans(rskb, dev);
- if (!skb_defer_rx_timestamp(skb))
+ if (!skb_defer_rx_timestamp(rskb))
netif_rx(rskb);
spin_lock(&priv->lock);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 1adb0245b9dd..ab1d80ff0791 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1082,7 +1082,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (dev->features & NETIF_F_IP_CSUM ||
priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->hard_header_len += GMAC_FCB_LEN;
+ dev->needed_headroom = GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
if (priv->num_grps > 1) {
@@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
- if (likely(priv->tx_queue[i]->txcoalescing)) {
- gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
gfar_write(baddr + i, priv->tx_queue[i]->txic);
- }
}
baddr = &regs->rxic0;
for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
- if (likely(priv->rx_queue[i]->rxcoalescing)) {
- gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
- }
}
}
}
@@ -2065,10 +2063,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /* Steal sock reference for processing TX time stamps */
- swap(skb_new->sk, skb->sk);
- swap(skb_new->destructor, skb->destructor);
- kfree_skb(skb);
+ if (skb->sk)
+ skb_set_owner_w(skb_new, skb->sk);
+ consume_skb(skb);
skb = skb_new;
}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 79b07ec6726f..0cafe4fe9406 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -122,8 +122,10 @@ config IGB_DCA
config IGB_PTP
bool "PTP Hardware Clock (PHC)"
- default y
- depends on IGB && PTP_1588_CLOCK
+ default n
+ depends on IGB && EXPERIMENTAL
+ select PPS
+ select PTP_1588_CLOCK
---help---
Say Y here if you want to use PTP Hardware Clock (PHC) in the
driver. Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@ config IXGBE_DCB
config IXGBE_PTP
bool "PTP Clock Support"
default n
- depends on IXGBE && PTP_1588_CLOCK
+ depends on IXGBE && EXPERIMENTAL
+ select PPS
+ select PTP_1588_CLOCK
---help---
Say Y here if you want support for 1588 Timestamping with a
PHC device, using the PTP 1588 Clock support. This is
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 95731c841044..7483ca0a6282 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
spin_lock_irqsave(&adapter->stats_lock,
irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
+ length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock,
irq_flags);
length--;
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 36db4df09aed..1f063dcd8f85 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
+ /* SYNCH bit and IV bit are sticky */
+ udelay(10);
+ rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351a4097b2ba..76edbc1be33b 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -103,6 +103,7 @@
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075df7a4..905e2147d918 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
- if (hw->phy.ops.check_reset_block(hw)) {
+ if (hw->phy.ops.check_reset_block &&
+ hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
* PHY loopback cannot be performed if SoL/IDER
* sessions are active
*/
- if (hw->phy.ops.check_reset_block(hw)) {
+ if (hw->phy.ops.check_reset_block &&
+ hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index bbf70ba367da..e3a7b07df629 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -165,14 +165,14 @@
#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
/* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK 0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
#define I217_CGFREG PHY_REG(772, 29)
-#define I217_CGFREG_MASK 0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
-#define I217_MEMPWR_MASK 0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
/* Strapping Option Register - RO */
#define E1000_STRAP 0x0000C
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
**/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
- u16 phy_reg;
- u32 phy_id;
+ u16 phy_reg = 0;
+ u32 phy_id = 0;
+ s32 ret_val;
+ u16 retry_count;
+
+ for (retry_count = 0; retry_count < 2; retry_count++) {
+ ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF))
+ continue;
+ phy_id = (u32)(phy_reg << 16);
- e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
- phy_id = (u32)(phy_reg << 16);
- e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
- phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF)) {
+ phy_id = 0;
+ continue;
+ }
+ phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ break;
+ }
if (hw->phy.id) {
if (hw->phy.id == phy_id)
return true;
- } else {
- if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
- hw->phy.id = phy_id;
+ } else if (phy_id) {
+ hw->phy.id = phy_id;
+ hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
return true;
}
- return false;
+ /*
+ * In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000e_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+
+ return !ret_val;
}
/**
@@ -4089,12 +4111,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* power good.
*/
e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
- phy_reg |= I217_SxCTRL_MASK;
+ phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
/* Disable the SMB release on LCD reset. */
e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
- phy_reg &= ~I217_MEMPWR;
+ phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
}
@@ -4103,7 +4125,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* Support
*/
e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
- phy_reg |= I217_CGFREG_MASK;
+ phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
release:
@@ -4176,7 +4198,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
if (ret_val)
goto release;
- phy_reg |= I217_MEMPWR_MASK;
+ phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
/* Disable Proxy */
@@ -4186,7 +4208,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
if (ret_val)
goto release;
- phy_reg &= ~I217_CGFREG_MASK;
+ phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
release:
if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3ab52e..a13439928488 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
- if (hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
return 0;
/*
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435b00dc..623e30b9964d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
- __le16 csum, struct sk_buff *skb)
+ struct sk_buff *skb)
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
if (status & E1000_RXD_STAT_IXSM)
return;
- /* TCP/UDP checksum error bit is set */
- if (errors & E1000_RXD_ERR_TCPE) {
+ /* TCP/UDP checksum error bit or IP checksum error bit is set */
+ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
/* let the stack verify checksum errors */
adapter->hw_csum_err++;
return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
return;
/* It must be a TCP or UDP packet with a valid checksum */
- if (status & E1000_RXD_STAT_TCPCS) {
- /* TCP checksum is good */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- /*
- * IP fragment with UDP payload
- * Hardware complements the payload checksum, so we undo it
- * and then put the value in host order for further stack use.
- */
- __sum16 sum = (__force __sum16)swab16((__force u16)csum);
- skb->csum = csum_unfold(~sum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
/* Receive Checksum Offload */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1341,8 +1328,7 @@ copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
}
}
- /* Receive Checksum Offload XXX recompute due to CRC strip? */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
- if (adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (adapter->netdev->features & NETIF_F_RXCSUM)
rxcsum |= E1000_RXCSUM_TUOFL;
-
- /*
- * IPv4 payload checksum for UDP fragments must be
- * used in conjunction with packet-split.
- */
- if (adapter->rx_ps_pages)
- rxcsum |= E1000_RXCSUM_IPPCSE;
- } else {
+ else
rxcsum &= ~E1000_RXCSUM_TUOFL;
- /* no need to clear IPPCSE as it defaults to 0 */
- }
ew32(RXCSUM, rxcsum);
if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
- }
-
- /*
- * IP payload checksum (enabled with jumbos/packet-split when
- * Rx checksum is enabled) and generation of RSS hash is
- * mutually exclusive in the hardware.
- */
- if ((netdev->features & NETIF_F_RXCSUM) &&
- (netdev->features & NETIF_F_RXHASH)) {
- e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
- return -EINVAL;
- }
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
}
/* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
NETIF_F_RXALL)))
return 0;
- /*
- * IP payload checksum (enabled with jumbos/packet-split when Rx
- * checksum is enabled) and generation of RSS hash is mutually
- * exclusive in the hardware.
- */
- if (adapter->rx_ps_pages &&
- (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
- e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
- return -EINVAL;
- }
-
if (changed & NETIF_F_RXFCS) {
if (features & NETIF_F_RXFCS) {
adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -6237,7 +6190,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.phy.ms_type = e1000_ms_hw_default;
}
- if (hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
e_info("PHY reset is blocked due to SOL/IDER session.\n");
/* Set initial default active device features */
@@ -6404,7 +6357,7 @@ err_register:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
- if (!hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d013bc3c..b860d4f7ea2a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
s32 ret_val;
u32 ctrl;
- ret_val = phy->ops.check_reset_block(hw);
- if (ret_val)
- return 0;
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return 0;
+ }
ret_val = phy->ops.acquire(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index e65083958421..5e84eaac48c1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -206,8 +206,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
break;
case e1000_i350:
- case e1000_i210:
- case e1000_i211:
mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8ce67064b9c5..90eef07943f4 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
- (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
- (ec->rx_coalesce_usecs == 2))
- return -EINVAL;
-
- /* convert to rate of irq's per second */
- if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+ if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+ (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+ adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ adapter->requested_itr = 1000000000 /
+ (adapter->current_itr * 256);
+ } else if ((ec->rx_coalesce_usecs == 3) ||
+ (ec->rx_coalesce_usecs == 2)) {
adapter->current_itr = IGBVF_START_ITR;
adapter->requested_itr = ec->rx_coalesce_usecs;
- } else {
- adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ } else if (ec->rx_coalesce_usecs == 0) {
+ /*
+ * The user's desire is to turn off interrupt throttling
+ * altogether, but due to HW limitations, we can't do that.
+ * Instead we set a very small value in EITR, which would
+ * allow ~967k interrupts per second, but allow the adapter's
+ * internal clocking to still function properly.
+ */
+ adapter->current_itr = 4;
adapter->requested_itr = 1000000000 /
(adapter->current_itr * 256);
- }
+ } else
+ return -EINVAL;
writel(adapter->current_itr,
hw->hw_addr + adapter->rx_ring->itr_register);
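For reference, the arithmetic in the in-range branch above works out as follows; this is a minimal sketch (the helper name is illustrative, not part of the patch), assuming the ~256 ns EITR granularity implied by the constants. For ec->rx_coalesce_usecs = 100: current_itr = 100 << 2 = 400 and requested_itr = 1000000000 / (400 * 256) = 9765 interrupts per second.

static unsigned int igbvf_usecs_to_ints_per_sec(unsigned int usecs)
{
	/* same conversion as the in-range branch above; valid for usecs >= 1 */
	unsigned int current_itr = usecs << 2;		/* EITR units of ~256 ns */

	return 1000000000U / (current_itr * 256U);	/* approx. interrupts/sec */
}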
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 3ef3c5284e52..7af291e236bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -196,7 +196,7 @@ enum ixgbe_ring_state_t {
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
__IXGBE_RX_CSUM_UDP_ZERO_ERR,
- __IXGBE_RX_FCOE_BUFSZ,
+ __IXGBE_RX_FCOE,
};
#define check_for_tx_hang(ring) \
@@ -290,7 +290,7 @@ struct ixgbe_ring_feature {
#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
- return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+ return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
}
#else
#define ixgbe_rx_pg_order(_ring) 0
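A sizing note on the rename: with PAGE_SIZE < 8192, a ring flagged __IXGBE_RX_FCOE gets order-1 (two-page, 8 KiB on 4 KiB pages) receive allocations so its buffers can hold FCoE's roughly 2.2 KiB maximum frame, and the __GFP_COMP added to alloc_pages() in ixgbe_main.c below keeps that multi-page allocation a proper compound page. A minimal sketch of the allocation size under those assumptions (the helper is illustrative, not part of the patch):

static unsigned long ixgbe_rx_alloc_bytes(bool ring_is_fcoe)
{
	unsigned int order = ring_is_fcoe ? 1 : 0;	/* mirrors ixgbe_rx_pg_order() */

	return PAGE_SIZE << order;			/* 8 KiB for FCoE rings on 4 KiB pages */
}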
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index af1a5314b494..c377706e81a8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -634,7 +634,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
f = &adapter->ring_feature[RING_F_FCOE];
if ((rxr_idx >= f->mask) &&
(rxr_idx < f->mask + f->indices))
- set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+ set_bit(__IXGBE_RX_FCOE, &ring->state);
}
#endif /* IXGBE_FCOE */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457ea23a..e242104ab471 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1058,17 +1058,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
#ifdef IXGBE_FCOE
/**
* ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
* @rx_desc: advanced rx descriptor
*
* Returns : true if it is FCoE pkt
*/
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
- return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1148,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
/* alloc new page for storage */
if (likely(!page)) {
- page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+ page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
ixgbe_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct net_device *dev = rx_ring->netdev;
+
ixgbe_update_rsc_stats(rx_ring, skb);
ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
#endif
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, vid);
}
skb_record_rx_queue(skb, rx_ring->queue_index);
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ skb->protocol = eth_type_trans(skb, dev);
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1546,6 +1549,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
skb->truesize -= ixgbe_rx_bufsz(rx_ring);
}
+#ifdef IXGBE_FCOE
+ /* do not attempt to pad FCoE Frames as this will disrupt DDP */
+ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+ return false;
+
+#endif
/* if skb_pad returns an error the skb was freed */
if (unlikely(skb->len < 60)) {
int pad_len = 60 - skb->len;
@@ -1772,7 +1781,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
- if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
if (!ddp_bytes) {
dev_kfree_skb_any(skb);
@@ -3607,10 +3616,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
-
- /* Enable VLAN tag insert/strip */
- adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
-
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
#ifdef IXGBE_FCOE
@@ -6642,6 +6647,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ e_err(drv, "Enable failed, SR-IOV enabled\n");
+ return -EINVAL;
+ }
+
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB &&
@@ -6701,11 +6711,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- features &= ~NETIF_F_HW_VLAN_RX;
-#endif
-
/* return error if RXHASH is being enabled when RSS is not supported */
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6723,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
features &= ~NETIF_F_LRO;
-
return features;
}
@@ -6766,6 +6770,11 @@ static int ixgbe_set_features(struct net_device *netdev,
need_reset = true;
}
+ if (features & NETIF_F_HW_VLAN_RX)
+ ixgbe_vlan_strip_enable(adapter);
+ else
+ ixgbe_vlan_strip_disable(adapter);
+
if (changed & NETIF_F_RXALL)
need_reset = true;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ddc6a4d19302..dcebd128becf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -708,6 +708,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 incval = 0;
+ u32 timinca = 0;
u32 shift = 0;
u32 cycle_speed;
unsigned long flags;
@@ -730,8 +731,16 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
break;
}
- /* Bail if the cycle speed didn't change */
- if (adapter->cycle_speed == cycle_speed)
+ /*
+ * grab the current TIMINCA value from the register so that it can be
+ * double checked. If the register value has been cleared, it must be
+ * reset to the correct value for generating a cyclecounter. If
+ * TIMINCA is zero, the SYSTIME registers do not increment at all.
+ */
+ timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
+
+ /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
+ if (adapter->cycle_speed == cycle_speed && timinca)
return;
/* disable the SDP clock out */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f69ec4288b10..41e32257a4e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -201,6 +201,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return true;
+
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -969,8 +972,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
tx_ring = &(adapter->tx_ring[r_idx]);
- tx_ring->total_bytes = 0;
- tx_ring->total_packets = 0;
ixgbevf_clean_tx_irq(adapter, tx_ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1);
@@ -994,16 +995,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbevf_ring *rx_ring;
int r_idx;
- int i;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- rx_ring->total_bytes = 0;
- rx_ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
if (!q_vector->rxr_count)
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c8950da60e6b..f0f06b2bc28b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -57,6 +57,7 @@
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <linux/slab.h>
+#include <linux/clk.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
@@ -289,10 +290,10 @@ struct mv643xx_eth_shared_private {
/*
* Hardware-specific parameters.
*/
- unsigned int t_clk;
int extended_rx_coal_limit;
int tx_bw_control;
int tx_csum_limit;
+
};
#define TX_BW_CONTROL_ABSENT 0
@@ -431,6 +432,14 @@ struct mv643xx_eth_private {
int tx_desc_sram_size;
int txq_count;
struct tx_queue txq[8];
+
+ /*
+ * Hardware-specific parameters.
+ */
+#if defined(CONFIG_HAVE_CLK)
+ struct clk *clk;
+#endif
+ unsigned int t_clk;
};
@@ -1010,7 +1019,7 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
int mtu;
int bucket_size;
- token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+ token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
if (token_rate > 1023)
token_rate = 1023;
@@ -1042,7 +1051,7 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
int token_rate;
int bucket_size;
- token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+ token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
if (token_rate > 1023)
token_rate = 1023;
@@ -1309,7 +1318,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
temp = (val & 0x003fff00) >> 8;
temp *= 64000000;
- do_div(temp, mp->shared->t_clk);
+ do_div(temp, mp->t_clk);
return (unsigned int)temp;
}
@@ -1319,7 +1328,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
u64 temp;
u32 val;
- temp = (u64)usec * mp->shared->t_clk;
+ temp = (u64)usec * mp->t_clk;
temp += 31999999;
do_div(temp, 64000000);
@@ -1345,7 +1354,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
temp *= 64000000;
- do_div(temp, mp->shared->t_clk);
+ do_div(temp, mp->t_clk);
return (unsigned int)temp;
}
@@ -1354,7 +1363,7 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
u64 temp;
- temp = (u64)usec * mp->shared->t_clk;
+ temp = (u64)usec * mp->t_clk;
temp += 31999999;
do_div(temp, 64000000);
@@ -2663,10 +2672,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
if (dram)
mv643xx_eth_conf_mbus_windows(msp, dram);
- /*
- * Detect hardware parameters.
- */
- msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
pd->tx_csum_limit : 9 * 1024;
infer_hw_params(msp);
@@ -2891,6 +2896,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->dev = dev;
+ /*
+ * Start with a default rate, and if there is a clock, allow
+ * it to override the default.
+ */
+ mp->t_clk = 133000000;
+#if defined(CONFIG_HAVE_CLK)
+ mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
+ if (!IS_ERR(mp->clk)) {
+ clk_prepare_enable(mp->clk);
+ mp->t_clk = clk_get_rate(mp->clk);
+ }
+#endif
set_params(mp, pd);
netif_set_real_num_tx_queues(dev, mp->txq_count);
netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2979,6 +2996,14 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
if (mp->phy != NULL)
phy_detach(mp->phy);
cancel_work_sync(&mp->tx_timeout_task);
+
+#if defined(CONFIG_HAVE_CLK)
+ if (!IS_ERR(mp->clk)) {
+ clk_disable_unprepare(mp->clk);
+ clk_put(mp->clk);
+ }
+#endif
+
free_netdev(mp->dev);
platform_set_drvdata(pdev, NULL);
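For reference, the per-port t_clk set up above (the clock's rate when one is provided, 133 MHz otherwise) is the value the coalescing conversions divide by. A minimal sketch of that conversion, mirroring set_rx_coal()/set_tx_coal() in this driver (the helper name is illustrative, not part of the patch):

static u32 mv643xx_usec_to_coal_ticks(unsigned int usec, unsigned int t_clk)
{
	u64 temp = (u64)usec * t_clk;

	temp += 31999999;		/* round to the nearest 64 MHz-scaled unit */
	do_div(temp, 64000000);
	return (u32)temp;		/* e.g. 250 us at 133 MHz -> 520 */
}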
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f2ab92..28a54451a3e5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
struct sky2_port *sky2 = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
- if (changed & NETIF_F_RXCSUM) {
- bool on = features & NETIF_F_RXCSUM;
- sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
- on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ if ((changed & NETIF_F_RXCSUM) &&
+ !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+ sky2_write32(sky2->hw,
+ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ (features & NETIF_F_RXCSUM)
+ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
if (changed & NETIF_F_RXHASH)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1bcead1fa2f6..842c8ce9494e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = NULL
+ .wrapper = mlx4_QUERY_FW_wrapper
},
{
.opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = NULL
+ .wrapper = mlx4_QUERY_DEV_CAP_wrapper
},
{
.opcode = MLX4_CMD_QUERY_FUNC_CAP,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 988b2424e1c6..69ba57270481 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
struct mlx4_en_priv *priv;
- if (!mdev->pndev[port])
- return;
-
- priv = netdev_priv(mdev->pndev[port]);
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
case MLX4_DEV_EVENT_PORT_DOWN:
+ if (!mdev->pndev[port])
+ return;
+ priv = netdev_priv(mdev->pndev[port]);
/* To prevent races, we poll the link state in a separate
task rather than changing it here */
priv->link_state = event;
@@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
break;
default:
- mlx4_warn(mdev, "Unhandled event: %d\n", event);
+ if (port < 1 || port > dev->caps.num_ports ||
+ !mdev->pndev[port])
+ return;
+ mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 926d8aac941c..073b85b45fc5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -929,15 +929,20 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
if (priv->rx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
+
+ if (priv->base_tx_qpn) {
+ mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+ priv->base_tx_qpn = 0;
+ }
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
- int base_tx_qpn, err;
+ int err;
- err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+ err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
if (err) {
en_err(priv, "failed reserving range for TX rings\n");
return err;
@@ -949,7 +954,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX))
goto err;
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
prof->tx_ring_size, TXBB_SIZE))
goto err;
}
@@ -969,7 +974,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
err:
en_err(priv, "Failed to allocate NIC resources\n");
- mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
return -ENOMEM;
}
@@ -1204,9 +1208,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
/* Configure port */
+ mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
- MLX4_EN_MIN_MTU,
- 0, 0, 0, 0);
+ priv->rx_skb_size + ETH_FCS_LEN,
+ prof->tx_pause, prof->tx_ppp,
+ prof->rx_pause, prof->rx_ppp);
if (err) {
en_err(priv, "Failed setting port general configurations "
"for port %d, with error %d\n", priv->port, err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3b6f8efbf141..bce98d9c0039 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
- if (flr_slave > dev->num_slaves) {
+ if (flr_slave >= dev->num_slaves) {
mlx4_warn(dev,
"Got FLR for unknown function: %d\n",
flr_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 68f5cd6cb3c7..9c83bb8151ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -412,7 +412,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
- MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
- MLX4_CMD_TIME_CLASS_B,
- !mlx4_is_slave(dev));
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -669,6 +668,28 @@ out:
return err;
}
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+ u8 field;
+
+ err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ /* For guests, report Blueflame disabled */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
+ field &= 0x7f;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+
+ return 0;
+}
+
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +881,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ if (mlx4_is_slave(dev))
+ goto out;
+
MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
dev->caps.function = lg;
@@ -927,6 +951,27 @@ out:
return err;
}
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u8 *outbuf;
+ int err;
+
+ outbuf = outbox->buf;
+ err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ /* for slaves, zero out everything except FW version */
+ outbuf[0] = outbuf[1] = 0;
+ memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+ return 0;
+}
+
static void get_board_id(void *vsd, char *board_id)
{
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2e024a68fa81..a0313de122de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -142,12 +142,6 @@ struct mlx4_port_config {
struct pci_dev *pdev;
};
-static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
-{
- return dev->caps.reserved_eqs +
- MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
-}
-
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
dev->caps.num_ports = dev_cap->num_ports;
+ dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i];
dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
@@ -435,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
memset(&dev_cap, 0, sizeof(dev_cap));
+ dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
return err;
}
+ err = mlx4_QUERY_FW(dev);
+ if (err)
+ mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+
page_size = ~dev->caps.page_size_cap + 1;
mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
if (page_size > PAGE_SIZE) {
@@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.num_mgms = 0;
dev->caps.num_amgms = 0;
- for (i = 1; i <= dev->caps.num_ports; ++i)
- dev->caps.port_mask[i] = dev->caps.port_type[i];
-
if (dev->caps.num_ports > MLX4_MAX_PORTS) {
mlx4_err(dev, "HCA has %d ports, but we only support %d, "
"aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
+
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
-#if 0
- mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
- mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
- dev->caps.num_uars, dev->caps.reserved_uars,
- dev->caps.uar_page_size * dev->caps.num_uars,
- pci_resource_len(dev->pdev, 2));
- mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
- dev->caps.reserved_eqs);
- mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
- dev->caps.num_pds, dev->caps.reserved_pds,
- dev->caps.slave_pd_shift, dev->caps.pd_base);
-#endif
return 0;
}
@@ -810,9 +798,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
- num_eqs = (mlx4_is_master(dev)) ?
- roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
- dev->caps.num_eqs;
+ num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +861,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
}
- num_eqs = (mlx4_is_master(dev)) ?
- roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
- dev->caps.num_eqs;
+ num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
@@ -1989,6 +1975,8 @@ slave_start:
if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
+ dev->caps.num_comp_vectors = 1;
+ dev->caps.comp_pool = 0;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 86b6e5a2fabf..e5d20220762c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev);
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
enum mlx4_res_tracker_free_type type);
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6ae350921b1a..225c20d47900 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -495,6 +495,7 @@ struct mlx4_en_priv {
int vids[128];
bool wol;
struct device *ddev;
+ int base_tx_qpn;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 1fe2c7a8b40c..a8fb52992c64 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
if (slave != dev->caps.function)
memset(inbox->buf, 0, 256);
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
- *(u8 *) inbox->buf = !!reset_qkey_viols << 6;
+ *(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
((__be32 *) inbox->buf)[2] = agg_cap_mask;
} else {
- ((u8 *) inbox->buf)[3] = !!reset_qkey_viols;
+ ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
((__be32 *) inbox->buf)[1] = agg_cap_mask;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 06e5adeb76f7..b83bc928d52a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
- profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+ profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ?
+ dev->phys_caps.num_phys_eqs :
+ min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->log_num_cqs = profile[i].log_num;
break;
case MLX4_RES_EQ:
- dev->caps.num_eqs = profile[i].num;
+ dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
+ MAX_MSIX));
init_hca->eqc_base = profile[i].start;
- init_hca->log_num_eqs = profile[i].log_num;
+ init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
break;
case MLX4_RES_DMPT:
dev->caps.num_mpts = profile[i].num;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666fcffd7..083d6715335c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
/* Update stats */
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
-
- /* Free buffer */
- dev_kfree_skb_irq(skb);
}
+ dev_kfree_skb_irq(skb);
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
}
- if (netif_queue_stopped(ndev))
- netif_wake_queue(ndev);
+ if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ }
}
static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
.ndo_do_ioctl = lpc_eth_ioctl,
.ndo_set_mac_address = lpc_set_mac_address,
+ .ndo_change_mtu = eth_change_mtu,
};
static int lpc_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 0d725dc91bcb..8694124ef77d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1260,8 +1260,7 @@ next:
void
netxen_release_firmware(struct netxen_adapter *adapter)
{
- if (adapter->fw)
- release_firmware(adapter->fw);
+ release_firmware(adapter->fw);
adapter->fw = NULL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index d32cf0ddf1b9..799fd40ed03a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1321,8 +1321,7 @@ next:
void
qlcnic_release_firmware(struct qlcnic_adapter *adapter)
{
- if (adapter->fw)
- release_firmware(adapter->fw);
+ release_firmware(adapter->fw);
adapter->fw = NULL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 46e77a2c5121..ad98f4d7919d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pfn = pci_info[i].id;
- if (pfn > QLCNIC_MAX_PCI_FUNC) {
+ if (pfn >= QLCNIC_MAX_PCI_FUNC) {
ret = QL_STATUS_INVALID_PARAM;
goto err_eswitch;
}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4de73643fec6..d1827e887f4e 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1096,20 +1096,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
- goto err_out;
+ goto err_out_disable_dev;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
- goto err_out;
+ goto err_out_disable_dev;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
- goto err_out;
+ goto err_out_disable_dev;
}
pci_set_master(pdev);
@@ -1117,7 +1117,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
err = -ENOMEM;
- goto err_out;
+ goto err_out_disable_dev;
}
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@ err_out_mdio_irq:
err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
+ netif_napi_del(&lp->napi);
+ pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_free_dev:
free_netdev(dev);
+err_out_disable_dev:
+ pci_disable_device(pdev);
err_out:
return err;
}
@@ -1251,6 +1255,9 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
mdiobus_unregister(lp->mii_bus);
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
+ netif_napi_del(&lp->napi);
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, lp->base);
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5eef290997f9..995d0cfc4c06 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
@@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp)
cpw8(Config5, cpr8(Config5) & PMEStatus);
- cpw32_f(HiTxRingAddr, 0);
- cpw32_f(HiTxRingAddr + 4, 0);
-
- ring_dma = cp->ring_dma;
- cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
- ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
- cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
cpw16(MultiIntr, 0);
cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
static void eeprom_cmd_end(void __iomem *ee_addr)
{
- writeb (~EE_CS, ee_addr);
+ writeb(0, ee_addr);
eeprom_delay ();
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 03df076ed596..1d83565cc6af 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
}
/* Terminate the EEPROM access. */
- RTL_W8 (Cfg9346, ~EE_CS);
+ RTL_W8(Cfg9346, 0);
eeprom_delay ();
return retval;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4f74b9762c29..d7a04e091101 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3894,6 +3894,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_22:
case RTL_GIGA_MAC_VER_23:
case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_34:
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
default:
@@ -5889,11 +5890,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
if (status & LinkChg)
__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
- napi_disable(&tp->napi);
- rtl_irq_disable(tp);
-
- napi_enable(&tp->napi);
- napi_schedule(&tp->napi);
+ rtl_irq_enable_all(tp);
}
static void rtl_task(struct work_struct *work)
@@ -6051,7 +6048,7 @@ static int rtl_open(struct net_device *dev)
pm_runtime_get_sync(&pdev->dev);
/*
- * Rx and Tx desscriptors needs 256 bytes alignment.
+ * Rx and Tx descriptors need 256-byte alignment.
* dma_alloc_coherent provides more.
*/
tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
@@ -6345,6 +6342,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->wk.work);
+ netif_napi_del(&tp->napi);
+
unregister_netdev(dev);
rtl_release_firmware(tp);
@@ -6668,6 +6667,7 @@ out:
return rc;
err_out_msi_4:
+ netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
iounmap(ioaddr);
err_out_free_res_3:
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index be3c22179161..79bf09b41971 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1011,7 +1011,7 @@ static int sh_eth_txfree(struct net_device *ndev)
}
/* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
@@ -1101,8 +1101,14 @@ static int sh_eth_rx(struct net_device *ndev)
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
- if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
+ /* fix the values for the next receiving if RDE is set */
+ if (intr_status & EESR_RDE)
+ mdp->cur_rx = mdp->dirty_rx =
+ (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
sh_eth_write(ndev, EDRRR_R, EDRRR);
+ }
return 0;
}
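The >> 4 above converts a byte offset into a descriptor index; a minimal sketch of the same calculation (helper name illustrative, assuming this driver's 16-byte receive descriptors):

static unsigned int sh_eth_rx_index(u32 rdfar, u32 rdlar)
{
	/* RDFAR points at the descriptor being fetched, RDLAR at the ring
	 * base; each rx descriptor is 16 bytes, so offset / 16 is the index
	 */
	return (rdfar - rdlar) >> 4;
}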
@@ -1199,8 +1205,6 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
- if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
- sh_eth_write(ndev, EDRRR_R, EDRRR);
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
@@ -1271,7 +1275,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
EESR_RTSF | /* short frame recv */
EESR_PRE | /* PHY-LSI recv error */
EESR_CERF)){ /* recv frame CRC error */
- sh_eth_rx(ndev);
+ sh_eth_rx(ndev, intr_status);
}
/* Tx Check */
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 8e9fda0c7aeb..2ed3ab4b3c2d 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -1,7 +1,7 @@
/*
* Ethernet driver for S6105 on chip network device
* (c)2008 emlix GmbH http://www.emlix.com
- * Authors: Oskar Schirmer <os@emlix.com>
+ * Authors: Oskar Schirmer <oskar@scara.com>
* Daniel Gloeckner <dg@emlix.com>
*
* This program is free software; you can redistribute it and/or
@@ -1070,4 +1070,4 @@ module_exit(s6gmac_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
-MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
+MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>");
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index dab9c6f671ec..1466e5d2af44 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2390,11 +2390,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_request_resources(pdev);
if (retval)
- goto out_return_resources;
+ goto out_request_resources_fail;
retval = smsc911x_enable_resources(pdev);
if (retval)
- goto out_disable_resources;
+ goto out_enable_resources_fail;
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2501,8 +2501,9 @@ out_free_irq:
free_irq(dev->irq, dev);
out_disable_resources:
(void)smsc911x_disable_resources(pdev);
-out_return_resources:
+out_enable_resources_fail:
smsc911x_free_resources(pdev);
+out_request_resources_fail:
platform_set_drvdata(pdev, NULL);
iounmap(pdata->ioaddr);
free_netdev(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 036428348faa..9f448279e12a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,7 +13,7 @@ config STMMAC_ETH
if STMMAC_ETH
config STMMAC_PLATFORM
- tristate "STMMAC platform bus support"
+ bool "STMMAC Platform bus support"
depends on STMMAC_ETH
default y
---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
If unsure, say N.
config STMMAC_PCI
- tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+ bool "STMMAC PCI bus support (EXPERIMENTAL)"
depends on STMMAC_ETH && PCI && EXPERIMENTAL
---help---
This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index fb8377da1687..4b785e10f2ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
csum);
-
+ wmb();
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+ wmb();
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060ee9de..dc20c56efc9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
#include <linux/clk.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
+#include <linux/pci.h>
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
-
int stmmac_freeze(struct net_device *ndev);
int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
static inline int stmmac_clk_enable(struct stmmac_priv *priv)
{
if (!IS_ERR(priv->stmmac_clk))
- return clk_enable(priv->stmmac_clk);
+ return clk_prepare_enable(priv->stmmac_clk);
return 0;
}
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
if (IS_ERR(priv->stmmac_clk))
return;
- clk_disable(priv->stmmac_clk);
+ clk_disable_unprepare(priv->stmmac_clk);
}
static inline int stmmac_clk_get(struct stmmac_priv *priv)
{
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
return 0;
}
#endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+ int err;
+
+ err = platform_driver_register(&stmmac_pltfr_driver);
+ if (err)
+ pr_err("stmmac: failed to register the platform driver\n");
+
+ return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+ platform_driver_unregister(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+ pr_debug("stmmac: do not register the platf driver\n");
+
+ return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+ int err;
+
+ err = pci_register_driver(&stmmac_pci_driver);
+ if (err)
+ pr_err("stmmac: failed to register the PCI driver\n");
+
+ return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+ pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+ pr_debug("stmmac: do not register the PCI driver\n");
+
+ return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 70966330f44e..ea3003edde18 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
/**
* stmmac_selec_desc_mode
- * @dev : device pointer
- * Description: select the Enhanced/Alternate or Normal descriptors */
+ * @priv : private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors
+ */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
if (priv->plat->enh_desc) {
@@ -1211,6 +1212,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
wmb();
priv->hw->desc->set_tx_owner(desc);
+ wmb();
}
/* Interrupt on completion only for the latest segment */
@@ -1226,6 +1228,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* To avoid a race condition */
priv->hw->desc->set_tx_owner(first);
+ wmb();
priv->cur_tx++;
@@ -1289,6 +1292,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
wmb();
priv->hw->desc->set_rx_owner(p + entry);
+ wmb();
}
}
@@ -1861,6 +1865,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/**
* stmmac_dvr_probe
* @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
*/
@@ -2090,6 +2096,34 @@ int stmmac_restore(struct net_device *ndev)
}
#endif /* CONFIG_PM */
+/* Driver can be configured w/ and w/o both PCI and Platform drivers
+ * depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+ int err_plt = 0;
+ int err_pci = 0;
+
+ err_plt = stmmac_register_platform();
+ err_pci = stmmac_register_pci();
+
+ if ((err_pci) && (err_plt)) {
+ pr_err("stmmac: driver registration failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+ stmmac_unregister_platform();
+ stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab5303e9c..cf826e6b6aa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
MODULE_DEVICE_TABLE(pci, stmmac_id_table);
-static struct pci_driver stmmac_driver = {
+struct pci_driver stmmac_pci_driver = {
.name = STMMAC_RESOURCE_NAME,
.id_table = stmmac_id_table,
.probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
#endif
};
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
- int ret;
-
- ret = pci_register_driver(&stmmac_driver);
- if (ret < 0)
- pr_err("%s: ERROR: driver registration failed\n", __func__);
-
- return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
- pci_unregister_driver(&stmmac_driver);
-}
-
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3dd8f0803808..680d2b8dfe27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
-static struct platform_driver stmmac_driver = {
+struct platform_driver stmmac_pltfr_driver = {
.probe = stmmac_pltfr_probe,
.remove = stmmac_pltfr_remove,
.driver = {
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = {
},
};
-module_platform_driver(stmmac_driver);
-
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cce2a2c..8c726b7004d3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
struct netdev_queue *txq;
- unsigned int tx_bytes;
u16 pkt_cnt, tmp;
int cons, index;
u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
netif_printk(np, tx_done, KERN_DEBUG, np->dev,
"%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
- tx_bytes = 0;
- tmp = pkt_cnt;
- while (tmp--) {
- tx_bytes += rp->tx_buffs[cons].skb->len;
+ while (pkt_cnt--)
cons = release_tx_packet(np, rp, cons);
- }
rp->cons = cons;
smp_mb();
- netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
-
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
struct tx_ring_info *rp = &np->tx_rings[i];
niu_free_tx_ring_info(np, rp);
- netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
}
kfree(np->tx_rings);
np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
prod = NEXT_TX(rp, prod);
}
- netdev_tx_sent_queue(txq, skb->len);
-
if (prod < rp->prod)
rp->wrap_bit ^= TX_RING_KICK_WRAP;
rp->prod = prod;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 8846516678c3..447a6932cab3 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -341,8 +341,8 @@ static int bdx_fw_load(struct bdx_priv *priv)
out:
if (master)
WRITE_REG(priv, regINIT_SEMAPHORE, 1);
- if (fw)
- release_firmware(fw);
+
+ release_firmware(fw);
if (rc) {
netdev_err(priv->ndev, "firmware loading failed\n");
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index b42252c4bec8..1b173a6145d6 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -51,7 +51,7 @@ config TI_DAVINCI_CPDMA
config TI_CPSW
tristate "TI CPSW Switch Support"
- depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX)
+ depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
---help---
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index d614c374ed9d..3b5c4571b55e 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f86bca..098b1c42b393 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -7,6 +7,8 @@ config TILE_NET
depends on TILE
default y
select CRC32
+ select TILE_GXIO_MPIPE if TILEGX
+ select HIGH_RES_TIMERS if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f142cab4..0ef9eefd3211 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_TILE_NET) += tile_net.o
ifdef CONFIG_TILEGX
-tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+tile_net-y := tilegx.o
else
-tile_net-objs := tilepro.o
+tile_net-y := tilepro.o
endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 000000000000..83b4b388ad49
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1898 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+#include <gxio/mpipe.h>
+#include <arch/sim.h>
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* The maximum number of distinct channels (idesc.channel is 5 bits). */
+#define TILE_NET_CHANNELS 32
+
+/* Maximum number of idescs to handle per "poll". */
+#define TILE_NET_BATCH 128
+
+/* Maximum number of packets to handle per "poll". */
+#define TILE_NET_WEIGHT 64
+
+/* Number of entries in each iqueue. */
+#define IQUEUE_ENTRIES 512
+
+/* Number of entries in each equeue. */
+#define EQUEUE_ENTRIES 2048
+
+/* Total header bytes per equeue slot. Must be big enough for 2 bytes
+ * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
+ * 60 bytes of actual TCP header. We round up to align to cache lines.
+ */
+#define HEADER_BYTES 128
+
+/* Maximum completions per cpu per device (must be a power of two).
+ * ISSUE: What is the right number here? If this is too small, then
+ * egress might block waiting for free space in a completions array.
+ * ISSUE: At the least, allocate these only for initialized echannels.
+ */
+#define TILE_NET_MAX_COMPS 64
+
+#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Size of completions data to allocate.
+ * ISSUE: Probably more than needed since we don't use all the channels.
+ */
+#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
+
+/* Size of NotifRing data to allocate. */
+#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
+
+/* Timeout to wake the per-device TX timer after we stop the queue.
+ * We don't want the timeout too short (adds overhead, and might end
+ * up causing stop/wake/stop/wake cycles) or too long (affects performance).
+ * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
+ */
+#define TX_TIMER_DELAY_USEC 30
+
+/* Timeout to wake the per-cpu egress timer to free completions. */
+#define EGRESS_TIMER_DELAY_USEC 1000
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+/* A "packet fragment" (a chunk of memory). */
+struct frag {
+ void *buf;
+ size_t length;
+};
+
+/* A single completion. */
+struct tile_net_comp {
+ /* The "complete_count" when the completion will be complete. */
+ s64 when;
+ /* The buffer to be freed when the completion is complete. */
+ struct sk_buff *skb;
+};
+
+/* The completions for a given cpu and echannel. */
+struct tile_net_comps {
+ /* The completions. */
+ struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
+ /* The number of completions used. */
+ unsigned long comp_next;
+ /* The number of completions freed. */
+ unsigned long comp_last;
+};
+
+/* The transmit wake timer for a given cpu and echannel. */
+struct tile_net_tx_wake {
+ struct hrtimer timer;
+ struct net_device *dev;
+};
+
+/* Info for a specific cpu. */
+struct tile_net_info {
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Packet queue. */
+ gxio_mpipe_iqueue_t iqueue;
+ /* Our cpu. */
+ int my_cpu;
+ /* True if iqueue is valid. */
+ bool has_iqueue;
+ /* NAPI flags. */
+ bool napi_added;
+ bool napi_enabled;
+ /* Number of small sk_buffs which must still be provided. */
+ unsigned int num_needed_small_buffers;
+ /* Number of large sk_buffs which must still be provided. */
+ unsigned int num_needed_large_buffers;
+ /* A timer for handling egress completions. */
+ struct hrtimer egress_timer;
+ /* True if "egress_timer" is scheduled. */
+ bool egress_timer_scheduled;
+ /* Comps for each egress channel. */
+ struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+ /* Transmit wake timer for each egress channel. */
+ struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+};
+
+/* Info for egress on a particular egress channel. */
+struct tile_net_egress {
+ /* The "equeue". */
+ gxio_mpipe_equeue_t *equeue;
+ /* The headers for TSO. */
+ unsigned char *headers;
+};
+
+/* Info for a specific device. */
+struct tile_net_priv {
+ /* Our network device. */
+ struct net_device *dev;
+ /* The primary link. */
+ gxio_mpipe_link_t link;
+ /* The primary channel, if open, else -1. */
+ int channel;
+ /* The "loopify" egress link, if needed. */
+ gxio_mpipe_link_t loopify_link;
+ /* The "loopify" egress channel, if open, else -1. */
+ int loopify_channel;
+ /* The egress channel (channel or loopify_channel). */
+ int echannel;
+ /* Total stats. */
+ struct net_device_stats stats;
+};
+
+/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
+static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+
+/* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+/* A mutex for "tile_net_devs_for_channel". */
+static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
+
+/* The per-cpu info. */
+static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
+
+/* The "context" for all devices. */
+static gxio_mpipe_context_t context;
+
+/* Buffer sizes and mpipe enum codes for buffer stacks.
+ * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ */
+#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
+#define BUFFER_SIZE_SMALL 128
+#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
+#define BUFFER_SIZE_LARGE 1664
+
+/* The small/large "buffer stacks". */
+static int small_buffer_stack = -1;
+static int large_buffer_stack = -1;
+
+/* Amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_size;
+
+/* The actual memory allocated for the buffer stacks. */
+static void *small_buffer_stack_va;
+static void *large_buffer_stack_va;
+
+/* The buckets. */
+static int first_bucket = -1;
+static int num_buckets = 1;
+
+/* The ingress irq. */
+static int ingress_irq = -1;
+
+/* Text value of tile_net.cpus if passed as a module parameter. */
+static char *network_cpus_string;
+
+/* The actual cpus in "network_cpus". */
+static struct cpumask network_cpus_map;
+
+/* If "loopify=LINK" was specified, this is "LINK". */
+static char *loopify_link_name;
+
+/* If "tile_net.custom" was specified, this is non-NULL. */
+static char *custom_str;
+
+/* The "tile_net.cpus" argument specifies the cpus that are dedicated
+ * to handling ingress packets.
+ *
+ * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
+ * m, n, x, y are integers giving the ranges of cpus that should handle
+ * ingress; cpus that do not actually exist are dropped from the set.
+ */
+static bool network_cpus_init(void)
+{
+ char buf[1024];
+ int rc;
+
+ if (network_cpus_string == NULL)
+ return false;
+
+ rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
+ if (rc != 0) {
+ pr_warn("tile_net.cpus=%s: malformed cpu list\n",
+ network_cpus_string);
+ return false;
+ }
+
+	/* Keep only cpus that actually exist. */
+ cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
+
+ if (cpumask_empty(&network_cpus_map)) {
+ pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
+ network_cpus_string);
+ return false;
+ }
+
+ cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+ pr_info("Linux network CPUs: %s\n", buf);
+ return true;
+}
+
+module_param_named(cpus, network_cpus_string, charp, 0444);
+MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
+
+/* The "tile_net.loopify=LINK" argument causes the named device to
+ * actually use "loop0" for ingress, and "loop1" for egress. This
+ * allows an app to sit between the actual link and linux, passing
+ * (some) packets along to linux, and forwarding (some) packets sent
+ * out by linux.
+ */
+module_param_named(loopify, loopify_link_name, charp, 0444);
+MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
+
+/* The "tile_net.custom" argument causes us to ignore the "conventional"
+ * classifier metadata, in particular, the "l2_offset".
+ */
+module_param_named(custom, custom_str, charp, 0444);
+MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
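For orientation, here is one way the three module parameters above could be combined on the kernel command line; this is an editorial sketch, and the link name "gbe0" is only an assumed example, not something taken from this patch:

    tile_net.cpus=1-3 tile_net.loopify=gbe0 tile_net.custom=yes

With these settings, cpus 1-3 become the ingress set parsed by network_cpus_init(), a device named "gbe0" is opened through the loop0/loop1 links in tile_net_open(), and the non-NULL custom string makes tile_net_handle_packet() ignore the classifier's l2_offset metadata.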
+
+/* Atomically update a statistics field.
+ * Note that on TILE-Gx, this operation is fire-and-forget on the
+ * issuing core (single-cycle dispatch) and takes only a few cycles
+ * longer than a regular store when the request reaches the home cache.
+ * No expensive bus management overhead is required.
+ */
+static void tile_net_stats_add(unsigned long value, unsigned long *field)
+{
+ BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
+ atomic_long_add(value, (atomic_long_t *)field);
+}
+
+/* Allocate and push a buffer. */
+static bool tile_net_provide_buffer(bool small)
+{
+ int stack = small ? small_buffer_stack : large_buffer_stack;
+ const unsigned long buffer_alignment = 128;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(struct sk_buff **) + buffer_alignment;
+ len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+ skb = dev_alloc_skb(len);
+ if (skb == NULL)
+ return false;
+
+ /* Make room for a back-pointer to 'skb' and guarantee alignment. */
+ skb_reserve(skb, sizeof(struct sk_buff **));
+ skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
+
+ /* Save a back-pointer to 'skb'. */
+ *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
+
+ /* Make sure "skb" and the back-pointer have been flushed. */
+ wmb();
+
+ gxio_mpipe_push_buffer(&context, stack,
+ (void *)va_to_tile_io_addr(skb->data));
+
+ return true;
+}
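To make the two skb_reserve() calls above concrete, here is a worked example with made-up addresses, assuming a 64-bit build where sizeof(struct sk_buff **) is 8:

    0x1040  skb->data as returned by dev_alloc_skb()
    0x1048  after skb_reserve(skb, 8), skipping the back-pointer slot
    0x1080  after skb_reserve(skb, -0x1048 & 127), i.e. +0x38 to reach
            the next 128-byte boundary
    0x1078  where the back-pointer to the skb is stored
    0x1080  the io address pushed onto the buffer stack

When mPIPE later hands 0x1080 back in an idesc, mpipe_buf_to_skb() recovers the skb from the pointer stored 8 bytes below it.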
+
+/* Convert a raw mpipe buffer to its matching skb pointer. */
+static struct sk_buff *mpipe_buf_to_skb(void *va)
+{
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ /* Paranoia. */
+ if (skb->data != va) {
+ /* Panic here since there's a reasonable chance
+ * that corrupt buffers means generic memory
+ * corruption, with unpredictable system effects.
+ */
+ panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
+ va, skb, skb->data);
+ }
+
+ return skb;
+}
+
+static void tile_net_pop_all_buffers(int stack)
+{
+ for (;;) {
+ tile_io_addr_t addr =
+ (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+ if (addr == 0)
+ break;
+ dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
+ }
+}
+
+/* Provide linux buffers to mPIPE. */
+static void tile_net_provide_needed_buffers(void)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+ while (info->num_needed_small_buffers != 0) {
+ if (!tile_net_provide_buffer(true))
+ goto oops;
+ info->num_needed_small_buffers--;
+ }
+
+ while (info->num_needed_large_buffers != 0) {
+ if (!tile_net_provide_buffer(false))
+ goto oops;
+ info->num_needed_large_buffers--;
+ }
+
+ return;
+
+oops:
+ /* Add a description to the page allocation failure dump. */
+ pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+}
+
+static inline bool filter_packet(struct net_device *dev, void *buf)
+{
+ /* Filter packets received before we're up. */
+ if (dev == NULL || !(dev->flags & IFF_UP))
+ return true;
+
+ /* Filter out packets that aren't for us. */
+ if (!(dev->flags & IFF_PROMISC) &&
+ !is_multicast_ether_addr(buf) &&
+ compare_ether_addr(dev->dev_addr, buf) != 0)
+ return true;
+
+ return false;
+}
+
+static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
+ gxio_mpipe_idesc_t *idesc, unsigned long len)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ /* Encode the actual packet length. */
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Acknowledge "good" hardware checksums. */
+ if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb);
+
+ /* Update stats. */
+ tile_net_stats_add(1, &priv->stats.rx_packets);
+ tile_net_stats_add(len, &priv->stats.rx_bytes);
+
+ /* Need a new buffer. */
+ if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
+ info->num_needed_small_buffers++;
+ else
+ info->num_needed_large_buffers++;
+}
+
+/* Handle a packet. Return true if "processed", false if "filtered". */
+static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+ uint8_t l2_offset;
+ void *va;
+ void *buf;
+ unsigned long len;
+ bool filter;
+
+ /* Drop packets for which no buffer was available.
+ * NOTE: This happens under heavy load.
+ */
+ if (idesc->be) {
+ struct tile_net_priv *priv = netdev_priv(dev);
+ tile_net_stats_add(1, &priv->stats.rx_dropped);
+ gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ if (net_ratelimit())
+ pr_info("Dropping packet (insufficient buffers).\n");
+ return false;
+ }
+
+ /* Get the "l2_offset", if allowed. */
+ l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+
+ /* Get the raw buffer VA (includes "headroom"). */
+ va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+
+ /* Get the actual packet start/length. */
+ buf = va + l2_offset;
+ len = idesc->l2_size - l2_offset;
+
+ /* Point "va" at the raw buffer. */
+ va -= NET_IP_ALIGN;
+
+ filter = filter_packet(dev, buf);
+ if (filter) {
+ gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+ } else {
+ struct sk_buff *skb = mpipe_buf_to_skb(va);
+
+ /* Skip headroom, and any custom header. */
+ skb_reserve(skb, NET_IP_ALIGN + l2_offset);
+
+ tile_net_receive_skb(dev, skb, idesc, len);
+ }
+
+ gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ return !filter;
+}
+
+/* Handle some packets for the current CPU.
+ *
+ * This function handles up to TILE_NET_BATCH idescs per call.
+ *
+ * ISSUE: Since we do not provide new buffers until this function is
+ * complete, we must initially provide enough buffers for each network
+ * cpu to fill its iqueue and also its batched idescs.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ unsigned int work = 0;
+ gxio_mpipe_idesc_t *idesc;
+ int i, n;
+
+ /* Process packets. */
+ while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+ for (i = 0; i < n; i++) {
+ if (i == TILE_NET_BATCH)
+ goto done;
+ if (tile_net_handle_packet(idesc + i)) {
+ if (++work >= budget)
+ goto done;
+ }
+ }
+ }
+
+ /* There are no packets left. */
+ napi_complete(&info->napi);
+
+ /* Re-enable hypervisor interrupts. */
+ gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+
+ /* HACK: Avoid the "rotting packet" problem. */
+ if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
+ napi_schedule(&info->napi);
+
+ /* ISSUE: Handle completions? */
+
+done:
+ tile_net_provide_needed_buffers();
+
+ return work;
+}
+
+/* Handle an ingress interrupt on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ napi_schedule(&info->napi);
+ return IRQ_HANDLED;
+}
+
+/* Free some completions. This must be called with interrupts blocked. */
+static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
+ struct tile_net_comps *comps,
+ int limit, bool force_update)
+{
+ int n = 0;
+ while (comps->comp_last < comps->comp_next) {
+ unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
+ struct tile_net_comp *comp = &comps->comp_queue[cid];
+ if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
+ force_update || n == 0))
+ break;
+ dev_kfree_skb_irq(comp->skb);
+ comps->comp_last++;
+ if (++n == limit)
+ break;
+ }
+ return n;
+}
+
+/* Add a completion. This must be called with interrupts blocked.
+ * tile_net_equeue_try_reserve() will have ensured a free completion entry.
+ */
+static void add_comp(gxio_mpipe_equeue_t *equeue,
+ struct tile_net_comps *comps,
+ uint64_t when, struct sk_buff *skb)
+{
+ int cid = comps->comp_next % TILE_NET_MAX_COMPS;
+ comps->comp_queue[cid].when = when;
+ comps->comp_queue[cid].skb = skb;
+ comps->comp_next++;
+}
+
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ hrtimer_start(&info->tx_wake[priv->echannel].timer,
+ ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
+{
+ struct tile_net_tx_wake *tx_wake =
+ container_of(t, struct tile_net_tx_wake, timer);
+ netif_wake_subqueue(tx_wake->dev, smp_processor_id());
+ return HRTIMER_NORESTART;
+}
+
+/* Make sure the egress timer is scheduled. */
+static void tile_net_schedule_egress_timer(void)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+ if (!info->egress_timer_scheduled) {
+ hrtimer_start(&info->egress_timer,
+ ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+ HRTIMER_MODE_REL_PINNED);
+ info->egress_timer_scheduled = true;
+ }
+}
+
+/* The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected for this tile.
+ */
+static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ unsigned long irqflags;
+ bool pending = false;
+ int i;
+
+ local_irq_save(irqflags);
+
+ /* The timer is no longer scheduled. */
+ info->egress_timer_scheduled = false;
+
+ /* Free all possible comps for this tile. */
+ for (i = 0; i < TILE_NET_CHANNELS; i++) {
+ struct tile_net_egress *egress = &egress_for_echannel[i];
+ struct tile_net_comps *comps = info->comps_for_echannel[i];
+ if (comps->comp_last >= comps->comp_next)
+ continue;
+ tile_net_free_comps(egress->equeue, comps, -1, true);
+ pending = pending || (comps->comp_last < comps->comp_next);
+ }
+
+ /* Reschedule timer if needed. */
+ if (pending)
+ tile_net_schedule_egress_timer();
+
+ local_irq_restore(irqflags);
+
+ return HRTIMER_NORESTART;
+}
+
+/* Helper function for "tile_net_update()".
+ * "dev" (i.e. arg) is the device being brought up or down,
+ * or NULL if all devices are now down.
+ */
+static void tile_net_update_cpu(void *arg)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct net_device *dev = arg;
+
+ if (!info->has_iqueue)
+ return;
+
+ if (dev != NULL) {
+ if (!info->napi_added) {
+ netif_napi_add(dev, &info->napi,
+ tile_net_poll, TILE_NET_WEIGHT);
+ info->napi_added = true;
+ }
+ if (!info->napi_enabled) {
+ napi_enable(&info->napi);
+ info->napi_enabled = true;
+ }
+ enable_percpu_irq(ingress_irq, 0);
+ } else {
+ disable_percpu_irq(ingress_irq);
+ if (info->napi_enabled) {
+ napi_disable(&info->napi);
+ info->napi_enabled = false;
+ }
+ /* FIXME: Drain the iqueue. */
+ }
+}
+
+/* Helper function for tile_net_open() and tile_net_stop().
+ * Always called under tile_net_devs_for_channel_mutex.
+ */
+static int tile_net_update(struct net_device *dev)
+{
+ static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
+ bool saw_channel = false;
+ int channel;
+ int rc;
+ int cpu;
+
+ gxio_mpipe_rules_init(&rules, &context);
+
+ for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
+ if (tile_net_devs_for_channel[channel] == NULL)
+ continue;
+ if (!saw_channel) {
+ saw_channel = true;
+ gxio_mpipe_rules_begin(&rules, first_bucket,
+ num_buckets, NULL);
+ gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
+ }
+ gxio_mpipe_rules_add_channel(&rules, channel);
+ }
+
+ /* NOTE: This can fail if there is no classifier.
+ * ISSUE: Can anything else cause it to fail?
+ */
+ rc = gxio_mpipe_rules_commit(&rules);
+ if (rc != 0) {
+ netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, tile_net_update_cpu,
+ (saw_channel ? dev : NULL), 1);
+
+ /* HACK: Allow packets to flow in the simulator. */
+ if (saw_channel)
+ sim_enable_mpipe_links(0, -1);
+
+ return 0;
+}
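Concretely (an editorial note, with an assumed device name): if only "gbe0" is up and it owns channel 3, the loop above builds a single rule set that steers channel 3 traffic into buckets [first_bucket, first_bucket + num_buckets) with NET_IP_ALIGN (2) bytes of headroom; when the last device goes down, an empty rule list is committed and mPIPE stops delivering packets to this context.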
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for both small and large packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+{
+ pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+ int rc;
+
+	/* Compute stack bytes; we round up to 64KB and then use
+	 * alloc_pages_exact() so we get the required 64KB alignment as well.
+ */
+ buffer_stack_size =
+ ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
+ 64 * 1024);
+
+ /* Allocate two buffer stack indices. */
+ rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
+ rc);
+ return rc;
+ }
+ small_buffer_stack = rc;
+ large_buffer_stack = rc + 1;
+
+	/* Allocate the small buffer stack. */
+ small_buffer_stack_va =
+ alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+ if (small_buffer_stack_va == NULL) {
+ netdev_err(dev,
+ "Could not alloc %zd bytes for buffer stacks\n",
+ buffer_stack_size);
+ return -ENOMEM;
+ }
+ rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
+ BUFFER_SIZE_SMALL_ENUM,
+ small_buffer_stack_va,
+ buffer_stack_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+ return rc;
+ }
+ rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+ hash_pte, 0);
+ if (rc != 0) {
+ netdev_err(dev,
+			   "gxio_mpipe_register_client_memory failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Allocate the large buffer stack. */
+ large_buffer_stack_va =
+ alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+ if (large_buffer_stack_va == NULL) {
+ netdev_err(dev,
+ "Could not alloc %zd bytes for buffer stacks\n",
+ buffer_stack_size);
+ return -ENOMEM;
+ }
+ rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
+ BUFFER_SIZE_LARGE_ENUM,
+ large_buffer_stack_va,
+ buffer_stack_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
+ rc);
+ return rc;
+ }
+ rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
+ hash_pte, 0);
+ if (rc != 0) {
+ netdev_err(dev,
+			   "gxio_mpipe_register_client_memory failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Allocate per-cpu resources (memory for completions and idescs).
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int alloc_percpu_mpipe_resources(struct net_device *dev,
+ int cpu, int ring)
+{
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ int order, i, rc;
+ struct page *page;
+ void *addr;
+
+ /* Allocate the "comps". */
+ order = get_order(COMPS_SIZE);
+ page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+ if (page == NULL) {
+ netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
+ COMPS_SIZE);
+ return -ENOMEM;
+ }
+ addr = pfn_to_kaddr(page_to_pfn(page));
+ memset(addr, 0, COMPS_SIZE);
+ for (i = 0; i < TILE_NET_CHANNELS; i++)
+ info->comps_for_echannel[i] =
+ addr + i * sizeof(struct tile_net_comps);
+
+ /* If this is a network cpu, create an iqueue. */
+ if (cpu_isset(cpu, network_cpus_map)) {
+ order = get_order(NOTIF_RING_SIZE);
+ page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+ if (page == NULL) {
+ netdev_err(dev,
+ "Failed to alloc %zd bytes iqueue memory\n",
+ NOTIF_RING_SIZE);
+ return -ENOMEM;
+ }
+ addr = pfn_to_kaddr(page_to_pfn(page));
+ rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
+ addr, NOTIF_RING_SIZE, 0);
+ if (rc < 0) {
+ netdev_err(dev,
+ "gxio_mpipe_iqueue_init failed: %d\n", rc);
+ return rc;
+ }
+ info->has_iqueue = true;
+ }
+
+ return ring;
+}
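A sketch of the ring bookkeeping here (editorial illustration): if the network cpus are {1, 2, 5} and the caller starts with ring == first_ring == 0, then cpu 1 gets NotifRing 0, cpu 2 gets NotifRing 1, cpu 5 gets NotifRing 2, and every other online cpu only gets its "comps" page and keeps has_iqueue == false. The incremented ring value returned above is what tile_net_init_mpipe() feeds into the next iteration of its cpu loop.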
+
+/* Initialize NotifGroup and buckets.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_notif_group_and_buckets(struct net_device *dev,
+ int ring, int network_cpus_count)
+{
+ int group, rc;
+
+ /* Allocate one NotifGroup. */
+ rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
+ rc);
+ return rc;
+ }
+ group = rc;
+
+ /* Initialize global num_buckets value. */
+ if (network_cpus_count > 4)
+ num_buckets = 256;
+ else if (network_cpus_count > 1)
+ num_buckets = 16;
+
+ /* Allocate some buckets, and set global first_bucket value. */
+ rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+ return rc;
+ }
+ first_bucket = rc;
+
+ /* Init group and buckets. */
+ rc = gxio_mpipe_init_notif_group_and_buckets(
+ &context, group, ring, network_cpus_count,
+ first_bucket, num_buckets,
+ GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
+ if (rc != 0) {
+ netdev_err(
+ dev,
+ "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
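The sizing above means one network cpu keeps the default single bucket, two to four cpus get 16 buckets, and more than four get 256; with the sticky flow-locality mode, packets hashed to a given bucket keep landing on whichever NotifRing last handled that bucket, which roughly keeps each flow on one cpu.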
+
+/* Create an irq and register it, then activate the irq and request
+ * interrupts on all cores. Note that "ingress_irq" being initialized
+ * is how we know not to call tile_net_init_mpipe() again.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int tile_net_setup_interrupts(struct net_device *dev)
+{
+ int cpu, rc;
+
+ rc = create_irq();
+ if (rc < 0) {
+ netdev_err(dev, "create_irq failed: %d\n", rc);
+ return rc;
+ }
+ ingress_irq = rc;
+ tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
+ rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
+ 0, NULL, NULL);
+ if (rc != 0) {
+ netdev_err(dev, "request_irq failed: %d\n", rc);
+ destroy_irq(ingress_irq);
+ ingress_irq = -1;
+ return rc;
+ }
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ if (info->has_iqueue) {
+ gxio_mpipe_request_notif_ring_interrupt(
+ &context, cpu_x(cpu), cpu_y(cpu),
+ 1, ingress_irq, info->iqueue.ring);
+ }
+ }
+
+ return 0;
+}
+
+/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
+static void tile_net_init_mpipe_fail(void)
+{
+ int cpu;
+
+ /* Do cleanups that require the mpipe context first. */
+ if (small_buffer_stack >= 0)
+ tile_net_pop_all_buffers(small_buffer_stack);
+ if (large_buffer_stack >= 0)
+ tile_net_pop_all_buffers(large_buffer_stack);
+
+ /* Destroy mpipe context so the hardware no longer owns any memory. */
+ gxio_mpipe_destroy(&context);
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ free_pages((unsigned long)(info->comps_for_echannel[0]),
+ get_order(COMPS_SIZE));
+ info->comps_for_echannel[0] = NULL;
+ free_pages((unsigned long)(info->iqueue.idescs),
+ get_order(NOTIF_RING_SIZE));
+ info->iqueue.idescs = NULL;
+ }
+
+ if (small_buffer_stack_va)
+ free_pages_exact(small_buffer_stack_va, buffer_stack_size);
+ if (large_buffer_stack_va)
+ free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+
+ small_buffer_stack_va = NULL;
+ large_buffer_stack_va = NULL;
+ large_buffer_stack = -1;
+ small_buffer_stack = -1;
+ first_bucket = -1;
+}
+
+/* The first time any tilegx network device is opened, we initialize
+ * the global mpipe state. If this step fails, we fail to open the
+ * device, but if it succeeds, we never need to do it again, and since
+ * tile_net can't be unloaded, we never undo it.
+ *
+ * Note that some resources in this path (buffer stack indices,
+ * bindings from init_buffer_stack, etc.) are hypervisor resources
+ * that are freed implicitly by gxio_mpipe_destroy().
+ */
+static int tile_net_init_mpipe(struct net_device *dev)
+{
+ int i, num_buffers, rc;
+ int cpu;
+ int first_ring, ring;
+ int network_cpus_count = cpus_weight(network_cpus_map);
+
+ if (!hash_default) {
+ netdev_err(dev, "Networking requires hash_default!\n");
+ return -EIO;
+ }
+
+ rc = gxio_mpipe_init(&context, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Set up the buffer stacks. */
+ num_buffers =
+ network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+ rc = init_buffer_stacks(dev, num_buffers);
+ if (rc != 0)
+ goto fail;
+
+ /* Provide initial buffers. */
+ rc = -ENOMEM;
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(true)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ goto fail;
+ }
+ }
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(false)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ goto fail;
+ }
+ }
+
+ /* Allocate one NotifRing for each network cpu. */
+ rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
+ rc);
+ goto fail;
+ }
+
+ /* Init NotifRings per-cpu. */
+ first_ring = rc;
+ ring = first_ring;
+ for_each_online_cpu(cpu) {
+ rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
+ if (rc < 0)
+ goto fail;
+ ring = rc;
+ }
+
+ /* Initialize NotifGroup and buckets. */
+ rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
+ if (rc != 0)
+ goto fail;
+
+ /* Create and enable interrupts. */
+ rc = tile_net_setup_interrupts(dev);
+ if (rc != 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ tile_net_init_mpipe_fail();
+ return rc;
+}
+
+/* Create persistent egress info for a given egress channel.
+ * Note that this may be shared between, say, "gbe0" and "xgbe0".
+ * ISSUE: Defer header allocation until TSO is actually needed?
+ */
+static int tile_net_init_egress(struct net_device *dev, int echannel)
+{
+ struct page *headers_page, *edescs_page, *equeue_page;
+ gxio_mpipe_edesc_t *edescs;
+ gxio_mpipe_equeue_t *equeue;
+ unsigned char *headers;
+ int headers_order, edescs_order, equeue_order;
+ size_t edescs_size;
+ int edma;
+ int rc = -ENOMEM;
+
+ /* Only initialize once. */
+ if (egress_for_echannel[echannel].equeue != NULL)
+ return 0;
+
+ /* Allocate memory for the "headers". */
+ headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
+ headers_page = alloc_pages(GFP_KERNEL, headers_order);
+ if (headers_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for TSO headers.\n",
+ PAGE_SIZE << headers_order);
+ goto fail;
+ }
+ headers = pfn_to_kaddr(page_to_pfn(headers_page));
+
+ /* Allocate memory for the "edescs". */
+ edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
+ edescs_order = get_order(edescs_size);
+ edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
+ if (edescs_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for eDMA ring.\n",
+ edescs_size);
+ goto fail_headers;
+ }
+ edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
+
+ /* Allocate memory for the "equeue". */
+ equeue_order = get_order(sizeof(*equeue));
+ equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
+ if (equeue_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for equeue info.\n",
+ PAGE_SIZE << equeue_order);
+ goto fail_edescs;
+ }
+ equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
+
+	/* Allocate an edma ring.  We never free this ring, so if the
+	 * equeue init below were to fail we would leak it; in practice
+	 * that init can't fail.
+	 */
+ rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
+ rc);
+ goto fail_equeue;
+ }
+ edma = rc;
+
+ /* Initialize the equeue. */
+ rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+ edescs, edescs_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+ goto fail_equeue;
+ }
+
+ /* Done. */
+ egress_for_echannel[echannel].equeue = equeue;
+ egress_for_echannel[echannel].headers = headers;
+ return 0;
+
+fail_equeue:
+ __free_pages(equeue_page, equeue_order);
+
+fail_edescs:
+ __free_pages(edescs_page, edescs_order);
+
+fail_headers:
+ __free_pages(headers_page, headers_order);
+
+fail:
+ return rc;
+}
+
+/* Return channel number for a newly-opened link. */
+static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
+ const char *link_name)
+{
+ int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+ if (rc < 0) {
+ netdev_err(dev, "Failed to open '%s'\n", link_name);
+ return rc;
+ }
+ rc = gxio_mpipe_link_channel(link);
+ if (rc < 0 || rc >= TILE_NET_CHANNELS) {
+ netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
+ gxio_mpipe_link_close(link);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+/* Help the kernel activate the given network interface. */
+static int tile_net_open(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int cpu, rc;
+
+ mutex_lock(&tile_net_devs_for_channel_mutex);
+
+ /* Do one-time initialization the first time any device is opened. */
+ if (ingress_irq < 0) {
+ rc = tile_net_init_mpipe(dev);
+ if (rc != 0)
+ goto fail;
+ }
+
+ /* Determine if this is the "loopify" device. */
+ if (unlikely((loopify_link_name != NULL) &&
+ !strcmp(dev->name, loopify_link_name))) {
+ rc = tile_net_link_open(dev, &priv->link, "loop0");
+ if (rc < 0)
+ goto fail;
+ priv->channel = rc;
+ rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
+ if (rc < 0)
+ goto fail;
+ priv->loopify_channel = rc;
+ priv->echannel = rc;
+ } else {
+ rc = tile_net_link_open(dev, &priv->link, dev->name);
+ if (rc < 0)
+ goto fail;
+ priv->channel = rc;
+ priv->echannel = rc;
+ }
+
+ /* Initialize egress info (if needed). Once ever, per echannel. */
+ rc = tile_net_init_egress(dev, priv->echannel);
+ if (rc != 0)
+ goto fail;
+
+ tile_net_devs_for_channel[priv->channel] = dev;
+
+ rc = tile_net_update(dev);
+ if (rc != 0)
+ goto fail;
+
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ /* Initialize the transmit wake timer for this device for each cpu. */
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ struct tile_net_tx_wake *tx_wake =
+ &info->tx_wake[priv->echannel];
+
+ hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ tx_wake->timer.function = tile_net_handle_tx_wake_timer;
+ tx_wake->dev = dev;
+ }
+
+ for_each_online_cpu(cpu)
+ netif_start_subqueue(dev, cpu);
+ netif_carrier_on(dev);
+ return 0;
+
+fail:
+ if (priv->loopify_channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+ netdev_warn(dev, "Failed to close loopify link!\n");
+ priv->loopify_channel = -1;
+ }
+	if (priv->channel >= 0) {
+		if (gxio_mpipe_link_close(&priv->link) != 0)
+			netdev_warn(dev, "Failed to close link!\n");
+		/* Clear our slot before forgetting the channel, so we
+		 * never index the array with -1.
+		 */
+		tile_net_devs_for_channel[priv->channel] = NULL;
+		priv->channel = -1;
+	}
+	priv->echannel = -1;
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ /* Don't return raw gxio error codes to generic Linux. */
+ return (rc > -512) ? rc : -EIO;
+}
+
+/* Help the kernel deactivate the given network interface. */
+static int tile_net_stop(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ struct tile_net_tx_wake *tx_wake =
+ &info->tx_wake[priv->echannel];
+
+ hrtimer_cancel(&tx_wake->timer);
+ netif_stop_subqueue(dev, cpu);
+ }
+
+ mutex_lock(&tile_net_devs_for_channel_mutex);
+ tile_net_devs_for_channel[priv->channel] = NULL;
+ (void)tile_net_update(dev);
+ if (priv->loopify_channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+ netdev_warn(dev, "Failed to close loopify link!\n");
+ priv->loopify_channel = -1;
+ }
+ if (priv->channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->link) != 0)
+ netdev_warn(dev, "Failed to close link!\n");
+ priv->channel = -1;
+ }
+ priv->echannel = -1;
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ return 0;
+}
+
+/* Determine the VA for a fragment. */
+static inline void *tile_net_frag_buf(skb_frag_t *f)
+{
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
+ return pfn_to_kaddr(pfn) + f->page_offset;
+}
+
+/* Acquire a completion entry and an egress slot, or if we can't,
+ * stop the queue and schedule the tx_wake timer.
+ */
+static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+ struct tile_net_comps *comps,
+ gxio_mpipe_equeue_t *equeue,
+ int num_edescs)
+{
+ /* Try to acquire a completion entry. */
+ if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
+ tile_net_free_comps(equeue, comps, 32, false) != 0) {
+
+ /* Try to acquire an egress slot. */
+ s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+ if (slot >= 0)
+ return slot;
+
+ /* Freeing some completions gives the equeue time to drain. */
+ tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
+
+ slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+ if (slot >= 0)
+ return slot;
+ }
+
+ /* Still nothing; give up and stop the queue for a short while. */
+ netif_stop_subqueue(dev, smp_processor_id());
+ tile_net_schedule_tx_wake_timer(dev);
+ return -1;
+}
+
+/* Determine how many edesc's are needed for TSO.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments. This requires special care.
+ */
+static int tso_count_edescs(struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int data_len = skb->data_len;
+ unsigned int p_len = sh->gso_size;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = -1; /* size of the current fragment */
+ long f_used = -1; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int num_edescs = 0;
+ int segment;
+
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+
+ unsigned int p_used = 0;
+
+ /* One edesc for header and for each piece of the payload. */
+ for (num_edescs++; p_used < p_len; num_edescs++) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+ }
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ return num_edescs;
+}
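A worked example of this count, with made-up sizes: take an skb with gso_size 1000, gso_segs 3, and a 3000-byte payload split across two frags of 2500 and 500 bytes. Segments 0 and 1 each need one header edesc plus one payload edesc, since their payload fits inside frag 0; segment 2 needs one header edesc plus two payload edescs because its last 500 bytes cross the frag 0/frag 1 boundary. The function therefore returns 7.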
+
+/* Prepare modified copies of the skbuff headers.
+ * FIXME: add support for IPv6.
+ */
+static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
+ s64 slot)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ struct iphdr *ih;
+ struct tcphdr *th;
+ unsigned int data_len = skb->data_len;
+ unsigned char *data = skb->data;
+ unsigned int ih_off, th_off, sh_len, p_len;
+ unsigned int isum_seed, tsum_seed, id, seq;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = -1; /* size of the current fragment */
+ long f_used = -1; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int segment;
+
+ /* Locate original headers and compute various lengths. */
+ ih = ip_hdr(skb);
+ th = tcp_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ th_off = skb_transport_offset(skb);
+ sh_len = th_off + tcp_hdrlen(skb);
+ p_len = sh->gso_size;
+
+ /* Set up seed values for IP and TCP csum and initialize id and seq. */
+ isum_seed = ((0xFFFF - ih->check) +
+ (0xFFFF - ih->tot_len) +
+ (0xFFFF - ih->id));
+ tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
+ id = ntohs(ih->id);
+ seq = ntohl(th->seq);
+
+ /* Prepare all the headers. */
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned char *buf;
+ unsigned int p_used = 0;
+
+ /* Copy to the header memory for this segment. */
+ buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+ NET_IP_ALIGN;
+ memcpy(buf, data, sh_len);
+
+ /* Update copied ip header. */
+ ih = (struct iphdr *)(buf + ih_off);
+ ih->tot_len = htons(sh_len + p_len - ih_off);
+ ih->id = htons(id);
+ ih->check = csum_long(isum_seed + ih->tot_len +
+ ih->id) ^ 0xffff;
+
+ /* Update copied tcp header. */
+ th = (struct tcphdr *)(buf + th_off);
+ th->seq = htonl(seq);
+ th->check = csum_long(tsum_seed + htons(sh_len + p_len));
+ if (segment != sh->gso_segs - 1) {
+ th->fin = 0;
+ th->psh = 0;
+ }
+
+ /* Skip past the header. */
+ slot++;
+
+ /* Skip past the payload. */
+ while (p_used < p_len) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+
+ slot++;
+ }
+
+ id++;
+ seq += p_len;
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* Flush the headers so they are ready for hardware DMA. */
+ wmb();
+}
+
+/* Pass all the data to mpipe for egress. */
+static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
+ struct sk_buff *skb, unsigned char *headers, s64 slot)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int data_len = skb->data_len;
+ unsigned int p_len = sh->gso_size;
+ gxio_mpipe_edesc_t edesc_head = { { 0 } };
+ gxio_mpipe_edesc_t edesc_body = { { 0 } };
+ long f_id = -1; /* id of the current fragment */
+ long f_size = -1; /* size of the current fragment */
+ long f_used = -1; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ unsigned long tx_packets = 0, tx_bytes = 0;
+ unsigned int csum_start, sh_len;
+ int segment;
+
+ /* Prepare to egress the headers: set up header edesc. */
+ csum_start = skb_checksum_start_offset(skb);
+ sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ edesc_head.csum = 1;
+ edesc_head.csum_start = csum_start;
+ edesc_head.csum_dest = csum_start + skb->csum_offset;
+ edesc_head.xfer_size = sh_len;
+
+ /* This is only used to specify the TLB. */
+ edesc_head.stack_idx = large_buffer_stack;
+ edesc_body.stack_idx = large_buffer_stack;
+
+ /* Egress all the edescs. */
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ void *va;
+ unsigned char *buf;
+ unsigned int p_used = 0;
+
+ /* Egress the header. */
+ buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+ NET_IP_ALIGN;
+ edesc_head.va = va_to_tile_io_addr(buf);
+ gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
+ slot++;
+
+ /* Egress the payload. */
+ while (p_used < p_len) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ }
+
+ va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+
+ /* Egress a piece of the payload. */
+ edesc_body.va = va_to_tile_io_addr(va);
+ edesc_body.xfer_size = n;
+ edesc_body.bound = !(p_used < p_len);
+ gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
+ slot++;
+ }
+
+ tx_packets++;
+ tx_bytes += sh_len + p_len;
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* Update stats. */
+ tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
+ tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+}
+
+/* Do "TSO" handling for egress.
+ *
+ * Normally drivers set NETIF_F_TSO only to support hardware TSO;
+ * otherwise the stack uses scatter-gather to implement GSO in software.
+ * In our testing, enabling GSO support (via NETIF_F_SG) drops network
+ * performance down to around 7.5 Gbps on the 10G interfaces, although
+ * also dropping cpu utilization way down, to under 8%. But
+ * implementing "TSO" in the driver brings performance back up to line
+ * rate, while dropping cpu usage even further, to less than 4%. In
+ * practice, profiling of GSO shows that skb_segment() is what causes
+ * the performance overheads; we benefit in the driver from using
+ * preallocated memory to duplicate the TCP/IP headers.
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int channel = priv->echannel;
+ struct tile_net_egress *egress = &egress_for_echannel[channel];
+ struct tile_net_comps *comps = info->comps_for_echannel[channel];
+ gxio_mpipe_equeue_t *equeue = egress->equeue;
+ unsigned long irqflags;
+ int num_edescs;
+ s64 slot;
+
+ /* Determine how many mpipe edesc's are needed. */
+ num_edescs = tso_count_edescs(skb);
+
+ local_irq_save(irqflags);
+
+ /* Try to acquire a completion entry and an egress slot. */
+ slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+ if (slot < 0) {
+ local_irq_restore(irqflags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Set up copies of header data properly. */
+ tso_headers_prepare(skb, egress->headers, slot);
+
+ /* Actually pass the data to the network hardware. */
+ tso_egress(dev, equeue, skb, egress->headers, slot);
+
+ /* Add a completion record. */
+ add_comp(equeue, comps, slot + num_edescs - 1, skb);
+
+ local_irq_restore(irqflags);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer();
+
+ return NETDEV_TX_OK;
+}
+
+/* Analyze the body and frags for a transmit request. */
+static unsigned int tile_net_tx_frags(struct frag *frags,
+ struct sk_buff *skb,
+ void *b_data, unsigned int b_len)
+{
+ unsigned int i, n = 0;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ if (b_len != 0) {
+ frags[n].buf = b_data;
+ frags[n++].length = b_len;
+ }
+
+ for (i = 0; i < sh->nr_frags; i++) {
+ skb_frag_t *f = &sh->frags[i];
+ frags[n].buf = tile_net_frag_buf(f);
+ frags[n++].length = skb_frag_size(f);
+ }
+
+ return n;
+}
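For example (editorial sketch): an skb with 60 bytes in its linear area and two page fragments of 1000 and 500 bytes comes back from this function as n == 3, with entries {linear, 60}, {frag 0, 1000}, {frag 1, 500}; tile_net_tx() below then builds one edesc per entry and sets .bound on the last one to mark the end of the packet.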
+
+/* Help the kernel transmit a packet. */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+ struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+ gxio_mpipe_equeue_t *equeue = egress->equeue;
+ struct tile_net_comps *comps =
+ info->comps_for_echannel[priv->echannel];
+ unsigned int len = skb->len;
+ unsigned char *data = skb->data;
+ unsigned int num_edescs;
+ struct frag frags[MAX_FRAGS];
+ gxio_mpipe_edesc_t edescs[MAX_FRAGS];
+ unsigned long irqflags;
+ gxio_mpipe_edesc_t edesc = { { 0 } };
+ unsigned int i;
+ s64 slot;
+
+ if (skb_is_gso(skb))
+ return tile_net_tx_tso(skb, dev);
+
+ num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+ /* This is only used to specify the TLB. */
+ edesc.stack_idx = large_buffer_stack;
+
+ /* Prepare the edescs. */
+ for (i = 0; i < num_edescs; i++) {
+ edesc.xfer_size = frags[i].length;
+ edesc.va = va_to_tile_io_addr(frags[i].buf);
+ edescs[i] = edesc;
+ }
+
+ /* Mark the final edesc. */
+ edescs[num_edescs - 1].bound = 1;
+
+ /* Add checksum info to the initial edesc, if needed. */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ unsigned int csum_start = skb_checksum_start_offset(skb);
+ edescs[0].csum = 1;
+ edescs[0].csum_start = csum_start;
+ edescs[0].csum_dest = csum_start + skb->csum_offset;
+ }
+
+ local_irq_save(irqflags);
+
+ /* Try to acquire a completion entry and an egress slot. */
+ slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+ if (slot < 0) {
+ local_irq_restore(irqflags);
+ return NETDEV_TX_BUSY;
+ }
+
+ for (i = 0; i < num_edescs; i++)
+ gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+
+ /* Add a completion record. */
+ add_comp(equeue, comps, slot - 1, skb);
+
+ /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
+ tile_net_stats_add(1, &priv->stats.tx_packets);
+ tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
+ &priv->stats.tx_bytes);
+
+ local_irq_restore(irqflags);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer();
+
+ return NETDEV_TX_OK;
+}
+
+/* Return subqueue id on this core (one per core). */
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ return smp_processor_id();
+}
+
+/* Deal with a transmit timeout. */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ netif_wake_subqueue(dev, cpu);
+}
+
+/* Ioctl commands. */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Get system network statistics for device. */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ return &priv->stats;
+}
+
+/* Change the MTU. */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/* Change the Ethernet address of the NIC.
+ *
+ * The hypervisor driver does not support changing MAC address. However,
+ * the hardware does not do anything with the MAC address, so the address
+ * which gets used on outgoing packets, and which is accepted on incoming
+ * packets, is completely up to us.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+ disable_percpu_irq(ingress_irq);
+ tile_net_handle_ingress_irq(ingress_irq, NULL);
+ enable_percpu_irq(ingress_irq, 0);
+}
+#endif
+
+static const struct net_device_ops tile_net_ops = {
+ .ndo_open = tile_net_open,
+ .ndo_stop = tile_net_stop,
+ .ndo_start_xmit = tile_net_tx,
+ .ndo_select_queue = tile_net_select_queue,
+ .ndo_do_ioctl = tile_net_ioctl,
+ .ndo_get_stats = tile_net_get_stats,
+ .ndo_change_mtu = tile_net_change_mtu,
+ .ndo_tx_timeout = tile_net_tx_timeout,
+ .ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tile_net_netpoll,
+#endif
+};
+
+/* The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->netdev_ops = &tile_net_ops;
+ dev->watchdog_timeo = TILE_NET_TIMEOUT;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_TSO;
+ dev->mtu = 1500;
+}
+
+/* Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static void tile_net_dev_init(const char *name, const uint8_t *mac)
+{
+ int ret;
+ int i;
+ int nz_addr = 0;
+ struct net_device *dev;
+ struct tile_net_priv *priv;
+
+ /* HACK: Ignore "loop" links. */
+ if (strncmp(name, "loop", 4) == 0)
+ return;
+
+ /* Allocate the device structure. Normally, "name" is a
+ * template, instantiated by register_netdev(), but not for us.
+ */
+ dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
+ NR_CPUS, 1);
+ if (!dev) {
+ pr_err("alloc_netdev_mqs(%s) failed\n", name);
+ return;
+ }
+
+ /* Initialize "priv". */
+ priv = netdev_priv(dev);
+ memset(priv, 0, sizeof(*priv));
+ priv->dev = dev;
+ priv->channel = -1;
+ priv->loopify_channel = -1;
+ priv->echannel = -1;
+
+ /* Get the MAC address and set it in the device struct; this must
+ * be done before the device is opened. If the MAC is all zeroes,
+ * we use a random address, since we're probably on the simulator.
+ */
+ for (i = 0; i < 6; i++)
+ nz_addr |= mac[i];
+
+ if (nz_addr) {
+ memcpy(dev->dev_addr, mac, 6);
+ dev->addr_len = 6;
+ } else {
+ random_ether_addr(dev->dev_addr);
+ }
+
+ /* Register the network device. */
+ ret = register_netdev(dev);
+ if (ret) {
+ netdev_err(dev, "register_netdev failed %d\n", ret);
+ free_netdev(dev);
+ return;
+ }
+}
+
+/* Per-cpu module initialization. */
+static void tile_net_init_module_percpu(void *unused)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ int my_cpu = smp_processor_id();
+
+ info->has_iqueue = false;
+
+ info->my_cpu = my_cpu;
+
+ /* Initialize the egress timer. */
+ hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ info->egress_timer.function = tile_net_handle_egress_timer;
+}
+
+/* Module initialization. */
+static int __init tile_net_init_module(void)
+{
+ int i;
+ char name[GXIO_MPIPE_LINK_NAME_LEN];
+ uint8_t mac[6];
+
+ pr_info("Tilera Network Driver\n");
+
+ mutex_init(&tile_net_devs_for_channel_mutex);
+
+ /* Initialize each CPU. */
+ on_each_cpu(tile_net_init_module_percpu, NULL, 1);
+
+ /* Find out what devices we have, and initialize them. */
+ for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
+ tile_net_dev_init(name, mac);
+
+ if (!network_cpus_init())
+ network_cpus_map = *cpu_online_mask;
+
+ return 0;
+}
+
+module_init(tile_net_init_module);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4ffcd57b011b..2857ab078aac 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -478,6 +478,7 @@ struct netvsc_device {
u32 nvsp_version;
atomic_t num_outstanding_sends;
+ wait_queue_head_t wait_drain;
bool start_remove;
bool destroy;
/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8b919471472f..0c569831db5a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
if (!net_device)
return NULL;
+ init_waitqueue_head(&net_device->wait_drain);
net_device->start_remove = false;
net_device->destroy = false;
net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/* Wait for all send completions */
- while (atomic_read(&net_device->num_outstanding_sends)) {
- dev_info(&device->device,
- "waiting for %d requests to complete...\n",
- atomic_read(&net_device->num_outstanding_sends));
- udelay(100);
- }
+ wait_event(net_device->wait_drain,
+ atomic_read(&net_device->num_outstanding_sends) == 0);
netvsc_disconnect_vsp(net_device);
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
+ if (net_device->destroy && num_outstanding_sends == 0)
+ wake_up(&net_device->wait_drain);
+
if (netif_queue_stopped(ndev) && !net_device->start_remove &&
(hv_ringbuf_avail_percent(&device->channel->outbound)
> RING_AVAIL_PERCENT_HIWATER ||
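The netvsc hunks above replace a udelay() polling loop with a sleeping wait: the completion path wakes the remover once the last outstanding send finishes. A minimal sketch of that pattern with hypothetical names; the waitqueue is assumed to have been set up with init_waitqueue_head(), as the alloc_net_device() hunk does for wait_drain:

#include <linux/wait.h>
#include <linux/atomic.h>

struct drain_example {
	atomic_t outstanding;
	wait_queue_head_t wait_drain;	/* init_waitqueue_head() at alloc time */
	bool destroy;
};

/* Completion path: wake the waiter when the last send completes. */
static void example_send_complete(struct drain_example *d)
{
	if (atomic_dec_return(&d->outstanding) == 0 && d->destroy)
		wake_up(&d->wait_drain);
}

/* Teardown path: sleep until every outstanding send has drained. */
static void example_drain(struct drain_example *d)
{
	d->destroy = true;
	wait_event(d->wait_drain, atomic_read(&d->outstanding) == 0);
}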
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 031d8e8ed1ad..595205406d73 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -316,7 +316,7 @@ config AU1000_FIR
tristate "Alchemy IrDA SIR/FIR"
depends on IRDA && MIPS_ALCHEMY
help
- Say Y/M here to build suppor the the IrDA peripheral on the
+ Say Y/M here to build support the IrDA peripheral on the
Alchemy Au1000 and Au1100 SoCs.
Say M to build a module; it will be called au1k_ir.ko
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 5ac46f5226f3..47f8e8939266 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
+#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
+#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
+ /* INTR pin used: speed/link/duplex will cause an interrupt */
+ c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+ if (c < 0)
+ return c;
+
if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
/* Additional delay (2ns) used to adjust RX clock phase
* at RGMII interface */
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 39ea0674dcde..5c120189ec86 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
struct mdio_mux_parent_bus *pb = cb->parent;
int r;
- mutex_lock(&pb->mii_bus->mdio_lock);
+ /* In theory multiple mdio_mux could be stacked, thus creating
+ * more than a single level of nesting. But in practice,
+ * SINGLE_DEPTH_NESTING will cover the vast majority of use
+ * cases. We use it, instead of trying to handle the general
+ * case.
+ */
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
int r;
- mutex_lock(&pb->mii_bus->mdio_lock);
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
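Both the child and the parent MDIO bus embed the same mdio_lock, so taking the parent's lock while a child transaction is in flight looks to lockdep like recursive locking of a single class. mutex_lock_nested() with SINGLE_DEPTH_NESTING annotates that one deliberate level of nesting. A minimal sketch with hypothetical structures, not the mux's actual data types:

#include <linux/mutex.h>

struct bus_example {
	struct mutex lock;
	struct bus_example *parent;	/* NULL at the top level */
};

static void example_forward_to_parent(struct bus_example *child)
{
	mutex_lock(&child->lock);
	/* Same lock class as child->lock, so tell lockdep this is one
	 * intentional level of nesting rather than a self-deadlock.
	 */
	mutex_lock_nested(&child->parent->lock, SINGLE_DEPTH_NESTING);
	/* ... perform the access on the parent bus ... */
	mutex_unlock(&child->parent->lock);
	mutex_unlock(&child->lock);
}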
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 683ef1ce5519..5061608f408c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
}
/**
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the mii_bus.
*
* Returns a pointer to the mii_bus, or NULL if none found.
*
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 590f902deb6b..9d6c80c8a0cf 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -161,7 +161,7 @@ static struct phy_driver ks8051_driver = {
static struct phy_driver ks8001_driver = {
.phy_id = PHY_ID_KS8001,
.name = "Micrel KS8001 or KS8721",
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
@@ -174,7 +174,7 @@ static struct phy_driver ks8001_driver = {
static struct phy_driver ksz9021_driver = {
.phy_id = PHY_ID_KSZ9021,
- .phy_id_mask = 0x000fff10,
+ .phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
@@ -240,8 +240,8 @@ MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
- { PHY_ID_KSZ9021, 0x000fff10 },
- { PHY_ID_KS8001, 0x00fffff0 },
+ { PHY_ID_KSZ9021, 0x000ffffe },
+ { PHY_ID_KS8001, 0x00ffffff },
{ PHY_ID_KS8737, 0x00fffff0 },
{ PHY_ID_KS8041, 0x00fffff0 },
{ PHY_ID_KS8051, 0x00fffff0 },
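The wider phy_id_mask values matter because the PHY core matches a device to a driver by comparing IDs only under the driver's mask, so changing the mask changes which silicon revisions bind. A minimal sketch of that comparison (illustrative helper, not the core's actual function):

#include <linux/types.h>

/* Only the bits set in drv_mask take part in the match; a mask of
 * 0x00ffffff therefore distinguishes devices that 0x00fffff0 would
 * have treated as identical.
 */
static bool example_phy_id_matches(u32 dev_id, u32 drv_id, u32 drv_mask)
{
	return (dev_id & drv_mask) == (drv_id & drv_mask);
}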
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 71e2b0523bc2..3ae80eccd0ef 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -35,6 +35,7 @@
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
@@ -321,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
}
- if ((size > dev->net->mtu + ETH_HLEN) ||
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
(size + offset > skb->len)) {
netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
size);
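The RX length check above previously capped frames at MTU + ETH_HLEN, which rejected valid 802.1Q-tagged packets carrying 4 extra bytes. A minimal sketch of the relaxed bound (hypothetical helper name):

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/types.h>

/* A VLAN-tagged frame is VLAN_HLEN (4) bytes longer than an untagged
 * one, so the upper bound has to allow for the tag.
 */
static bool example_rx_len_ok(u32 size, u32 mtu)
{
	return size <= mtu + ETH_HLEN + VLAN_HLEN;
}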
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 964031e3da87..a28a983d465e 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
#define USB_PRODUCT_IPHONE_4 0x1297
+#define USB_PRODUCT_IPAD 0x129a
#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define USB_PRODUCT_IPHONE_4S 0x12a0
@@ -101,6 +102,10 @@ static struct usb_device_id ipheth_table[] = {
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
{ USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index add1064f755d..03c2d8d653df 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return skb->len > 0;
}
+static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+{
+ u8 *buf = urb->transfer_buffer;
+ bool link;
+
+ if (urb->actual_length < 16)
+ return;
+
+ link = !(buf[1] & 0x20);
+ if (netif_carrier_ok(dev->net) != link) {
+ if (link) {
+ netif_carrier_on(dev->net);
+ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+ } else
+ netif_carrier_off(dev->net);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+}
+
static const struct driver_info moschip_info = {
.description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
- .flags = FLAG_ETHER,
+ .flags = FLAG_ETHER | FLAG_LINK_INTR,
+ .status = mcs7830_status,
.in = 1,
.out = 2,
};
@@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = {
.description = "Sitecom LN-30 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
- .flags = FLAG_ETHER,
+ .flags = FLAG_ETHER | FLAG_LINK_INTR,
+ .status = mcs7830_status,
.in = 1,
.out = 2,
};
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 380dbea6109d..a051cedd64bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -197,6 +197,10 @@ err:
static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
{
struct usbnet *dev = usb_get_intfdata(intf);
+
+ /* can be called while disconnecting */
+ if (!dev)
+ return 0;
return qmi_wwan_manage_power(dev, on);
}
@@ -257,29 +261,6 @@ err:
return rv;
}
-/* Gobi devices uses identical class/protocol codes for all interfaces regardless
- * of function. Some of these are CDC ACM like and have the exact same endpoints
- * we are looking for. This leaves two possible strategies for identifying the
- * correct interface:
- * a) hardcoding interface number, or
- * b) use the fact that the wwan interface is the only one lacking additional
- * (CDC functional) descriptors
- *
- * Let's see if we can get away with the generic b) solution.
- */
-static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf)
-{
- int rv = -EINVAL;
-
- /* ignore any interface with additional descriptors */
- if (intf->cur_altsetting->extralen)
- goto err;
-
- rv = qmi_wwan_bind_shared(dev, intf);
-err:
- return rv;
-}
-
static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
{
struct usb_driver *subdriver = (void *)dev->data[0];
@@ -347,15 +328,15 @@ static const struct driver_info qmi_wwan_shared = {
.manage_power = qmi_wwan_manage_power,
};
-static const struct driver_info qmi_wwan_gobi = {
- .description = "Qualcomm Gobi wwan/QMI device",
+static const struct driver_info qmi_wwan_force_int0 = {
+ .description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_gobi,
+ .bind = qmi_wwan_bind_shared,
.unbind = qmi_wwan_unbind_shared,
.manage_power = qmi_wwan_manage_power,
+ .data = BIT(0), /* interface whitelist bitmap */
};
-/* ZTE suck at making USB descriptors */
static const struct driver_info qmi_wwan_force_int1 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
@@ -365,6 +346,24 @@ static const struct driver_info qmi_wwan_force_int1 = {
.data = BIT(1), /* interface whitelist bitmap */
};
+static const struct driver_info qmi_wwan_force_int2 = {
+ .description = "Qualcomm WWAN/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind_shared,
+ .manage_power = qmi_wwan_manage_power,
+ .data = BIT(2), /* interface whitelist bitmap */
+};
+
+static const struct driver_info qmi_wwan_force_int3 = {
+ .description = "Qualcomm WWAN/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind_shared,
+ .manage_power = qmi_wwan_manage_power,
+ .data = BIT(3), /* interface whitelist bitmap */
+};
+
static const struct driver_info qmi_wwan_force_int4 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
@@ -390,16 +389,23 @@ static const struct driver_info qmi_wwan_force_int4 = {
static const struct driver_info qmi_wwan_sierra = {
.description = "Sierra Wireless wwan/QMI device",
.flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_gobi,
+ .bind = qmi_wwan_bind_shared,
.unbind = qmi_wwan_unbind_shared,
.manage_power = qmi_wwan_manage_power,
.data = BIT(8) | BIT(19), /* interface whitelist bitmap */
};
#define HUAWEI_VENDOR_ID 0x12D1
+
+/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
+#define QMI_GOBI1K_DEVICE(vend, prod) \
+ USB_DEVICE(vend, prod), \
+ .driver_info = (unsigned long)&qmi_wwan_force_int3
+
+/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
#define QMI_GOBI_DEVICE(vend, prod) \
USB_DEVICE(vend, prod), \
- .driver_info = (unsigned long)&qmi_wwan_gobi
+ .driver_info = (unsigned long)&qmi_wwan_force_int0
static const struct usb_device_id products[] = {
{ /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -501,6 +507,15 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
+ { /* ZTE MF60 */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x1402,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int2,
+ },
{ /* Sierra Wireless MC77xx in QMI mode */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x1199,
@@ -510,20 +525,24 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_sierra,
},
- {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
- {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
- {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
- {QMI_GOBI_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
- {QMI_GOBI_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
- {QMI_GOBI_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
- {QMI_GOBI_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
+
+ /* Gobi 1000 devices */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+ {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
+ {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
+ {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
+
+ /* Gobi 2000 and 3000 devices */
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
@@ -547,6 +566,8 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{ } /* END */
};
MODULE_DEVICE_TABLE(usb, products);
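The force_intN driver_info variants above carry an interface-number whitelist in .data as a bitmap (BIT(0), BIT(2), BIT(3), ...), replacing the old descriptor-sniffing Gobi bind. A minimal sketch of how such a bitmap could be consulted at bind time; the helper name and the "empty bitmap means unrestricted" convention are illustrative assumptions, not the driver's exact code:

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: accept an interface if the whitelist is empty or
 * if the bit for its interface number is set.
 */
static bool example_iface_whitelisted(unsigned long whitelist, unsigned int ifnum)
{
	if (!whitelist)
		return true;
	return test_bit(ifnum, &whitelist);
}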
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 3faef5670d1f..d75d1f56becf 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
}
static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
.rx_urb_size = 8 * 1024,
.whitelist = {
.infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
}
};
-static const struct driver_info sierra_net_info_68A3 = {
+static const struct driver_info sierra_net_info_direct_ip = {
.description = "Sierra Wireless USB-to-WWAN Modem",
.flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
.status = sierra_net_status,
.rx_fixup = sierra_net_rx_fixup,
.tx_fixup = sierra_net_tx_fixup,
- .data = (unsigned long)&sierra_net_info_data_68A3,
+ .data = (unsigned long)&sierra_net_info_data_direct_ip,
};
static const struct usb_device_id products[] = {
{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
- .driver_info = (unsigned long) &sierra_net_info_68A3},
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
{}, /* last item */
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9f58330f1312..aba769d77459 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -796,11 +796,13 @@ int usbnet_open (struct net_device *net)
if (info->manage_power) {
retval = info->manage_power(dev, 1);
if (retval < 0)
- goto done;
+ goto done_manage_power_error;
usb_autopm_put_interface(dev->intf);
}
return retval;
+done_manage_power_error:
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
done:
usb_autopm_put_interface(dev->intf);
done_nopm:
@@ -876,9 +878,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
- strncpy (info->driver, dev->driver_name, sizeof info->driver);
- strncpy (info->version, DRIVER_VERSION, sizeof info->version);
- strncpy (info->fw_version, dev->driver_info->description,
+ strlcpy (info->driver, dev->driver_name, sizeof info->driver);
+ strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
+ strlcpy (info->fw_version, dev->driver_info->description,
sizeof info->fw_version);
usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
@@ -1202,6 +1204,21 @@ deferred:
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);
+static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+{
+ struct urb *urb;
+ int i;
+
+ /* don't refill the queue all at once */
+ for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
+ urb = usb_alloc_urb(0, flags);
+ if (urb != NULL) {
+ if (rx_submit(dev, urb, flags) == -ENOLINK)
+ return;
+ }
+ }
+}
+
/*-------------------------------------------------------------------------*/
// tasklet (work deferred from completions, in_irq) or timer
@@ -1241,26 +1258,14 @@ static void usbnet_bh (unsigned long param)
!timer_pending (&dev->delay) &&
!test_bit (EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
- int qlen = RX_QLEN (dev);
-
- if (temp < qlen) {
- struct urb *urb;
- int i;
-
- // don't refill the queue all at once
- for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
- urb = usb_alloc_urb (0, GFP_ATOMIC);
- if (urb != NULL) {
- if (rx_submit (dev, urb, GFP_ATOMIC) ==
- -ENOLINK)
- return;
- }
- }
+
+ if (temp < RX_QLEN(dev)) {
+ rx_alloc_submit(dev, GFP_ATOMIC);
if (temp != dev->rxq.qlen)
netif_dbg(dev, link, dev->net,
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
- if (dev->rxq.qlen < qlen)
+ if (dev->rxq.qlen < RX_QLEN(dev))
tasklet_schedule (&dev->bh);
}
if (dev->txq.qlen < TX_QLEN (dev))
@@ -1513,6 +1518,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
spin_lock_irq(&dev->txq.lock);
/* don't autosuspend while transmitting */
if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
+ dev->suspend_count--;
spin_unlock_irq(&dev->txq.lock);
return -EBUSY;
} else {
@@ -1569,6 +1575,13 @@ int usbnet_resume (struct usb_interface *intf)
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+ /* handle remote wakeup ASAP */
+ if (!dev->wait &&
+ netif_device_present(dev->net) &&
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags))
+ rx_alloc_submit(dev, GFP_KERNEL);
+
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
tasklet_schedule (&dev->bh);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ce6995e8d08..f18149ae2588 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
- struct u64_stats_sync syncp;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
u64 tx_bytes;
u64 tx_packets;
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
hdr = skb_vnet_hdr(skb);
- u64_stats_update_begin(&stats->syncp);
+ u64_stats_update_begin(&stats->rx_syncp);
stats->rx_bytes += skb->len;
stats->rx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_end(&stats->rx_syncp);
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- u64_stats_update_begin(&stats->syncp);
+ u64_stats_update_begin(&stats->tx_syncp);
stats->tx_bytes += skb->len;
stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_end(&stats->tx_syncp);
tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
u64 tpackets, tbytes, rpackets, rbytes;
do {
- start = u64_stats_fetch_begin(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->tx_syncp);
tpackets = stats->tx_packets;
tbytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin(&stats->rx_syncp);
rpackets = stats->rx_packets;
rbytes = stats->rx_bytes;
- } while (u64_stats_fetch_retry(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
@@ -1231,11 +1236,6 @@ static int virtnet_freeze(struct virtio_device *vdev)
vi->config_enable = false;
mutex_unlock(&vi->config_lock);
- virtqueue_disable_cb(vi->rvq);
- virtqueue_disable_cb(vi->svq);
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
- virtqueue_disable_cb(vi->cvq);
-
netif_device_detach(vi->dev);
cancel_delayed_work_sync(&vi->refill);
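The stats rework above gives the transmit and receive paths their own u64_stats_sync, since the two directions update their counters independently and a shared seqcount would let a tx writer make an rx reader spin (and vice versa). A minimal per-direction sketch with a hypothetical struct; the rx side repeats the same pattern with its own rx_syncp:

#include <linux/u64_stats_sync.h>

struct example_stats {
	struct u64_stats_sync tx_syncp;
	u64 tx_packets;
};

/* Writer (tx completion path). */
static void example_tx_account(struct example_stats *s)
{
	u64_stats_update_begin(&s->tx_syncp);
	s->tx_packets++;
	u64_stats_update_end(&s->tx_syncp);
}

/* Reader: retry if a tx update raced with the fetch. */
static u64 example_tx_read(struct example_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->tx_syncp);
		packets = s->tx_packets;
	} while (u64_stats_fetch_retry(&s->tx_syncp, start));

	return packets;
}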
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 520a4b2eb9cc..a747c632597a 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -7233,8 +7233,8 @@ static int airo_get_aplist(struct net_device *dev,
}
} else {
dwrq->flags = 1; /* Should be define'd */
- memcpy(extra + sizeof(struct sockaddr)*i,
- &qual, sizeof(struct iw_quality)*i);
+ memcpy(extra + sizeof(struct sockaddr) * i, qual,
+ sizeof(struct iw_quality) * i);
}
dwrq->length = i;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index c54b7d37bff1..420d69b2674c 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -143,6 +143,7 @@ struct ath_common {
u32 keymax;
DECLARE_BITMAP(keymap, ATH_KEYMAX);
DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
enum ath_crypt_caps crypt_caps;
unsigned int clockrate;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0ba81a66061f..44ad6fe0278f 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1045,11 +1045,11 @@ ath5k_drain_tx_buffs(struct ath5k_hw *ah)
ath5k_txbuf_free_skb(ah, bf);
- spin_lock_bh(&ah->txbuflock);
+ spin_lock(&ah->txbuflock);
list_move_tail(&bf->list, &ah->txbuf);
ah->txbuf_len++;
txq->txq_len--;
- spin_unlock_bh(&ah->txbuflock);
+ spin_unlock(&ah->txbuflock);
}
txq->link = NULL;
txq->txq_poll_mark = false;
@@ -2415,6 +2415,22 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
* Initialization routines *
\*************************/
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 4, .types =
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+};
+
int __devinit
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
@@ -2436,6 +2452,9 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->iface_combinations = &if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
/* SW support for IBSS_RSN is provided by mac80211 */
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 28a65d3a03d0..b869a358ce43 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -693,8 +693,8 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
ie, 2 + vif->ssid_len + beacon_ie_len,
0, GFP_KERNEL);
if (bss)
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added bss %pM to "
- "cfg80211\n", bssid);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "added bss %pM to cfg80211\n", bssid);
kfree(ie);
} else
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
@@ -882,6 +882,32 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
vif->sme_state = SME_DISCONNECTED;
}
+static int ath6kl_set_probed_ssids(struct ath6kl *ar,
+ struct ath6kl_vif *vif,
+ struct cfg80211_ssid *ssids, int n_ssids)
+{
+ u8 i;
+
+ if (n_ssids > MAX_PROBED_SSID_INDEX)
+ return -EINVAL;
+
+ for (i = 0; i < n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
+ ssids[i].ssid_len ?
+ SPECIFIC_SSID_FLAG : ANY_SSID_FLAG,
+ ssids[i].ssid_len,
+ ssids[i].ssid);
+ }
+
+ /* Make sure no old entries are left behind */
+ for (i = n_ssids; i < MAX_PROBED_SSID_INDEX; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
+ DISABLE_SSID_FLAG, 0, NULL);
+ }
+
+ return 0;
+}
+
static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
@@ -899,36 +925,25 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
if (!ar->usr_bss_filter) {
clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
- ret = ath6kl_wmi_bssfilter_cmd(
- ar->wmi, vif->fw_vif_idx,
- (test_bit(CONNECTED, &vif->flags) ?
- ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0);
if (ret) {
ath6kl_err("couldn't set bss filtering\n");
return ret;
}
}
- if (request->n_ssids && request->ssids[0].ssid_len) {
- u8 i;
-
- if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1))
- request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
-
- for (i = 0; i < request->n_ssids; i++)
- ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
- i + 1, SPECIFIC_SSID_FLAG,
- request->ssids[i].ssid_len,
- request->ssids[i].ssid);
- }
+ ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
+ request->n_ssids);
+ if (ret < 0)
+ return ret;
/* this also clears IE in fw if it's not set */
ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
- ath6kl_err("failed to set Probe Request appie for "
- "scan");
+ ath6kl_err("failed to set Probe Request appie for scan");
return ret;
}
@@ -945,8 +960,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL);
if (channels == NULL) {
- ath6kl_warn("failed to set scan channels, "
- "scan all channels");
+ ath6kl_warn("failed to set scan channels, scan all channels");
n_channels = 0;
}
@@ -1018,6 +1032,20 @@ out:
vif->scan_req = NULL;
}
+void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
+ enum wmi_phy_mode mode)
+{
+ enum nl80211_channel_type type;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "channel switch notify nw_type %d freq %d mode %d\n",
+ vif->nw_type, freq, mode);
+
+ type = (mode == WMI_11G_HT20) ? NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT;
+
+ cfg80211_ch_switch_notify(vif->ndev, freq, type);
+}
+
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr,
@@ -1111,9 +1139,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
ar->ap_mode_bkey.key_len = key->key_len;
memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
if (!test_bit(CONNECTED, &vif->flags)) {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
- "key configuration until AP mode has been "
- "started\n");
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delay initial group key configuration until AP mode has been started\n");
/*
* The key will be set in ath6kl_connect_ap_mode() once
* the connected event is received from the target.
@@ -1129,8 +1156,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
* the AP mode has properly started
* (ath6kl_install_statioc_wep_keys).
*/
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
- "until AP mode has been started\n");
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delay WEP key configuration until AP mode has been started\n");
vif->wep_key_list[key_index].key_len = key->key_len;
memcpy(vif->wep_key_list[key_index].key, key->key,
key->key_len);
@@ -1962,8 +1989,7 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
sizeof(discvr_pattern), discvr_offset,
discvr_pattern, discvr_mask);
if (ret) {
- ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR "
- "pattern\n");
+ ath6kl_err("failed to add WOW mDNS/SSDP/LLMNR pattern\n");
return ret;
}
}
@@ -2031,6 +2057,10 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
u8 index = 0;
__be32 ips[MAX_IP_ADDRS];
+ /* The FW currently can't support multi-vif WoW properly. */
+ if (ar->num_vif > 1)
+ return -EIO;
+
vif = ath6kl_vif_first(ar);
if (!vif)
return -EIO;
@@ -2044,6 +2074,13 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
return -EINVAL;
+ if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags)) {
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, false);
+ if (ret)
+ return ret;
+ }
+
/* Clear existing WOW patterns */
for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
@@ -2147,8 +2184,8 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_HOST_MODE_AWAKE);
if (ret) {
- ath6kl_warn("Failed to configure host sleep mode for "
- "wow resume: %d\n", ret);
+ ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
+ ret);
ar->state = ATH6KL_STATE_WOW;
return ret;
}
@@ -2172,6 +2209,13 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
ar->state = ATH6KL_STATE_ON;
+ if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags)) {
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, true);
+ if (ret)
+ return ret;
+ }
+
netif_wake_queue(vif->ndev);
return 0;
@@ -2186,8 +2230,10 @@ static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
if (!vif)
return -EIO;
- if (!ath6kl_cfg80211_ready(vif))
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_err("deepsleep failed as wmi is not ready\n");
return -EIO;
+ }
ath6kl_cfg80211_stop_all(ar);
@@ -2447,6 +2493,24 @@ static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
band, htcap);
}
+static int ath6kl_restore_htcap(struct ath6kl_vif *vif)
+{
+ struct wiphy *wiphy = vif->ar->wiphy;
+ int band, ret = 0;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+
+ ret = ath6kl_set_htcap(vif, band,
+ wiphy->bands[band]->ht_cap.ht_supported);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static bool ath6kl_is_p2p_ie(const u8 *pos)
{
return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
@@ -2568,28 +2632,34 @@ static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
/* skip element id and length */
rsn_ie += 2;
- /* skip version, group cipher */
- if (rsn_ie_len < 6)
+ /* skip version */
+ if (rsn_ie_len < 2)
return -EINVAL;
- rsn_ie += 6;
- rsn_ie_len -= 6;
+ rsn_ie += 2;
+ rsn_ie_len -= 2;
+
+ /* skip group cipher suite */
+ if (rsn_ie_len < 4)
+ return 0;
+ rsn_ie += 4;
+ rsn_ie_len -= 4;
/* skip pairwise cipher suite */
if (rsn_ie_len < 2)
- return -EINVAL;
- cnt = *((u16 *) rsn_ie);
+ return 0;
+ cnt = get_unaligned_le16(rsn_ie);
rsn_ie += (2 + cnt * 4);
rsn_ie_len -= (2 + cnt * 4);
/* skip akm suite */
if (rsn_ie_len < 2)
- return -EINVAL;
- cnt = *((u16 *) rsn_ie);
+ return 0;
+ cnt = get_unaligned_le16(rsn_ie);
rsn_ie += (2 + cnt * 4);
rsn_ie_len -= (2 + cnt * 4);
if (rsn_ie_len < 2)
- return -EINVAL;
+ return 0;
memcpy(rsn_capab, rsn_ie, 2);
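The RSN-capability parsing above now reads the cipher and AKM suite counts with get_unaligned_le16() instead of dereferencing a u16 pointer: the counts inside an information element are little-endian and carry no alignment guarantee, so a plain cast is unsafe on strict-alignment CPUs and byte-swapped on big-endian ones. A minimal sketch (hypothetical helper):

#include <asm/unaligned.h>
#include <linux/types.h>

/* Read a little-endian 16-bit count from an arbitrarily aligned IE
 * position, independent of host endianness.
 */
static u16 example_read_ie_count(const u8 *pos)
{
	return get_unaligned_le16(pos);
}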
@@ -2766,6 +2836,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
return res;
}
+ memcpy(&vif->profile, &p, sizeof(p));
res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
if (res < 0)
return res;
@@ -2801,13 +2872,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
clear_bit(CONNECTED, &vif->flags);
/* Restore ht setting in firmware */
- if (ath6kl_set_htcap(vif, IEEE80211_BAND_2GHZ, true))
- return -EIO;
-
- if (ath6kl_set_htcap(vif, IEEE80211_BAND_5GHZ, true))
- return -EIO;
-
- return 0;
+ return ath6kl_restore_htcap(vif);
}
static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -3081,7 +3146,6 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
struct ath6kl_vif *vif = netdev_priv(dev);
u16 interval;
int ret;
- u8 i;
if (ar->state != ATH6KL_STATE_ON)
return -EIO;
@@ -3089,29 +3153,23 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (vif->sme_state != SME_DISCONNECTED)
return -EBUSY;
+ /* The FW currently can't support multi-vif WoW properly. */
+ if (ar->num_vif > 1)
+ return -EIO;
+
ath6kl_cfg80211_scan_complete_event(vif, true);
- for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
- ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
- i, DISABLE_SSID_FLAG,
- 0, NULL);
- }
+ ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
+ request->n_ssids);
+ if (ret < 0)
+ return ret;
/* fw uses seconds, also make sure that it's >0 */
interval = max_t(u16, 1, request->interval / 1000);
ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
interval, interval,
- 10, 0, 0, 0, 3, 0, 0, 0);
-
- if (request->n_ssids && request->ssids[0].ssid_len) {
- for (i = 0; i < request->n_ssids; i++) {
- ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
- i, SPECIFIC_SSID_FLAG,
- request->ssids[i].ssid_len,
- request->ssids[i].ssid);
- }
- }
+ vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_WOW_MODE_ENABLE,
@@ -3271,8 +3329,7 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
- ath6kl_warn("ath6kl_deep_sleep_enable: "
- "wmi_powermode_cmd failed\n");
+ ath6kl_warn("ath6kl_deep_sleep_enable: wmi_powermode_cmd failed\n");
return;
}
@@ -3352,6 +3409,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
vif->next_mode = nw_type;
vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
+ vif->bg_scan_period = 0;
vif->htcap.ht_enable = true;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
@@ -3393,6 +3451,7 @@ err:
int ath6kl_cfg80211_init(struct ath6kl *ar)
{
struct wiphy *wiphy = ar->wiphy;
+ bool band_2gig = false, band_5gig = false, ht = false;
int ret;
wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
@@ -3413,8 +3472,46 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
/* max num of ssids that can be probed during scanning */
wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
- wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
- wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ switch (ar->hw.cap) {
+ case WMI_11AN_CAP:
+ ht = true;
+ case WMI_11A_CAP:
+ band_5gig = true;
+ break;
+ case WMI_11GN_CAP:
+ ht = true;
+ case WMI_11G_CAP:
+ band_2gig = true;
+ break;
+ case WMI_11AGN_CAP:
+ ht = true;
+ case WMI_11AG_CAP:
+ band_2gig = true;
+ band_5gig = true;
+ break;
+ default:
+ ath6kl_err("invalid phy capability!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Even if the fw has HT support, advertise HT cap only when
+ * the firmware has support to override RSN capability, otherwise
+ * 4-way handshake would fail.
+ */
+ if (!(ht &&
+ test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+ ar->fw_capabilities))) {
+ ath6kl_band_2ghz.ht_cap.cap = 0;
+ ath6kl_band_2ghz.ht_cap.ht_supported = false;
+ ath6kl_band_5ghz.ht_cap.cap = 0;
+ ath6kl_band_5ghz.ht_cap.ht_supported = false;
+ }
+ if (band_2gig)
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ if (band_5gig)
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->cipher_suites = cipher_suites;
@@ -3430,7 +3527,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->wowlan.pattern_min_len = 1;
wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
- wiphy->max_sched_scan_ssids = 10;
+ wiphy->max_sched_scan_ssids = MAX_PROBED_SSID_INDEX;
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
WIPHY_FLAG_HAVE_AP_SME |
@@ -3447,8 +3544,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
ar->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
- NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
ret = wiphy_register(wiphy);
if (ret < 0) {
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index c5def436417f..5ea8cbb79f43 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -28,6 +28,8 @@ enum ath6kl_cfg_suspend_mode {
struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
enum nl80211_iftype type,
u8 fw_vif_idx, u8 nw_type);
+void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
+ enum wmi_phy_mode mode);
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 9d67964a51dd..4d9c6f142698 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -126,9 +126,9 @@ struct ath6kl_fw_ie {
#define AR6003_HW_2_0_FIRMWARE_FILE "athwlan.bin.z77"
#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "athtcmd_ram.bin"
#define AR6003_HW_2_0_PATCH_FILE "data.patch.bin"
-#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
+#define AR6003_HW_2_0_BOARD_DATA_FILE AR6003_HW_2_0_FW_DIR "/bdata.bin"
#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+ AR6003_HW_2_0_FW_DIR "/bdata.SD31.bin"
/* AR6003 3.0 definitions */
#define AR6003_HW_2_1_1_VERSION 0x30000582
@@ -139,25 +139,33 @@ struct ath6kl_fw_ie {
#define AR6003_HW_2_1_1_UTF_FIRMWARE_FILE "utf.bin"
#define AR6003_HW_2_1_1_TESTSCRIPT_FILE "nullTestFlow.bin"
#define AR6003_HW_2_1_1_PATCH_FILE "data.patch.bin"
-#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
+#define AR6003_HW_2_1_1_BOARD_DATA_FILE AR6003_HW_2_1_1_FW_DIR "/bdata.bin"
#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
+ AR6003_HW_2_1_1_FW_DIR "/bdata.SD31.bin"
/* AR6004 1.0 definitions */
#define AR6004_HW_1_0_VERSION 0x30000623
#define AR6004_HW_1_0_FW_DIR "ath6k/AR6004/hw1.0"
#define AR6004_HW_1_0_FIRMWARE_FILE "fw.ram.bin"
-#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin"
+#define AR6004_HW_1_0_BOARD_DATA_FILE AR6004_HW_1_0_FW_DIR "/bdata.bin"
#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6004/hw1.0/bdata.DB132.bin"
+ AR6004_HW_1_0_FW_DIR "/bdata.DB132.bin"
/* AR6004 1.1 definitions */
#define AR6004_HW_1_1_VERSION 0x30000001
#define AR6004_HW_1_1_FW_DIR "ath6k/AR6004/hw1.1"
#define AR6004_HW_1_1_FIRMWARE_FILE "fw.ram.bin"
-#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin"
+#define AR6004_HW_1_1_BOARD_DATA_FILE AR6004_HW_1_1_FW_DIR "/bdata.bin"
#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6004/hw1.1/bdata.DB132.bin"
+ AR6004_HW_1_1_FW_DIR "/bdata.DB132.bin"
+
+/* AR6004 1.2 definitions */
+#define AR6004_HW_1_2_VERSION 0x300007e8
+#define AR6004_HW_1_2_FW_DIR "ath6k/AR6004/hw1.2"
+#define AR6004_HW_1_2_FIRMWARE_FILE "fw.ram.bin"
+#define AR6004_HW_1_2_BOARD_DATA_FILE AR6004_HW_1_2_FW_DIR "/bdata.bin"
+#define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \
+ AR6004_HW_1_2_FW_DIR "/bdata.bin"
/* Per STA data, used in AP mode */
#define STA_PS_AWAKE BIT(0)
@@ -502,6 +510,8 @@ enum ath6kl_vif_state {
WLAN_ENABLED,
STATS_UPDATE_PEND,
HOST_SLEEP_MODE_CMD_PROCESSED,
+ NETDEV_MCAST_ALL_ON,
+ NETDEV_MCAST_ALL_OFF,
};
struct ath6kl_vif {
@@ -549,9 +559,11 @@ struct ath6kl_vif {
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
+ u16 bg_scan_period;
u8 assoc_bss_dtim_period;
struct net_device_stats net_stats;
struct target_stats target_stats;
+ struct wmi_connect_cmd profile;
struct list_head mc_filter;
};
@@ -640,6 +652,7 @@ struct ath6kl {
u8 sta_list_index;
struct ath6kl_req_key ap_mode_bkey;
struct sk_buff_head mcastpsq;
+ u32 want_ch_switch;
/*
* FIXME: protects access to mcastpsq but is actually useless as
@@ -672,6 +685,7 @@ struct ath6kl {
u32 refclk_hz;
u32 uarttx_pin;
u32 testscript_addr;
+ enum wmi_phy_cap cap;
struct ath6kl_hw_fw {
const char *dir;
@@ -805,7 +819,8 @@ void aggr_reset_state(struct aggr_info_conn *aggr_conn);
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
-void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver);
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
+ enum wmi_phy_cap cap);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 1b76aff78508..15cfe30e54fd 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -401,8 +401,10 @@ static ssize_t ath6kl_fwlog_block_read(struct file *file,
ret = wait_for_completion_interruptible(
&ar->debug.fwlog_completion);
- if (ret == -ERESTARTSYS)
+ if (ret == -ERESTARTSYS) {
+ vfree(buf);
return ret;
+ }
spin_lock(&ar->debug.fwlog_queue.lock);
}
@@ -1570,10 +1572,15 @@ static ssize_t ath6kl_bgscan_int_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
u16 bgscan_int;
char buf[32];
ssize_t len;
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
@@ -1585,6 +1592,8 @@ static ssize_t ath6kl_bgscan_int_write(struct file *file,
if (bgscan_int == 0)
bgscan_int = 0xffff;
+ vif->bg_scan_period = bgscan_int;
+
ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
0, 0, 0);
@@ -1809,6 +1818,7 @@ int ath6kl_debug_init_fs(struct ath6kl *ar)
void ath6kl_debug_cleanup(struct ath6kl *ar)
{
skb_queue_purge(&ar->debug.fwlog_queue);
+ complete(&ar->debug.fwlog_completion);
kfree(ar->debug.roam_tbl);
}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 065e61516d7a..2798624d3a9d 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -83,10 +83,7 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
* never goes inactive EVER.
*/
cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
- } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
- /* this is the lowest priority data endpoint */
- /* FIXME: this looks fishy, check */
- cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+ }
/*
* Streams have to be created (explicit | implicit) for all
@@ -100,6 +97,13 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
*/
}
+ /*
+ * ath6kl_credit_seek() uses list_for_each_entry_reverse() to walk
+ * the whole ep list, so lowestpri_ep_dist is assigned only after the
+ * walk over ep_list has finished.
+ */
+ cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
WARN_ON(cred_info->cur_free_credits <= 0);
list_for_each_entry(cur_ep_dist, ep_list, list) {
@@ -758,7 +762,7 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
u32 txb_mask;
u8 ac = WMM_NUM_AC;
- if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
@@ -793,16 +797,17 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
* itself
*/
txb_mask = ((1 << ac) - 1);
- /*
- * when the scatter request resources drop below a
- * certain threshold, disable Tx bundling for all
- * AC's with priority lower than the current requesting
- * AC. Otherwise re-enable Tx bundling for them
- */
- if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
- target->tx_bndl_mask &= ~txb_mask;
- else
- target->tx_bndl_mask |= txb_mask;
+
+ /*
+ * when the scatter request resources drop below a
+ * certain threshold, disable Tx bundling for all
+ * AC's with priority lower than the current requesting
+ * AC. Otherwise re-enable Tx bundling for them
+ */
+ if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
+ target->tx_bndl_mask &= ~txb_mask;
+ else
+ target->tx_bndl_mask |= txb_mask;
}
ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
@@ -849,6 +854,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
int bundle_sent;
int n_pkts_bundle;
u8 ac = WMM_NUM_AC;
+ int status;
spin_lock_bh(&target->tx_lock);
@@ -866,7 +872,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
*/
INIT_LIST_HEAD(&txq);
- if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
+ if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
@@ -910,7 +916,12 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
0, packet->info.tx.seqno);
- ath6kl_htc_tx_issue(target, packet);
+ status = ath6kl_htc_tx_issue(target, packet);
+
+ if (status) {
+ packet->status = status;
+ packet->completion(packet->context, packet);
+ }
}
spin_lock_bh(&target->tx_lock);
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index b277b3446882..f9626c723693 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -108,8 +108,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
/* get packet at head, but don't remove it */
packet = list_first_entry(&ep->txq, struct htc_packet, list);
- if (packet == NULL)
- break;
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: got head packet:0x%p , queue depth: %d\n",
@@ -803,8 +801,6 @@ static int htc_send_packets_multiple(struct htc_target *target,
/* get first packet to find out which ep the packets will go into */
packet = list_first_entry(pkt_queue, struct htc_packet, list);
- if (packet == NULL)
- return -EINVAL;
if (packet->endpoint >= ENDPOINT_MAX) {
WARN_ON_ONCE(1);
@@ -1382,6 +1378,9 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
/* copy all the callbacks */
ep->ep_cb = conn_req->ep_cb;
+ /* initialize tx_drop_packet_threshold */
+ ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
+
status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
&ep->pipe.pipeid_ul,
&ep->pipe.pipeid_dl);
@@ -1636,10 +1635,6 @@ static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
return -EINVAL;
first = list_first_entry(pkt_queue, struct htc_packet, list);
- if (first == NULL) {
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
if (first->endpoint >= ENDPOINT_MAX) {
WARN_ON_ONCE(1);
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 29ef50ea07d5..7eb0515f458a 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -119,6 +119,24 @@ static const struct ath6kl_hw hw_list[] = {
.fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
.fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
},
+ {
+ .id = AR6004_HW_1_2_VERSION,
+ .name = "ar6004 hw 1.2",
+ .dataset_patch_addr = 0x436ecc,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 9216,
+ .board_addr = 0x435c00,
+ .refclk_hz = 40000000,
+ .uarttx_pin = 11,
+
+ .fw = {
+ .dir = AR6004_HW_1_2_FW_DIR,
+ .fw = AR6004_HW_1_2_FIRMWARE_FILE,
+ },
+ .fw_board = AR6004_HW_1_2_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE,
+ },
};
/*
@@ -445,9 +463,9 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
P2P_FLAG_MACADDR_REQ |
P2P_FLAG_HMODEL_REQ);
if (ret) {
- ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
- "capabilities (%d) - assuming P2P not "
- "supported\n", ret);
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "failed to request P2P capabilities (%d) - assuming P2P not supported\n",
+ ret);
ar->p2p = false;
}
}
@@ -456,8 +474,9 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
/* Enable Probe Request reporting for P2P */
ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
if (ret) {
- ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
- "Request reporting (%d)\n", ret);
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "failed to enable Probe Request reporting (%d)\n",
+ ret);
}
}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 4d818f96c415..e5524470529c 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -421,8 +421,8 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
if (!ik->valid)
break;
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed addkey for "
- "the initial group key for AP mode\n");
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delayed addkey for the initial group key for AP mode\n");
memset(key_rsc, 0, sizeof(key_rsc));
res = ath6kl_wmi_addkey_cmd(
ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
@@ -430,12 +430,19 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
ik->key,
KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
if (res) {
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
- "addkey failed: %d\n", res);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "Delayed addkey failed: %d\n", res);
}
break;
}
+ if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) {
+ ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
+ /* we actually don't know the phymode, default to HT20 */
+ ath6kl_cfg80211_ch_switch_notify(vif, channel,
+ WMI_11G_HT20);
+ }
+
ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
set_bit(CONNECTED, &vif->flags);
netif_carrier_on(vif->ndev);
@@ -541,7 +548,8 @@ void ath6kl_disconnect(struct ath6kl_vif *vif)
/* WMI Event handlers */
-void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
+ enum wmi_phy_cap cap)
{
struct ath6kl *ar = devt;
@@ -551,6 +559,7 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
ar->version.wlan_ver = sw_ver;
ar->version.abi_ver = abi_ver;
+ ar->hw.cap = cap;
snprintf(ar->wiphy->fw_version,
sizeof(ar->wiphy->fw_version),
@@ -584,6 +593,45 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
}
+static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
+{
+
+ struct ath6kl *ar = vif->ar;
+
+ vif->next_chan = channel;
+ vif->profile.ch = cpu_to_le16(channel);
+
+ switch (vif->nw_type) {
+ case AP_NETWORK:
+ return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx,
+ &vif->profile);
+ default:
+ ath6kl_err("won't switch channels nw_type=%d\n", vif->nw_type);
+ return -ENOTSUPP;
+ }
+}
+
+static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
+{
+
+ struct ath6kl_vif *vif;
+ int res = 0;
+
+ if (!ar->want_ch_switch)
+ return;
+
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (ar->want_ch_switch & (1 << vif->fw_vif_idx))
+ res = ath6kl_commit_ch_switch(vif, channel);
+
+ if (res)
+ ath6kl_err("channel switch failed nw_type %d res %d\n",
+ vif->nw_type, res);
+ }
+ spin_unlock_bh(&ar->list_lock);
+}
+
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
u16 listen_int, u16 beacon_int,
enum network_type net_type, u8 beacon_ie_len,
@@ -601,9 +649,11 @@ void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
memcpy(vif->bssid, bssid, sizeof(vif->bssid));
vif->bss_ch = channel;
- if ((vif->nw_type == INFRA_NETWORK))
+ if ((vif->nw_type == INFRA_NETWORK)) {
ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
vif->listen_intvl_t, 0);
+ ath6kl_check_ch_switch(ar, channel);
+ }
netif_wake_queue(vif->ndev);
@@ -926,6 +976,11 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
struct ath6kl *ar = vif->ar;
if (vif->nw_type == AP_NETWORK) {
+ /* disconnect due to other STA vif switching channels */
+ if (reason == BSS_DISCONNECTED &&
+ prot_reason_status == WMI_AP_REASON_STA_ROAM)
+ ar->want_ch_switch |= 1 << vif->fw_vif_idx;
+
if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
return;
@@ -1090,7 +1145,7 @@ static int ath6kl_set_features(struct net_device *dev,
static void ath6kl_set_multicast_list(struct net_device *ndev)
{
struct ath6kl_vif *vif = netdev_priv(ndev);
- bool mc_all_on = false, mc_all_off = false;
+ bool mc_all_on = false;
int mc_count = netdev_mc_count(ndev);
struct netdev_hw_addr *ha;
bool found;
@@ -1102,24 +1157,41 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
!test_bit(WLAN_ENABLED, &vif->flags))
return;
+ /* Enable multicast-all filter. */
mc_all_on = !!(ndev->flags & IFF_PROMISC) ||
!!(ndev->flags & IFF_ALLMULTI) ||
!!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST);
- mc_all_off = !(ndev->flags & IFF_MULTICAST) || mc_count == 0;
+ if (mc_all_on)
+ set_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
+ else
+ clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
+
+ mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
- if (mc_all_on || mc_all_off) {
- /* Enable/disable all multicast */
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast filter\n",
- mc_all_on ? "enabling" : "disabling");
- ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
+ if (!(ndev->flags & IFF_MULTICAST)) {
+ mc_all_on = false;
+ set_bit(NETDEV_MCAST_ALL_OFF, &vif->flags);
+ } else {
+ clear_bit(NETDEV_MCAST_ALL_OFF, &vif->flags);
+ }
+
+ /* Enable/disable "multicast-all" filter */
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast-all filter\n",
+ mc_all_on ? "enabling" : "disabling");
+
+ ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
mc_all_on);
- if (ret)
- ath6kl_warn("Failed to %s multicast receive\n",
- mc_all_on ? "enable" : "disable");
+ if (ret) {
+ ath6kl_warn("Failed to %s multicast-all receive\n",
+ mc_all_on ? "enable" : "disable");
return;
}
+ if (test_bit(NETDEV_MCAST_ALL_ON, &vif->flags))
+ return;
+
+ /* Keep the driver and firmware mcast list in sync. */
list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
found = false;
netdev_for_each_mc_addr(ha, ndev) {
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 44ea7a742101..05b95405f7b5 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -552,7 +552,7 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
- if (!bus_req)
+ if (WARN_ON_ONCE(!bus_req))
return -ENOMEM;
bus_req->address = address;
@@ -915,6 +915,9 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
}
cut_pwr:
+ if (func->card && func->card->host)
+ func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER;
+
return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
}
@@ -985,9 +988,8 @@ static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
}
if (status) {
- ath6kl_err("%s: failed to write initial bytes of 0x%x "
- "to window reg: 0x%X\n", __func__,
- addr, reg_addr);
+ ath6kl_err("%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n",
+ __func__, addr, reg_addr);
return status;
}
@@ -1076,8 +1078,8 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
(u8 *)&ar->bmi.cmd_credits, 4,
HIF_RD_SYNC_BYTE_INC);
if (ret) {
- ath6kl_err("Unable to decrement the command credit "
- "count register: %d\n", ret);
+ ath6kl_err("Unable to decrement the command credit count register: %d\n",
+ ret);
return ret;
}
@@ -1457,3 +1459,6 @@ MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 82f2f5cb475b..67206aedea6c 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -362,15 +362,11 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
skb, skb->data, skb->len);
/* If target is not associated */
- if (!test_bit(CONNECTED, &vif->flags)) {
- dev_kfree_skb(skb);
- return 0;
- }
+ if (!test_bit(CONNECTED, &vif->flags))
+ goto fail_tx;
- if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON)) {
- dev_kfree_skb(skb);
- return 0;
- }
+ if (WARN_ON_ONCE(ar->state != ATH6KL_STATE_ON))
+ goto fail_tx;
if (!test_bit(WMI_READY, &ar->flag))
goto fail_tx;
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 44a795f14da9..3740c3d6ab88 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -1037,6 +1037,14 @@ static void ath6kl_usb_stop(struct ath6kl *ar)
hif_stop(ar);
}
+static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar)
+{
+ /*
+ * Scatter/gather is not supported over USB; nothing to clean up here.
+ */
+ return;
+}
+
static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.diag_read32 = ath6kl_usb_diag_read32,
.diag_write32 = ath6kl_usb_diag_write32,
@@ -1049,6 +1057,7 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.pipe_get_default = ath6kl_usb_get_default_pipe,
.pipe_map_service = ath6kl_usb_map_service_pipe,
.pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
+ .cleanup_scatter = ath6kl_usb_cleanup_scatter,
};
/* ath6kl usb driver registered functions */
@@ -1208,3 +1217,6 @@ MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 7c8a9977faf5..ee8ec2394c2c 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -16,6 +16,7 @@
*/
#include <linux/ip.h>
+#include <linux/in.h>
#include "core.h"
#include "debug.h"
#include "testmode.h"
@@ -289,6 +290,13 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
layer2_priority);
} else
usr_pri = layer2_priority & 0x7;
+
+ /*
+ * Queue the EAPOL frames in the same WMM_AC_VO queue
+ * as that of management frames.
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+ usr_pri = WMI_VOICE_USER_PRIORITY;
}
/*
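/*
 * Sketch of the priority bump added above: EAPOL (key handshake) frames
 * are forced into the voice user priority so they are not queued behind
 * best-effort data. Plain-C model with hypothetical names.
 */
#include <stdint.h>

#define ETH_P_PAE		0x888E	/* EAPOL ethertype */
#define VOICE_USER_PRIORITY	0x7

static uint8_t select_user_priority(uint16_t ethertype, uint8_t layer2_priority)
{
	uint8_t usr_pri = layer2_priority & 0x7;

	/* key-exchange frames go out at voice priority */
	if (ethertype == ETH_P_PAE)
		usr_pri = VOICE_USER_PRIORITY;

	return usr_pri;
}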
@@ -460,8 +468,9 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
freq, dur);
chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
- ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
- "(freq=%u)\n", freq);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "remain_on_chnl: Unknown channel (freq=%u)\n",
+ freq);
return -EINVAL;
}
id = vif->last_roc_id;
@@ -488,12 +497,14 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
ev = (struct wmi_cancel_remain_on_chnl_event *) datap;
freq = le32_to_cpu(ev->freq);
dur = le32_to_cpu(ev->duration);
- ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
- "status=%u\n", freq, dur, ev->status);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "cancel_remain_on_chnl: freq=%u dur=%u status=%u\n",
+ freq, dur, ev->status);
chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
- ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
- "channel (freq=%u)\n", freq);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "cancel_remain_on_chnl: Unknown channel (freq=%u)\n",
+ freq);
return -EINVAL;
}
if (vif->last_cancel_roc_id &&
@@ -548,12 +559,12 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
freq = le32_to_cpu(ev->freq);
dlen = le16_to_cpu(ev->len);
if (datap + len < ev->data + dlen) {
- ath6kl_err("invalid wmi_p2p_rx_probe_req_event: "
- "len=%d dlen=%u\n", len, dlen);
+ ath6kl_err("invalid wmi_p2p_rx_probe_req_event: len=%d dlen=%u\n",
+ len, dlen);
return -EINVAL;
}
- ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
- "probe_req_report=%d\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "rx_probe_req: len=%u freq=%u probe_req_report=%d\n",
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
@@ -592,8 +603,8 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
freq = le32_to_cpu(ev->freq);
dlen = le16_to_cpu(ev->len);
if (datap + len < ev->data + dlen) {
- ath6kl_err("invalid wmi_rx_action_event: "
- "len=%d dlen=%u\n", len, dlen);
+ ath6kl_err("invalid wmi_rx_action_event: len=%d dlen=%u\n",
+ len, dlen);
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
@@ -687,7 +698,7 @@ static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
le32_to_cpu(ev->sw_version),
- le32_to_cpu(ev->abi_version));
+ le32_to_cpu(ev->abi_version), ev->phy_cap);
return 0;
}
@@ -777,16 +788,15 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
/* AP mode start/STA connected event */
struct net_device *dev = vif->ndev;
if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
- ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
- "(AP started)\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: freq %d bssid %pM (AP started)\n",
__func__, le16_to_cpu(ev->u.ap_bss.ch),
ev->u.ap_bss.bssid);
ath6kl_connect_ap_mode_bss(
vif, le16_to_cpu(ev->u.ap_bss.ch));
} else {
- ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
- "auth=%u keymgmt=%u cipher=%u apsd_info=%u "
- "(STA connected)\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: aid %u mac_addr %pM auth=%u keymgmt=%u cipher=%u apsd_info=%u (STA connected)\n",
__func__, ev->u.ap_sta.aid,
ev->u.ap_sta.mac_addr,
ev->u.ap_sta.auth,
@@ -1229,8 +1239,9 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
ev = (struct wmi_neighbor_report_event *) datap;
if (sizeof(*ev) + ev->num_neighbors * sizeof(struct wmi_neighbor_info)
> len) {
- ath6kl_dbg(ATH6KL_DBG_WMI, "truncated neighbor event "
- "(num=%d len=%d)\n", ev->num_neighbors, len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "truncated neighbor event (num=%d len=%d)\n",
+ ev->num_neighbors, len);
return -EINVAL;
}
for (i = 0; i < ev->num_neighbors; i++) {
@@ -1814,12 +1825,14 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates)
{
+ struct ieee80211_supported_band *sband;
struct sk_buff *skb;
struct wmi_begin_scan_cmd *sc;
- s8 size;
+ s8 size, *supp_rates;
int i, band, ret;
struct ath6kl *ar = wmi->parent_dev;
int num_rates;
+ u32 ratemask;
size = sizeof(struct wmi_begin_scan_cmd);
@@ -1846,10 +1859,13 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
sc->num_ch = num_chan;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- struct ieee80211_supported_band *sband =
- ar->wiphy->bands[band];
- u32 ratemask = rates[band];
- u8 *supp_rates = sc->supp_rates[band].rates;
+ sband = ar->wiphy->bands[band];
+
+ if (!sband)
+ continue;
+
+ ratemask = rates[band];
+ supp_rates = sc->supp_rates[band].rates;
num_rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
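/*
 * Sketch of the NULL-band guard added above: a device may not populate
 * every entry of wiphy->bands[], so the scan setup now skips missing
 * bands instead of dereferencing them. Simplified, hypothetical types.
 */
#include <stddef.h>

struct band_info {
	int n_bitrates;
};

static int count_scan_rates(struct band_info *const bands[], int num_bands)
{
	int band, total = 0;

	for (band = 0; band < num_bands; band++) {
		if (!bands[band])	/* band not supported by this device */
			continue;
		total += bands[band]->n_bitrates;
	}
	return total;
}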
@@ -2129,8 +2145,8 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
struct wmi_add_cipher_key_cmd *cmd;
int ret;
- ath6kl_dbg(ATH6KL_DBG_WMI, "addkey cmd: key_index=%u key_type=%d "
- "key_usage=%d key_len=%d key_op_ctrl=%d\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "addkey cmd: key_index=%u key_type=%d key_usage=%d key_len=%d key_op_ctrl=%d\n",
key_index, key_type, key_usage, key_len, key_op_ctrl);
if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
@@ -3047,8 +3063,8 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
NO_SYNC_WMIFLAG);
- ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
- "ctrl_flags=0x%x-> res=%d\n",
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: nw_type=%u auth_mode=%u ch=%u ctrl_flags=0x%x-> res=%d\n",
__func__, p->nw_type, p->auth_mode, le16_to_cpu(p->ch),
le32_to_cpu(p->ctrl_flags), res);
return res;
@@ -3208,8 +3224,9 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
if (!skb)
return -ENOMEM;
- ath6kl_dbg(ATH6KL_DBG_WMI, "set_appie_cmd: mgmt_frm_type=%u "
- "ie_len=%u\n", mgmt_frm_type, ie_len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "set_appie_cmd: mgmt_frm_type=%u ie_len=%u\n",
+ mgmt_frm_type, ie_len);
p = (struct wmi_set_appie_cmd *) skb->data;
p->mgmt_frm_type = mgmt_frm_type;
p->ie_len = ie_len;
@@ -3310,8 +3327,9 @@ static int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id,
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
- ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u "
- "len=%u\n", id, freq, wait, data_len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_action_cmd: id=%u freq=%u wait=%u len=%u\n",
+ id, freq, wait, data_len);
p = (struct wmi_send_action_cmd *) skb->data;
p->id = cpu_to_le32(id);
p->freq = cpu_to_le32(freq);
@@ -3348,8 +3366,9 @@ static int __ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id,
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
- ath6kl_dbg(ATH6KL_DBG_WMI, "send_action_cmd: id=%u freq=%u wait=%u "
- "len=%u\n", id, freq, wait, data_len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_action_cmd: id=%u freq=%u wait=%u len=%u\n",
+ id, freq, wait, data_len);
p = (struct wmi_send_mgmt_cmd *) skb->data;
p->id = cpu_to_le32(id);
p->freq = cpu_to_le32(freq);
@@ -3402,8 +3421,9 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
if (!skb)
return -ENOMEM;
- ath6kl_dbg(ATH6KL_DBG_WMI, "send_probe_response_cmd: freq=%u dst=%pM "
- "len=%u\n", freq, dst, data_len);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "send_probe_response_cmd: freq=%u dst=%pM len=%u\n",
+ freq, dst, data_len);
p = (struct wmi_p2p_probe_response_cmd *) skb->data;
p->freq = cpu_to_le32(freq);
memcpy(p->destination_addr, dst, ETH_ALEN);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index d3d2ab5c1689..9076bec3a2ba 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -106,6 +106,8 @@ struct wmi_data_sync_bufs {
#define WMM_AC_VI 2 /* video */
#define WMM_AC_VO 3 /* voice */
+#define WMI_VOICE_USER_PRIORITY 0x7
+
struct wmi {
u16 stream_exist_for_ac[WMM_NUM_AC];
u8 fat_pipe_exist;
@@ -1151,6 +1153,7 @@ enum wmi_phy_mode {
WMI_11AG_MODE = 0x3,
WMI_11B_MODE = 0x4,
WMI_11GONLY_MODE = 0x5,
+ WMI_11G_HT20 = 0x6,
};
#define WMI_MAX_CHANNELS 32
@@ -1416,6 +1419,16 @@ struct wmi_ready_event_2 {
u8 phy_cap;
} __packed;
+/* WMI_PHY_CAPABILITY */
+enum wmi_phy_cap {
+ WMI_11A_CAP = 0x01,
+ WMI_11G_CAP = 0x02,
+ WMI_11AG_CAP = 0x03,
+ WMI_11AN_CAP = 0x04,
+ WMI_11GN_CAP = 0x05,
+ WMI_11AGN_CAP = 0x06,
+};
+
/* Connect Event */
struct wmi_connect_event {
union {
@@ -1468,6 +1481,17 @@ enum wmi_disconnect_reason {
IBSS_MERGE = 0xe,
};
+/* AP mode disconnect proto_reasons */
+enum ap_disconnect_reason {
+ WMI_AP_REASON_STA_LEFT = 101,
+ WMI_AP_REASON_FROM_HOST = 102,
+ WMI_AP_REASON_COMM_TIMEOUT = 103,
+ WMI_AP_REASON_MAX_STA = 104,
+ WMI_AP_REASON_ACL = 105,
+ WMI_AP_REASON_STA_ROAM = 106,
+ WMI_AP_REASON_DFS_CHANNEL = 107,
+};
+
#define ATH6KL_COUNTRY_RD_SHIFT 16
struct ath6kl_wmi_regdomain {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index a0387a027db0..9fdd70fcaf5b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -892,34 +892,6 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
}
-static bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
-{
- struct ath9k_rtt_hist *hist;
- u32 *table;
- int i;
- bool restore;
-
- if (!ah->caldata)
- return false;
-
- hist = &ah->caldata->rtt_hist;
- if (!hist->num_readings)
- return false;
-
- ar9003_hw_rtt_enable(ah);
- ar9003_hw_rtt_set_mask(ah, 0x00);
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
- continue;
- table = &hist->table[i][hist->num_readings][0];
- ar9003_hw_rtt_load_hist(ah, i, table);
- }
- restore = ar9003_hw_rtt_force_restore(ah);
- ar9003_hw_rtt_disable(ah);
-
- return restore;
-}
-
static bool ar9003_hw_init_cal(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -942,9 +914,10 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
if (!ar9003_hw_rtt_restore(ah, chan))
run_rtt_cal = true;
- ath_dbg(common, CALIBRATE, "RTT restore %s\n",
- run_rtt_cal ? "failed" : "succeed");
+ if (run_rtt_cal)
+ ath_dbg(common, CALIBRATE, "RTT calibration to be done\n");
}
+
run_agc_cal = run_rtt_cal;
if (run_rtt_cal) {
@@ -1069,17 +1042,14 @@ skip_tx_iqcal:
#undef CL_TAB_ENTRY
if (run_rtt_cal && caldata) {
- struct ath9k_rtt_hist *hist = &caldata->rtt_hist;
- if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) {
- u32 *table;
+ if (is_reusable) {
+ if (!ath9k_hw_rfbus_req(ah))
+ ath_err(ath9k_hw_common(ah),
+ "Could not stop baseband\n");
+ else
+ ar9003_hw_rtt_fill_hist(ah);
- hist->num_readings++;
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
- continue;
- table = &hist->table[i][hist->num_readings][0];
- ar9003_hw_rtt_fill_hist(ah, i, table);
- }
+ ath9k_hw_rfbus_done(ah);
}
ar9003_hw_rtt_disable(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ac53d901801d..dfb0441f406c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3809,7 +3809,7 @@ static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
return true;
}
-static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
int internal_regulator =
ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 2505ac44f0c1..8396d150ce01 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,4 +334,7 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 3cac293a2849..ffbb180f91e1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -756,7 +756,7 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
- caldata->rtt_hist.num_readings = 0;
+ caldata->rtt_done = false;
}
if (!ath9k_hw_init_cal(ah, chan))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 458bedf0b0ae..74de3539c2c8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_rtt.h"
@@ -69,7 +70,7 @@ bool ar9003_hw_rtt_force_restore(struct ath_hw *ah)
}
static void ar9003_hw_rtt_load_hist_entry(struct ath_hw *ah, u8 chain,
- u32 index, u32 data28)
+ u32 index, u32 data28)
{
u32 val;
@@ -100,12 +101,21 @@ static void ar9003_hw_rtt_load_hist_entry(struct ath_hw *ah, u8 chain,
RTT_ACCESS_TIMEOUT);
}
-void ar9003_hw_rtt_load_hist(struct ath_hw *ah, u8 chain, u32 *table)
+void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
{
- int i;
+ int chain, i;
- for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
- ar9003_hw_rtt_load_hist_entry(ah, chain, i, table[i]);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->rxchainmask & (1 << chain)))
+ continue;
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
+ ar9003_hw_rtt_load_hist_entry(ah, chain, i,
+ ah->caldata->rtt_table[chain][i]);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "Load RTT value at idx %d, chain %d: 0x%x\n",
+ i, chain, ah->caldata->rtt_table[chain][i]);
+ }
+ }
}
static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
@@ -128,27 +138,71 @@ static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
RTT_ACCESS_TIMEOUT))
return RTT_BAD_VALUE;
- val = REG_READ(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain));
+ val = MS(REG_READ(ah, AR_PHY_RTT_TABLE_SW_INTF_1_B(chain)),
+ AR_PHY_RTT_SW_RTT_TABLE_DATA);
+
return val;
}
-void ar9003_hw_rtt_fill_hist(struct ath_hw *ah, u8 chain, u32 *table)
+void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
{
- int i;
+ int chain, i;
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->rxchainmask & (1 << chain)))
+ continue;
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
+ ah->caldata->rtt_table[chain][i] =
+ ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "RTT value at idx %d, chain %d is: 0x%x\n",
+ i, chain, ah->caldata->rtt_table[chain][i]);
+ }
+ }
- for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
- table[i] = ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+ ah->caldata->rtt_done = true;
}
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
{
- int i, j;
+ int chain, i;
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->rxchainmask & (1 << chain)))
continue;
- for (j = 0; j < MAX_RTT_TABLE_ENTRY; j++)
- ar9003_hw_rtt_load_hist_entry(ah, i, j, 0);
+ for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++)
+ ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
}
+
+ if (ah->caldata)
+ ah->caldata->rtt_done = false;
+}
+
+bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
+{
+ bool restore;
+
+ if (!ah->caldata)
+ return false;
+
+ if (!ah->caldata->rtt_done)
+ return false;
+
+ ar9003_hw_rtt_enable(ah);
+ ar9003_hw_rtt_set_mask(ah, 0x10);
+
+ if (!ath9k_hw_rfbus_req(ah)) {
+ ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
+ restore = false;
+ goto fail;
+ }
+
+ ar9003_hw_rtt_load_hist(ah);
+ restore = ar9003_hw_rtt_force_restore(ah);
+
+fail:
+ ath9k_hw_rfbus_done(ah);
+ ar9003_hw_rtt_disable(ah);
+ return restore;
}
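/*
 * Sketch of the per-chain table walk the fill/load helpers above now do
 * internally: every receive chain present in the chain mask gets its six
 * history entries read or written, other chains are skipped. Standalone
 * model with assumed sizes; not the ath9k code itself.
 */
#include <stdint.h>

#define MAX_CHAINS		3
#define RTT_TABLE_ENTRIES	6

static uint32_t rtt_table[MAX_CHAINS][RTT_TABLE_ENTRIES];

static void fill_rtt_table(uint8_t rxchainmask,
			   uint32_t (*read_entry)(int chain, int idx))
{
	int chain, i;

	for (chain = 0; chain < MAX_CHAINS; chain++) {
		if (!(rxchainmask & (1 << chain)))
			continue;	/* chain not active on this board */
		for (i = 0; i < RTT_TABLE_ENTRIES; i++)
			rtt_table[chain][i] = read_entry(chain, i);
	}
}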
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
index 030758d087d6..a43b30d723a4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.h
@@ -21,8 +21,9 @@ void ar9003_hw_rtt_enable(struct ath_hw *ah);
void ar9003_hw_rtt_disable(struct ath_hw *ah);
void ar9003_hw_rtt_set_mask(struct ath_hw *ah, u32 rtt_mask);
bool ar9003_hw_rtt_force_restore(struct ath_hw *ah);
-void ar9003_hw_rtt_load_hist(struct ath_hw *ah, u8 chain, u32 *table);
-void ar9003_hw_rtt_fill_hist(struct ath_hw *ah, u8 chain, u32 *table);
+void ar9003_hw_rtt_load_hist(struct ath_hw *ah);
+void ar9003_hw_rtt_fill_hist(struct ath_hw *ah);
void ar9003_hw_rtt_clear_hist(struct ath_hw *ah);
+bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f11d9b2677fd..1bd3a3d22101 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
#define INITVALS_9330_1P1_H
static const u32 ar9331_1p1_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
};
static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
};
static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
{0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
};
static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -377,14 +378,14 @@ static const u32 ar9331_1p1_radio_core[][2] = {
{0x000160b4, 0x92480040},
{0x000160c0, 0x006db6db},
{0x000160c4, 0x0186db60},
- {0x000160c8, 0x6db6db6c},
+ {0x000160c8, 0x6db4db6c},
{0x000160cc, 0x6de6c300},
{0x000160d0, 0x14500820},
{0x00016100, 0x04cb0001},
{0x00016104, 0xfff80015},
{0x00016108, 0x00080010},
{0x0001610c, 0x00170000},
- {0x00016140, 0x10804000},
+ {0x00016140, 0x10800000},
{0x00016144, 0x01884080},
{0x00016148, 0x000080c0},
{0x00016280, 0x01000015},
@@ -417,7 +418,7 @@ static const u32 ar9331_1p1_radio_core[][2] = {
};
static const u32 ar9331_1p1_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
};
@@ -691,7 +692,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
};
static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -783,7 +784,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
};
static const u32 ar9331_1p1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
@@ -973,26 +974,27 @@ static const u32 ar9331_1p1_mac_core[][2] = {
static const u32 ar9331_common_rx_gain_1p1[][2] = {
/* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
+ {0x00009e18, 0x05000000},
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
{0x0000a050, 0x00000000},
{0x0000a054, 0x00000000},
{0x0000a058, 0x00000000},
@@ -1005,15 +1007,15 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a074, 0x00000000},
{0x0000a078, 0x00000000},
{0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
{0x0000a0a4, 0x00000000},
{0x0000a0a8, 0x00000000},
{0x0000a0ac, 0x00000000},
@@ -1021,27 +1023,27 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a0b4, 0x00000000},
{0x0000a0b8, 0x00000000},
{0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
{0x0000a114, 0x00000000},
{0x0000a118, 0x00000000},
{0x0000a11c, 0x00000000},
@@ -1054,26 +1056,26 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a138, 0x00000000},
{0x0000a13c, 0x00000000},
{0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
{0x0000a194, 0x00000000},
{0x0000a198, 0x00000000},
{0x0000a19c, 0x00000000},
@@ -1100,14 +1102,14 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a1f0, 0x00000396},
{0x0000a1f4, 0x00000396},
{0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
+ {0x0000a1fc, 0x00000296},
};
static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
- {0},
- {3},
- {0},
- {0},
+ {0x00000000},
+ {0x00000003},
+ {0x00000000},
+ {0x00000000},
};
static const u32 ar9331_1p1_chansel_xtal_25M[] = {
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a277cf6f339d..4866550ddd96 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -214,6 +214,7 @@ struct ath_frame_info {
enum ath9k_key_type keytype;
u8 keyix;
u8 retries;
+ u8 rtscts_rate;
};
struct ath_buf_state {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 2b8f61c210e1..abbd6effd60d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1496,6 +1496,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
+ ath9k_htc_choose_set_bssid(priv);
if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
ath9k_htc_start_ani(priv);
else if (priv->num_sta_assoc_vif == 0)
@@ -1503,13 +1504,11 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BSSID) {
+ if (changed & BSS_CHANGED_IBSS) {
if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
common->curaid = bss_conf->aid;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
ath9k_htc_set_bssid(priv);
- } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
- ath9k_htc_choose_set_bssid(priv);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index f84477c5ebb1..995ca8e1302e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
- ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+ ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
!ah->is_pciexpress)) {
ah->config.serialize_regmode =
SER_REG_MODE_ON;
@@ -784,13 +784,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i = 0;
+
REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
udelay(100);
REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
- while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
+
udelay(100);
+ if (WARN_ON_ONCE(i >= 100)) {
+ ath_err(common, "PLL4 measurement not done\n");
+ break;
+ }
+
+ i++;
+ }
+
return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
}
EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
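/*
 * The loop above now bounds its wait instead of spinning forever on a
 * stuck measurement. A generic version of that bounded-poll pattern,
 * with hypothetical callbacks:
 */
#include <stdbool.h>

#define POLL_LIMIT 100

/* Poll until ready() reports completion or the retry budget is spent;
 * returns true on success, false on timeout. */
static bool poll_until_done(bool (*ready)(void), void (*wait_100us)(void))
{
	int i;

	for (i = 0; i < POLL_LIMIT; i++) {
		if (ready())
			return true;
		wait_100us();
	}
	return false;
}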
@@ -1468,6 +1480,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
return false;
ah->chip_fullsleep = false;
+
+ if (AR_SREV_9330(ah))
+ ar9003_hw_internal_regulator_apply(ah);
ath9k_hw_init_pll(ah, chan);
ath9k_hw_set_rfmode(ah, chan);
@@ -1702,10 +1717,10 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
* For AR9462, make sure that calibration data for
* re-using are present.
*/
- if (AR_SREV_9462(ah) && (!ah->caldata ||
- !ah->caldata->done_txiqcal_once ||
- !ah->caldata->done_txclcal_once ||
- !ah->caldata->rtt_hist.num_readings))
+ if (AR_SREV_9462(ah) && (ah->caldata &&
+ (!ah->caldata->done_txiqcal_once ||
+ !ah->caldata->done_txclcal_once ||
+ !ah->caldata->rtt_done)))
goto fail;
ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1941,7 +1956,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
- caldata->rtt_hist.num_readings = 0;
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 828b9bbc456d..b620c557c2a6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -348,12 +348,6 @@ enum ath9k_int {
CHANNEL_HT40MINUS)
#define MAX_RTT_TABLE_ENTRY 6
-#define RTT_HIST_MAX 3
-struct ath9k_rtt_hist {
- u32 table[AR9300_MAX_CHAINS][RTT_HIST_MAX][MAX_RTT_TABLE_ENTRY];
- u8 num_readings;
-};
-
#define MAX_IQCAL_MEASUREMENT 8
#define MAX_CL_TAB_ENTRY 16
@@ -363,6 +357,7 @@ struct ath9k_hw_cal_data {
int32_t CalValid;
int8_t iCoff;
int8_t qCoff;
+ bool rtt_done;
bool paprd_done;
bool nfcal_pending;
bool nfcal_interference;
@@ -373,8 +368,8 @@ struct ath9k_hw_cal_data {
u32 num_measures[AR9300_MAX_CHAINS];
int tx_corr_coeff[MAX_IQCAL_MEASUREMENT][AR9300_MAX_CHAINS];
u32 tx_clcal[AR9300_MAX_CHAINS][MAX_CL_TAB_ENTRY];
+ u32 rtt_table[AR9300_MAX_CHAINS][MAX_RTT_TABLE_ENTRY];
struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
- struct ath9k_rtt_hist rtt_hist;
};
struct ath9k_channel {
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dfa78e8b6470..dac1a2709e3c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -239,7 +239,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- bool ret;
+ bool ret = true;
ieee80211_stop_queues(sc->hw);
@@ -250,11 +250,12 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
ath9k_debug_samp_bb_mac(sc);
ath9k_hw_disable_interrupts(ah);
- ret = ath_drain_all_txq(sc, retry_tx);
-
if (!ath_stoprecv(sc))
ret = false;
+ if (!ath_drain_all_txq(sc, retry_tx))
+ ret = false;
+
if (!flush) {
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_rx_tasklet(sc, 1, true);
@@ -970,6 +971,15 @@ void ath_hw_pll_work(struct work_struct *work)
hw_pll_work.work);
u32 pll_sqsum;
+ /*
+ * Ensure that the PLL WAR is executed only
+ * after the STA is associated, or once
+ * beaconing has started on interfaces that
+ * use beacons.
+ */
+ if (!(sc->sc_flags & SC_OP_BEACONS))
+ return;
+
if (AR_SREV_9485(sc->sc_ah)) {
ath9k_ps_wakeup(sc);
@@ -1442,15 +1452,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
}
}
- if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
- ((vif->type == NL80211_IFTYPE_ADHOC) &&
- sc->nvifs > 0)) {
- ath_err(common, "Cannot create ADHOC interface when other"
- " interfaces already exist.\n");
- ret = -EINVAL;
- goto out;
- }
-
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
@@ -1475,15 +1476,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- /* See if new interface type is valid. */
- if ((new_type == NL80211_IFTYPE_ADHOC) &&
- (sc->nvifs > 1)) {
- ath_err(common, "When using ADHOC, it must be the only"
- " interface.\n");
- ret = -EINVAL;
- goto out;
- }
-
if (ath9k_uses_beacons(new_type) &&
!ath9k_uses_beacons(vif->type)) {
if (sc->nbcnvifs >= ATH_BCBUF) {
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index e1fcc68124dc..0735aeb3b26c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
__skb_unlink(skb, &rx_edma->rx_fifo);
list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
- } else {
- bf = NULL;
}
+
+ bf = NULL;
}
*dest = bf;
@@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
* descriptor does contain a valid key index. This has been observed
* mostly with CCMP encryption.
*/
- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
if (!rx_stats->rs_datalen) {
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 23eaa1b26ebe..4d571394c7a8 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -64,7 +64,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ bool dequeue);
enum {
MCS_HT20,
@@ -811,7 +812,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
fi = get_frame_info(skb);
bf = fi->bf;
if (!fi->bf)
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
if (!bf)
continue;
@@ -937,6 +938,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ieee80211_tx_rate *rates;
const struct ieee80211_rate *rate;
struct ieee80211_hdr *hdr;
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
int i;
u8 rix = 0;
@@ -947,18 +949,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
/* set dur_update_en for l-sig computation except for PS-Poll frames */
info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
-
- /*
- * We check if Short Preamble is needed for the CTS rate by
- * checking the BSS's global flag.
- * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
- */
- rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
- info->rtscts_rate = rate->hw_value;
-
- if (tx_info->control.vif &&
- tx_info->control.vif->bss_conf.use_short_preamble)
- info->rtscts_rate |= rate->hw_value_short;
+ info->rtscts_rate = fi->rtscts_rate;
for (i = 0; i < 4; i++) {
bool is_40, is_sgi, is_sp;
@@ -1000,13 +991,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
}
/* legacy rates */
+ rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
else
phy = WLAN_RC_PHY_OFDM;
- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
info->rates[i].Rate = rate->hw_value;
if (rate->hw_value_short) {
if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
@@ -1726,7 +1717,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
return;
}
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
return;
@@ -1753,7 +1744,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf)
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
if (!bf)
return;
@@ -1775,10 +1766,22 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_sta *sta = tx_info->control.sta;
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ const struct ieee80211_rate *rate;
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_node *an = NULL;
enum ath9k_key_type keytype;
+ bool short_preamble = false;
+ /*
+ * We check if Short Preamble is needed for the CTS rate by
+ * checking the BSS's global flag.
+ * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
+ */
+ if (tx_info->control.vif &&
+ tx_info->control.vif->bss_conf.use_short_preamble)
+ short_preamble = true;
+
+ rate = ieee80211_get_rts_cts_rate(hw, tx_info);
keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
if (sta)
@@ -1793,6 +1796,9 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
+ fi->rtscts_rate = rate->hw_value;
+ if (short_preamble)
+ fi->rtscts_rate |= rate->hw_value_short;
}
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
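/*
 * Sketch of the change above: the RTS/CTS rate is computed once when the
 * frame info is set up and cached in ath_frame_info, rather than being
 * recomputed for every transmit attempt. Hypothetical, simplified types.
 */
#include <stdbool.h>
#include <stdint.h>

struct rate_entry {
	uint8_t hw_value;
	uint8_t hw_value_short;	/* OR-ed in when short preamble applies */
};

static uint8_t cache_rtscts_rate(const struct rate_entry *rate,
				 bool use_short_preamble)
{
	uint8_t rtscts_rate = rate->hw_value;

	if (use_short_preamble)
		rtscts_rate |= rate->hw_value_short;

	return rtscts_rate;
}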
@@ -1814,7 +1820,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool dequeue)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1870,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
return bf;
error:
+ if (dequeue)
+ __skb_unlink(skb, &tid->buf_q);
dev_kfree_skb_any(skb);
return NULL;
}
@@ -1893,7 +1902,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
*/
ath_tx_send_ampdu(sc, tid, skb, txctl);
} else {
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
return;
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 0e81904956cf..5c54aa43ca2d 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
return -EIO;
set_bit(idx, common->keymap);
+ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ set_bit(idx, common->ccmp_keymap);
+
if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
set_bit(idx + 64, common->keymap);
set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
return;
clear_bit(key->hw_key_idx, common->keymap);
+ clear_bit(key->hw_key_idx, common->ccmp_keymap);
if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
return;
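/*
 * Sketch of the extra bitmap introduced above: key slots holding CCMP keys
 * are tracked separately so the receive path only trusts a KEYMISS
 * indication for slots that really contain a CCMP key. Plain-C model of
 * the bit bookkeeping; sizes and names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_KEY_SLOTS 128

static uint8_t ccmp_keymap[MAX_KEY_SLOTS / 8];

static void mark_ccmp_key(unsigned int idx)
{
	ccmp_keymap[idx / 8] |= 1u << (idx % 8);
}

static bool keymiss_plausible(unsigned int rx_keyix)
{
	/* ignore KEYMISS reports for slots that hold no CCMP key */
	return ccmp_keymap[rx_keyix / 8] & (1u << (rx_keyix % 8));
}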
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 67c13af6f206..c06b6cb5c91e 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -877,6 +877,10 @@ struct b43_wl {
* from the mac80211 subsystem. */
u16 mac80211_initially_registered_queues;
+ /* Set when ieee80211_register_hw() has been called; checked before
+ * calling ieee80211_unregister_hw(). */
+ bool hw_registred;
+
/* We can only have one operating interface (802.11 core)
* at a time. General information about this interface follows.
*/
diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c
index 424692df239d..565fdbdd6915 100644
--- a/drivers/net/wireless/b43/bus.c
+++ b/drivers/net/wireless/b43/bus.c
@@ -107,11 +107,9 @@ struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core)
dev->dma_dev = core->dma_dev;
dev->irq = core->irq;
- /*
dev->board_vendor = core->bus->boardinfo.vendor;
dev->board_type = core->bus->boardinfo.type;
- dev->board_rev = core->bus->boardinfo.rev;
- */
+ dev->board_rev = core->bus->sprom.board_rev;
dev->chip_id = core->bus->chipinfo.id;
dev->chip_rev = core->bus->chipinfo.rev;
@@ -210,7 +208,7 @@ struct b43_bus_dev *b43_bus_dev_ssb_init(struct ssb_device *sdev)
dev->board_vendor = sdev->bus->boardinfo.vendor;
dev->board_type = sdev->bus->boardinfo.type;
- dev->board_rev = sdev->bus->boardinfo.rev;
+ dev->board_rev = sdev->bus->sprom.board_rev;
dev->chip_id = sdev->bus->chip_id;
dev->chip_rev = sdev->bus->chip_rev;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index b5f1b91002bb..777cd74921d7 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1109,7 +1109,7 @@ static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
#ifdef CONFIG_B43_SSB
if (dev->dev->bus_type == B43_BUS_SSB &&
dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
- !(dev->dev->sdev->bus->host_pci->is_pcie &&
+ !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
return 1;
#endif
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 617afc8211b2..1b988f26bdf1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2437,6 +2437,7 @@ start_ieee80211:
err = ieee80211_register_hw(wl->hw);
if (err)
goto err_one_core_detach;
+ wl->hw_registred = true;
b43_leds_register(wl->current_dev);
goto out;
@@ -3766,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
if (prev_status >= B43_STAT_STARTED) {
err = b43_wireless_core_start(up_dev);
if (err) {
- b43err(wl, "Fatal: Coult not start device for "
+ b43err(wl, "Fatal: Could not start device for "
"selected %s-GHz band\n",
band_to_string(chan->band));
b43_wireless_core_exit(up_dev);
@@ -5243,10 +5244,10 @@ static void b43_sprom_fixup(struct ssb_bus *bus)
/* boardflags workarounds */
if (bus->boardinfo.vendor == SSB_BOARDVENDOR_DELL &&
- bus->chip_id == 0x4301 && bus->boardinfo.rev == 0x74)
+ bus->chip_id == 0x4301 && bus->sprom.board_rev == 0x74)
bus->sprom.boardflags_lo |= B43_BFL_BTCOEXIST;
if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
- bus->boardinfo.type == 0x4E && bus->boardinfo.rev > 0x40)
+ bus->boardinfo.type == 0x4E && bus->sprom.board_rev > 0x40)
bus->sprom.boardflags_lo |= B43_BFL_PACTRL;
if (bus->bustype == SSB_BUSTYPE_PCI) {
pdev = bus->host_pci;
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
wl->mac80211_initially_registered_queues = hw->queues;
+ wl->hw_registred = false;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core)
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
- /* Restore the queues count before unregistering, because firmware detect
- * might have modified it. Restoring is important, so the networking
- * stack can properly free resources. */
- wl->hw->queues = wl->mac80211_initially_registered_queues;
- b43_leds_stop(wldev);
- ieee80211_unregister_hw(wl->hw);
+ B43_WARN_ON(!wl);
+ if (wl->current_dev == wldev && wl->hw_registred) {
+ /* Restore the queues count before unregistering, because firmware detect
+ * might have modified it. Restoring is important, so the networking
+ * stack can properly free resources. */
+ wl->hw->queues = wl->mac80211_initially_registered_queues;
+ b43_leds_stop(wldev);
+ ieee80211_unregister_hw(wl->hw);
+ }
b43_one_core_detach(wldev->dev);
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
cancel_work_sync(&wldev->restart_work);
B43_WARN_ON(!wl);
- if (wl->current_dev == wldev) {
+ if (wl->current_dev == wldev && wl->hw_registred) {
/* Restore the queues count before unregistering, because firmware detect
* might have modified it. Restoring is important, so the networking
* stack can properly free resources. */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index f1f8bd09bd87..c8baf020c20f 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1be214b815fb..eae691e2f7dd 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1573,8 +1573,6 @@ static void b43legacy_request_firmware(struct work_struct *work)
const char *filename;
int err;
- /* do dummy read */
- ssb_read32(dev->dev, SSB_TMSHIGH);
if (!fw->ucode) {
if (rev == 2)
filename = "ucode2";
@@ -2635,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
if (prev_status >= B43legacy_STAT_STARTED) {
err = b43legacy_wireless_core_start(up_dev);
if (err) {
- b43legacyerr(wl, "Fatal: Coult not start device for "
+ b43legacyerr(wl, "Fatal: Could not start device for "
"newly selected %s-PHY mode\n",
phymode_to_string(new_mode));
b43legacy_wireless_core_exit(up_dev);
@@ -3781,7 +3779,7 @@ static void b43legacy_sprom_fixup(struct ssb_bus *bus)
/* boardflags workarounds */
if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
bus->boardinfo.type == 0x4E &&
- bus->boardinfo.rev > 0x40)
+ bus->sprom.board_rev > 0x40)
bus->sprom.boardflags_lo |= B43legacy_BFL_PACTRL;
}
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 950334197f40..995c7d0c212a 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -408,7 +408,7 @@ static void b43legacy_phy_setupg(struct b43legacy_wldev *dev)
if (is_bcm_board_vendor(dev) &&
(dev->dev->bus->boardinfo.type == 0x0416) &&
- (dev->dev->bus->boardinfo.rev == 0x0017))
+ (dev->dev->bus->sprom.board_rev == 0x0017))
return;
b43legacy_ilt_write(dev, 0x5001, 0x0002);
@@ -424,7 +424,7 @@ static void b43legacy_phy_setupg(struct b43legacy_wldev *dev)
if (is_bcm_board_vendor(dev) &&
(dev->dev->bus->boardinfo.type == 0x0416) &&
- (dev->dev->bus->boardinfo.rev == 0x0017))
+ (dev->dev->bus->sprom.board_rev == 0x0017))
return;
b43legacy_ilt_write(dev, 0x0401, 0x0002);
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index fcbafcd603cc..896177690394 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1998,7 +1998,7 @@ u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev)
if (phy->type == B43legacy_PHYTYPE_G) {
if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x421 &&
- dev->dev->bus->boardinfo.rev >= 30)
+ dev->dev->bus->sprom.board_rev >= 30)
att = 3;
else if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x416)
@@ -2008,7 +2008,7 @@ u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev)
} else {
if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x421 &&
- dev->dev->bus->boardinfo.rev >= 30)
+ dev->dev->bus->sprom.board_rev >= 30)
att = 7;
else
att = 6;
@@ -2018,7 +2018,7 @@ u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev)
if (phy->type == B43legacy_PHYTYPE_G) {
if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x421 &&
- dev->dev->bus->boardinfo.rev >= 30)
+ dev->dev->bus->sprom.board_rev >= 30)
att = 3;
else if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type ==
@@ -2052,9 +2052,9 @@ u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev)
}
if (is_bcm_board_vendor(dev) &&
dev->dev->bus->boardinfo.type == 0x421) {
- if (dev->dev->bus->boardinfo.rev < 0x43)
+ if (dev->dev->bus->sprom.board_rev < 0x43)
att = 2;
- else if (dev->dev->bus->boardinfo.rev < 0x51)
+ else if (dev->dev->bus->sprom.board_rev < 0x51)
att = 3;
}
if (att == 0xFFFF)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 4add7da24681..8e7e6928c936 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -85,18 +85,15 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
sdiodev->irq_wake = true;
/* must configure SDIO_CCCR_IENx to enable irq */
- data = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_0,
- SDIO_CCCR_IENx, &ret);
+ data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx,
- data, &ret);
+ brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
- /* redirect, configure ane enable io for interrupt signal */
+ /* redirect, configure and enable io for interrupt signal */
data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
- if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)
+ if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
data |= SDIO_SEPINT_ACT_HI;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT,
- data, &ret);
+ brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
return 0;
}
@@ -105,9 +102,8 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
brcmf_dbg(TRACE, "Entering\n");
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_BRCM_SEPINT,
- 0, NULL);
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_0, SDIO_CCCR_IENx, 0, NULL);
+ brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+ brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
if (sdiodev->irq_wake) {
disable_irq_wake(sdiodev->irq);
@@ -158,153 +154,147 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
}
#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
-u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
- int *err)
-{
- int status;
- s32 retry = 0;
- u8 data = 0;
-
- do {
- if (retry) /* wait for 1 ms till bus get settled down */
- udelay(1000);
- status = brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, fnc_num,
- addr, (u8 *) &data);
- } while (status != 0
- && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
- if (err)
- *err = status;
-
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n",
- fnc_num, addr, data);
-
- return data;
-}
-
-void
-brcmf_sdcard_cfg_write(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
- u8 data, int *err)
-{
- int status;
- s32 retry = 0;
-
- do {
- if (retry) /* wait for 1 ms till bus get settled down */
- udelay(1000);
- status = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, fnc_num,
- addr, (u8 *) &data);
- } while (status != 0
- && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
- if (err)
- *err = status;
-
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n",
- fnc_num, addr, data);
-}
-
int
brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
- int err = 0;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
- (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
- if (!err)
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SBADDRMID,
- (address >> 16) & SBSDIO_SBADDRMID_MASK,
- &err);
- if (!err)
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SBADDRHIGH,
- (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
- &err);
+ int err = 0, i;
+ u8 addr[3];
+ s32 retry;
+
+ addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
+ addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
+ addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
+
+ for (i = 0; i < 3; i++) {
+ retry = 0;
+ do {
+ if (retry)
+ usleep_range(1000, 2000);
+ err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
+ SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
+ &addr[i]);
+ } while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
+
+ if (err) {
+ brcmf_dbg(ERROR, "failed at addr:0x%0x\n",
+ SBSDIO_FUNC1_SBADDRLOW + i);
+ break;
+ }
+ }
return err;
}
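A minimal sketch of the address split the rewritten window setup performs: the 32-bit backplane address is reduced to three window bytes written to the consecutive SBADDRLOW/MID/HIGH registers. The mask values below are placeholders for illustration, not the driver's definitions.

/* illustrative only; compiles standalone */
#include <stdint.h>
#include <stdio.h>

#define ADDRLOW_MASK	0x80	/* placeholder for SBSDIO_SBADDRLOW_MASK */
#define ADDRMID_MASK	0xff	/* placeholder for SBSDIO_SBADDRMID_MASK */
#define ADDRHIGH_MASK	0xff	/* placeholder for SBSDIO_SBADDRHIGH_MASK */

int main(void)
{
	uint32_t address = 0x18000000;	/* example backplane address */
	uint8_t win[3];

	win[0] = (address >> 8) & ADDRLOW_MASK;		/* SBADDRLOW */
	win[1] = (address >> 16) & ADDRMID_MASK;	/* SBADDRMID */
	win[2] = (address >> 24) & ADDRHIGH_MASK;	/* SBADDRHIGH */

	printf("low=0x%02x mid=0x%02x high=0x%02x\n", win[0], win[1], win[2]);
	return 0;
}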
-u32 brcmf_sdcard_reg_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size)
+static int
+brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ void *data, bool write)
{
- int status;
- u32 word = 0;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
-
- brcmf_dbg(INFO, "fun = 1, addr = 0x%x\n", addr);
-
- if (bar0 != sdiodev->sbwad) {
- if (brcmf_sdcard_set_sbaddr_window(sdiodev, bar0))
- return 0xFFFFFFFF;
+ u8 func_num, reg_size;
+ u32 bar;
+ s32 retry = 0;
+ int ret;
- sdiodev->sbwad = bar0;
+ /*
+ * figure out how to read the register based on address range
+ * 0x00 ~ 0x7FF: function 0 CCCR and FBR
+ * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
+ * The rest: function 1 silicon backplane core registers
+ */
+ if ((addr & ~REG_F0_REG_MASK) == 0) {
+ func_num = SDIO_FUNC_0;
+ reg_size = 1;
+ } else if ((addr & ~REG_F1_MISC_MASK) == 0) {
+ func_num = SDIO_FUNC_1;
+ reg_size = 1;
+ } else {
+ func_num = SDIO_FUNC_1;
+ reg_size = 4;
+
+ /* Set the window for SB core register */
+ bar = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ if (bar != sdiodev->sbwad) {
+ ret = brcmf_sdcard_set_sbaddr_window(sdiodev, bar);
+ if (ret != 0) {
+ memset(data, 0xFF, reg_size);
+ return ret;
+ }
+ sdiodev->sbwad = bar;
+ }
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
}
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
- if (size == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ do {
+ if (!write)
+ memset(data, 0, reg_size);
+ if (retry) /* wait for 1 ms till bus get settled down */
+ usleep_range(1000, 2000);
+ if (reg_size == 1)
+ ret = brcmf_sdioh_request_byte(sdiodev, write,
+ func_num, addr, data);
+ else
+ ret = brcmf_sdioh_request_word(sdiodev, write,
+ func_num, addr, data, 4);
+ } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
- status = brcmf_sdioh_request_word(sdiodev, SDIOH_READ, SDIO_FUNC_1,
- addr, &word, size);
+ if (ret != 0)
+ brcmf_dbg(ERROR, "failed with %d\n", ret);
- sdiodev->regfail = (status != 0);
+ return ret;
+}
- brcmf_dbg(INFO, "u32data = 0x%x\n", word);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+{
+ u8 data;
+ int retval;
- /* if ok, return appropriately masked word */
- if (status == 0) {
- switch (size) {
- case sizeof(u8):
- return word & 0xff;
- case sizeof(u16):
- return word & 0xffff;
- case sizeof(u32):
- return word;
- default:
- sdiodev->regfail = true;
+ brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+ retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ brcmf_dbg(INFO, "data:0x%02x\n", data);
- }
- }
+ if (ret)
+ *ret = retval;
- /* otherwise, bad sdio access or invalid size */
- brcmf_dbg(ERROR, "error reading addr 0x%04x size %d\n", addr, size);
- return 0xFFFFFFFF;
+ return data;
}
-u32 brcmf_sdcard_reg_write(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size,
- u32 data)
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
- int status;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- int err = 0;
+ u32 data;
+ int retval;
- brcmf_dbg(INFO, "fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
- addr, size * 8, data);
+ brcmf_dbg(INFO, "addr:0x%08x\n", addr);
+ retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
+ brcmf_dbg(INFO, "data:0x%08x\n", data);
- if (bar0 != sdiodev->sbwad) {
- err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
- if (err)
- return err;
+ if (ret)
+ *ret = retval;
- sdiodev->sbwad = bar0;
- }
+ return data;
+}
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
- if (size == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- status =
- brcmf_sdioh_request_word(sdiodev, SDIOH_WRITE, SDIO_FUNC_1,
- addr, &data, size);
- sdiodev->regfail = (status != 0);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u8 data, int *ret)
+{
+ int retval;
- if (status == 0)
- return 0;
+ brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
+ retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
- brcmf_dbg(ERROR, "error writing 0x%08x to addr 0x%04x size %d\n",
- data, addr, size);
- return 0xFFFFFFFF;
+ if (ret)
+ *ret = retval;
}
-bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u32 data, int *ret)
{
- return sdiodev->regfail;
+ int retval;
+
+ brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
+ retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
+
+ if (ret)
+ *ret = retval;
}
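The register-map ranges in the helper's comment decide which SDIO function and access width each address gets; the byte/word wrappers above then reuse one code path. A standalone sketch of just that dispatch (mask values mirror the REG_F0_REG_MASK/REG_F1_MISC_MASK definitions this patch adds to sdio_host.h):

/* illustrative only: address classification, no I/O */
#include <stdint.h>

#define F0_REG_MASK	0x7FFu		/* 0x000..0x7FF: function 0 CCCR/FBR */
#define F1_MISC_MASK	0x1FFFFu	/* up to 0x1FFFF: function 1 misc registers */

static void classify(uint32_t addr, unsigned int *func, unsigned int *size)
{
	if ((addr & ~F0_REG_MASK) == 0) {
		*func = 0;		/* SDIO_FUNC_0, single-byte access */
		*size = 1;
	} else if ((addr & ~F1_MISC_MASK) == 0) {
		*func = 1;		/* SDIO_FUNC_1, single-byte access */
		*size = 1;
	} else {
		*func = 1;		/* backplane core register */
		*size = 4;		/* 4-byte access behind the SB window */
	}
}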
static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index dd07d33a927c..82f51dbd0d66 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -346,43 +346,17 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
return status;
}
-/* Read client card reg */
-static int
-brcmf_sdioh_card_regread(struct brcmf_sdio_dev *sdiodev, int func, u32 regaddr,
- int regsize, u32 *data)
-{
-
- if ((func == 0) || (regsize == 1)) {
- u8 temp = 0;
-
- brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, func, regaddr,
- &temp);
- *data = temp;
- *data &= 0xff;
- brcmf_dbg(DATA, "byte read data=0x%02x\n", *data);
- } else {
- brcmf_sdioh_request_word(sdiodev, SDIOH_READ, func, regaddr,
- data, regsize);
- if (regsize == 2)
- *data &= 0xffff;
-
- brcmf_dbg(DATA, "word read data=0x%08x\n", *data);
- }
-
- return SUCCESS;
-}
-
static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
{
/* read 24 bits and return valid 17 bit addr */
- int i;
+ int i, ret;
u32 scratch, regdata;
__le32 scratch_le;
u8 *ptr = (u8 *)&scratch_le;
for (i = 0; i < 3; i++) {
- if ((brcmf_sdioh_card_regread(sdiodev, 0, regaddr, 1,
- &regdata)) != SUCCESS)
+ regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret);
+ if (ret != 0)
brcmf_dbg(ERROR, "Can't read!\n");
*ptr++ = (u8) regdata;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 149ee67beb2e..1dbf2be478c8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -629,43 +629,29 @@ static bool data_ok(struct brcmf_sdio *bus)
* Reads a register in the SDIO hardware block. This block occupies a series of
* adresses on the 32 bit backplane bus.
*/
-static void
-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+static int
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- *retryvar = 0;
- do {
- *regvar = brcmf_sdcard_reg_read(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- sizeof(u32));
- } while (brcmf_sdcard_regfail(bus->sdiodev) &&
- (++(*retryvar) <= retry_limit));
- if (*retryvar) {
- bus->regfails += (*retryvar-1);
- if (*retryvar > retry_limit) {
- brcmf_dbg(ERROR, "FAILED READ %Xh\n", reg_offset);
- *regvar = 0;
- }
- }
+ int ret;
+
+ *regvar = brcmf_sdio_regrl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + offset, &ret);
+
+ return ret;
}
-static void
-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+static int
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- *retryvar = 0;
- do {
- brcmf_sdcard_reg_write(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- sizeof(u32), regval);
- } while (brcmf_sdcard_regfail(bus->sdiodev) &&
- (++(*retryvar) <= retry_limit));
- if (*retryvar) {
- bus->regfails += (*retryvar-1);
- if (*retryvar > retry_limit)
- brcmf_dbg(ERROR, "FAILED REGISTER WRITE %Xh\n",
- reg_offset);
- }
+ int ret;
+
+ brcmf_sdio_regwl(bus->sdiodev,
+ bus->ci->c_inf[idx].base + reg_offset,
+ regval, &ret);
+
+ return ret;
}
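Both the byte/word accessors and these sdreg32 wrappers now report failure through a return code rather than a caller-maintained retry count, so call sites collapse to the usual check-and-bail pattern, roughly (driver context, not standalone):

/* illustrative call pattern only */
u32 hmb_data;
int ret;

ret = r_sdreg32(bus, &hmb_data,
		offsetof(struct sdpcmd_regs, tohostmailboxdata));
if (ret == 0)
	ret = w_sdreg32(bus, SMB_INT_ACK,
			offsetof(struct sdpcmd_regs, tosbmailbox));
if (ret != 0)
	brcmf_dbg(ERROR, "mailbox access failed: %d\n", ret);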
#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
@@ -697,16 +683,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
if (err) {
brcmf_dbg(ERROR, "HT Avail request error: %d\n", err);
return -EBADE;
}
/* Check current status */
- clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
brcmf_dbg(ERROR, "HT Avail read error: %d\n", err);
return -EBADE;
@@ -715,9 +701,8 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
- devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "Devctl error setting CA: %d\n",
err);
@@ -725,30 +710,28 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
}
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
brcmf_dbg(INFO, "CLKCTL: set PENDING\n");
bus->clkstate = CLK_PENDING;
return 0;
} else if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
/* Otherwise, wait here (polling) for HT Avail */
timeout = jiffies +
msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- clkctl = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ clkctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
if (time_after(jiffies, timeout))
break;
else
@@ -781,17 +764,16 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
}
bus->clkstate = CLK_SDONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ clkreq, &err);
brcmf_dbg(INFO, "CLKCTL: turned OFF\n");
if (err) {
brcmf_dbg(ERROR, "Failed access turning clock off: %d\n",
@@ -874,7 +856,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
{
- uint retries = 0;
+ int ret;
brcmf_dbg(INFO, "request %s (currently %s)\n",
sleep ? "SLEEP" : "WAKE",
@@ -894,22 +876,20 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Tell device to start using OOB wakeup */
- w_sdreg32(bus, SMB_USE_OOB,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
- if (retries > retry_limit)
+ ret = w_sdreg32(bus, SMB_USE_OOB,
+ offsetof(struct sdpcmd_regs, tosbmailbox));
+ if (ret != 0)
brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
/* Turn off our contribution to the HT clock request */
brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
/* Isolate the bus */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_PADS_ISO, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
/* Change state */
bus->sleeping = true;
@@ -917,21 +897,20 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
} else {
/* Waking up: bus power up is ok, set local state */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ 0, NULL);
/* Make sure the controller has the bus up */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Send misc interrupt to indicate OOB not needed */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, tosbmailboxdata),
- &retries);
- if (retries <= retry_limit)
- w_sdreg32(bus, SMB_DEV_INT,
- offsetof(struct sdpcmd_regs, tosbmailbox),
- &retries);
-
- if (retries > retry_limit)
+ ret = w_sdreg32(bus, 0,
+ offsetof(struct sdpcmd_regs, tosbmailboxdata));
+ if (ret == 0)
+ ret = w_sdreg32(bus, SMB_DEV_INT,
+ offsetof(struct sdpcmd_regs, tosbmailbox));
+
+ if (ret != 0)
brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
/* Make sure we have SD bus access */
@@ -955,17 +934,17 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
u32 intstatus = 0;
u32 hmb_data;
u8 fcbits;
- uint retries = 0;
+ int ret;
brcmf_dbg(TRACE, "Enter\n");
/* Read mailbox data and ack that we did so */
- r_sdreg32(bus, &hmb_data,
- offsetof(struct sdpcmd_regs, tohostmailboxdata), &retries);
+ ret = r_sdreg32(bus, &hmb_data,
+ offsetof(struct sdpcmd_regs, tohostmailboxdata));
- if (retries <= retry_limit)
+ if (ret == 0)
w_sdreg32(bus, SMB_INT_ACK,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+ offsetof(struct sdpcmd_regs, tosbmailbox));
bus->f1regdata += 2;
/* Dongle recomposed rx frames, accept them again */
@@ -1040,17 +1019,16 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
if (abort)
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL,
- SFC_RF_TERM, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_RF_TERM, &err);
bus->f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_RFRAMEBCHI, NULL);
- lo = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+ hi = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCHI, &err);
+ lo = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_RFRAMEBCLO, &err);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
@@ -1070,11 +1048,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
if (rtx) {
bus->rxrtx++;
- w_sdreg32(bus, SMB_NAK,
- offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+ err = w_sdreg32(bus, SMB_NAK,
+ offsetof(struct sdpcmd_regs, tosbmailbox));
bus->f1regdata++;
- if (retries <= retry_limit)
+ if (err == 0)
bus->rxskip = true;
}
@@ -1082,7 +1060,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
bus->nextlen = 0;
/* If we can't reach the device, signal failure */
- if (err || brcmf_sdcard_regfail(bus->sdiodev))
+ if (err)
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
@@ -2178,21 +2156,16 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
bus->tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
- NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
+ hi = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -2219,7 +2192,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
u32 intstatus = 0;
- uint retries = 0;
int ret = 0, prec_out;
uint cnt = 0;
uint datalen;
@@ -2249,11 +2221,11 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
/* Check device status, signal pending interrupt */
- r_sdreg32(bus, &intstatus,
- offsetof(struct sdpcmd_regs, intstatus),
- &retries);
+ ret = r_sdreg32(bus, &intstatus,
+ offsetof(struct sdpcmd_regs,
+ intstatus));
bus->f2txdata++;
- if (brcmf_sdcard_regfail(bus->sdiodev))
+ if (ret != 0)
break;
if (intstatus & bus->hostintmask)
bus->ipend = true;
@@ -2275,7 +2247,6 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
{
u32 local_hostintmask;
u8 saveclk;
- uint retries;
int err;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
@@ -2303,7 +2274,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
local_hostintmask = bus->hostintmask;
bus->hostintmask = 0;
@@ -2311,24 +2282,23 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ saveclk = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err)
brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
/* Turn off the bus (F2), free any pending packets */
brcmf_dbg(INTR, "disable SDIO interrupts\n");
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
+ NULL);
/* Clear any pending interrupts now that F2 is disabled */
w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ offsetof(struct sdpcmd_regs, intstatus));
/* Turn off the backplane clock (only) */
brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
@@ -2373,12 +2343,12 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
{
u32 intstatus, newstatus = 0;
- uint retries = 0;
uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
uint txlimit = bus->txbound; /* Tx frames to send before resched */
uint framecnt = 0; /* Temporary counter of tx/rx frames */
bool rxdone = true; /* Flag for no more read data */
bool resched = false; /* Flag indicating resched wanted */
+ int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -2389,13 +2359,12 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* If waiting for HTAVAIL, check status */
if (bus->clkstate == CLK_PENDING) {
- int err;
u8 clkctl, devctl = 0;
#ifdef DEBUG
/* Check for inconsistent device control */
- devctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
@@ -2403,8 +2372,8 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
#endif /* DEBUG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
- clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ clkctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
brcmf_dbg(ERROR, "error reading CSR: %d\n",
err);
@@ -2415,17 +2384,16 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
devctl, clkctl);
if (SBSDIO_HTAV(clkctl)) {
- devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
err);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+ devctl, &err);
if (err) {
brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
err);
@@ -2447,17 +2415,17 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
/* Pending interrupt indicates new device status */
if (bus->ipend) {
bus->ipend = false;
- r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ err = r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus));
bus->f1regdata++;
- if (brcmf_sdcard_regfail(bus->sdiodev))
+ if (err != 0)
newstatus = 0;
newstatus &= bus->hostintmask;
bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
if (newstatus) {
- w_sdreg32(bus, newstatus,
- offsetof(struct sdpcmd_regs, intstatus),
- &retries);
+ err = w_sdreg32(bus, newstatus,
+ offsetof(struct sdpcmd_regs,
+ intstatus));
bus->f1regdata++;
}
}
@@ -2472,11 +2440,11 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
*/
if (intstatus & I_HMB_FC_CHANGE) {
intstatus &= ~I_HMB_FC_CHANGE;
- w_sdreg32(bus, I_HMB_FC_CHANGE,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ err = w_sdreg32(bus, I_HMB_FC_CHANGE,
+ offsetof(struct sdpcmd_regs, intstatus));
- r_sdreg32(bus, &newstatus,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ err = r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus));
bus->f1regdata += 2;
bus->fcstate =
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
@@ -2546,21 +2514,18 @@ clkwait:
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
- NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, &err);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
+ hi = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ &err);
+ lo = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ &err);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -2587,10 +2552,8 @@ clkwait:
else await next interrupt */
/* On failed register access, all bets are off:
no resched or interrupts */
- if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) ||
- brcmf_sdcard_regfail(bus->sdiodev)) {
- brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
- brcmf_sdcard_regfail(bus->sdiodev));
+ if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
+ brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n");
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->intstatus = 0;
} else if (bus->clkstate == CLK_PENDING) {
@@ -2886,19 +2849,16 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI,
- NULL);
- lo = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
+ hi = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+ lo = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_WFRAMEBCLO, NULL);
bus->f1regdata += 2;
if (hi == 0 && lo == 0)
break;
@@ -3188,7 +3148,6 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
{
- uint retries;
int bcmerror = 0;
struct chip_info *ci = bus->ci;
@@ -3222,7 +3181,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
}
w_sdreg32(bus, 0xFFFFFFFF,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
+ offsetof(struct sdpcmd_regs, intstatus));
ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
@@ -3444,7 +3403,6 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
unsigned long timeout;
- uint retries = 0;
u8 ready, enable;
int err, ret = 0;
u8 saveclk;
@@ -3472,13 +3430,11 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
goto exit;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ saveclk = brcmf_sdio_regrb(bus->sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
@@ -3487,17 +3443,16 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Enable function 2 (frame transfers) */
w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
- offsetof(struct sdpcmd_regs, tosbmailboxdata), &retries);
+ offsetof(struct sdpcmd_regs, tosbmailboxdata));
enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- enable, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
ready = 0;
while (enable != ready) {
- ready = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_0,
- SDIO_CCCR_IORx, NULL);
+ ready = brcmf_sdio_regrb(bus->sdiodev,
+ SDIO_CCCR_IORx, NULL);
if (time_after(jiffies, timeout))
break;
else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
@@ -3512,21 +3467,18 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
w_sdreg32(bus, bus->hostintmask,
- offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ offsetof(struct sdpcmd_regs, hostintmask));
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_WATERMARK, 8, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
} else {
/* Disable F2 again */
enable = SDIO_FUNC_ENABLE_1;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0,
- SDIO_CCCR_IOEx, enable, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
ret = -ENODEV;
}
/* Restore previous clock setting */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
if (ret == 0) {
ret = brcmf_sdio_intr_register(bus->sdiodev);
@@ -3606,9 +3558,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->dpc_sched) {
u8 devpend;
- devpend = brcmf_sdcard_cfg_read(bus->sdiodev,
- SDIO_FUNC_0, SDIO_CCCR_INTx,
- NULL);
+ devpend = brcmf_sdio_regrb(bus->sdiodev,
+ SDIO_CCCR_INTx,
+ NULL);
intstatus =
devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
@@ -3732,24 +3684,18 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
bus->alp_only = true;
- /* Return the window to backplane enumeration space for core access */
- if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, SI_ENUM_BASE))
- brcmf_dbg(ERROR, "FAILED to return to SI_ENUM_BASE\n");
-
pr_debug("F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdcard_reg_read(bus->sdiodev, SI_ENUM_BASE, 4));
+ brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
* Force PLL off until brcmf_sdio_chip_attach()
* programs PLL control regs
*/
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- BRCMF_INIT_CLKCTL1, &err);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ BRCMF_INIT_CLKCTL1, &err);
if (!err)
- clkctl =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ clkctl = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
@@ -3782,9 +3728,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
reg_addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, corecontrol);
- reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
- brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
- reg_val | CC_BPRESEN);
+ reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL);
+ brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3809,16 +3754,15 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
/* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->sleeping = false;
bus->rxflow = false;
/* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 1534efc21631..f8e1f1c84d08 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -93,8 +93,9 @@ brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidhigh),
+ NULL);
return SBCOREREV(regdata);
}
@@ -118,8 +119,9 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
return (SSB_TMSLOW_CLOCK == regdata);
@@ -135,13 +137,13 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ NULL);
ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
return ret;
@@ -151,84 +153,85 @@ static void
brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
- u32 regdata;
+ u32 regdata, base;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+ base = ci->c_inf[idx].base;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if (regdata & SSB_TMSLOW_RESET)
return;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
/*
* set target reject and spin until busy is clear
* (preserve core-specific bits)
*/
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- 4, regdata | SSB_TMSLOW_REJECT);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
+ NULL);
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata | SSB_TMSLOW_REJECT, NULL);
+
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
+ NULL);
udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) &
+ SPINWAIT((brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL) &
SSB_TMSHIGH_BUSY), 100000);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbtmstatehigh),
+ NULL);
if (regdata & SSB_TMSHIGH_BUSY)
brcmf_dbg(ERROR, "core state still busy\n");
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
+ NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4) |
- SSB_IMSTATE_REJECT;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
- regdata);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
+ regdata |= SSB_IMSTATE_REJECT;
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+ SPINWAIT((brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL) &
SSB_IMSTATE_BUSY), 100000);
}
/* set reset and reject while enabling the clocks */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
- (SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
- SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ regdata, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbtmstatelow),
+ NULL);
udelay(10);
/* clear the initiator reject bit */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
+ NULL);
if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
- ~SSB_IMSTATE_REJECT;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
- regdata);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(base, sbimstate),
+ NULL);
+ regdata &= ~SSB_IMSTATE_REJECT;
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbimstate),
+ regdata, NULL);
}
}
/* leave reset and reject asserted */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ brcmf_sdio_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
udelay(1);
}
@@ -242,20 +245,19 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
/* if core is already in reset, just return */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ NULL);
if ((regdata & BCMA_RESET_CTL_RESET) != 0)
return;
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- 4, 0);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL, 0, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
udelay(10);
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 4, BCMA_RESET_CTL_RESET);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET, NULL);
udelay(1);
}
@@ -279,41 +281,47 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
* set reset while enabling the clock and
* forcing them on throughout the core
*/
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdio_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
+ NULL);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
/* clear any serror */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ NULL);
if (regdata & SSB_TMSHIGH_SERR)
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0);
+ brcmf_sdio_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
+ 0, NULL);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
+ NULL);
if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO));
+ brcmf_sdio_regwl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate),
+ regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
+ NULL);
/* clear reset and allow it to propagate throughout the core */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
/* leave clock enabled */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- 4, SSB_TMSLOW_CLOCK);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdio_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ SSB_TMSLOW_CLOCK, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ NULL);
udelay(1);
}
@@ -330,18 +338,18 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid);
/* now do initialization sequence */
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- 4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- 4, 0);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 0, NULL);
udelay(1);
- brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- 4, BCMA_IOCTL_CLK);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ brcmf_sdio_regwl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ BCMA_IOCTL_CLK, NULL);
+ regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ NULL);
udelay(1);
}
@@ -358,8 +366,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
*/
ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
ci->c_inf[0].base = regs;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipid), 4);
+ regdata = brcmf_sdio_regrl(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipid),
+ NULL);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
@@ -428,8 +437,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
if (err) {
brcmf_dbg(ERROR, "error writing for HT off\n");
return err;
@@ -437,8 +445,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
- clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ clkval = brcmf_sdio_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) != clkset) {
brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -446,8 +454,8 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
return -EACCES;
}
- SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ SPINWAIT(((clkval = brcmf_sdio_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
@@ -457,13 +465,11 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
udelay(65);
/* Also, disable the extra SDIO pull-ups */
- brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+ brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
return 0;
}
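The ALP request above is followed by a SPINWAIT: a bounded busy-poll on the clock-control readback. Schematically, the pattern is:

/* illustrative only: poll a readback until ready or the time budget runs out */
#include <stdbool.h>

static bool poll_until(bool (*ready)(void *ctx), void *ctx,
		       unsigned int budget_us, unsigned int step_us,
		       void (*delay_us)(unsigned int us))
{
	unsigned int waited;

	for (waited = 0; !ready(ctx); waited += step_us) {
		if (waited >= budget_us)
			return false;	/* timed out; caller reports an error */
		delay_us(step_us);
	}
	return true;
}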
@@ -472,18 +478,22 @@ static void
brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci)
{
+ u32 base = ci->c_inf[0].base;
+
/* get chipcommon rev */
ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
/* get chipcommon capabilites */
- ci->c_inf[0].caps =
- brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, capabilities), 4);
+ ci->c_inf[0].caps = brcmf_sdio_regrl(sdiodev,
+ CORE_CC_REG(base, capabilities),
+ NULL);
/* get pmu caps & rev */
if (ci->c_inf[0].caps & CC_CAP_PMU) {
- ci->pmucaps = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4);
+ ci->pmucaps =
+ brcmf_sdio_regrl(sdiodev,
+ CORE_CC_REG(base, pmucapabilities),
+ NULL);
ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
}
@@ -523,10 +533,10 @@ int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
brcmf_sdio_chip_buscoresetup(sdiodev, ci);
- brcmf_sdcard_reg_write(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0);
- brcmf_sdcard_reg_write(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0);
+ brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
+ 0, NULL);
+ brcmf_sdio_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
+ 0, NULL);
*ci_ptr = ci;
return 0;
@@ -562,6 +572,7 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
u32 str_mask = 0;
u32 str_shift = 0;
char chn[8];
+ u32 base = ci->c_inf[0].base;
if (!(ci->c_inf[0].caps & CC_CAP_PMU))
return;
@@ -591,17 +602,17 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
}
}
- brcmf_sdcard_reg_write(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
- 4, 1);
- cc_data_temp = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4);
+ brcmf_sdio_regwl(sdiodev, CORE_CC_REG(base, chipcontrol_addr),
+ 1, NULL);
+ cc_data_temp =
+ brcmf_sdio_regrl(sdiodev,
+ CORE_CC_REG(base, chipcontrol_addr),
+ NULL);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
- 4, cc_data_temp);
+ brcmf_sdio_regwl(sdiodev, CORE_CC_REG(base, chipcontrol_addr),
+ cc_data_temp, NULL);
brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
drivestrength, cc_data_temp);
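The drive-strength update above follows the usual masked read-modify-write shape: read the current word, clear the strength field, or in the shifted selection, write it back. A generic helper for that shape:

/* illustrative only: replace a masked field within a register value */
#include <stdint.h>

static uint32_t update_field(uint32_t regval, uint32_t mask,
			     unsigned int shift, uint32_t selection)
{
	regval &= ~mask;			/* drop the old field */
	regval |= (selection << shift) & mask;	/* insert the new selection */
	return regval;
}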
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 7010eaf71f99..29bf78d264e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -40,6 +40,10 @@
/* Maximum number of I/O funcs */
#define SDIOD_MAX_IOFUNCS 7
+/* mask of register map */
+#define REG_F0_REG_MASK 0x7FF
+#define REG_F1_MISC_MASK 0x1FFFF
+
/* as of sdiod rev 0, supports 3 functions */
#define SBSDIO_NUM_FUNCTION 3
@@ -142,7 +146,6 @@ struct brcmf_sdio_dev {
u8 num_funcs; /* Supported funcs on client */
u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
u32 sbwad; /* Save backplane window address */
- bool regfail; /* status of last reg_r/w call */
void *bus;
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
@@ -164,31 +167,13 @@ struct brcmf_sdio_dev {
extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
-/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
- * fn: function number
- * addr: unmodified SDIO-space address
- * data: data byte to write
- * err: pointer to error code (or NULL)
- */
-extern u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint func,
- u32 addr, int *err);
-extern void brcmf_sdcard_cfg_write(struct brcmf_sdio_dev *sdiodev, uint func,
- u32 addr, u8 data, int *err);
-
-/* Synchronous access to device (client) core registers via CMD53 to F1.
- * addr: backplane address (i.e. >= regsva from attach)
- * size: register width in bytes (2 or 4)
- * data: data for register write
- */
-extern u32
-brcmf_sdcard_reg_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size);
-
-extern u32
-brcmf_sdcard_reg_write(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size,
- u32 data);
-
-/* Indicate if last reg read/write failed */
-extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
+/* sdio device register access interface */
+extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u8 data, int *ret);
+extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
+ u32 data, int *ret);
/* Buffer transfer to/from device (client) core via cmd53.
* fn: function number
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index c5a34ffe6459..a299d42da8e7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <net/cfg80211.h>
#include <defs.h>
@@ -1239,7 +1240,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
return -EINVAL;
}
- devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */
+ devinfo->image = vmalloc(fw->size); /* plus nvram */
if (!devinfo->image)
return -ENOMEM;
@@ -1603,7 +1604,7 @@ static struct usb_driver brcmf_usbdrvr = {
void brcmf_usb_exit(void)
{
usb_deregister(&brcmf_usbdrvr);
- kfree(g_image.data);
+ vfree(g_image.data);
g_image.data = NULL;
g_image.len = 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
index c2eb2d0af386..e227c4c68ef9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -39,10 +39,7 @@ BRCMSMAC_OFILES := \
phy/phytbl_lcn.o \
phy/phytbl_n.o \
phy/phy_qmath.o \
- otp.o \
- srom.o \
dma.o \
- nicpci.o \
brcms_trace_events.o
MODULEPFX := brcmsmac
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index c93ea35bceec..6d8b7213643a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -19,7 +19,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
-#include <linux/pci.h>
#include <defs.h>
#include <chipcommon.h>
@@ -29,8 +28,6 @@
#include "types.h"
#include "pub.h"
#include "pmu.h"
-#include "srom.h"
-#include "nicpci.h"
#include "aiutils.h"
/* slow_clk_ctl */
@@ -321,7 +318,6 @@
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID)
#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
@@ -454,36 +450,9 @@ struct aidmp {
u32 componentid3; /* 0xffc */
};
-/* return true if PCIE capability exists in the pci config space */
-static bool ai_ispcie(struct si_info *sii)
-{
- u8 cap_ptr;
-
- cap_ptr =
- pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
- NULL);
- if (!cap_ptr)
- return false;
-
- return true;
-}
-
-static bool ai_buscore_prep(struct si_info *sii)
-{
- /* kludge to enable the clock on the 4306 which lacks a slowclock */
- if (!ai_ispcie(sii))
- ai_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
- return true;
-}
-
static bool
ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
{
- struct bcma_device *pci = NULL;
- struct bcma_device *pcie = NULL;
- struct bcma_device *core;
-
-
/* no cores found, bail out */
if (cc->bus->nr_cores == 0)
return false;
@@ -492,8 +461,7 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
sii->pub.ccrev = cc->id.rev;
/* get chipcommon chipstatus */
- if (ai_get_ccrev(&sii->pub) >= 11)
- sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
+ sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
/* get chipcommon capabilites */
sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
@@ -506,64 +474,18 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
}
/* figure out buscore */
- list_for_each_entry(core, &cc->bus->cores, list) {
- uint cid, crev;
-
- cid = core->id.id;
- crev = core->id.rev;
-
- if (cid == PCI_CORE_ID) {
- pci = core;
- } else if (cid == PCIE_CORE_ID) {
- pcie = core;
- }
- }
-
- if (pci && pcie) {
- if (ai_ispcie(sii))
- pci = NULL;
- else
- pcie = NULL;
- }
- if (pci) {
- sii->buscore = pci;
- } else if (pcie) {
- sii->buscore = pcie;
- }
-
- /* fixup necessary chip/core configurations */
- if (!sii->pch) {
- sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
- if (sii->pch == NULL)
- return false;
- }
- if (ai_pci_fixcfg(&sii->pub))
- return false;
+ sii->buscore = ai_findcore(&sii->pub, PCIE_CORE_ID, 0);
return true;
}
-/*
- * get boardtype and boardrev
- */
-static __used void ai_nvram_process(struct si_info *sii)
-{
- uint w = 0;
-
- /* do a pci config read to get subsystem id and subvendor id */
- pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
-
- sii->pub.boardvendor = w & 0xffff;
- sii->pub.boardtype = (w >> 16) & 0xffff;
-}
-
static struct si_info *ai_doattach(struct si_info *sii,
struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
u32 w, savewin;
struct bcma_device *cc;
- uint socitype;
+ struct ssb_sprom *sprom = &pbus->sprom;
savewin = 0;
@@ -573,38 +495,15 @@ static struct si_info *ai_doattach(struct si_info *sii,
/* switch to Chipcommon core */
cc = pbus->drv_cc.core;
- /* bus/core/clk setup for register access */
- if (!ai_buscore_prep(sii))
- return NULL;
+ sih->chip = pbus->chipinfo.id;
+ sih->chiprev = pbus->chipinfo.rev;
+ sih->chippkg = pbus->chipinfo.pkg;
+ sih->boardvendor = pbus->boardinfo.vendor;
+ sih->boardtype = pbus->boardinfo.type;
- /*
- * ChipID recognition.
- * We assume we can read chipid at offset 0 from the regs arg.
- * If we add other chiptypes (or if we need to support old sdio
- * hosts w/o chipcommon), some way of recognizing them needs to
- * be added here.
- */
- w = bcma_read32(cc, CHIPCREGOFFS(chipid));
- socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
- /* Might as wll fill in chip id rev & pkg */
- sih->chip = w & CID_ID_MASK;
- sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
- sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
-
- /* scan for cores */
- if (socitype != SOCI_AI)
- return NULL;
-
- SI_MSG("Found chip type AI (0x%08x)\n", w);
if (!ai_buscore_setup(sii, cc))
goto exit;
- /* Init nvram from sprom/otp if they exist */
- if (srom_var_init(&sii->pub))
- goto exit;
-
- ai_nvram_process(sii);
-
/* === NVRAM, clock is ready === */
bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
@@ -617,15 +516,13 @@ static struct si_info *ai_doattach(struct si_info *sii,
}
/* setup the GPIO based LED powersave register */
- w = getintvar(sih, BRCMS_SROM_LEDDC);
+ w = (sprom->leddc_on_time << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
+ (sprom->leddc_off_time << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT);
if (w == 0)
w = DEFAULT_GPIOTIMERVAL;
ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
~0, w);
- if (PCIE(sih))
- pcicore_attach(sii->pch, SI_DOATTACH);
-
if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
/*
* enable 12 mA drive strenth for 43224 and
@@ -659,9 +556,6 @@ static struct si_info *ai_doattach(struct si_info *sii,
return sii;
exit:
- if (sii->pch)
- pcicore_deinit(sii->pch);
- sii->pch = NULL;
return NULL;
}
@@ -700,11 +594,6 @@ void ai_detach(struct si_pub *sih)
if (sii == NULL)
return;
- if (sii->pch)
- pcicore_deinit(sii->pch);
- sii->pch = NULL;
-
- srom_free_vars(sih);
kfree(sii);
}
@@ -755,21 +644,7 @@ uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
/* return the slow clock source - LPO, XTAL, or PCI */
static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
{
- struct si_info *sii;
- u32 val;
-
- sii = (struct si_info *)sih;
- if (ai_get_ccrev(&sii->pub) < 6) {
- pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
- &val);
- if (val & PCI_CFG_GPIO_SCS)
- return SCC_SS_PCI;
- return SCC_SS_XTAL;
- } else if (ai_get_ccrev(&sii->pub) < 10) {
- return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
- SCC_SS_MASK;
- } else /* Insta-clock */
- return SCC_SS_XTAL;
+ return SCC_SS_XTAL;
}
/*
@@ -779,36 +654,12 @@ static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
struct bcma_device *cc)
{
- u32 slowclk;
uint div;
- slowclk = ai_slowclk_src(sih, cc);
- if (ai_get_ccrev(sih) < 6) {
- if (slowclk == SCC_SS_PCI)
- return max_freq ? (PCIMAXFREQ / 64)
- : (PCIMINFREQ / 64);
- else
- return max_freq ? (XTALMAXFREQ / 32)
- : (XTALMINFREQ / 32);
- } else if (ai_get_ccrev(sih) < 10) {
- div = 4 *
- (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
- SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
- if (slowclk == SCC_SS_LPO)
- return max_freq ? LPOMAXFREQ : LPOMINFREQ;
- else if (slowclk == SCC_SS_XTAL)
- return max_freq ? (XTALMAXFREQ / div)
- : (XTALMINFREQ / div);
- else if (slowclk == SCC_SS_PCI)
- return max_freq ? (PCIMAXFREQ / div)
- : (PCIMINFREQ / div);
- } else {
- /* Chipc rev 10 is InstaClock */
- div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
- div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
- return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
- }
- return 0;
+ /* Chipc rev 10 is InstaClock */
+ div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
+ div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
+ return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
}
static void
@@ -831,8 +682,7 @@ ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
/* Starting with 4318 it is ILP that is used for the delays */
slowmaxfreq =
- ai_slowclk_freq(sih,
- (ai_get_ccrev(sih) >= 10) ? false : true, cc);
+ ai_slowclk_freq(sih, false, cc);
pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
@@ -854,9 +704,8 @@ void ai_clkctl_init(struct si_pub *sih)
return;
/* set all Instaclk chip ILP to 1 MHz */
- if (ai_get_ccrev(sih) >= 10)
- bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
- (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+ bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
ai_clkctl_setdelay(sih, cc);
}
@@ -891,140 +740,6 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
return fpdelay;
}
-/* turn primary xtal and/or pll off/on */
-int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
-{
- struct si_info *sii;
- u32 in, out, outen;
-
- sii = (struct si_info *)sih;
-
- /* pcie core doesn't have any mapping to control the xtal pu */
- if (PCIE(sih))
- return -1;
-
- pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
- pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
- pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
-
- /*
- * Avoid glitching the clock if GPRS is already using it.
- * We can't actually read the state of the PLLPD so we infer it
- * by the value of XTAL_PU which *is* readable via gpioin.
- */
- if (on && (in & PCI_CFG_GPIO_XTAL))
- return 0;
-
- if (what & XTAL)
- outen |= PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- outen |= PCI_CFG_GPIO_PLL;
-
- if (on) {
- /* turn primary xtal on */
- if (what & XTAL) {
- out |= PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUTEN, outen);
- udelay(XTAL_ON_DELAY);
- }
-
- /* turn pll on */
- if (what & PLL) {
- out &= ~PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUT, out);
- mdelay(2);
- }
- } else {
- if (what & XTAL)
- out &= ~PCI_CFG_GPIO_XTAL;
- if (what & PLL)
- out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pcibus,
- PCI_GPIO_OUTEN, outen);
- }
-
- return 0;
-}
-
-/* clk control mechanism through chipcommon, no policy checking */
-static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
-{
- struct bcma_device *cc;
- u32 scc;
-
- /* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (ai_get_ccrev(&sii->pub) < 6)
- return false;
-
- cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
-
- if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
- (ai_get_ccrev(&sii->pub) < 20))
- return mode == CLK_FAST;
-
- switch (mode) {
- case CLK_FAST: /* FORCEHT, fast (pll) clock */
- if (ai_get_ccrev(&sii->pub) < 10) {
- /*
- * don't forget to force xtal back
- * on before we clear SCC_DYN_XTAL..
- */
- ai_clkctl_xtal(&sii->pub, XTAL, ON);
- bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
- (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
- } else if (ai_get_ccrev(&sii->pub) < 20) {
- bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
- } else {
- bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
- }
-
- /* wait for the PLL */
- if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
- u32 htavail = CCS_HTAVAIL;
- SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
- htavail) == 0), PMU_MAX_TRANSITION_DLY);
- } else {
- udelay(PLL_DELAY);
- }
- break;
-
- case CLK_DYNAMIC: /* enable dynamic clock control */
- if (ai_get_ccrev(&sii->pub) < 10) {
- scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
- scc &= ~(SCC_FS | SCC_IP | SCC_XC);
- if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
- scc |= SCC_XC;
- bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
-
- /*
- * for dynamic control, we have to
- * release our xtal_pu "force on"
- */
- if (scc & SCC_XC)
- ai_clkctl_xtal(&sii->pub, XTAL, OFF);
- } else if (ai_get_ccrev(&sii->pub) < 20) {
- /* Instaclock */
- bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
- } else {
- bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
- }
- break;
-
- default:
- break;
- }
-
- return mode == CLK_FAST;
-}
-
/*
* clock control policy function through chipcommon
*
@@ -1033,133 +748,53 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
* this is a wrapper over the next internal function
* to allow flexible policy settings for outside caller
*/
-bool ai_clkctl_cc(struct si_pub *sih, uint mode)
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
{
struct si_info *sii;
+ struct bcma_device *cc;
sii = (struct si_info *)sih;
- /* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (ai_get_ccrev(sih) < 6)
- return false;
-
if (PCI_FORCEHT(sih))
- return mode == CLK_FAST;
+ return mode == BCMA_CLKMODE_FAST;
- return _ai_clkctl_cc(sii, mode);
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_set_clockmode(cc, mode);
+ return mode == BCMA_CLKMODE_FAST;
}
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
+ struct bcma_device *cc;
sii = (struct si_info *)sih;
- if (PCI_FORCEHT(sih))
- _ai_clkctl_cc(sii, CLK_FAST);
+ if (PCI_FORCEHT(sih)) {
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_set_clockmode(cc, BCMA_CLKMODE_FAST);
+ }
if (PCIE(sih))
- pcicore_up(sii->pch, SI_PCIUP);
-
-}
-
-/* Unconfigure and/or apply various WARs when system is going to sleep mode */
-void ai_pci_sleep(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
-
- pcicore_sleep(sii->pch);
+ bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
}
/* Unconfigure and/or apply various WARs when going down */
void ai_pci_down(struct si_pub *sih)
{
struct si_info *sii;
+ struct bcma_device *cc;
sii = (struct si_info *)sih;
/* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sih))
- _ai_clkctl_cc(sii, CLK_DYNAMIC);
-
- pcicore_down(sii->pch, SI_PCIDOWN);
-}
-
-/*
- * Configure the pci core for pci client (NIC) action
- * coremask is the bitvec of cores by index to be enabled.
- */
-void ai_pci_setup(struct si_pub *sih, uint coremask)
-{
- struct si_info *sii;
- u32 w;
-
- sii = (struct si_info *)sih;
-
- /*
- * Enable sb->pci interrupts. Assume
- * PCI rev 2.3 support was added in pci core rev 6 and things changed..
- */
- if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
- /* pci config write to set this core bit in PCIIntMask */
- pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
- w |= (coremask << PCI_SBIM_SHIFT);
- pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
- }
-
- if (PCI(sih)) {
- pcicore_pci_setup(sii->pch);
+ if (PCI_FORCEHT(sih)) {
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
+ bcma_core_set_clockmode(cc, BCMA_CLKMODE_DYNAMIC);
}
-}
-/*
- * Fixup SROMless PCI device's configuration.
- * The current core may be changed upon return.
- */
-int ai_pci_fixcfg(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- /* Fixup PI in SROM shadow area to enable the correct PCI core access */
- /* check 'pi' is correct and fix it if not */
- pcicore_fixcfg(sii->pch);
- pcicore_hwup(sii->pch);
- return 0;
-}
-
-/* mask&set gpiocontrol bits */
-u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
-{
- uint regoff;
-
- regoff = offsetof(struct chipcregs, gpiocontrol);
- return ai_cc_reg(sih, regoff, mask, val);
-}
-
-void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
-{
- struct bcma_device *cc;
- u32 val;
-
- cc = ai_findcore(sih, CC_CORE_ID, 0);
-
- if (on) {
- if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
- /* Ext PA Controls for 4331 12x9 Package */
- bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
- CCTRL4331_EXTPA_EN |
- CCTRL4331_EXTPA_ON_GPIO2_5);
- else
- /* Ext PA Controls for 4331 12x12 Package */
- bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
- CCTRL4331_EXTPA_EN);
- } else {
- val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
- bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
- ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
- }
+ if (PCIE(sih))
+ bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
}
/* Enable BT-COEX & Ex-PA for 4313 */
@@ -1181,6 +816,9 @@ bool ai_deviceremoved(struct si_pub *sih)
sii = (struct si_info *)sih;
+ if (sii->icbus->hosttype != BCMA_HOSTTYPE_PCI)
+ return false;
+
pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
return true;
@@ -1188,45 +826,6 @@ bool ai_deviceremoved(struct si_pub *sih)
return false;
}
-bool ai_is_sprom_available(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- if (ai_get_ccrev(sih) >= 31) {
- struct bcma_device *cc;
- u32 sromctrl;
-
- if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
- return false;
-
- cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
- return sromctrl & SRC_PRESENT;
- }
-
- switch (ai_get_chip_id(sih)) {
- case BCM4313_CHIP_ID:
- return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
- default:
- return true;
- }
-}
-
-bool ai_is_otp_disabled(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- switch (ai_get_chip_id(sih)) {
- case BCM4313_CHIP_ID:
- return (sii->chipst & CST4313_OTP_PRESENT) == 0;
- /* These chips always have their OTP on */
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- default:
- return false;
- }
-}
-
uint ai_get_buscoretype(struct si_pub *sih)
{
struct si_info *sii = (struct si_info *)sih;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index f84c6f781692..d9f04a683bdb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -113,10 +113,6 @@
#define XTAL 0x1 /* primary crystal oscillator (2050) */
#define PLL 0x2 /* main chip pll */
-/* clkctl clk mode */
-#define CLK_FAST 0 /* force fast (pll) clock */
-#define CLK_DYNAMIC 2 /* enable dynamic clock control */
-
/* GPIO usage priorities */
#define GPIO_DRV_PRIORITY 0 /* Driver */
#define GPIO_APP_PRIORITY 1 /* Application */
@@ -172,9 +168,7 @@ struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
struct bcma_bus *icbus; /* handle to soc interconnect bus */
struct pci_dev *pcibus; /* handle to pci bus */
- struct pcicore_info *pch; /* PCI/E core handle */
struct bcma_device *buscore;
- struct list_head var_list; /* list of srom variables */
u32 chipst; /* chip status */
};
@@ -197,38 +191,20 @@ extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
extern struct si_pub *ai_attach(struct bcma_bus *pbus);
extern void ai_detach(struct si_pub *sih);
extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_pci_setup(struct si_pub *sih, uint coremask);
extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
-extern int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on);
extern bool ai_deviceremoved(struct si_pub *sih);
-extern u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val,
- u8 priority);
-
-/* OTP status */
-extern bool ai_is_otp_disabled(struct si_pub *sih);
-
-/* SPROM availability */
-extern bool ai_is_sprom_available(struct si_pub *sih);
-extern void ai_pci_sleep(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
extern void ai_pci_up(struct si_pub *sih);
-extern int ai_pci_fixcfg(struct si_pub *sih);
-extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
extern uint ai_get_buscoretype(struct si_pub *sih);
extern uint ai_get_buscorerev(struct si_pub *sih);
-static inline int ai_get_ccrev(struct si_pub *sih)
-{
- return sih->ccrev;
-}
-
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
return sih->cccaps;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
index a47ce25cb9a2..55e12c327911 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
@@ -108,7 +108,7 @@ brcms_c_antsel_init_cfg(struct antsel_info *asi, struct brcms_antselcfg *antsel,
struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
{
struct antsel_info *asi;
- struct si_pub *sih = wlc->hw->sih;
+ struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
asi = kzalloc(sizeof(struct antsel_info), GFP_ATOMIC);
if (!asi)
@@ -118,7 +118,7 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
asi->pub = wlc->pub;
asi->antsel_type = ANTSEL_NA;
asi->antsel_avail = false;
- asi->antsel_antswitch = (u8) getintvar(sih, BRCMS_SROM_ANTSWITCH);
+ asi->antsel_antswitch = sprom->antswitch;
if ((asi->pub->sromrev >= 4) && (asi->antsel_antswitch != 0)) {
switch (asi->antsel_antswitch) {
@@ -128,12 +128,12 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
/* 4321/2 board with 2x3 switch logic */
asi->antsel_type = ANTSEL_2x3;
/* Antenna selection availability */
- if (((u16) getintvar(sih, BRCMS_SROM_AA2G) == 7) ||
- ((u16) getintvar(sih, BRCMS_SROM_AA5G) == 7)) {
+ if ((sprom->ant_available_bg == 7) ||
+ (sprom->ant_available_a == 7)) {
asi->antsel_avail = true;
} else if (
- (u16) getintvar(sih, BRCMS_SROM_AA2G) == 3 ||
- (u16) getintvar(sih, BRCMS_SROM_AA5G) == 3) {
+ sprom->ant_available_bg == 3 ||
+ sprom->ant_available_a == 3) {
asi->antsel_avail = false;
} else {
asi->antsel_avail = false;
@@ -146,8 +146,8 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
break;
}
} else if ((asi->pub->sromrev == 4) &&
- ((u16) getintvar(sih, BRCMS_SROM_AA2G) == 7) &&
- ((u16) getintvar(sih, BRCMS_SROM_AA5G) == 0)) {
+ (sprom->ant_available_bg == 7) &&
+ (sprom->ant_available_a == 0)) {
/* hack to match old 4321CB2 cards with 2of3 antenna switch */
asi->antsel_type = ANTSEL_2x3;
asi->antsel_avail = true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 0efe88e25a9a..eb77ac3cfb6b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -1110,7 +1110,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
char country_abbrev[BRCM_CNTRY_BUF_SZ];
const struct country_info *country;
struct brcms_pub *pub = wlc->pub;
- char *ccode;
+ struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
@@ -1122,9 +1122,8 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
wlc->cmi = wlc_cm;
/* store the country code for passing up as a regulatory hint */
- ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE);
- if (ccode && brcms_c_country_valid(ccode))
- strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
+ if (sprom->alpha2 && brcms_c_country_valid(sprom->alpha2))
+ strncpy(wlc->pub->srom_ccode, sprom->alpha2, sizeof(sprom->alpha2));
/*
* internal country information which must match
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index aa15558f75c8..50f92a0b7c41 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -25,7 +25,6 @@
#include <linux/bcma/bcma.h>
#include <net/mac80211.h>
#include <defs.h>
-#include "nicpci.h"
#include "phy/phy_int.h"
#include "d11.h"
#include "channel.h"
@@ -770,7 +769,7 @@ void brcms_dpc(unsigned long data)
* Precondition: Since this function is called in brcms_pci_probe() context,
* no locking is required.
*/
-static int brcms_request_fw(struct brcms_info *wl, struct pci_dev *pdev)
+static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
{
int status;
struct device *device = &pdev->dev;
@@ -1022,7 +1021,7 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
spin_lock_init(&wl->isr_lock);
/* prepare ucode */
- if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
+ if (brcms_request_fw(wl, pdev) < 0) {
wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
"%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
brcms_release_fw(wl);
@@ -1043,12 +1042,12 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
wl->pub->ieee_hw = hw;
/* register our interrupt handler */
- if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
+ if (request_irq(pdev->irq, brcms_isr,
IRQF_SHARED, KBUILD_MODNAME, wl)) {
wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
goto fail;
}
- wl->irq = pdev->bus->host_pci->irq;
+ wl->irq = pdev->irq;
/* register module */
brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1098,7 +1097,7 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
- pdev->bus->host_pci->irq);
+ pdev->irq);
if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
(pdev->id.id != BCMA_CORE_80211))
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index b4d92792c502..19db4052c44c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1219,7 +1219,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
}
/* control chip clock to save power, enable dynamic clock or force fast clock */
-static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
+static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, enum bcma_clkmode mode)
{
if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock
@@ -1229,7 +1229,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
*/
if (wlc_hw->clk) {
- if (mode == CLK_FAST) {
+ if (mode == BCMA_CLKMODE_FAST) {
bcma_set32(wlc_hw->d11core,
D11REGOFFS(clk_ctl_st),
CCS_FORCEHT);
@@ -1260,7 +1260,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
~CCS_FORCEHT);
}
}
- wlc_hw->forcefastclk = (mode == CLK_FAST);
+ wlc_hw->forcefastclk = (mode == BCMA_CLKMODE_FAST);
} else {
/* old chips w/o PMU, force HT through cc,
@@ -1567,7 +1567,7 @@ void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
wlc_phy_bw_state_set(wlc_hw->band->pi, bw);
@@ -1576,7 +1576,7 @@ void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
/* restore the clk */
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
static void brcms_b_upd_synthpu(struct brcms_hardware *wlc_hw)
@@ -1882,27 +1882,20 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
return true;
}
-static char *brcms_c_get_macaddr(struct brcms_hardware *wlc_hw)
+static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_ALEN])
{
- enum brcms_srom_id var_id = BRCMS_SROM_MACADDR;
- char *macaddr;
+ struct ssb_sprom *sprom = &wlc_hw->d11core->bus->sprom;
/* If macaddr exists, use it (Sromrev4, CIS, ...). */
- macaddr = getvar(wlc_hw->sih, var_id);
- if (macaddr != NULL)
- return macaddr;
+ if (!is_zero_ether_addr(sprom->il0mac)) {
+ memcpy(etheraddr, sprom->il0mac, 6);
+ return;
+ }
if (wlc_hw->_nbands > 1)
- var_id = BRCMS_SROM_ET1MACADDR;
+ memcpy(etheraddr, sprom->et1mac, 6);
else
- var_id = BRCMS_SROM_IL0MACADDR;
-
- macaddr = getvar(wlc_hw->sih, var_id);
- if (macaddr == NULL)
- wiphy_err(wlc_hw->wlc->wiphy, "wl%d: wlc_get_macaddr: macaddr "
- "getvar(%d) not found\n", wlc_hw->unit, var_id);
-
- return macaddr;
+ memcpy(etheraddr, sprom->il0mac, 6);
}
/* power both the pll and external oscillator on/off */
@@ -1917,9 +1910,6 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
if (!want && wlc_hw->pllreq)
return;
- if (wlc_hw->sih)
- ai_clkctl_xtal(wlc_hw->sih, XTAL | PLL, want);
-
wlc_hw->sbclk = want;
if (!wlc_hw->sbclk) {
wlc_hw->clk = false;
@@ -2004,7 +1994,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/* reset the dma engines except first time thru */
if (bcma_core_is_enabled(wlc_hw->d11core)) {
@@ -2053,7 +2043,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
brcms_c_mctrl_reset(wlc_hw);
if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
brcms_b_phy_reset(wlc_hw);
@@ -2065,7 +2055,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
/* restore the clk setting */
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
/* txfifo sizes need to be modified (increased) since the newer cores
@@ -2218,7 +2208,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
gm |= gc |= BOARD_GPIO_PACTRL;
/* apply to gpiocontrol register */
- ai_gpiocontrol(wlc_hw->sih, gm, gc, GPIO_DRV_PRIORITY);
+ bcma_chipco_gpio_control(&wlc_hw->d11core->bus->drv_cc, gm, gc);
}
static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
@@ -3371,7 +3361,7 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/* disable interrupts */
macintmask = brcms_intrsoff(wlc->wl);
@@ -3405,7 +3395,7 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
/* restore the clk */
if (!fastclk)
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
}
static void brcms_c_set_phy_chanspec(struct brcms_c_info *wlc,
@@ -4436,17 +4426,22 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
uint unit, bool piomode)
{
struct brcms_hardware *wlc_hw;
- char *macaddr = NULL;
uint err = 0;
uint j;
bool wme = false;
struct shared_phy_params sha_params;
struct wiphy *wiphy = wlc->wiphy;
struct pci_dev *pcidev = core->bus->host_pci;
+ struct ssb_sprom *sprom = &core->bus->sprom;
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
- pcidev->vendor,
- pcidev->device);
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI)
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ pcidev->vendor,
+ pcidev->device);
+ else
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ core->bus->boardinfo.vendor,
+ core->bus->boardinfo.type);
wme = true;
@@ -4472,7 +4467,8 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
}
/* verify again the device is supported */
- if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI &&
+ !brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
"vendor/device (0x%x/0x%x)\n",
unit, pcidev->vendor, pcidev->device);
@@ -4480,8 +4476,13 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
goto fail;
}
- wlc_hw->vendorid = pcidev->vendor;
- wlc_hw->deviceid = pcidev->device;
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
+ wlc_hw->vendorid = pcidev->vendor;
+ wlc_hw->deviceid = pcidev->device;
+ } else {
+ wlc_hw->vendorid = core->bus->boardinfo.vendor;
+ wlc_hw->deviceid = core->bus->boardinfo.type;
+ }
wlc_hw->d11core = core;
wlc_hw->corerev = core->id.rev;
@@ -4501,7 +4502,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
* is still false; But it will be called again inside wlc_corereset,
* after d11 is out of reset.
*/
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
if (!brcms_b_validate_chip_access(wlc_hw)) {
@@ -4512,7 +4513,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
}
/* get the board rev, used just below */
- j = getintvar(wlc_hw->sih, BRCMS_SROM_BOARDREV);
+ j = sprom->board_rev;
/* promote srom boardrev of 0xFF to 1 */
if (j == BOARDREV_PROMOTABLE)
j = BOARDREV_PROMOTED;
@@ -4525,11 +4526,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
err = 15;
goto fail;
}
- wlc_hw->sromrev = (u8) getintvar(wlc_hw->sih, BRCMS_SROM_REV);
- wlc_hw->boardflags = (u32) getintvar(wlc_hw->sih,
- BRCMS_SROM_BOARDFLAGS);
- wlc_hw->boardflags2 = (u32) getintvar(wlc_hw->sih,
- BRCMS_SROM_BOARDFLAGS2);
+ wlc_hw->sromrev = sprom->revision;
+ wlc_hw->boardflags = sprom->boardflags_lo + (sprom->boardflags_hi << 16);
+ wlc_hw->boardflags2 = sprom->boardflags2_lo + (sprom->boardflags2_hi << 16);
if (wlc_hw->boardflags & BFL_NOPLLDOWN)
brcms_b_pllreq(wlc_hw, true, BRCMS_PLLREQ_SHARED);
@@ -4702,25 +4701,18 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
*/
/* init etheraddr state variables */
- macaddr = brcms_c_get_macaddr(wlc_hw);
- if (macaddr == NULL) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: macaddr not found\n",
- unit);
- err = 21;
- goto fail;
- }
- if (!mac_pton(macaddr, wlc_hw->etheraddr) ||
- is_broadcast_ether_addr(wlc_hw->etheraddr) ||
+ brcms_c_get_macaddr(wlc_hw, wlc_hw->etheraddr);
+
+ if (is_broadcast_ether_addr(wlc_hw->etheraddr) ||
is_zero_ether_addr(wlc_hw->etheraddr)) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr %s\n",
- unit, macaddr);
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr\n",
+ unit);
err = 22;
goto fail;
}
- BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
- wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
- macaddr);
+ BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x\n",
+ wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih));
return err;
@@ -4770,16 +4762,16 @@ static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
int aa;
uint unit;
int bandtype;
- struct si_pub *sih = wlc->hw->sih;
+ struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
unit = wlc->pub->unit;
bandtype = wlc->band->bandtype;
/* get antennas available */
if (bandtype == BRCM_BAND_5G)
- aa = (s8) getintvar(sih, BRCMS_SROM_AA5G);
+ aa = sprom->ant_available_a;
else
- aa = (s8) getintvar(sih, BRCMS_SROM_AA2G);
+ aa = sprom->ant_available_bg;
if ((aa < 1) || (aa > 15)) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid antennas available in"
@@ -4799,9 +4791,9 @@ static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
/* Compute Antenna Gain */
if (bandtype == BRCM_BAND_5G)
- wlc->band->antgain = (s8) getintvar(sih, BRCMS_SROM_AG1);
+ wlc->band->antgain = sprom->antenna_gain.a1;
else
- wlc->band->antgain = (s8) getintvar(sih, BRCMS_SROM_AG0);
+ wlc->band->antgain = sprom->antenna_gain.a0;
brcms_c_attach_antgain_init(wlc);
@@ -4952,15 +4944,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
callbacks = 0;
- if (wlc_hw->sih) {
- /*
- * detach interrupt sync mechanism since interrupt is disabled
- * and per-port interrupt object may has been freed. this must
- * be done before sb core switch
- */
- ai_pci_sleep(wlc_hw->sih);
- }
-
brcms_b_detach_dmapio(wlc_hw);
band = wlc_hw->band;
@@ -5047,9 +5030,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
*/
brcms_b_xtal(wlc_hw, ON);
ai_clkctl_init(wlc_hw->sih);
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
-
- ai_pci_fixcfg(wlc_hw->sih);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/*
* TODO: test suspend/resume
@@ -5078,8 +5059,6 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
{
- uint coremask;
-
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
/*
@@ -5088,15 +5067,14 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
*/
brcms_b_xtal(wlc_hw, ON);
ai_clkctl_init(wlc_hw->sih);
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
/*
* Configure pci/pcmcia here instead of in brcms_c_attach()
* to allow mfg hotswap: down, hotswap (chip power cycle), up.
*/
- coremask = (1 << wlc_hw->wlc->core->coreidx);
-
- ai_pci_setup(wlc_hw->sih, coremask);
+ bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci, wlc_hw->d11core,
+ true);
/*
* Need to read the hwradio status here to cover the case where the
@@ -5126,7 +5104,7 @@ static int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
/* FULLY enable dynamic power control and d11 core interrupt */
- brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_DYNAMIC);
brcms_intrson(wlc_hw->wlc->wl);
return 0;
}
@@ -5267,7 +5245,7 @@ static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
brcms_intrsoff(wlc_hw->wlc->wl);
/* ensure we're running on the pll clock again */
- brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, BCMA_CLKMODE_FAST);
}
/* down phy at the last of this stage */
callbacks += wlc_phy_down(wlc_hw->band->pi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
deleted file mode 100644
index 7fad6dc19258..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
+++ /dev/null
@@ -1,826 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-
-#include <defs.h>
-#include <soc.h>
-#include <chipcommon.h>
-#include "aiutils.h"
-#include "pub.h"
-#include "nicpci.h"
-
-/* SPROM offsets */
-#define SRSH_ASPM_OFFSET 4 /* word 4 */
-#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */
-#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */
-#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */
-
-#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */
-#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
-#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
-#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */
-#define SRSH_BD_OFFSET 6 /* word 6 */
-
-/* chipcontrol */
-#define CHIPCTRL_4321_PLL_DOWN 0x800000/* serdes PLL down override */
-
-/* MDIO control */
-#define MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
-#define MDIOCTL_DIVISOR_VAL 0x2
-#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequnce */
-#define MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */
-
-/* MDIO Data */
-#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */
-#define MDIODATA_TA 0x00020000 /* Turnaround */
-
-#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
-#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
-#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
-#define MDIODATA_DEVADDR_MASK 0x0f800000
- /* Physmedia devaddr Mask */
-
-/* MDIO Data for older revisions < 10 */
-#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift */
-#define MDIODATA_REGADDR_MASK_OLD 0x003c0000
- /* Regaddr Mask */
-#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift */
-#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000
- /* Physmedia devaddr Mask */
-
-/* Transactions flags */
-#define MDIODATA_WRITE 0x10000000
-#define MDIODATA_READ 0x20000000
-#define MDIODATA_START 0x40000000
-
-#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
-#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
-
-/* serdes regs (rev < 10) */
-#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
-#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
-#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
-
-/* SERDES RX registers */
-#define SERDES_RX_CTRL 1 /* Rx cntrl */
-#define SERDES_RX_TIMER1 2 /* Rx Timer1 */
-#define SERDES_RX_CDR 6 /* CDR */
-#define SERDES_RX_CDRBW 7 /* CDR BW */
-/* SERDES RX control register */
-#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
-#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
-
-/* SERDES PLL registers */
-#define SERDES_PLL_CTRL 1 /* PLL control reg */
-#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
-
-/* Linkcontrol reg offset in PCIE Cap */
-#define PCIE_CAP_LINKCTRL_OFFSET 16 /* offset in pcie cap */
-#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */
-#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */
-#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */
-
-#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */
-
-/* Power management threshold */
-#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */
-#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */
-#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */
-#define PCIE_ASPMTIMER_EXTEND 0x01000000
- /* > rev7:
- * enable extend ASPM timer
- */
-
-/* different register spaces to access thru pcie indirect access */
-#define PCIE_CONFIGREGS 1 /* Access to config space */
-#define PCIE_PCIEREGS 2 /* Access to pcie registers */
-
-/* PCIE protocol PHY diagnostic registers */
-#define PCIE_PLP_STATUSREG 0x204 /* Status */
-
-/* Status reg PCIE_PLP_STATUSREG */
-#define PCIE_PLP_POLARITYINV_STAT 0x10
-
-/* PCIE protocol DLLP diagnostic registers */
-#define PCIE_DLLP_LCREG 0x100 /* Link Control */
-#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
-
-/* PCIE protocol TLP diagnostic registers */
-#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */
-
-/* Sonics to PCI translation types */
-#define SBTOPCI_PREF 0x4 /* prefetch enable */
-#define SBTOPCI_BURST 0x8 /* burst enable */
-#define SBTOPCI_RC_READMULTI 0x20 /* memory read multiple */
-
-#define PCI_CLKRUN_DSBL 0x8000 /* Bit 15 forceClkrun */
-
-/* PCI core index in SROM shadow area */
-#define SRSH_PI_OFFSET 0 /* first word */
-#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
-#define SRSH_PI_SHIFT 12 /* bit 15:12 */
-
-#define PCIREGOFFS(field) offsetof(struct sbpciregs, field)
-#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field)
-
-/* Sonics side: PCI core and host control registers */
-struct sbpciregs {
- u32 control; /* PCI control */
- u32 PAD[3];
- u32 arbcontrol; /* PCI arbiter control */
- u32 clkrun; /* Clkrun Control (>=rev11) */
- u32 PAD[2];
- u32 intstatus; /* Interrupt status */
- u32 intmask; /* Interrupt mask */
- u32 sbtopcimailbox; /* Sonics to PCI mailbox */
- u32 PAD[9];
- u32 bcastaddr; /* Sonics broadcast address */
- u32 bcastdata; /* Sonics broadcast data */
- u32 PAD[2];
- u32 gpioin; /* ro: gpio input (>=rev2) */
- u32 gpioout; /* rw: gpio output (>=rev2) */
- u32 gpioouten; /* rw: gpio output enable (>= rev2) */
- u32 gpiocontrol; /* rw: gpio control (>= rev2) */
- u32 PAD[36];
- u32 sbtopci0; /* Sonics to PCI translation 0 */
- u32 sbtopci1; /* Sonics to PCI translation 1 */
- u32 sbtopci2; /* Sonics to PCI translation 2 */
- u32 PAD[189];
- u32 pcicfg[4][64]; /* 0x400 - 0x7FF, PCI Cfg Space (>=rev8) */
- u16 sprom[36]; /* SPROM shadow Area */
- u32 PAD[46];
-};
-
-/* SB side: PCIE core and host control registers */
-struct sbpcieregs {
- u32 control; /* host mode only */
- u32 PAD[2];
- u32 biststatus; /* bist Status: 0x00C */
- u32 gpiosel; /* PCIE gpio sel: 0x010 */
- u32 gpioouten; /* PCIE gpio outen: 0x14 */
- u32 PAD[2];
- u32 intstatus; /* Interrupt status: 0x20 */
- u32 intmask; /* Interrupt mask: 0x24 */
- u32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */
- u32 PAD[53];
- u32 sbtopcie0; /* sb to pcie translation 0: 0x100 */
- u32 sbtopcie1; /* sb to pcie translation 1: 0x104 */
- u32 sbtopcie2; /* sb to pcie translation 2: 0x108 */
- u32 PAD[5];
-
- /* pcie core supports in direct access to config space */
- u32 configaddr; /* pcie config space access: Address field: 0x120 */
- u32 configdata; /* pcie config space access: Data field: 0x124 */
-
- /* mdio access to serdes */
- u32 mdiocontrol; /* controls the mdio access: 0x128 */
- u32 mdiodata; /* Data to the mdio access: 0x12c */
-
- /* pcie protocol phy/dllp/tlp register indirect access mechanism */
- u32 pcieindaddr; /* indirect access to
- * the internal register: 0x130
- */
- u32 pcieinddata; /* Data to/from the internal regsiter: 0x134 */
-
- u32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */
- u32 PAD[177];
- u32 pciecfg[4][64]; /* 0x400 - 0x7FF, PCIE Cfg Space */
- u16 sprom[64]; /* SPROM shadow Area */
-};
-
-struct pcicore_info {
- struct bcma_device *core;
- struct si_pub *sih; /* System interconnect handle */
- struct pci_dev *dev;
- u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
- * in the config space
- */
- bool pcie_pr42767;
- u8 pcie_polarity;
- u8 pcie_war_aspm_ovr; /* Override ASPM/Clkreq settings */
-
- u8 pmecap_offset; /* PM Capability offset in the config space */
- bool pmecap; /* Capable of generating PME */
-};
-
-#define PCIE_ASPM(sih) \
- ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \
- ((ai_get_buscorerev(sih) >= 3) && \
- (ai_get_buscorerev(sih) <= 5)))
-
-
-/* delay needed between the mdio control/ mdiodata register data access */
-static void pr28829_delay(void)
-{
- udelay(10);
-}
-
-/* Initialize the PCI core.
- * It's caller's responsibility to make sure that this is done only once
- */
-struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
-{
- struct pcicore_info *pi;
-
- /* alloc struct pcicore_info */
- pi = kzalloc(sizeof(struct pcicore_info), GFP_ATOMIC);
- if (pi == NULL)
- return NULL;
-
- pi->sih = sih;
- pi->dev = core->bus->host_pci;
- pi->core = core;
-
- if (core->id.id == PCIE_CORE_ID) {
- u8 cap_ptr;
- cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
- NULL, NULL);
- pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
- }
- return pi;
-}
-
-void pcicore_deinit(struct pcicore_info *pch)
-{
- kfree(pch);
-}
-
-/* return cap_offset if requested capability exists in the PCI config space */
-/* Note that it's caller's responsibility to make sure it's a pci bus */
-u8
-pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
- unsigned char *buf, u32 *buflen)
-{
- u8 cap_id;
- u8 cap_ptr = 0;
- u32 bufsize;
- u8 byte_val;
-
- /* check for Header type 0 */
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &byte_val);
- if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
- goto end;
-
- /* check if the capability pointer field exists */
- pci_read_config_byte(dev, PCI_STATUS, &byte_val);
- if (!(byte_val & PCI_STATUS_CAP_LIST))
- goto end;
-
- pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
- /* check if the capability pointer is 0x00 */
- if (cap_ptr == 0x00)
- goto end;
-
- /* loop thru the capability list
- * and see if the pcie capability exists
- */
-
- pci_read_config_byte(dev, cap_ptr, &cap_id);
-
- while (cap_id != req_cap_id) {
- pci_read_config_byte(dev, cap_ptr + 1, &cap_ptr);
- if (cap_ptr == 0x00)
- break;
- pci_read_config_byte(dev, cap_ptr, &cap_id);
- }
- if (cap_id != req_cap_id)
- goto end;
-
- /* found the caller requested capability */
- if (buf != NULL && buflen != NULL) {
- u8 cap_data;
-
- bufsize = *buflen;
- if (!bufsize)
- goto end;
- *buflen = 0;
- /* copy the capability data excluding cap ID and next ptr */
- cap_data = cap_ptr + 2;
- if ((bufsize + cap_data) > PCI_SZPCR)
- bufsize = PCI_SZPCR - cap_data;
- *buflen = bufsize;
- while (bufsize--) {
- pci_read_config_byte(dev, cap_data, buf);
- cap_data++;
- buf++;
- }
- }
-end:
- return cap_ptr;
-}
-
-/* ***** Register Access API */
-static uint
-pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
-{
- uint retval = 0xFFFFFFFF;
-
- switch (addrtype) {
- case PCIE_CONFIGREGS:
- bcma_write32(core, PCIEREGOFFS(configaddr), offset);
- (void)bcma_read32(core, PCIEREGOFFS(configaddr));
- retval = bcma_read32(core, PCIEREGOFFS(configdata));
- break;
- case PCIE_PCIEREGS:
- bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
- (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
- retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
- break;
- }
-
- return retval;
-}
-
-static uint pcie_writereg(struct bcma_device *core, uint addrtype,
- uint offset, uint val)
-{
- switch (addrtype) {
- case PCIE_CONFIGREGS:
- bcma_write32(core, PCIEREGOFFS(configaddr), offset);
- bcma_write32(core, PCIEREGOFFS(configdata), val);
- break;
- case PCIE_PCIEREGS:
- bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
- bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
-{
- uint mdiodata, i = 0;
- uint pcie_serdes_spinwait = 200;
-
- mdiodata = (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
- (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
- (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
- (blk << 4));
- bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
-
- pr28829_delay();
- /* retry till the transaction is complete */
- while (i < pcie_serdes_spinwait) {
- if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
- MDIOCTL_ACCESS_DONE)
- break;
-
- udelay(1000);
- i++;
- }
-
- if (i >= pcie_serdes_spinwait)
- return false;
-
- return true;
-}
-
-static int
-pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
- uint *val)
-{
- uint mdiodata;
- uint i = 0;
- uint pcie_serdes_spinwait = 10;
-
- /* enable mdio access to SERDES */
- bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
- MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
-
- if (ai_get_buscorerev(pi->sih) >= 10) {
- /* new serdes is slower in rw,
- * using two layers of reg address mapping
- */
- if (!pcie_mdiosetblock(pi, physmedia))
- return 1;
- mdiodata = ((MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
- (regaddr << MDIODATA_REGADDR_SHF));
- pcie_serdes_spinwait *= 20;
- } else {
- mdiodata = ((physmedia << MDIODATA_DEVADDR_SHF_OLD) |
- (regaddr << MDIODATA_REGADDR_SHF_OLD));
- }
-
- if (!write)
- mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
- else
- mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
- *val);
-
- bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
-
- pr28829_delay();
-
- /* retry till the transaction is complete */
- while (i < pcie_serdes_spinwait) {
- if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
- MDIOCTL_ACCESS_DONE) {
- if (!write) {
- pr28829_delay();
- *val = (bcma_read32(pi->core,
- PCIEREGOFFS(mdiodata)) &
- MDIODATA_MASK);
- }
- /* Disable mdio access to SERDES */
- bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
- return 0;
- }
- udelay(1000);
- i++;
- }
-
- /* Timed out. Disable mdio access to SERDES. */
- bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
- return 1;
-}
-
-/* use the mdio interface to read from mdio slaves */
-static int
-pcie_mdioread(struct pcicore_info *pi, uint physmedia, uint regaddr,
- uint *regval)
-{
- return pcie_mdioop(pi, physmedia, regaddr, false, regval);
-}
-
-/* use the mdio interface to write to mdio slaves */
-static int
-pcie_mdiowrite(struct pcicore_info *pi, uint physmedia, uint regaddr, uint val)
-{
- return pcie_mdioop(pi, physmedia, regaddr, true, &val);
-}
-
-/* ***** Support functions ***** */
-static u8 pcie_clkreq(struct pcicore_info *pi, u32 mask, u32 val)
-{
- u32 reg_val;
- u8 offset;
-
- offset = pi->pciecap_lcreg_offset;
- if (!offset)
- return 0;
-
- pci_read_config_dword(pi->dev, offset, &reg_val);
- /* set operation */
- if (mask) {
- if (val)
- reg_val |= PCIE_CLKREQ_ENAB;
- else
- reg_val &= ~PCIE_CLKREQ_ENAB;
- pci_write_config_dword(pi->dev, offset, reg_val);
- pci_read_config_dword(pi->dev, offset, &reg_val);
- }
- if (reg_val & PCIE_CLKREQ_ENAB)
- return 1;
- else
- return 0;
-}
-
-static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
-{
- u32 w;
- struct si_pub *sih = pi->sih;
-
- if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
- ai_get_buscorerev(sih) < 7)
- return;
-
- w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
- if (extend)
- w |= PCIE_ASPMTIMER_EXTEND;
- else
- w &= ~PCIE_ASPMTIMER_EXTEND;
- pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
- w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
-}
-
-/* centralized clkreq control policy */
-static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
-{
- struct si_pub *sih = pi->sih;
-
- switch (state) {
- case SI_DOATTACH:
- if (PCIE_ASPM(sih))
- pcie_clkreq(pi, 1, 0);
- break;
- case SI_PCIDOWN:
- /* turn on serdes PLL down */
- if (ai_get_buscorerev(sih) == 6) {
- ai_cc_reg(sih,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_cc_reg(sih,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0);
- } else if (pi->pcie_pr42767) {
- pcie_clkreq(pi, 1, 1);
- }
- break;
- case SI_PCIUP:
- /* turn off serdes PLL down */
- if (ai_get_buscorerev(sih) == 6) {
- ai_cc_reg(sih,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_cc_reg(sih,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0x40);
- } else if (PCIE_ASPM(sih)) { /* disable clkreq */
- pcie_clkreq(pi, 1, 0);
- }
- break;
- }
-}
-
-/* ***** PCI core WARs ***** */
-/* Done only once at attach time */
-static void pcie_war_polarity(struct pcicore_info *pi)
-{
- u32 w;
-
- if (pi->pcie_polarity != 0)
- return;
-
- w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
-
- /* Detect the current polarity at attach and force that polarity and
- * disable changing the polarity
- */
- if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
- pi->pcie_polarity = SERDES_RX_CTRL_FORCE;
- else
- pi->pcie_polarity = (SERDES_RX_CTRL_FORCE |
- SERDES_RX_CTRL_POLARITY);
-}
-
-/* enable ASPM and CLKREQ if srom doesn't have it */
-/* Needs to happen when update to shadow SROM is needed
- * : Coming out of 'standby'/'hibernate'
- * : If pcie_war_aspm_ovr state changed
- */
-static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
-{
- struct si_pub *sih = pi->sih;
- u16 val16;
- u32 w;
-
- if (!PCIE_ASPM(sih))
- return;
-
- /* bypass this on QT or VSIM */
- val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
-
- val16 &= ~SRSH_ASPM_ENB;
- if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
- val16 |= SRSH_ASPM_ENB;
- else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
- val16 |= SRSH_ASPM_L1_ENB;
- else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
- val16 |= SRSH_ASPM_L0s_ENB;
-
- bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
-
- pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
- w &= ~PCIE_ASPM_ENAB;
- w |= pi->pcie_war_aspm_ovr;
- pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
-
- val16 = bcma_read16(pi->core,
- PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
-
- if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
- val16 |= SRSH_CLKREQ_ENB;
- pi->pcie_pr42767 = true;
- } else
- val16 &= ~SRSH_CLKREQ_ENB;
-
- bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
- val16);
-}
-
-/* Apply the polarity determined at the start */
-/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_serdes(struct pcicore_info *pi)
-{
- u32 w = 0;
-
- if (pi->pcie_polarity != 0)
- pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL,
- pi->pcie_polarity);
-
- pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
- if (w & PLL_CTRL_FREQDET_EN) {
- w &= ~PLL_CTRL_FREQDET_EN;
- pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
- }
-}
-
-/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
-/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_misc_config_fixup(struct pcicore_info *pi)
-{
- u16 val16;
-
- val16 = bcma_read16(pi->core,
- PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
-
- if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
- val16 |= SRSH_L23READY_EXIT_NOPERST;
- bcma_write16(pi->core,
- PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
- }
-}
-
-/* quick hack for testing */
-/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_noplldown(struct pcicore_info *pi)
-{
- /* turn off serdes PLL down */
- ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
- CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
-
- /* clear srom shadow backdoor */
- bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
-}
-
-/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_pci_setup(struct pcicore_info *pi)
-{
- struct si_pub *sih = pi->sih;
- u32 w;
-
- if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
- w = pcie_readreg(pi->core, PCIE_PCIEREGS,
- PCIE_TLP_WORKAROUNDSREG);
- w |= 0x8;
- pcie_writereg(pi->core, PCIE_PCIEREGS,
- PCIE_TLP_WORKAROUNDSREG, w);
- }
-
- if (ai_get_buscorerev(sih) == 1) {
- w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
- w |= 0x40;
- pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
- }
-
- if (ai_get_buscorerev(sih) == 0) {
- pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
- pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
- pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
- } else if (PCIE_ASPM(sih)) {
- /* Change the L1 threshold for better performance */
- w = pcie_readreg(pi->core, PCIE_PCIEREGS,
- PCIE_DLLP_PMTHRESHREG);
- w &= ~PCIE_L1THRESHOLDTIME_MASK;
- w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
- pcie_writereg(pi->core, PCIE_PCIEREGS,
- PCIE_DLLP_PMTHRESHREG, w);
-
- pcie_war_serdes(pi);
-
- pcie_war_aspm_clkreq(pi);
- } else if (ai_get_buscorerev(pi->sih) == 7)
- pcie_war_noplldown(pi);
-
- /* Note that the fix is actually in the SROM,
- * that's why this is open-ended
- */
- if (ai_get_buscorerev(pi->sih) >= 6)
- pcie_misc_config_fixup(pi);
-}
-
-/* ***** Functions called during driver state changes ***** */
-void pcicore_attach(struct pcicore_info *pi, int state)
-{
- struct si_pub *sih = pi->sih;
- u32 bfl2 = (u32)getintvar(sih, BRCMS_SROM_BOARDFLAGS2);
-
- /* Determine if this board needs override */
- if (PCIE_ASPM(sih)) {
- if (bfl2 & BFL2_PCIEWAR_OVR)
- pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
- else
- pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
- }
-
- /* These need to happen in this order only */
- pcie_war_polarity(pi);
-
- pcie_war_serdes(pi);
-
- pcie_war_aspm_clkreq(pi);
-
- pcie_clkreq_upd(pi, state);
-
-}
-
-void pcicore_hwup(struct pcicore_info *pi)
-{
- if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
- return;
-
- pcie_war_pci_setup(pi);
-}
-
-void pcicore_up(struct pcicore_info *pi, int state)
-{
- if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
- return;
-
- /* Restore L1 timer for better performance */
- pcie_extendL1timer(pi, true);
-
- pcie_clkreq_upd(pi, state);
-}
-
-/* When the device is going to enter D3 state
- * (or the system is going to enter S3/S4 states)
- */
-void pcicore_sleep(struct pcicore_info *pi)
-{
- u32 w;
-
- if (!pi || !PCIE_ASPM(pi->sih))
- return;
-
- pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
- w &= ~PCIE_CAP_LCREG_ASPML1;
- pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
-
- pi->pcie_pr42767 = false;
-}
-
-void pcicore_down(struct pcicore_info *pi, int state)
-{
- if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
- return;
-
- pcie_clkreq_upd(pi, state);
-
- /* Reduce L1 timer for better power savings */
- pcie_extendL1timer(pi, false);
-}
-
-void pcicore_fixcfg(struct pcicore_info *pi)
-{
- struct bcma_device *core = pi->core;
- u16 val16;
- uint regoff;
-
- switch (pi->core->id.id) {
- case BCMA_CORE_PCI:
- regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
- break;
-
- case BCMA_CORE_PCIE:
- regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
- break;
-
- default:
- return;
- }
-
- val16 = bcma_read16(pi->core, regoff);
- if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
- (u16)core->core_index) {
- val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
- (val16 & ~SRSH_PI_MASK);
- bcma_write16(pi->core, regoff, val16);
- }
-}
-
-/* precondition: current core is pci core */
-void
-pcicore_pci_setup(struct pcicore_info *pi)
-{
- bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
- SBTOPCI_PREF | SBTOPCI_BURST);
-
- if (pi->core->id.rev >= 11) {
- bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
- SBTOPCI_RC_READMULTI);
- bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
- (void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
- }
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
deleted file mode 100644
index 9fc3ead540a8..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_NICPCI_H_
-#define _BRCM_NICPCI_H_
-
-#include "types.h"
-
-/* PCI configuration address space size */
-#define PCI_SZPCR 256
-
-/* Brcm PCI configuration registers */
-/* backplane address space accessed by BAR0 */
-#define PCI_BAR0_WIN 0x80
-/* sprom property control */
-#define PCI_SPROM_CONTROL 0x88
-/* mask of PCI and other cores interrupts */
-#define PCI_INT_MASK 0x94
-/* backplane core interrupt mask bits offset */
-#define PCI_SBIM_SHIFT 8
-/* backplane address space accessed by second 4KB of BAR0 */
-#define PCI_BAR0_WIN2 0xac
-/* pci config space gpio input (>=rev3) */
-#define PCI_GPIO_IN 0xb0
-/* pci config space gpio output (>=rev3) */
-#define PCI_GPIO_OUT 0xb4
-/* pci config space gpio output enable (>=rev3) */
-#define PCI_GPIO_OUTEN 0xb8
-
-/* bar0 + 4K accesses external sprom */
-#define PCI_BAR0_SPROM_OFFSET (4 * 1024)
-/* bar0 + 6K accesses pci core registers */
-#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
-/*
- * pci core SB registers are at the end of the
- * 8KB window, so their address is the "regular"
- * address plus 4K
- */
-#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
-/* bar0 window size Match with corerev 13 */
-#define PCI_BAR0_WINSZ (16 * 1024)
-/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
-/* bar0 + 8K accesses pci/pcie core registers */
-#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
-/* bar0 + 12K accesses chipc core registers */
-#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
-
-struct sbpciregs;
-struct sbpcieregs;
-
-extern struct pcicore_info *pcicore_init(struct si_pub *sih,
- struct bcma_device *core);
-extern void pcicore_deinit(struct pcicore_info *pch);
-extern void pcicore_attach(struct pcicore_info *pch, int state);
-extern void pcicore_hwup(struct pcicore_info *pch);
-extern void pcicore_up(struct pcicore_info *pch, int state);
-extern void pcicore_sleep(struct pcicore_info *pch);
-extern void pcicore_down(struct pcicore_info *pch, int state);
-extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
- unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg(struct pcicore_info *pch);
-extern void pcicore_pci_setup(struct pcicore_info *pch);
-
-#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
deleted file mode 100644
index f1ca12625860..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/io.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-
-#include <brcm_hw_ids.h>
-#include <chipcommon.h>
-#include "aiutils.h"
-#include "otp.h"
-
-#define OTPS_GUP_MASK 0x00000f00
-#define OTPS_GUP_SHIFT 8
-/* h/w subregion is programmed */
-#define OTPS_GUP_HW 0x00000100
-/* s/w subregion is programmed */
-#define OTPS_GUP_SW 0x00000200
-/* chipid/pkgopt subregion is programmed */
-#define OTPS_GUP_CI 0x00000400
-/* fuse subregion is programmed */
-#define OTPS_GUP_FUSE 0x00000800
-
-/* Fields in otpprog in rev >= 21 */
-#define OTPP_COL_MASK 0x000000ff
-#define OTPP_COL_SHIFT 0
-#define OTPP_ROW_MASK 0x0000ff00
-#define OTPP_ROW_SHIFT 8
-#define OTPP_OC_MASK 0x0f000000
-#define OTPP_OC_SHIFT 24
-#define OTPP_READERR 0x10000000
-#define OTPP_VALUE_MASK 0x20000000
-#define OTPP_VALUE_SHIFT 29
-#define OTPP_START_BUSY 0x80000000
-#define OTPP_READ 0x40000000
-
-/* Opcodes for OTPP_OC field */
-#define OTPPOC_READ 0
-#define OTPPOC_BIT_PROG 1
-#define OTPPOC_VERIFY 3
-#define OTPPOC_INIT 4
-#define OTPPOC_SET 5
-#define OTPPOC_RESET 6
-#define OTPPOC_OCST 7
-#define OTPPOC_ROW_LOCK 8
-#define OTPPOC_PRESCN_TEST 9
-
-#define OTPTYPE_IPX(ccrev) ((ccrev) == 21 || (ccrev) >= 23)
-
-#define OTPP_TRIES 10000000 /* # of tries for OTPP */
-
-#define MAXNUMRDES 9 /* Maximum OTP redundancy entries */
-
-/* Fixed size subregions sizes in words */
-#define OTPGU_CI_SZ 2
-
-struct otpinfo;
-
-/* OTP function struct */
-struct otp_fn_s {
- int (*init)(struct si_pub *sih, struct otpinfo *oi);
- int (*read_region)(struct otpinfo *oi, int region, u16 *data,
- uint *wlen);
-};
-
-struct otpinfo {
- struct bcma_device *core; /* chipc core */
- const struct otp_fn_s *fn; /* OTP functions */
- struct si_pub *sih; /* Saved sb handle */
-
- /* IPX OTP section */
- u16 wsize; /* Size of otp in words */
- u16 rows; /* Geometry */
- u16 cols; /* Geometry */
- u32 status; /* Flag bits (lock/prog/rv).
- * (Reflected only when OTP is power cycled)
- */
- u16 hwbase; /* hardware subregion offset */
- u16 hwlim; /* hardware subregion boundary */
- u16 swbase; /* software subregion offset */
- u16 swlim; /* software subregion boundary */
- u16 fbase; /* fuse subregion offset */
- u16 flim; /* fuse subregion boundary */
- int otpgu_base; /* offset to General Use Region */
-};
-
-/* OTP layout */
-/* CC revs 21, 24 and 27 OTP General Use Region word offset */
-#define REVA4_OTPGU_BASE 12
-
-/* CC revs 23, 25, 26, 28 and above OTP General Use Region word offset */
-#define REVB8_OTPGU_BASE 20
-
-/* CC rev 36 OTP General Use Region word offset */
-#define REV36_OTPGU_BASE 12
-
-/* Subregion word offsets in General Use region */
-#define OTPGU_HSB_OFF 0
-#define OTPGU_SFB_OFF 1
-#define OTPGU_CI_OFF 2
-#define OTPGU_P_OFF 3
-#define OTPGU_SROM_OFF 4
-
-/* Flag bit offsets in General Use region */
-#define OTPGU_HWP_OFF 60
-#define OTPGU_SWP_OFF 61
-#define OTPGU_CIP_OFF 62
-#define OTPGU_FUSEP_OFF 63
-#define OTPGU_CIP_MSK 0x4000
-#define OTPGU_P_MSK 0xf000
-#define OTPGU_P_SHIFT (OTPGU_HWP_OFF % 16)
-
-/* OTP Size */
-#define OTP_SZ_FU_324 ((roundup(324, 8))/8) /* 324 bits */
-#define OTP_SZ_FU_288 (288/8) /* 288 bits */
-#define OTP_SZ_FU_216 (216/8) /* 216 bits */
-#define OTP_SZ_FU_72 (72/8) /* 72 bits */
-#define OTP_SZ_CHECKSUM (16/8) /* 16 bits */
-#define OTP4315_SWREG_SZ 178 /* 178 bytes */
-#define OTP_SZ_FU_144 (144/8) /* 144 bits */
-
-static u16
-ipxotp_otpr(struct otpinfo *oi, uint wn)
-{
- return bcma_read16(oi->core,
- CHIPCREGOFFS(sromotp[wn]));
-}
-
-/*
- * Calculate max HW/SW region byte size by subtracting fuse region
- * and checksum size, osizew is oi->wsize (OTP size - GU size) in words
- */
-static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
-{
- int ret = 0;
-
- switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
- break;
- case BCM4313_CHIP_ID:
- ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
- break;
- default:
- break; /* Don't know about this chip */
- }
-
- return ret;
-}
-
-static void _ipxotp_init(struct otpinfo *oi)
-{
- uint k;
- u32 otpp, st;
- int ccrev = ai_get_ccrev(oi->sih);
-
-
- /*
- * record word offset of General Use Region
- * for various chipcommon revs
- */
- if (ccrev == 21 || ccrev == 24
- || ccrev == 27) {
- oi->otpgu_base = REVA4_OTPGU_BASE;
- } else if (ccrev == 36) {
- /*
- * OTP size greater than or equal to 2KB (128 words),
- * otpgu_base is similar to rev23
- */
- if (oi->wsize >= 128)
- oi->otpgu_base = REVB8_OTPGU_BASE;
- else
- oi->otpgu_base = REV36_OTPGU_BASE;
- } else if (ccrev == 23 || ccrev >= 25) {
- oi->otpgu_base = REVB8_OTPGU_BASE;
- }
-
- /* First issue an init command so the status is up to date */
- otpp =
- OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
-
- bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
- st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
- for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
- st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
- if (k >= OTPP_TRIES)
- return;
-
- /* Read OTP lock bits and subregion programmed indication bits */
- oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
-
- if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
- || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
- u32 p_bits;
- p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
- OTPGU_P_MSK) >> OTPGU_P_SHIFT;
- oi->status |= (p_bits << OTPS_GUP_SHIFT);
- }
-
- /*
- * h/w region base and fuse region limit are fixed to
- * the top and the bottom of the general use region.
- * Everything else can be flexible.
- */
- oi->hwbase = oi->otpgu_base + OTPGU_SROM_OFF;
- oi->hwlim = oi->wsize;
- if (oi->status & OTPS_GUP_HW) {
- oi->hwlim =
- ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
- oi->swbase = oi->hwlim;
- } else
- oi->swbase = oi->hwbase;
-
- /* subtract fuse and checksum from beginning */
- oi->swlim = ipxotp_max_rgnsz(oi->sih, oi->wsize) / 2;
-
- if (oi->status & OTPS_GUP_SW) {
- oi->swlim =
- ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
- oi->fbase = oi->swlim;
- } else
- oi->fbase = oi->swbase;
-
- oi->flim = oi->wsize;
-}
-
-static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
-{
- /* Make sure we're running IPX OTP */
- if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
- return -EBADE;
-
- /* Make sure OTP is not disabled */
- if (ai_is_otp_disabled(sih))
- return -EBADE;
-
- /* Check for otp size */
- switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
- case 0:
- /* Nothing there */
- return -EBADE;
- case 1: /* 32x64 */
- oi->rows = 32;
- oi->cols = 64;
- oi->wsize = 128;
- break;
- case 2: /* 64x64 */
- oi->rows = 64;
- oi->cols = 64;
- oi->wsize = 256;
- break;
- case 5: /* 96x64 */
- oi->rows = 96;
- oi->cols = 64;
- oi->wsize = 384;
- break;
- case 7: /* 16x64 *//* 1024 bits */
- oi->rows = 16;
- oi->cols = 64;
- oi->wsize = 64;
- break;
- default:
- /* Don't know the geometry */
- return -EBADE;
- }
-
- /* Retrieve OTP region info */
- _ipxotp_init(oi);
- return 0;
-}
-
-static int
-ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
-{
- uint base, i, sz;
-
- /* Validate region selection */
- switch (region) {
- case OTP_HW_RGN:
- sz = (uint) oi->hwlim - oi->hwbase;
- if (!(oi->status & OTPS_GUP_HW)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->hwbase;
- break;
- case OTP_SW_RGN:
- sz = ((uint) oi->swlim - oi->swbase);
- if (!(oi->status & OTPS_GUP_SW)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->swbase;
- break;
- case OTP_CI_RGN:
- sz = OTPGU_CI_SZ;
- if (!(oi->status & OTPS_GUP_CI)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->otpgu_base + OTPGU_CI_OFF;
- break;
- case OTP_FUSE_RGN:
- sz = (uint) oi->flim - oi->fbase;
- if (!(oi->status & OTPS_GUP_FUSE)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->fbase;
- break;
- case OTP_ALL_RGN:
- sz = ((uint) oi->flim - oi->hwbase);
- if (!(oi->status & (OTPS_GUP_HW | OTPS_GUP_SW))) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->hwbase;
- break;
- default:
- return -EINVAL;
- }
-
- /* Read the data */
- for (i = 0; i < sz; i++)
- data[i] = ipxotp_otpr(oi, base + i);
-
- *wlen = sz;
- return 0;
-}
-
-static const struct otp_fn_s ipxotp_fn = {
- (int (*)(struct si_pub *, struct otpinfo *)) ipxotp_init,
- (int (*)(struct otpinfo *, int, u16 *, uint *)) ipxotp_read_region,
-};
-
-static int otp_init(struct si_pub *sih, struct otpinfo *oi)
-{
- int ret;
-
- memset(oi, 0, sizeof(struct otpinfo));
-
- oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
-
- if (OTPTYPE_IPX(ai_get_ccrev(sih)))
- oi->fn = &ipxotp_fn;
-
- if (oi->fn == NULL)
- return -EBADE;
-
- oi->sih = sih;
-
- ret = (oi->fn->init)(sih, oi);
-
- return ret;
-}
-
-int
-otp_read_region(struct si_pub *sih, int region, u16 *data, uint *wlen) {
- struct otpinfo otpinfo;
- struct otpinfo *oi = &otpinfo;
- int err = 0;
-
- if (ai_is_otp_disabled(sih)) {
- err = -EPERM;
- goto out;
- }
-
- err = otp_init(sih, oi);
- if (err)
- goto out;
-
- err = ((oi)->fn->read_region)(oi, region, data, wlen);
-
- out:
- return err;
-}
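
The exported otp_read_region() above reports the required word count through *wlen and returns -EOVERFLOW when the caller's buffer is too small, so a two-pass probe is possible. The wrapper below sketches such a caller, assuming the now-removed otp.h prototypes; brcms_read_hw_region() and its kcalloc-based buffer handling are hypothetical, and real callers may simply size the buffer up front.

#include <linux/slab.h>
#include "otp.h"		/* removed header: otp_read_region(), OTP_HW_RGN */

/* hypothetical caller sketch, not code from the driver */
static u16 *brcms_read_hw_region(struct si_pub *sih, uint *nwords)
{
	u16 *buf;
	uint wlen = 0;
	int err;

	/* first pass: learn how many 16-bit words the h/w region holds */
	err = otp_read_region(sih, OTP_HW_RGN, NULL, &wlen);
	if (err != -EOVERFLOW && err != 0)
		return NULL;		/* region not programmed or OTP disabled */

	buf = kcalloc(wlen, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* second pass: copy the region into the sized buffer */
	err = otp_read_region(sih, OTP_HW_RGN, buf, &wlen);
	if (err) {
		kfree(buf);
		return NULL;
	}

	*nwords = wlen;
	return buf;
}
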
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.h b/drivers/net/wireless/brcm80211/brcmsmac/otp.h
deleted file mode 100644
index 6b6d31cf9569..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_OTP_H_
-#define _BRCM_OTP_H_
-
-#include "types.h"
-
-/* OTP regions */
-#define OTP_HW_RGN 1
-#define OTP_SW_RGN 2
-#define OTP_CI_RGN 4
-#define OTP_FUSE_RGN 8
-/* From h/w region to end of OTP including checksum */
-#define OTP_ALL_RGN 0xf
-
-/* OTP Size */
-#define OTP_SZ_MAX (6144/8) /* maximum bytes in one CIS */
-
-extern int otp_read_region(struct si_pub *sih, int region, u16 *data,
- uint *wlen);
-
-#endif /* _BRCM_OTP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 0fce56235f38..abfd78822fb8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -4817,28 +4817,23 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
s8 txpwr = 0;
int i;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- struct phy_shim_info *shim = pi->sh->physhim;
+ struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
u16 cckpo = 0;
u32 offset_ofdm, offset_mcs;
- pi_lcn->lcnphy_tr_isolation_mid =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_TRISO2G);
+ pi_lcn->lcnphy_tr_isolation_mid = sprom->fem.ghz2.tr_iso;
- pi_lcn->lcnphy_rx_power_offset =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_RXPO2G);
+ pi_lcn->lcnphy_rx_power_offset = sprom->rxpo2g;
- pi->txpa_2g[0] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B0);
- pi->txpa_2g[1] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B1);
- pi->txpa_2g[2] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B2);
+ pi->txpa_2g[0] = sprom->pa0b0;
+ pi->txpa_2g[1] = sprom->pa0b1;
+ pi->txpa_2g[2] = sprom->pa0b2;
- pi_lcn->lcnphy_rssi_vf =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISMF2G);
- pi_lcn->lcnphy_rssi_vc =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISMC2G);
- pi_lcn->lcnphy_rssi_gs =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISAV2G);
+ pi_lcn->lcnphy_rssi_vf = sprom->rssismf2g;
+ pi_lcn->lcnphy_rssi_vc = sprom->rssismc2g;
+ pi_lcn->lcnphy_rssi_gs = sprom->rssisav2g;
pi_lcn->lcnphy_rssi_vf_lowtemp = pi_lcn->lcnphy_rssi_vf;
pi_lcn->lcnphy_rssi_vc_lowtemp = pi_lcn->lcnphy_rssi_vc;
@@ -4848,7 +4843,7 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
pi_lcn->lcnphy_rssi_vc_hightemp = pi_lcn->lcnphy_rssi_vc;
pi_lcn->lcnphy_rssi_gs_hightemp = pi_lcn->lcnphy_rssi_gs;
- txpwr = (s8)wlapi_getintvar(shim, BRCMS_SROM_MAXP2GA0);
+ txpwr = sprom->core_pwr_info[0].maxpwr_2g;
pi->tx_srom_max_2g = txpwr;
for (i = 0; i < PWRTBL_NUM_COEFF; i++) {
@@ -4856,8 +4851,8 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
pi->txpa_2g_high_temp[i] = pi->txpa_2g[i];
}
- cckpo = (u16)wlapi_getintvar(shim, BRCMS_SROM_CCK2GPO);
- offset_ofdm = (u32)wlapi_getintvar(shim, BRCMS_SROM_OFDM2GPO);
+ cckpo = sprom->cck2gpo;
+ offset_ofdm = sprom->ofdm2gpo;
if (cckpo) {
uint max_pwr_chan = txpwr;
@@ -4876,7 +4871,7 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
} else {
u8 opo = 0;
- opo = (u8)wlapi_getintvar(shim, BRCMS_SROM_OPO);
+ opo = sprom->opo;
for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++)
pi->tx_srom_max_rate_2g[i] = txpwr;
@@ -4886,12 +4881,8 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
((offset_ofdm & 0xf) * 2);
offset_ofdm >>= 4;
}
- offset_mcs =
- wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO1) << 16;
- offset_mcs |=
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO0);
+ offset_mcs = sprom->mcs2gpo[1] << 16;
+ offset_mcs |= sprom->mcs2gpo[0];
pi_lcn->lcnphy_mcs20_po = offset_mcs;
for (i = TXP_FIRST_SISO_MCS_20;
i <= TXP_LAST_SISO_MCS_20; i++) {
@@ -4901,25 +4892,17 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
}
}
- pi_lcn->lcnphy_rawtempsense =
- (u16)wlapi_getintvar(shim, BRCMS_SROM_RAWTEMPSENSE);
- pi_lcn->lcnphy_measPower =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_MEASPOWER);
- pi_lcn->lcnphy_tempsense_slope =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPSENSE_SLOPE);
- pi_lcn->lcnphy_hw_iqcal_en =
- (bool)wlapi_getintvar(shim, BRCMS_SROM_HW_IQCAL_EN);
- pi_lcn->lcnphy_iqcal_swp_dis =
- (bool)wlapi_getintvar(shim, BRCMS_SROM_IQCAL_SWP_DIS);
- pi_lcn->lcnphy_tempcorrx =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPCORRX);
- pi_lcn->lcnphy_tempsense_option =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPSENSE_OPTION);
- pi_lcn->lcnphy_freqoffset_corr =
- (u8)wlapi_getintvar(shim, BRCMS_SROM_FREQOFFSET_CORR);
- if ((u8)wlapi_getintvar(shim, BRCMS_SROM_AA2G) > 1)
+ pi_lcn->lcnphy_rawtempsense = sprom->rawtempsense;
+ pi_lcn->lcnphy_measPower = sprom->measpower;
+ pi_lcn->lcnphy_tempsense_slope = sprom->tempsense_slope;
+ pi_lcn->lcnphy_hw_iqcal_en = sprom->hw_iqcal_en;
+ pi_lcn->lcnphy_iqcal_swp_dis = sprom->iqcal_swp_dis;
+ pi_lcn->lcnphy_tempcorrx = sprom->tempcorrx;
+ pi_lcn->lcnphy_tempsense_option = sprom->tempsense_option;
+ pi_lcn->lcnphy_freqoffset_corr = sprom->freqoffset_corr;
+ if (sprom->ant_available_bg > 1)
wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi,
- (u8) wlapi_getintvar(shim, BRCMS_SROM_AA2G));
+ sprom->ant_available_bg);
}
pi_lcn->lcnphy_cck_dig_filt_type = -1;
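
The hunks above all apply the same substitution: values that were fetched through the phy shim with wlapi_getintvar() and a BRCMS_SROM_* identifier are now read straight from the struct ssb_sprom carried by the bcma bus. Condensed into one illustrative function below; it is not code from the driver, and it leans on the driver's internal struct brcms_phy plus ssb_sprom fields that already appear in the hunks.

#include <linux/ssb/ssb.h>	/* struct ssb_sprom */

/* illustrative only: condensed before/after of the conversion above */
static void sketch_read_2g_pa(struct brcms_phy *pi)
{
	struct ssb_sprom *sprom = &pi->d11core->bus->sprom;

	/* old: pi->txpa_2g[0] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B0); */
	pi->txpa_2g[0] = sprom->pa0b0;	/* new: typed field, no shim, no cast */
	pi->txpa_2g[1] = sprom->pa0b1;
	pi->txpa_2g[2] = sprom->pa0b2;
}
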
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 812b6e38526e..13b261517cce 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14386,30 +14386,30 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
{
u16 bw40po, cddpo, stbcpo, bwduppo;
uint band_num;
- struct phy_shim_info *shim = pi->sh->physhim;
+ struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
if (pi->sh->sromrev >= 9)
return;
- bw40po = (u16) wlapi_getintvar(shim, BRCMS_SROM_BW40PO);
+ bw40po = sprom->bw40po;
pi->bw402gpo = bw40po & 0xf;
pi->bw405gpo = (bw40po & 0xf0) >> 4;
pi->bw405glpo = (bw40po & 0xf00) >> 8;
pi->bw405ghpo = (bw40po & 0xf000) >> 12;
- cddpo = (u16) wlapi_getintvar(shim, BRCMS_SROM_CDDPO);
+ cddpo = sprom->cddpo;
pi->cdd2gpo = cddpo & 0xf;
pi->cdd5gpo = (cddpo & 0xf0) >> 4;
pi->cdd5glpo = (cddpo & 0xf00) >> 8;
pi->cdd5ghpo = (cddpo & 0xf000) >> 12;
- stbcpo = (u16) wlapi_getintvar(shim, BRCMS_SROM_STBCPO);
+ stbcpo = sprom->stbcpo;
pi->stbc2gpo = stbcpo & 0xf;
pi->stbc5gpo = (stbcpo & 0xf0) >> 4;
pi->stbc5glpo = (stbcpo & 0xf00) >> 8;
pi->stbc5ghpo = (stbcpo & 0xf000) >> 12;
- bwduppo = (u16) wlapi_getintvar(shim, BRCMS_SROM_BWDUPPO);
+ bwduppo = sprom->bwduppo;
pi->bwdup2gpo = bwduppo & 0xf;
pi->bwdup5gpo = (bwduppo & 0xf0) >> 4;
pi->bwdup5glpo = (bwduppo & 0xf00) >> 8;
@@ -14419,242 +14419,137 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
band_num++) {
switch (band_num) {
case 0:
-
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP2GA0);
+ sprom->core_pwr_info[0].maxpwr_2g;
pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_2g =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP2GA1);
+ sprom->core_pwr_info[1].maxpwr_2g;
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW0A0);
+ sprom->core_pwr_info[0].pa_2g[0];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW0A1);
+ sprom->core_pwr_info[1].pa_2g[0];
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW1A0);
+ sprom->core_pwr_info[0].pa_2g[1];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW1A1);
+ sprom->core_pwr_info[1].pa_2g[1];
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_2g_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW2A0);
+ sprom->core_pwr_info[0].pa_2g[2];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_2g_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA2GW2A1);
+ sprom->core_pwr_info[1].pa_2g[2];
pi->nphy_pwrctrl_info[PHY_CORE_0].idle_targ_2g =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT2GA0);
+ sprom->core_pwr_info[0].itssi_2g;
pi->nphy_pwrctrl_info[PHY_CORE_1].idle_targ_2g =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT2GA1);
-
- pi->cck2gpo = (u16) wlapi_getintvar(shim,
- BRCMS_SROM_CCK2GPO);
-
- pi->ofdm2gpo =
- (u32) wlapi_getintvar(shim,
- BRCMS_SROM_OFDM2GPO);
-
- pi->mcs2gpo[0] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO0);
- pi->mcs2gpo[1] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO1);
- pi->mcs2gpo[2] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO2);
- pi->mcs2gpo[3] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO3);
- pi->mcs2gpo[4] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO4);
- pi->mcs2gpo[5] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO5);
- pi->mcs2gpo[6] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO6);
- pi->mcs2gpo[7] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS2GPO7);
+ sprom->core_pwr_info[1].itssi_2g;
+
+ pi->cck2gpo = sprom->cck2gpo;
+
+ pi->ofdm2gpo = sprom->ofdm2gpo;
+
+ pi->mcs2gpo[0] = sprom->mcs2gpo[0];
+ pi->mcs2gpo[1] = sprom->mcs2gpo[1];
+ pi->mcs2gpo[2] = sprom->mcs2gpo[2];
+ pi->mcs2gpo[3] = sprom->mcs2gpo[3];
+ pi->mcs2gpo[4] = sprom->mcs2gpo[4];
+ pi->mcs2gpo[5] = sprom->mcs2gpo[5];
+ pi->mcs2gpo[6] = sprom->mcs2gpo[6];
+ pi->mcs2gpo[7] = sprom->mcs2gpo[7];
break;
case 1:
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0);
+ sprom->core_pwr_info[0].maxpwr_5g;
pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GA1);
+ sprom->core_pwr_info[1].maxpwr_5g;
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW0A0);
+ sprom->core_pwr_info[0].pa_5g[0];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW0A1);
+ sprom->core_pwr_info[1].pa_5g[0];
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW1A0);
+ sprom->core_pwr_info[0].pa_5g[1];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW1A1);
+ sprom->core_pwr_info[1].pa_5g[1];
pi->nphy_pwrctrl_info[PHY_CORE_0].pwrdet_5gm_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW2A0);
+ sprom->core_pwr_info[0].pa_5g[2];
pi->nphy_pwrctrl_info[PHY_CORE_1].pwrdet_5gm_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GW2A1);
+ sprom->core_pwr_info[1].pa_5g[2];
pi->nphy_pwrctrl_info[PHY_CORE_0].idle_targ_5gm =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT5GA0);
+ sprom->core_pwr_info[0].itssi_5g;
pi->nphy_pwrctrl_info[PHY_CORE_1].idle_targ_5gm =
- (s8) wlapi_getintvar(shim, BRCMS_SROM_ITT5GA1);
-
- pi->ofdm5gpo =
- (u32) wlapi_getintvar(shim,
- BRCMS_SROM_OFDM5GPO);
-
- pi->mcs5gpo[0] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO0);
- pi->mcs5gpo[1] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO1);
- pi->mcs5gpo[2] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO2);
- pi->mcs5gpo[3] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO3);
- pi->mcs5gpo[4] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO4);
- pi->mcs5gpo[5] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO5);
- pi->mcs5gpo[6] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO6);
- pi->mcs5gpo[7] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GPO7);
+ sprom->core_pwr_info[1].itssi_5g;
+
+ pi->ofdm5gpo = sprom->ofdm5gpo;
+
+ pi->mcs5gpo[0] = sprom->mcs5gpo[0];
+ pi->mcs5gpo[1] = sprom->mcs5gpo[1];
+ pi->mcs5gpo[2] = sprom->mcs5gpo[2];
+ pi->mcs5gpo[3] = sprom->mcs5gpo[3];
+ pi->mcs5gpo[4] = sprom->mcs5gpo[4];
+ pi->mcs5gpo[5] = sprom->mcs5gpo[5];
+ pi->mcs5gpo[6] = sprom->mcs5gpo[6];
+ pi->mcs5gpo[7] = sprom->mcs5gpo[7];
break;
case 2:
pi->nphy_pwrctrl_info[0].max_pwr_5gl =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GLA0);
+ sprom->core_pwr_info[0].maxpwr_5gl;
pi->nphy_pwrctrl_info[1].max_pwr_5gl =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GLA1);
+ sprom->core_pwr_info[1].maxpwr_5gl;
pi->nphy_pwrctrl_info[0].pwrdet_5gl_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW0A0);
+ sprom->core_pwr_info[0].pa_5gl[0];
pi->nphy_pwrctrl_info[1].pwrdet_5gl_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW0A1);
+ sprom->core_pwr_info[1].pa_5gl[0];
pi->nphy_pwrctrl_info[0].pwrdet_5gl_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW1A0);
+ sprom->core_pwr_info[0].pa_5gl[1];
pi->nphy_pwrctrl_info[1].pwrdet_5gl_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW1A1);
+ sprom->core_pwr_info[1].pa_5gl[1];
pi->nphy_pwrctrl_info[0].pwrdet_5gl_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW2A0);
+ sprom->core_pwr_info[0].pa_5gl[2];
pi->nphy_pwrctrl_info[1].pwrdet_5gl_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GLW2A1);
+ sprom->core_pwr_info[1].pa_5gl[2];
pi->nphy_pwrctrl_info[0].idle_targ_5gl = 0;
pi->nphy_pwrctrl_info[1].idle_targ_5gl = 0;
- pi->ofdm5glpo =
- (u32) wlapi_getintvar(shim,
- BRCMS_SROM_OFDM5GLPO);
-
- pi->mcs5glpo[0] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO0);
- pi->mcs5glpo[1] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO1);
- pi->mcs5glpo[2] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO2);
- pi->mcs5glpo[3] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO3);
- pi->mcs5glpo[4] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO4);
- pi->mcs5glpo[5] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO5);
- pi->mcs5glpo[6] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO6);
- pi->mcs5glpo[7] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GLPO7);
+ pi->ofdm5glpo = sprom->ofdm5glpo;
+
+ pi->mcs5glpo[0] = sprom->mcs5glpo[0];
+ pi->mcs5glpo[1] = sprom->mcs5glpo[1];
+ pi->mcs5glpo[2] = sprom->mcs5glpo[2];
+ pi->mcs5glpo[3] = sprom->mcs5glpo[3];
+ pi->mcs5glpo[4] = sprom->mcs5glpo[4];
+ pi->mcs5glpo[5] = sprom->mcs5glpo[5];
+ pi->mcs5glpo[6] = sprom->mcs5glpo[6];
+ pi->mcs5glpo[7] = sprom->mcs5glpo[7];
break;
case 3:
pi->nphy_pwrctrl_info[0].max_pwr_5gh =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GHA0);
+ sprom->core_pwr_info[0].maxpwr_5gh;
pi->nphy_pwrctrl_info[1].max_pwr_5gh =
- (s8) wlapi_getintvar(shim,
- BRCMS_SROM_MAXP5GHA1);
+ sprom->core_pwr_info[1].maxpwr_5gh;
pi->nphy_pwrctrl_info[0].pwrdet_5gh_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW0A0);
+ sprom->core_pwr_info[0].pa_5gh[0];
pi->nphy_pwrctrl_info[1].pwrdet_5gh_a1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW0A1);
+ sprom->core_pwr_info[1].pa_5gh[0];
pi->nphy_pwrctrl_info[0].pwrdet_5gh_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW1A0);
+ sprom->core_pwr_info[0].pa_5gh[1];
pi->nphy_pwrctrl_info[1].pwrdet_5gh_b0 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW1A1);
+ sprom->core_pwr_info[1].pa_5gh[1];
pi->nphy_pwrctrl_info[0].pwrdet_5gh_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW2A0);
+ sprom->core_pwr_info[0].pa_5gh[2];
pi->nphy_pwrctrl_info[1].pwrdet_5gh_b1 =
- (s16) wlapi_getintvar(shim,
- BRCMS_SROM_PA5GHW2A1);
+ sprom->core_pwr_info[1].pa_5gh[2];
pi->nphy_pwrctrl_info[0].idle_targ_5gh = 0;
pi->nphy_pwrctrl_info[1].idle_targ_5gh = 0;
- pi->ofdm5ghpo =
- (u32) wlapi_getintvar(shim,
- BRCMS_SROM_OFDM5GHPO);
-
- pi->mcs5ghpo[0] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO0);
- pi->mcs5ghpo[1] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO1);
- pi->mcs5ghpo[2] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO2);
- pi->mcs5ghpo[3] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO3);
- pi->mcs5ghpo[4] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO4);
- pi->mcs5ghpo[5] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO5);
- pi->mcs5ghpo[6] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO6);
- pi->mcs5ghpo[7] =
- (u16) wlapi_getintvar(shim,
- BRCMS_SROM_MCS5GHPO7);
+ pi->ofdm5ghpo = sprom->ofdm5ghpo;
+
+ pi->mcs5ghpo[0] = sprom->mcs5ghpo[0];
+ pi->mcs5ghpo[1] = sprom->mcs5ghpo[1];
+ pi->mcs5ghpo[2] = sprom->mcs5ghpo[2];
+ pi->mcs5ghpo[3] = sprom->mcs5ghpo[3];
+ pi->mcs5ghpo[4] = sprom->mcs5ghpo[4];
+ pi->mcs5ghpo[5] = sprom->mcs5ghpo[5];
+ pi->mcs5ghpo[6] = sprom->mcs5ghpo[6];
+ pi->mcs5ghpo[7] = sprom->mcs5ghpo[7];
break;
}
}
@@ -14664,45 +14559,34 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
{
- struct phy_shim_info *shim = pi->sh->physhim;
-
- pi->antswitch = (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWITCH);
- pi->aa2g = (u8) wlapi_getintvar(shim, BRCMS_SROM_AA2G);
- pi->aa5g = (u8) wlapi_getintvar(shim, BRCMS_SROM_AA5G);
-
- pi->srom_fem2g.tssipos = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TSSIPOS2G);
- pi->srom_fem2g.extpagain = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_EXTPAGAIN2G);
- pi->srom_fem2g.pdetrange = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_PDETRANGE2G);
- pi->srom_fem2g.triso = (u8) wlapi_getintvar(shim, BRCMS_SROM_TRISO2G);
- pi->srom_fem2g.antswctrllut =
- (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL2G);
-
- pi->srom_fem5g.tssipos = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TSSIPOS5G);
- pi->srom_fem5g.extpagain = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_EXTPAGAIN5G);
- pi->srom_fem5g.pdetrange = (u8) wlapi_getintvar(shim,
- BRCMS_SROM_PDETRANGE5G);
- pi->srom_fem5g.triso = (u8) wlapi_getintvar(shim, BRCMS_SROM_TRISO5G);
- if (wlapi_getvar(shim, BRCMS_SROM_ANTSWCTL5G))
- pi->srom_fem5g.antswctrllut =
- (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL5G);
+ struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
+
+ pi->antswitch = sprom->antswitch;
+ pi->aa2g = sprom->ant_available_bg;
+ pi->aa5g = sprom->ant_available_a;
+
+ pi->srom_fem2g.tssipos = sprom->fem.ghz2.tssipos;
+ pi->srom_fem2g.extpagain = sprom->fem.ghz2.extpa_gain;
+ pi->srom_fem2g.pdetrange = sprom->fem.ghz2.pdet_range;
+ pi->srom_fem2g.triso = sprom->fem.ghz2.tr_iso;
+ pi->srom_fem2g.antswctrllut = sprom->fem.ghz2.antswlut;
+
+ pi->srom_fem5g.tssipos = sprom->fem.ghz5.tssipos;
+ pi->srom_fem5g.extpagain = sprom->fem.ghz5.extpa_gain;
+ pi->srom_fem5g.pdetrange = sprom->fem.ghz5.pdet_range;
+ pi->srom_fem5g.triso = sprom->fem.ghz5.tr_iso;
+ if (sprom->fem.ghz5.antswlut)
+ pi->srom_fem5g.antswctrllut = sprom->fem.ghz5.antswlut;
else
- pi->srom_fem5g.antswctrllut =
- (u8) wlapi_getintvar(shim, BRCMS_SROM_ANTSWCTL2G);
+ pi->srom_fem5g.antswctrllut = sprom->fem.ghz2.antswlut;
wlc_phy_txpower_ipa_upd(pi);
- pi->phy_txcore_disable_temp =
- (s16) wlapi_getintvar(shim, BRCMS_SROM_TEMPTHRESH);
+ pi->phy_txcore_disable_temp = sprom->tempthresh;
if (pi->phy_txcore_disable_temp == 0)
pi->phy_txcore_disable_temp = PHY_CHAIN_TX_DISABLE_TEMP;
- pi->phy_tempsense_offset = (s8) wlapi_getintvar(shim,
- BRCMS_SROM_TEMPOFFSET);
+ pi->phy_tempsense_offset = sprom->tempoffset;
if (pi->phy_tempsense_offset != 0) {
if (pi->phy_tempsense_offset >
(NPHY_SROM_TEMPSHIFT + NPHY_SROM_MAXTEMPOFFSET))
@@ -14717,8 +14601,7 @@ static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
pi->phy_txcore_enable_temp =
pi->phy_txcore_disable_temp - PHY_HYSTERESIS_DELTATEMP;
- pi->phycal_tempdelta =
- (u8) wlapi_getintvar(shim, BRCMS_SROM_PHYCAL_TEMPDELTA);
+ pi->phycal_tempdelta = sprom->phycal_tempdelta;
if (pi->phycal_tempdelta > NPHY_CAL_MAXTEMPDELTA)
pi->phycal_tempdelta = 0;
@@ -21460,7 +21343,7 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
write_phy_reg(pi, 0xc8, 0x0);
write_phy_reg(pi, 0xc9, 0x0);
- ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
+ bcma_chipco_gpio_control(&pi->d11core->bus->drv_cc, mask, mask);
mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
mc &= ~MCTL_GPOUT_SEL_MASK;
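
One detail worth calling out from the first hunk in this file: each 16-bit SPROM power-offset word (bw40po, cddpo, stbcpo, bwduppo) packs four 4-bit per-band offsets, unpacked in the order 2g, 5g, 5gl, 5gh from low nibble to high. A standalone sketch of that unpacking follows; unpack_band_po() is a made-up helper, not driver code.

#include <stdint.h>
#include <stdio.h>

struct band_po {
	uint8_t po_2g, po_5g, po_5gl, po_5gh;
};

/* mirrors the nibble layout used by the bw40po/cddpo/stbcpo/bwduppo hunks */
static struct band_po unpack_band_po(uint16_t word)
{
	struct band_po po = {
		.po_2g  = word & 0xf,
		.po_5g  = (word >> 4) & 0xf,
		.po_5gl = (word >> 8) & 0xf,
		.po_5gh = (word >> 12) & 0xf,
	};
	return po;
}

int main(void)
{
	struct band_po po = unpack_band_po(0x4321);

	printf("2g=%u 5g=%u 5gl=%u 5gh=%u\n",
	       po.po_2g, po.po_5g, po.po_5gl, po.po_5gh);
	return 0;
}
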
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
index 5926854f62e2..a0de5db0cd64 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.c
@@ -214,12 +214,3 @@ wlapi_copyto_objmem(struct phy_shim_info *physhim, uint offset, const void *buf,
{
brcms_b_copyto_objmem(physhim->wlc_hw, offset, buf, l, sel);
}
-
-char *wlapi_getvar(struct phy_shim_info *physhim, enum brcms_srom_id id)
-{
- return getvar(physhim->wlc_hw->sih, id);
-}
-int wlapi_getintvar(struct phy_shim_info *physhim, enum brcms_srom_id id)
-{
- return getintvar(physhim->wlc_hw->sih, id);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
index 9168c459b185..2c5b66b75970 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
@@ -175,8 +175,5 @@ extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
u32 phy_mode);
extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
-extern char *wlapi_getvar(struct phy_shim_info *physhim, enum brcms_srom_id id);
-extern int wlapi_getintvar(struct phy_shim_info *physhim,
- enum brcms_srom_id id);
#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index f0038ad7d7bf..aa5d67f8d874 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -22,232 +22,6 @@
#include "types.h"
#include "defs.h"
-enum brcms_srom_id {
- BRCMS_SROM_NULL,
- BRCMS_SROM_CONT,
- BRCMS_SROM_AA2G,
- BRCMS_SROM_AA5G,
- BRCMS_SROM_AG0,
- BRCMS_SROM_AG1,
- BRCMS_SROM_AG2,
- BRCMS_SROM_AG3,
- BRCMS_SROM_ANTSWCTL2G,
- BRCMS_SROM_ANTSWCTL5G,
- BRCMS_SROM_ANTSWITCH,
- BRCMS_SROM_BOARDFLAGS2,
- BRCMS_SROM_BOARDFLAGS,
- BRCMS_SROM_BOARDNUM,
- BRCMS_SROM_BOARDREV,
- BRCMS_SROM_BOARDTYPE,
- BRCMS_SROM_BW40PO,
- BRCMS_SROM_BWDUPPO,
- BRCMS_SROM_BXA2G,
- BRCMS_SROM_BXA5G,
- BRCMS_SROM_CC,
- BRCMS_SROM_CCK2GPO,
- BRCMS_SROM_CCKBW202GPO,
- BRCMS_SROM_CCKBW20UL2GPO,
- BRCMS_SROM_CCODE,
- BRCMS_SROM_CDDPO,
- BRCMS_SROM_DEVID,
- BRCMS_SROM_ET1MACADDR,
- BRCMS_SROM_EXTPAGAIN2G,
- BRCMS_SROM_EXTPAGAIN5G,
- BRCMS_SROM_FREQOFFSET_CORR,
- BRCMS_SROM_HW_IQCAL_EN,
- BRCMS_SROM_IL0MACADDR,
- BRCMS_SROM_IQCAL_SWP_DIS,
- BRCMS_SROM_LEDBH0,
- BRCMS_SROM_LEDBH1,
- BRCMS_SROM_LEDBH2,
- BRCMS_SROM_LEDBH3,
- BRCMS_SROM_LEDDC,
- BRCMS_SROM_LEGOFDM40DUPPO,
- BRCMS_SROM_LEGOFDMBW202GPO,
- BRCMS_SROM_LEGOFDMBW205GHPO,
- BRCMS_SROM_LEGOFDMBW205GLPO,
- BRCMS_SROM_LEGOFDMBW205GMPO,
- BRCMS_SROM_LEGOFDMBW20UL2GPO,
- BRCMS_SROM_LEGOFDMBW20UL5GHPO,
- BRCMS_SROM_LEGOFDMBW20UL5GLPO,
- BRCMS_SROM_LEGOFDMBW20UL5GMPO,
- BRCMS_SROM_MACADDR,
- BRCMS_SROM_MCS2GPO0,
- BRCMS_SROM_MCS2GPO1,
- BRCMS_SROM_MCS2GPO2,
- BRCMS_SROM_MCS2GPO3,
- BRCMS_SROM_MCS2GPO4,
- BRCMS_SROM_MCS2GPO5,
- BRCMS_SROM_MCS2GPO6,
- BRCMS_SROM_MCS2GPO7,
- BRCMS_SROM_MCS32PO,
- BRCMS_SROM_MCS5GHPO0,
- BRCMS_SROM_MCS5GHPO1,
- BRCMS_SROM_MCS5GHPO2,
- BRCMS_SROM_MCS5GHPO3,
- BRCMS_SROM_MCS5GHPO4,
- BRCMS_SROM_MCS5GHPO5,
- BRCMS_SROM_MCS5GHPO6,
- BRCMS_SROM_MCS5GHPO7,
- BRCMS_SROM_MCS5GLPO0,
- BRCMS_SROM_MCS5GLPO1,
- BRCMS_SROM_MCS5GLPO2,
- BRCMS_SROM_MCS5GLPO3,
- BRCMS_SROM_MCS5GLPO4,
- BRCMS_SROM_MCS5GLPO5,
- BRCMS_SROM_MCS5GLPO6,
- BRCMS_SROM_MCS5GLPO7,
- BRCMS_SROM_MCS5GPO0,
- BRCMS_SROM_MCS5GPO1,
- BRCMS_SROM_MCS5GPO2,
- BRCMS_SROM_MCS5GPO3,
- BRCMS_SROM_MCS5GPO4,
- BRCMS_SROM_MCS5GPO5,
- BRCMS_SROM_MCS5GPO6,
- BRCMS_SROM_MCS5GPO7,
- BRCMS_SROM_MCSBW202GPO,
- BRCMS_SROM_MCSBW205GHPO,
- BRCMS_SROM_MCSBW205GLPO,
- BRCMS_SROM_MCSBW205GMPO,
- BRCMS_SROM_MCSBW20UL2GPO,
- BRCMS_SROM_MCSBW20UL5GHPO,
- BRCMS_SROM_MCSBW20UL5GLPO,
- BRCMS_SROM_MCSBW20UL5GMPO,
- BRCMS_SROM_MCSBW402GPO,
- BRCMS_SROM_MCSBW405GHPO,
- BRCMS_SROM_MCSBW405GLPO,
- BRCMS_SROM_MCSBW405GMPO,
- BRCMS_SROM_MEASPOWER,
- BRCMS_SROM_OFDM2GPO,
- BRCMS_SROM_OFDM5GHPO,
- BRCMS_SROM_OFDM5GLPO,
- BRCMS_SROM_OFDM5GPO,
- BRCMS_SROM_OPO,
- BRCMS_SROM_PA0B0,
- BRCMS_SROM_PA0B1,
- BRCMS_SROM_PA0B2,
- BRCMS_SROM_PA0ITSSIT,
- BRCMS_SROM_PA0MAXPWR,
- BRCMS_SROM_PA1B0,
- BRCMS_SROM_PA1B1,
- BRCMS_SROM_PA1B2,
- BRCMS_SROM_PA1HIB0,
- BRCMS_SROM_PA1HIB1,
- BRCMS_SROM_PA1HIB2,
- BRCMS_SROM_PA1HIMAXPWR,
- BRCMS_SROM_PA1ITSSIT,
- BRCMS_SROM_PA1LOB0,
- BRCMS_SROM_PA1LOB1,
- BRCMS_SROM_PA1LOB2,
- BRCMS_SROM_PA1LOMAXPWR,
- BRCMS_SROM_PA1MAXPWR,
- BRCMS_SROM_PDETRANGE2G,
- BRCMS_SROM_PDETRANGE5G,
- BRCMS_SROM_PHYCAL_TEMPDELTA,
- BRCMS_SROM_RAWTEMPSENSE,
- BRCMS_SROM_REGREV,
- BRCMS_SROM_REV,
- BRCMS_SROM_RSSISAV2G,
- BRCMS_SROM_RSSISAV5G,
- BRCMS_SROM_RSSISMC2G,
- BRCMS_SROM_RSSISMC5G,
- BRCMS_SROM_RSSISMF2G,
- BRCMS_SROM_RSSISMF5G,
- BRCMS_SROM_RXCHAIN,
- BRCMS_SROM_RXPO2G,
- BRCMS_SROM_RXPO5G,
- BRCMS_SROM_STBCPO,
- BRCMS_SROM_TEMPCORRX,
- BRCMS_SROM_TEMPOFFSET,
- BRCMS_SROM_TEMPSENSE_OPTION,
- BRCMS_SROM_TEMPSENSE_SLOPE,
- BRCMS_SROM_TEMPTHRESH,
- BRCMS_SROM_TRI2G,
- BRCMS_SROM_TRI5GH,
- BRCMS_SROM_TRI5GL,
- BRCMS_SROM_TRI5G,
- BRCMS_SROM_TRISO2G,
- BRCMS_SROM_TRISO5G,
- BRCMS_SROM_TSSIPOS2G,
- BRCMS_SROM_TSSIPOS5G,
- BRCMS_SROM_TXCHAIN,
- /*
- * per-path identifiers (see srom.c)
- */
- BRCMS_SROM_ITT2GA0,
- BRCMS_SROM_ITT2GA1,
- BRCMS_SROM_ITT2GA2,
- BRCMS_SROM_ITT2GA3,
- BRCMS_SROM_ITT5GA0,
- BRCMS_SROM_ITT5GA1,
- BRCMS_SROM_ITT5GA2,
- BRCMS_SROM_ITT5GA3,
- BRCMS_SROM_MAXP2GA0,
- BRCMS_SROM_MAXP2GA1,
- BRCMS_SROM_MAXP2GA2,
- BRCMS_SROM_MAXP2GA3,
- BRCMS_SROM_MAXP5GA0,
- BRCMS_SROM_MAXP5GA1,
- BRCMS_SROM_MAXP5GA2,
- BRCMS_SROM_MAXP5GA3,
- BRCMS_SROM_MAXP5GHA0,
- BRCMS_SROM_MAXP5GHA1,
- BRCMS_SROM_MAXP5GHA2,
- BRCMS_SROM_MAXP5GHA3,
- BRCMS_SROM_MAXP5GLA0,
- BRCMS_SROM_MAXP5GLA1,
- BRCMS_SROM_MAXP5GLA2,
- BRCMS_SROM_MAXP5GLA3,
- BRCMS_SROM_PA2GW0A0,
- BRCMS_SROM_PA2GW0A1,
- BRCMS_SROM_PA2GW0A2,
- BRCMS_SROM_PA2GW0A3,
- BRCMS_SROM_PA2GW1A0,
- BRCMS_SROM_PA2GW1A1,
- BRCMS_SROM_PA2GW1A2,
- BRCMS_SROM_PA2GW1A3,
- BRCMS_SROM_PA2GW2A0,
- BRCMS_SROM_PA2GW2A1,
- BRCMS_SROM_PA2GW2A2,
- BRCMS_SROM_PA2GW2A3,
- BRCMS_SROM_PA5GHW0A0,
- BRCMS_SROM_PA5GHW0A1,
- BRCMS_SROM_PA5GHW0A2,
- BRCMS_SROM_PA5GHW0A3,
- BRCMS_SROM_PA5GHW1A0,
- BRCMS_SROM_PA5GHW1A1,
- BRCMS_SROM_PA5GHW1A2,
- BRCMS_SROM_PA5GHW1A3,
- BRCMS_SROM_PA5GHW2A0,
- BRCMS_SROM_PA5GHW2A1,
- BRCMS_SROM_PA5GHW2A2,
- BRCMS_SROM_PA5GHW2A3,
- BRCMS_SROM_PA5GLW0A0,
- BRCMS_SROM_PA5GLW0A1,
- BRCMS_SROM_PA5GLW0A2,
- BRCMS_SROM_PA5GLW0A3,
- BRCMS_SROM_PA5GLW1A0,
- BRCMS_SROM_PA5GLW1A1,
- BRCMS_SROM_PA5GLW1A2,
- BRCMS_SROM_PA5GLW1A3,
- BRCMS_SROM_PA5GLW2A0,
- BRCMS_SROM_PA5GLW2A1,
- BRCMS_SROM_PA5GLW2A2,
- BRCMS_SROM_PA5GLW2A3,
- BRCMS_SROM_PA5GW0A0,
- BRCMS_SROM_PA5GW0A1,
- BRCMS_SROM_PA5GW0A2,
- BRCMS_SROM_PA5GW0A3,
- BRCMS_SROM_PA5GW1A0,
- BRCMS_SROM_PA5GW1A1,
- BRCMS_SROM_PA5GW1A2,
- BRCMS_SROM_PA5GW1A3,
- BRCMS_SROM_PA5GW2A0,
- BRCMS_SROM_PA5GW2A1,
- BRCMS_SROM_PA5GW2A2,
- BRCMS_SROM_PA5GW2A3,
-};
-
#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
/* phy types */
@@ -565,8 +339,6 @@ extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
struct ieee80211_sta *sta, u16 tid);
extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern char *getvar(struct si_pub *sih, enum brcms_srom_id id);
-extern int getintvar(struct si_pub *sih, enum brcms_srom_id id);
extern int brcms_c_module_register(struct brcms_pub *pub,
const char *name, struct brcms_info *hdl,
int (*down_fn)(void *handle));
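
Dropping enum brcms_srom_id (and the getvar/getintvar prototypes) removes a layer of untyped int lookups: struct ssb_sprom exposes each value as a typed member, so the (u8)/(u16)/(s16) casts scattered through the old call sites disappear. A small illustration, assuming only fields that the hunks above already reference; the function itself is hypothetical.

#include <linux/ssb/ssb.h>	/* struct ssb_sprom */

/* hypothetical illustration, not code from the driver */
static void sketch_typed_sprom(const struct ssb_sprom *sprom)
{
	/* was: (u8) getintvar(sih, BRCMS_SROM_TRISO2G)   */
	u8 triso2g = sprom->fem.ghz2.tr_iso;
	/* was: (u16) getintvar(sih, BRCMS_SROM_CCK2GPO)  */
	u16 cck2gpo = sprom->cck2gpo;
	/* was: (u16) getintvar(sih, BRCMS_SROM_MCS2GPO0) */
	u16 mcs2gpo0 = sprom->mcs2gpo[0];

	(void)triso2g;
	(void)cck2gpo;
	(void)mcs2gpo0;
}
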
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
deleted file mode 100644
index b96f4b9d74bd..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ /dev/null
@@ -1,980 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/etherdevice.h>
-#include <linux/crc8.h>
-#include <stdarg.h>
-
-#include <chipcommon.h>
-#include <brcmu_utils.h>
-#include "pub.h"
-#include "nicpci.h"
-#include "aiutils.h"
-#include "otp.h"
-#include "srom.h"
-#include "soc.h"
-
-/*
- * SROM CRC8 polynomial value:
- *
- * x^8 + x^7 +x^6 + x^4 + x^2 + 1
- */
-#define SROM_CRC8_POLY 0xAB
-
-/* Maximum srom: 6 Kilobits == 768 bytes */
-#define SROM_MAX 768
-
-/* PCI fields */
-#define PCI_F0DEVID 48
-
-#define SROM_WORDS 64
-
-#define SROM_SSID 2
-
-#define SROM_WL1LHMAXP 29
-
-#define SROM_WL1LPAB0 30
-#define SROM_WL1LPAB1 31
-#define SROM_WL1LPAB2 32
-
-#define SROM_WL1HPAB0 33
-#define SROM_WL1HPAB1 34
-#define SROM_WL1HPAB2 35
-
-#define SROM_MACHI_IL0 36
-#define SROM_MACMID_IL0 37
-#define SROM_MACLO_IL0 38
-#define SROM_MACHI_ET1 42
-#define SROM_MACMID_ET1 43
-#define SROM_MACLO_ET1 44
-
-#define SROM_BXARSSI2G 40
-#define SROM_BXARSSI5G 41
-
-#define SROM_TRI52G 42
-#define SROM_TRI5GHL 43
-
-#define SROM_RXPO52G 45
-
-#define SROM_AABREV 46
-/* Fields in AABREV */
-#define SROM_BR_MASK 0x00ff
-#define SROM_CC_MASK 0x0f00
-#define SROM_CC_SHIFT 8
-#define SROM_AA0_MASK 0x3000
-#define SROM_AA0_SHIFT 12
-#define SROM_AA1_MASK 0xc000
-#define SROM_AA1_SHIFT 14
-
-#define SROM_WL0PAB0 47
-#define SROM_WL0PAB1 48
-#define SROM_WL0PAB2 49
-
-#define SROM_LEDBH10 50
-#define SROM_LEDBH32 51
-
-#define SROM_WL10MAXP 52
-
-#define SROM_WL1PAB0 53
-#define SROM_WL1PAB1 54
-#define SROM_WL1PAB2 55
-
-#define SROM_ITT 56
-
-#define SROM_BFL 57
-#define SROM_BFL2 28
-
-#define SROM_AG10 58
-
-#define SROM_CCODE 59
-
-#define SROM_OPO 60
-
-#define SROM_CRCREV 63
-
-#define SROM4_WORDS 220
-
-#define SROM4_TXCHAIN_MASK 0x000f
-#define SROM4_RXCHAIN_MASK 0x00f0
-#define SROM4_SWITCH_MASK 0xff00
-
-/* Per-path fields */
-#define MAX_PATH_SROM 4
-
-#define SROM4_CRCREV 219
-
-/* SROM Rev 8: Make space for a 48-word hardware header for PCIe rev >= 6.
- * This is a combined srom for both MIMO and SISO boards, usable in
- * the .130 4Kilobit OTP with hardware redundancy.
- */
-#define SROM8_BREV 65
-
-#define SROM8_BFL0 66
-#define SROM8_BFL1 67
-#define SROM8_BFL2 68
-#define SROM8_BFL3 69
-
-#define SROM8_MACHI 70
-#define SROM8_MACMID 71
-#define SROM8_MACLO 72
-
-#define SROM8_CCODE 73
-#define SROM8_REGREV 74
-
-#define SROM8_LEDBH10 75
-#define SROM8_LEDBH32 76
-
-#define SROM8_LEDDC 77
-
-#define SROM8_AA 78
-
-#define SROM8_AG10 79
-#define SROM8_AG32 80
-
-#define SROM8_TXRXC 81
-
-#define SROM8_BXARSSI2G 82
-#define SROM8_BXARSSI5G 83
-#define SROM8_TRI52G 84
-#define SROM8_TRI5GHL 85
-#define SROM8_RXPO52G 86
-
-#define SROM8_FEM2G 87
-#define SROM8_FEM5G 88
-#define SROM8_FEM_ANTSWLUT_MASK 0xf800
-#define SROM8_FEM_ANTSWLUT_SHIFT 11
-#define SROM8_FEM_TR_ISO_MASK 0x0700
-#define SROM8_FEM_TR_ISO_SHIFT 8
-#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
-#define SROM8_FEM_PDET_RANGE_SHIFT 3
-#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
-#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
-#define SROM8_FEM_TSSIPOS_MASK 0x0001
-#define SROM8_FEM_TSSIPOS_SHIFT 0
-
-#define SROM8_THERMAL 89
-
-/* Temp sense related entries */
-#define SROM8_MPWR_RAWTS 90
-#define SROM8_TS_SLP_OPT_CORRX 91
-/* FOC: frequency offset correction, HWIQ: H/W IQCAL enable,
- * IQSWP: IQ CAL swap disable */
-#define SROM8_FOC_HWIQ_IQSWP 92
-
-/* Temperature delta for PHY calibration */
-#define SROM8_PHYCAL_TEMPDELTA 93
-
-/* Per-path offsets & fields */
-#define SROM8_PATH0 96
-#define SROM8_PATH1 112
-#define SROM8_PATH2 128
-#define SROM8_PATH3 144
-
-#define SROM8_2G_ITT_MAXP 0
-#define SROM8_2G_PA 1
-#define SROM8_5G_ITT_MAXP 4
-#define SROM8_5GLH_MAXP 5
-#define SROM8_5G_PA 6
-#define SROM8_5GL_PA 9
-#define SROM8_5GH_PA 12
-
-/* All the myriad power offsets */
-#define SROM8_2G_CCKPO 160
-
-#define SROM8_2G_OFDMPO 161
-#define SROM8_5G_OFDMPO 163
-#define SROM8_5GL_OFDMPO 165
-#define SROM8_5GH_OFDMPO 167
-
-#define SROM8_2G_MCSPO 169
-#define SROM8_5G_MCSPO 177
-#define SROM8_5GL_MCSPO 185
-#define SROM8_5GH_MCSPO 193
-
-#define SROM8_CDDPO 201
-#define SROM8_STBCPO 202
-#define SROM8_BW40PO 203
-#define SROM8_BWDUPPO 204
-
-/* SISO PA parameters are in the path0 spaces */
-#define SROM8_SISO 96
-
-/* Legacy names for SISO PA parameters */
-#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
-#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
-#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
-#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
-#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
-#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
-#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
-#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
-#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
-#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
-#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
-#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
-#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
-#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
-#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
-
-/* SROM REV 9 */
-#define SROM9_2GPO_CCKBW20 160
-#define SROM9_2GPO_CCKBW20UL 161
-#define SROM9_2GPO_LOFDMBW20 162
-#define SROM9_2GPO_LOFDMBW20UL 164
-
-#define SROM9_5GLPO_LOFDMBW20 166
-#define SROM9_5GLPO_LOFDMBW20UL 168
-#define SROM9_5GMPO_LOFDMBW20 170
-#define SROM9_5GMPO_LOFDMBW20UL 172
-#define SROM9_5GHPO_LOFDMBW20 174
-#define SROM9_5GHPO_LOFDMBW20UL 176
-
-#define SROM9_2GPO_MCSBW20 178
-#define SROM9_2GPO_MCSBW20UL 180
-#define SROM9_2GPO_MCSBW40 182
-
-#define SROM9_5GLPO_MCSBW20 184
-#define SROM9_5GLPO_MCSBW20UL 186
-#define SROM9_5GLPO_MCSBW40 188
-#define SROM9_5GMPO_MCSBW20 190
-#define SROM9_5GMPO_MCSBW20UL 192
-#define SROM9_5GMPO_MCSBW40 194
-#define SROM9_5GHPO_MCSBW20 196
-#define SROM9_5GHPO_MCSBW20UL 198
-#define SROM9_5GHPO_MCSBW40 200
-
-#define SROM9_PO_MCS32 202
-#define SROM9_PO_LOFDM40DUP 203
-
-/* SROM flags (see sromvar_t) */
-
-/* value continues as described by the next entry */
-#define SRFL_MORE 1
-#define SRFL_NOFFS 2 /* value bits can't be all one's */
-#define SRFL_PRHEX 4 /* value is in hexadecimal format */
-#define SRFL_PRSIGN 8 /* value is in signed decimal format */
-#define SRFL_CCODE 0x10 /* value is in country code format */
-#define SRFL_ETHADDR 0x20 /* value is an Ethernet address */
-#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */
-/* do not generate a nvram param, entry is for mfgc */
-#define SRFL_NOVAR 0x80
-
-/* Max. nvram variable table size */
-#define MAXSZ_NVRAM_VARS 4096
-
-/*
- * indicates type of value.
- */
-enum brcms_srom_var_type {
- BRCMS_SROM_STRING,
- BRCMS_SROM_SNUMBER,
- BRCMS_SROM_UNUMBER
-};
-
-/*
- * storage type for srom variable.
- *
- * var_list: for linked list operations.
- * varid: identifier of the variable.
- * var_type: type of variable.
- * buf: variable value when var_type == BRCMS_SROM_STRING.
- * uval: unsigned variable value when var_type == BRCMS_SROM_UNUMBER.
- * sval: signed variable value when var_type == BRCMS_SROM_SNUMBER.
- */
-struct brcms_srom_list_head {
- struct list_head var_list;
- enum brcms_srom_id varid;
- enum brcms_srom_var_type var_type;
- union {
- char buf[0];
- u32 uval;
- s32 sval;
- };
-};
-
-struct brcms_sromvar {
- enum brcms_srom_id varid;
- u32 revmask;
- u32 flags;
- u16 off;
- u16 mask;
-};
-
-struct brcms_varbuf {
- char *base; /* pointer to buffer base */
- char *buf; /* pointer to current position */
- unsigned int size; /* current (residual) size in bytes */
-};
-
-/*
- * Assumptions:
- * - Ethernet address spans across 3 consecutive words
- *
- * Table rules:
- * - Add multiple entries next to each other if a value spans across multiple
- * words (even multiple fields in the same word) with each entry except the
- * last having it's SRFL_MORE bit set.
- * - Ethernet address entry does not follow above rule and must not have
- * SRFL_MORE bit set. Its SRFL_ETHADDR bit implies it takes multiple words.
- * - The last entry's name field must be NULL to indicate the end of the table.
- * Other entries must have non-NULL name.
- */
-static const struct brcms_sromvar pci_sromvars[] = {
- {BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID,
- 0xffff},
- {BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff},
- {BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff},
- {BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff},
- {BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff},
- {BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
- {BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
- {BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
- {BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
- {BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
- {BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
- {BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
- {BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff},
- {BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00},
- {BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff},
- {BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00},
- {BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff},
- {BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00},
- {BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
- {BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
- {BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
- {BRCMS_SROM_PA1LOB0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0_LC, 0xffff},
- {BRCMS_SROM_PA1LOB1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1_LC, 0xffff},
- {BRCMS_SROM_PA1LOB2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2_LC, 0xffff},
- {BRCMS_SROM_PA1HIB0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0_HC, 0xffff},
- {BRCMS_SROM_PA1HIB1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1_HC, 0xffff},
- {BRCMS_SROM_PA1HIB2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2_HC, 0xffff},
- {BRCMS_SROM_PA1ITSSIT, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0xff00},
- {BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
- {BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
- {BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
- {BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
- {BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
- {BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
- {BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
- {BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
- {BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
- {BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
- {BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
- {BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
- {BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00},
- {BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
- {BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
- {BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
- {BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
- {BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
- SROM4_TXCHAIN_MASK},
- {BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
- SROM4_RXCHAIN_MASK},
- {BRCMS_SROM_ANTSWITCH, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
- SROM4_SWITCH_MASK},
- {BRCMS_SROM_TSSIPOS2G, 0xffffff00, 0, SROM8_FEM2G,
- SROM8_FEM_TSSIPOS_MASK},
- {BRCMS_SROM_EXTPAGAIN2G, 0xffffff00, 0, SROM8_FEM2G,
- SROM8_FEM_EXTPA_GAIN_MASK},
- {BRCMS_SROM_PDETRANGE2G, 0xffffff00, 0, SROM8_FEM2G,
- SROM8_FEM_PDET_RANGE_MASK},
- {BRCMS_SROM_TRISO2G, 0xffffff00, 0, SROM8_FEM2G, SROM8_FEM_TR_ISO_MASK},
- {BRCMS_SROM_ANTSWCTL2G, 0xffffff00, 0, SROM8_FEM2G,
- SROM8_FEM_ANTSWLUT_MASK},
- {BRCMS_SROM_TSSIPOS5G, 0xffffff00, 0, SROM8_FEM5G,
- SROM8_FEM_TSSIPOS_MASK},
- {BRCMS_SROM_EXTPAGAIN5G, 0xffffff00, 0, SROM8_FEM5G,
- SROM8_FEM_EXTPA_GAIN_MASK},
- {BRCMS_SROM_PDETRANGE5G, 0xffffff00, 0, SROM8_FEM5G,
- SROM8_FEM_PDET_RANGE_MASK},
- {BRCMS_SROM_TRISO5G, 0xffffff00, 0, SROM8_FEM5G, SROM8_FEM_TR_ISO_MASK},
- {BRCMS_SROM_ANTSWCTL5G, 0xffffff00, 0, SROM8_FEM5G,
- SROM8_FEM_ANTSWLUT_MASK},
- {BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00},
- {BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
-
- {BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
- {BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
- {BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC,
- 0xffff},
- {BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
- 0x01ff},
- {BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
- 0xfe00},
- {BRCMS_SROM_TEMPSENSE_SLOPE, 0xffffff00, SRFL_PRHEX,
- SROM8_TS_SLP_OPT_CORRX, 0x00ff},
- {BRCMS_SROM_TEMPCORRX, 0xffffff00, SRFL_PRHEX, SROM8_TS_SLP_OPT_CORRX,
- 0xfc00},
- {BRCMS_SROM_TEMPSENSE_OPTION, 0xffffff00, SRFL_PRHEX,
- SROM8_TS_SLP_OPT_CORRX, 0x0300},
- {BRCMS_SROM_FREQOFFSET_CORR, 0xffffff00, SRFL_PRHEX,
- SROM8_FOC_HWIQ_IQSWP, 0x000f},
- {BRCMS_SROM_IQCAL_SWP_DIS, 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP,
- 0x0010},
- {BRCMS_SROM_HW_IQCAL_EN, 0xffffff00, SRFL_PRHEX, SROM8_FOC_HWIQ_IQSWP,
- 0x0020},
- {BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA,
- 0x00ff},
-
- {BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
- {BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_5G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GLPO, 0x00000100, SRFL_MORE, SROM8_5GL_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS2GPO3, 0x00000100, 0, SROM8_2G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS2GPO4, 0x00000100, 0, SROM8_2G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS2GPO5, 0x00000100, 0, SROM8_2G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS2GPO6, 0x00000100, 0, SROM8_2G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS2GPO7, 0x00000100, 0, SROM8_2G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GPO0, 0x00000100, 0, SROM8_5G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GPO1, 0x00000100, 0, SROM8_5G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GPO2, 0x00000100, 0, SROM8_5G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GPO3, 0x00000100, 0, SROM8_5G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GPO4, 0x00000100, 0, SROM8_5G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GPO5, 0x00000100, 0, SROM8_5G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GPO6, 0x00000100, 0, SROM8_5G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GPO7, 0x00000100, 0, SROM8_5G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GLPO0, 0x00000100, 0, SROM8_5GL_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GLPO1, 0x00000100, 0, SROM8_5GL_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GLPO2, 0x00000100, 0, SROM8_5GL_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GLPO3, 0x00000100, 0, SROM8_5GL_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GLPO4, 0x00000100, 0, SROM8_5GL_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GLPO5, 0x00000100, 0, SROM8_5GL_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GLPO6, 0x00000100, 0, SROM8_5GL_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GLPO7, 0x00000100, 0, SROM8_5GL_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GHPO0, 0x00000100, 0, SROM8_5GH_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GHPO1, 0x00000100, 0, SROM8_5GH_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GHPO2, 0x00000100, 0, SROM8_5GH_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GHPO3, 0x00000100, 0, SROM8_5GH_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GHPO4, 0x00000100, 0, SROM8_5GH_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
- {BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff},
- {BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff},
- {BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff},
- {BRCMS_SROM_BWDUPPO, 0x00000100, 0, SROM8_BWDUPPO, 0xffff},
-
- /* power per rate from sromrev 9 */
- {BRCMS_SROM_CCKBW202GPO, 0xfffffe00, 0, SROM9_2GPO_CCKBW20, 0xffff},
- {BRCMS_SROM_CCKBW20UL2GPO, 0xfffffe00, 0, SROM9_2GPO_CCKBW20UL, 0xffff},
- {BRCMS_SROM_LEGOFDMBW202GPO, 0xfffffe00, SRFL_MORE,
- SROM9_2GPO_LOFDMBW20, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_LOFDMBW20 + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW20UL2GPO, 0xfffffe00, SRFL_MORE,
- SROM9_2GPO_LOFDMBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_LOFDMBW20UL + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW205GLPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GLPO_LOFDMBW20, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_LOFDMBW20 + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW20UL5GLPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GLPO_LOFDMBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_LOFDMBW20UL + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW205GMPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GMPO_LOFDMBW20, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_LOFDMBW20 + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW20UL5GMPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GMPO_LOFDMBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_LOFDMBW20UL + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW205GHPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GHPO_LOFDMBW20, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_LOFDMBW20 + 1, 0xffff},
- {BRCMS_SROM_LEGOFDMBW20UL5GHPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GHPO_LOFDMBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_LOFDMBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW202GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW20,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW20 + 1, 0xffff},
- {BRCMS_SROM_MCSBW20UL2GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW20UL,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW402GPO, 0xfffffe00, SRFL_MORE, SROM9_2GPO_MCSBW40,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_2GPO_MCSBW40 + 1, 0xffff},
- {BRCMS_SROM_MCSBW205GLPO, 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW20,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW20 + 1, 0xffff},
- {BRCMS_SROM_MCSBW20UL5GLPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GLPO_MCSBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW405GLPO, 0xfffffe00, SRFL_MORE, SROM9_5GLPO_MCSBW40,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GLPO_MCSBW40 + 1, 0xffff},
- {BRCMS_SROM_MCSBW205GMPO, 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW20,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW20 + 1, 0xffff},
- {BRCMS_SROM_MCSBW20UL5GMPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GMPO_MCSBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW405GMPO, 0xfffffe00, SRFL_MORE, SROM9_5GMPO_MCSBW40,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GMPO_MCSBW40 + 1, 0xffff},
- {BRCMS_SROM_MCSBW205GHPO, 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW20,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW20 + 1, 0xffff},
- {BRCMS_SROM_MCSBW20UL5GHPO, 0xfffffe00, SRFL_MORE,
- SROM9_5GHPO_MCSBW20UL, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW20UL + 1, 0xffff},
- {BRCMS_SROM_MCSBW405GHPO, 0xfffffe00, SRFL_MORE, SROM9_5GHPO_MCSBW40,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM9_5GHPO_MCSBW40 + 1, 0xffff},
- {BRCMS_SROM_MCS32PO, 0xfffffe00, 0, SROM9_PO_MCS32, 0xffff},
- {BRCMS_SROM_LEGOFDM40DUPPO, 0xfffffe00, 0, SROM9_PO_LOFDM40DUP, 0xffff},
-
- {BRCMS_SROM_NULL, 0, 0, 0, 0}
-};
-
-static const struct brcms_sromvar perpath_pci_sromvars[] = {
- {BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_PA2GW0A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA, 0xffff},
- {BRCMS_SROM_PA2GW1A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA + 1, 0xffff},
- {BRCMS_SROM_PA2GW2A0, 0xffffff00, SRFL_PRHEX, SROM8_2G_PA + 2, 0xffff},
- {BRCMS_SROM_MAXP5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GHA0, 0xffffff00, 0, SROM8_5GLH_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GLA0, 0xffffff00, 0, SROM8_5GLH_MAXP, 0xff00},
- {BRCMS_SROM_PA5GW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA, 0xffff},
- {BRCMS_SROM_PA5GW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA + 1, 0xffff},
- {BRCMS_SROM_PA5GW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5G_PA + 2, 0xffff},
- {BRCMS_SROM_PA5GLW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA, 0xffff},
- {BRCMS_SROM_PA5GLW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GLW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5GL_PA + 2,
- 0xffff},
- {BRCMS_SROM_PA5GHW0A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA, 0xffff},
- {BRCMS_SROM_PA5GHW1A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GHW2A0, 0xffffff00, SRFL_PRHEX, SROM8_5GH_PA + 2,
- 0xffff},
- {BRCMS_SROM_NULL, 0, 0, 0, 0}
-};
-
-/* crc table has the same contents for every device instance, so it can be
- * shared between devices. */
-static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
-
-static uint mask_shift(u16 mask)
-{
- uint i;
- for (i = 0; i < (sizeof(mask) << 3); i++) {
- if (mask & (1 << i))
- return i;
- }
- return 0;
-}
-
-static uint mask_width(u16 mask)
-{
- int i;
- for (i = (sizeof(mask) << 3) - 1; i >= 0; i--) {
- if (mask & (1 << i))
- return (uint) (i - mask_shift(mask) + 1);
- }
- return 0;
-}
-
-static inline void le16_to_cpu_buf(u16 *buf, uint nwords)
-{
- while (nwords--)
- *(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords));
-}
-
-static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
-{
- while (nwords--)
- *(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords));
-}
-
-/*
- * convert binary srom data into linked list of srom variable items.
- */
-static int
-_initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
-{
- struct brcms_srom_list_head *entry;
- enum brcms_srom_id id;
- u16 w;
- u32 val = 0;
- const struct brcms_sromvar *srv;
- uint width;
- uint flags;
- u32 sr = (1 << sromrev);
- uint p;
- uint pb = SROM8_PATH0;
- const uint psz = SROM8_PATH1 - SROM8_PATH0;
-
- /* first store the srom revision */
- entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- entry->varid = BRCMS_SROM_REV;
- entry->var_type = BRCMS_SROM_UNUMBER;
- entry->uval = sromrev;
- list_add(&entry->var_list, var_list);
-
- for (srv = pci_sromvars; srv->varid != BRCMS_SROM_NULL; srv++) {
- enum brcms_srom_var_type type;
- u8 ea[ETH_ALEN];
- u8 extra_space = 0;
-
- if ((srv->revmask & sr) == 0)
- continue;
-
- flags = srv->flags;
- id = srv->varid;
-
-		/* This entry is for mfgc only. Don't generate a param for it. */
- if (flags & SRFL_NOVAR)
- continue;
-
- if (flags & SRFL_ETHADDR) {
- /*
- * stored in string format XX:XX:XX:XX:XX:XX (17 chars)
- */
- ea[0] = (srom[srv->off] >> 8) & 0xff;
- ea[1] = srom[srv->off] & 0xff;
- ea[2] = (srom[srv->off + 1] >> 8) & 0xff;
- ea[3] = srom[srv->off + 1] & 0xff;
- ea[4] = (srom[srv->off + 2] >> 8) & 0xff;
- ea[5] = srom[srv->off + 2] & 0xff;
- /* 17 characters + string terminator - union size */
- extra_space = 18 - sizeof(s32);
- type = BRCMS_SROM_STRING;
- } else {
- w = srom[srv->off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
-
- while (srv->flags & SRFL_MORE) {
- srv++;
- if (srv->off == 0)
- continue;
-
- w = srom[srv->off];
-				val += ((w & srv->mask) >>
-				       mask_shift(srv->mask)) << width;
- width += mask_width(srv->mask);
- }
-
- if ((flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
-
- if (flags & SRFL_CCODE) {
- type = BRCMS_SROM_STRING;
- } else if (flags & SRFL_LEDDC) {
-				/* LED Powersave duty cycle has to be scaled:
-				 * oncount into bits 31:24, offcount into bits 15:8
-				 */
- u32 w32 = /* oncount */
- (((val >> 8) & 0xff) << 24) |
- /* offcount */
- (((val & 0xff)) << 8);
- type = BRCMS_SROM_UNUMBER;
- val = w32;
- } else if ((flags & SRFL_PRSIGN)
- && (val & (1 << (width - 1)))) {
- type = BRCMS_SROM_SNUMBER;
- val |= ~0 << width;
- } else
- type = BRCMS_SROM_UNUMBER;
- }
-
- entry = kzalloc(sizeof(struct brcms_srom_list_head) +
- extra_space, GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- entry->varid = id;
- entry->var_type = type;
- if (flags & SRFL_ETHADDR) {
- snprintf(entry->buf, 18, "%pM", ea);
- } else if (flags & SRFL_CCODE) {
- if (val == 0)
- entry->buf[0] = '\0';
- else
- snprintf(entry->buf, 3, "%c%c",
- (val >> 8), (val & 0xff));
- } else {
- entry->uval = val;
- }
-
- list_add(&entry->var_list, var_list);
- }
-
- for (p = 0; p < MAX_PATH_SROM; p++) {
- for (srv = perpath_pci_sromvars;
- srv->varid != BRCMS_SROM_NULL; srv++) {
- if ((srv->revmask & sr) == 0)
- continue;
-
- if (srv->flags & SRFL_NOVAR)
- continue;
-
- w = srom[pb + srv->off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
-
- /* Cheating: no per-path var is more than
- * 1 word */
- if ((srv->flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
-
- entry =
- kzalloc(sizeof(struct brcms_srom_list_head),
- GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- entry->varid = srv->varid+p;
- entry->var_type = BRCMS_SROM_UNUMBER;
- entry->uval = val;
- list_add(&entry->var_list, var_list);
- }
- pb += psz;
- }
- return 0;
-}
-
-/*
- * The crc check is done on a little-endian array, so we need
- * to switch the bytes around before checking crc (and
- * then switch it back).
- */
-static int do_crc_check(u16 *buf, unsigned nwords)
-{
- u8 crc;
-
- cpu_to_le16_buf(buf, nwords);
- crc = crc8(brcms_srom_crc8_table, (void *)buf, nwords << 1, CRC8_INIT_VALUE);
- le16_to_cpu_buf(buf, nwords);
-
- return crc == CRC8_GOOD_VALUE(brcms_srom_crc8_table);
-}
-
-/*
- * Read in and validate sprom.
- * Return 0 on success, nonzero on error.
- */
-static int
-sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
-{
- int err = 0;
- uint i;
- struct bcma_device *core;
- uint sprom_offset;
-
- /* determine core to read */
- if (ai_get_ccrev(sih) < 32) {
- core = ai_findcore(sih, BCMA_CORE_80211, 0);
- sprom_offset = PCI_BAR0_SPROM_OFFSET;
- } else {
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- sprom_offset = CHIPCREGOFFS(sromotp);
- }
-
- /* read the sprom */
- for (i = 0; i < nwords; i++)
- buf[i] = bcma_read16(core, sprom_offset+i*2);
-
- if (buf[0] == 0xffff)
- /*
- * The hardware thinks that an srom that starts with
- * 0xffff is blank, regardless of the rest of the
- * content, so declare it bad.
- */
- return -ENODATA;
-
- if (check_crc && !do_crc_check(buf, nwords))
- err = -EIO;
-
- return err;
-}
-
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
-{
- u8 *otp;
- uint sz = OTP_SZ_MAX / 2; /* size in words */
- int err = 0;
-
- otp = kzalloc(OTP_SZ_MAX, GFP_ATOMIC);
- if (otp == NULL)
- return -ENOMEM;
-
- err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
-
- sz = min_t(uint, sz, nwords);
- memcpy(buf, otp, sz * 2);
-
- kfree(otp);
-
- /* Check CRC */
- if (buf[0] == 0xffff)
- /* The hardware thinks that an srom that starts with 0xffff
- * is blank, regardless of the rest of the content, so declare
- * it bad.
- */
- return -ENODATA;
-
- /* fixup the endianness so crc8 will pass */
- cpu_to_le16_buf(buf, sz);
- if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2,
- CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
- err = -EIO;
- else
- /* now correct the endianness of the byte array */
- le16_to_cpu_buf(buf, sz);
-
- return err;
-}
-
-/*
- * Initialize nonvolatile variable table from sprom.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih)
-{
- u16 *srom;
- u8 sromrev = 0;
- u32 sr;
- int err = 0;
-
- /*
-	 * Apply CRC over the SROM content regardless of whether an SROM is present.
- */
- srom = kmalloc(SROM_MAX, GFP_ATOMIC);
- if (!srom)
- return -ENOMEM;
-
- crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
- if (ai_is_sprom_available(sih)) {
- err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
-
- if (err == 0)
- /* srom read and passed crc */
- /* top word of sprom contains version and crc8 */
- sromrev = srom[SROM4_CRCREV] & 0xff;
- } else {
- /* Use OTP if SPROM not available */
- err = otp_read_pci(sih, srom, SROM4_WORDS);
- if (err == 0)
-			/* OTP only contains SROM rev8/rev9 for now */
- sromrev = srom[SROM4_CRCREV] & 0xff;
- }
-
- if (!err) {
- struct si_info *sii = (struct si_info *)sih;
-
- /* Bitmask for the sromrev */
- sr = 1 << sromrev;
-
- /*
- * srom version check: Current valid versions: 8, 9
- */
- if ((sr & 0x300) == 0) {
- err = -EINVAL;
- goto errout;
- }
-
- INIT_LIST_HEAD(&sii->var_list);
-
- /* parse SROM into name=value pairs. */
- err = _initvars_srom_pci(sromrev, srom, &sii->var_list);
- if (err)
- srom_free_vars(sih);
- }
-
-errout:
- kfree(srom);
- return err;
-}
-
-void srom_free_vars(struct si_pub *sih)
-{
- struct si_info *sii;
- struct brcms_srom_list_head *entry, *next;
-
- sii = (struct si_info *)sih;
- list_for_each_entry_safe(entry, next, &sii->var_list, var_list) {
- list_del(&entry->var_list);
- kfree(entry);
- }
-}
-
-/*
- * Search the name=value vars for a specific one and return its value.
- * Returns NULL if not found.
- */
-char *getvar(struct si_pub *sih, enum brcms_srom_id id)
-{
- struct si_info *sii;
- struct brcms_srom_list_head *entry;
-
- sii = (struct si_info *)sih;
-
- list_for_each_entry(entry, &sii->var_list, var_list)
- if (entry->varid == id)
- return &entry->buf[0];
-
- /* nothing found */
- return NULL;
-}
-
-/*
- * Search the vars for a specific one and return its value as
- * an integer. Returns 0 if not found.
- */
-int getintvar(struct si_pub *sih, enum brcms_srom_id id)
-{
- struct si_info *sii;
- struct brcms_srom_list_head *entry;
- unsigned long res;
-
- sii = (struct si_info *)sih;
-
- list_for_each_entry(entry, &sii->var_list, var_list)
- if (entry->varid == id) {
- if (entry->var_type == BRCMS_SROM_SNUMBER ||
- entry->var_type == BRCMS_SROM_UNUMBER)
- return (int)entry->sval;
- else if (!kstrtoul(&entry->buf[0], 0, &res))
- return (int)res;
- }
-
- return 0;
-}
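For reference, the CRC-8 validation that the removed srom.c applied to both the SPROM and OTP images boils down to one sequence: populate the CRC-8 table once, byte-swap the word buffer to little endian, run crc8() over the whole image (CRC byte included), and compare against the table's "good" value. A minimal sketch, using only helpers named in the removed file (the wrapper function name is hypothetical):

	static bool brcms_srom_image_crc_ok(u16 *srom, uint nwords)
	{
		bool ok;

		/* table contents depend only on the polynomial */
		crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);

		/* crc8() expects a little-endian byte stream */
		cpu_to_le16_buf(srom, nwords);
		ok = crc8(brcms_srom_crc8_table, (u8 *)srom, nwords << 1,
			  CRC8_INIT_VALUE) ==
		     CRC8_GOOD_VALUE(brcms_srom_crc8_table);
		/* restore host byte order for the parser */
		le16_to_cpu_buf(srom, nwords);

		return ok;
	}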
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
deleted file mode 100644
index f2a58f262c99..000000000000
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCM_SROM_H_
-#define _BRCM_SROM_H_
-
-#include "types.h"
-
-/* Prototypes */
-extern int srom_var_init(struct si_pub *sih);
-extern void srom_free_vars(struct si_pub *sih);
-
-extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
- uint byteoff, uint nbytes, u16 *buf, bool check_crc);
-
-#endif /* _BRCM_SROM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
index d8f528eb180c..ed1d1aa71d2d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
@@ -370,9 +370,11 @@ void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc)
void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc)
{
+ struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
+
/* get available rx/tx chains */
- wlc->stf->hw_txchain = (u8) getintvar(wlc->hw->sih, BRCMS_SROM_TXCHAIN);
- wlc->stf->hw_rxchain = (u8) getintvar(wlc->hw->sih, BRCMS_SROM_RXCHAIN);
+ wlc->stf->hw_txchain = sprom->txchain;
+ wlc->stf->hw_rxchain = sprom->rxchain;
/* these parameter are intended to be used for all PHY types */
if (wlc->stf->hw_txchain == 0 || wlc->stf->hw_txchain == 0xf) {
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9cfae0c08707..95aa8e1683ec 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
netif_stop_queue(priv->net_dev);
}
-/* Called by register_netdev() */
-static int ipw2100_net_init(struct net_device *dev)
-{
- struct ipw2100_priv *priv = libipw_priv(dev);
-
- return ipw2100_up(priv, 1);
-}
-
static int ipw2100_wdev_init(struct net_device *dev)
{
struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_stop = ipw2100_close,
.ndo_start_xmit = libipw_xmit,
.ndo_change_mtu = libipw_change_mtu,
- .ndo_init = ipw2100_net_init,
.ndo_tx_timeout = ipw2100_tx_timeout,
.ndo_set_mac_address = ipw2100_set_address,
.ndo_validate_addr = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
printk(KERN_INFO DRV_NAME
": Detected Intel PRO/Wireless 2100 Network Connection\n");
+ err = ipw2100_up(priv, 1);
+ if (err)
+ goto fail;
+
err = ipw2100_wdev_init(dev);
if (err)
goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
* network device we would call ipw2100_up. This introduced a race
* condition with newer hotplug configurations (network was coming
* up and making calls before the device was initialized).
- *
- * If we called ipw2100_up before we registered the device, then the
- * device name wasn't registered. So, we instead use the net_dev->init
- * member to call a function that then just turns and calls ipw2100_up.
- * net_dev->init is called after name allocation but before the
- * notifier chain is called */
+ */
err = register_netdev(dev);
if (err) {
printk(KERN_WARNING DRV_NAME
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 509301a5e7e2..ff5d689e13f3 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
return 0;
}
- if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
key_flags);
spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
il->stations[sta_id].sta.key.key_flags =
STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index cbf2dc18341f..5d4807c2b56d 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4767,14 +4767,12 @@ il_bg_watchdog(unsigned long data)
return;
/* monitor and check for other stuck queues */
- if (il_is_any_associated(il)) {
- for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == il->cmd_queue)
- continue;
- if (il_check_stuck_queue(il, cnt))
- return;
- }
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == il->cmd_queue)
+ continue;
+ if (il_check_stuck_queue(il, cnt))
+ return;
}
mod_timer(&il->watchdog,
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index db6c6e528022..2463c0626438 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -137,11 +137,3 @@ config IWLWIFI_EXPERIMENTAL_MFP
even if the microcode doesn't advertise it.
Say Y only if you want to experiment with MFP.
-
-config IWLWIFI_UCODE16
- bool "support uCode 16.0"
- depends on IWLWIFI
- help
- This option enables support for uCode version 16.0.
-
- Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 406f297a9a56..d615eacbf050 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -18,7 +18,6 @@ iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
-iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 7f793417c787..8133105ac645 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -79,7 +79,7 @@ static const struct iwl_base_params iwl2000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.hd_v2 = true,
};
@@ -97,7 +97,7 @@ static const struct iwl_base_params iwl2030_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.hd_v2 = true,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 381b02cf339c..e5e8ada4aaf6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -35,17 +35,20 @@
#define IWL6000_UCODE_API_MAX 6
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 6
+#define IWL6035_UCODE_API_MAX 6
/* Oldest version we won't warn about */
#define IWL6000_UCODE_API_OK 4
#define IWL6000G2_UCODE_API_OK 5
#define IWL6050_UCODE_API_OK 5
#define IWL6000G2B_UCODE_API_OK 6
+#define IWL6035_UCODE_API_OK 6
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
-#define IWL6000G2_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 5
+#define IWL6035_UCODE_API_MIN 6
/* EEPROM versions */
#define EEPROM_6000_TX_POWER_VERSION (4)
@@ -86,7 +89,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +105,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.chain_noise_scale = 1500,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,7 +121,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
};
static const struct iwl_ht_params iwl6000_ht_params = {
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
IWL_DEVICE_6030,
};
+#define IWL_DEVICE_6035 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6035_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6035_UCODE_API_OK, \
+ .ucode_api_min = IWL6035_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_6030, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .base_params = &iwl6000_g2_base_params, \
+ .bt_params = &iwl6000_bt_params, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true
+
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
- IWL_DEVICE_6030,
+ IWL_DEVICE_6035,
.ht_params = &iwl6000_ht_params,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 01dc44267317..e55ec6c8a920 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <net/mac80211.h>
#include "iwl-dev.h"
#include "iwl-io.h"
@@ -273,9 +274,20 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
return;
}
+	/*
+	 * In situations where BT needs to take over reception while the STA
+	 * still has to respond to the AP's frame(s), reduce the tx power of
+	 * the required response frames. This allows concurrent BT receive
+	 * and WiFi transmit (BT on ANT A, WiFi on ANT B) without the two
+	 * interfering with one another.
+	 *
+	 * The reduced tx power applies to control frames only (ACK/BA/CTS),
+	 * and only when indicated by the BT config command.
+	 */
basic.kill_ack_mask = priv->kill_ack_mask;
basic.kill_cts_mask = priv->kill_cts_mask;
- basic.reduce_txpower = priv->reduced_txpower;
+ if (priv->reduced_txpower)
+ basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR;
basic.valid = priv->bt_valid;
/*
@@ -589,13 +601,31 @@ static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
return need_update;
}
+/*
+ * Upon RSSI changes, send a bt config command with the following changes:
+ * 1. enable/disable "reduced control frames tx power"
+ * 2. update the "kill_ack_mask" and "kill_cts_mask"
+ *
+ * If "reduced tx power" is enabled, the uCode shall
+ * 1. reduce the ACK/BA/CTS rate to 6Mbps
+ * 2. not use duplicate 20/40MHz mode
+ */
static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
struct iwl_bt_uart_msg *uart_msg)
{
bool need_update = false;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ int ave_rssi;
+ ave_rssi = ieee80211_ave_rssi(ctx->vif);
+ if (!ave_rssi) {
+ /* no rssi data, no changes to reduce tx power */
+ IWL_DEBUG_COEX(priv, "no rssi data available\n");
+ return need_update;
+ }
if (!priv->reduced_txpower &&
!iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
+ (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) &&
(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
BT_UART_MSG_FRAME3OBEX_MSK)) &&
!(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
@@ -606,13 +636,14 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
need_update = true;
} else if (priv->reduced_txpower &&
(iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
+ (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) ||
(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
!(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
BT_UART_MSG_FRAME3OBEX_MSK)))) {
/* disable reduced tx power */
priv->reduced_txpower = false;
- priv->bt_valid &= ~IWLAGN_BT_VALID_REDUCED_TX_PWR;
+ priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
need_update = true;
}
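The txpower-mode hunk above adds RSSI-based hysteresis around the existing BT-activity conditions: reduced tx power is enabled only above an enable threshold and disabled again once the average RSSI drops below a lower disable threshold. A stripped-down sketch of just that hysteresis (hypothetical helper; the real code also checks PAN association and the BT uart_msg activity bits, and sets IWLAGN_BT_VALID_REDUCED_TX_PWR in bt_valid):

	static bool reduced_txpower_update(struct iwl_priv *priv, int ave_rssi)
	{
		if (!ave_rssi)
			return false;	/* no RSSI data, leave state alone */

		if (!priv->reduced_txpower &&
		    ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) {
			priv->reduced_txpower = true;
			return true;	/* need to resend the BT config */
		}

		if (priv->reduced_txpower &&
		    ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) {
			priv->reduced_txpower = false;
			return true;
		}

		return false;
	}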
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 51e1a69ffdda..8cebd7c363fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -884,6 +884,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
priv->bt_full_concurrent = full_concurrent;
+ priv->last_bt_traffic_load = priv->bt_traffic_load;
/* Update uCode's rate table. */
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 74fbee627306..0a3aa7c83003 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -61,6 +61,10 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
RXON_FILTER_ACCEPT_GRP_MSK;
break;
+ case NL80211_IFTYPE_MONITOR:
+ ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
+ break;
+
default:
IWL_ERR(priv, "Unsupported interface type %d\n",
ctx->vif->type);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index b31584e87bc7..eb6a8eaf42fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -772,7 +772,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
~IWL_STA_DRIVER_ACTIVE;
priv->stations[i].used &=
~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_bh(&priv->sta_lock);
+ continue;
}
/*
* Rate scaling has already been initialized, send
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
key_flags |= STA_KEY_MULTICAST_MSK;
sta_cmd.key.key_flags = key_flags;
- sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
+ sta_cmd.key.key_offset = keyconf->hw_key_idx;
sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index f2e9f298a947..3366e2e2f00f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -590,11 +590,17 @@ turn_off:
spin_unlock_bh(&priv->sta_lock);
if (test_bit(txq_id, priv->agg_q_alloc)) {
- /* If the transport didn't know that we wanted to start
- * agreggation, don't tell it that we want to stop them
+ /*
+ * If the transport didn't know that we wanted to start
+	 * aggregation, don't tell it that we want to stop it.
+	 * This can happen when we don't get the addBA response on
+	 * time, or we didn't have time to drain the AC queues.
*/
- if (agg_state != IWL_AGG_STARTING)
+ if (agg_state == IWL_AGG_ON)
iwl_trans_tx_agg_disable(priv->trans, txq_id);
+ else
+ IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
+ agg_state);
iwlagn_dealloc_agg_txq(priv, txq_id);
}
@@ -1300,10 +1306,11 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
(u8 *) &ba_resp->sta_addr_lo32,
ba_resp->sta_id);
IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
- "scd_flow = %d, scd_ssn = %d\n",
+ "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
- scd_flow, ba_resp_scd_ssn);
+ scd_flow, ba_resp_scd_ssn, ba_resp->txed,
+ ba_resp->txed_2_done);
/* Mark that the expected block-ack response arrived */
agg->wait_for_ba = false;
@@ -1319,8 +1326,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
*/
ba_resp->txed = ba_resp->txed_2_done;
}
- IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
- ba_resp->txed, ba_resp->txed_2_done);
priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8d7637083fcf..ec36e2b020b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -603,7 +603,7 @@ void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR);
priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
BIT(NL80211_IFTYPE_STATION);
priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 83a6930f3658..9af6a239b384 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1910,6 +1910,8 @@ enum iwl_bt_kill_idx {
IWLAGN_BT_VALID_REDUCED_TX_PWR | \
IWLAGN_BT_VALID_3W_LUT)
+#define IWLAGN_BT_REDUCED_TX_PWR BIT(0)
+
#define IWLAGN_BT_DECISION_LUT_SIZE 12
struct iwl_basic_bt_cmd {
@@ -1923,6 +1925,10 @@ struct iwl_basic_bt_cmd {
u8 bt3_timer_t2_value;
__le16 bt4_reaction_time; /* unused */
__le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE];
+ /*
+	 * bit 0: use reduced tx power for control frames
+	 * bits 1 - 7: reserved
+ */
u8 reduce_txpower;
u8 reserved;
__le16 valid;
@@ -2272,7 +2278,6 @@ struct iwl_ssid_ie {
#define IWL_GOOD_CRC_TH_DISABLED 0
#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
-#define IWL_MAX_SCAN_SIZE 1024
#define IWL_MAX_CMD_SIZE 4096
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index e7c157e5ebeb..7f97dec8534d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2239,6 +2239,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
return count;
}
+#ifdef CONFIG_IWLWIFI_DEBUG
static ssize_t iwl_dbgfs_log_event_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2276,6 +2277,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
return count;
}
+#endif
static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
char __user *user_buf,
@@ -2345,7 +2347,9 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
DEBUGFS_WRITE_FILE_OPS(echo_test);
+#ifdef CONFIG_IWLWIFI_DEBUG
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+#endif
DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
/*
@@ -2405,7 +2409,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
+#endif
if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 3c72bad0ae56..fac67a526a30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -657,17 +657,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
-static int alloc_pci_desc(struct iwl_drv *drv,
- struct iwl_firmware_pieces *pieces,
- enum iwl_ucode_type type)
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+ struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type)
{
int i;
for (i = 0;
i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
i++)
if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
- get_sec(pieces, type, i)))
- return -1;
+ get_sec(pieces, type, i)))
+ return -ENOMEM;
return 0;
}
@@ -825,8 +825,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
- if (alloc_pci_desc(drv, &pieces, i))
- goto err_pci_alloc;
+ if (iwl_alloc_ucode(drv, &pieces, i))
+ goto out_free_fw;
/* Now that we can no longer fail, copy information */
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
- complete(&drv->request_firmware_complete);
drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
if (!drv->op_mode)
goto out_unbind;
+ /*
+ * Complete the firmware request last so that
+ * a driver unbind (stop) doesn't run while we
+ * are doing the start() above.
+ */
+ complete(&drv->request_firmware_complete);
return;
try_again:
@@ -877,7 +882,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto out_unbind;
return;
- err_pci_alloc:
+ out_free_fw:
IWL_ERR(drv, "failed to allocate pci memory\n");
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 50c58911e718..b8e2b223ac36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
* iwl_get_max_txpower_avg - get the highest tx power from all chains.
* find the highest tx power from all chains for the channel
*/
-static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg,
+static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
int element, s8 *max_txpower_in_half_dbm)
{
s8 max_txpower_avg = 0; /* (dBm) */
/* Take the highest tx power from any valid chains */
- if ((cfg->valid_tx_ant & ANT_A) &&
+ if ((priv->hw_params.valid_tx_ant & ANT_A) &&
(enhanced_txpower[element].chain_a_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_a_max;
- if ((cfg->valid_tx_ant & ANT_B) &&
+ if ((priv->hw_params.valid_tx_ant & ANT_B) &&
(enhanced_txpower[element].chain_b_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_b_max;
- if ((cfg->valid_tx_ant & ANT_C) &&
+ if ((priv->hw_params.valid_tx_ant & ANT_C) &&
(enhanced_txpower[element].chain_c_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_c_max;
- if (((cfg->valid_tx_ant == ANT_AB) |
- (cfg->valid_tx_ant == ANT_BC) |
- (cfg->valid_tx_ant == ANT_AC)) &&
+ if (((priv->hw_params.valid_tx_ant == ANT_AB) |
+ (priv->hw_params.valid_tx_ant == ANT_BC) |
+ (priv->hw_params.valid_tx_ant == ANT_AC)) &&
(enhanced_txpower[element].mimo2_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo2_max;
- if ((cfg->valid_tx_ant == ANT_ABC) &&
+ if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
(enhanced_txpower[element].mimo3_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo3_max;
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
((txp->delta_20_in_40 & 0xf0) >> 4),
(txp->delta_20_in_40 & 0x0f));
- max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
+ max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
&max_txp_avg_halfdbm);
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index d33cc9cc7d3f..013680332f07 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -150,6 +150,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_QUEUE_CONTROL |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+ IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_SCAN_WHILE_IDLE;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
@@ -198,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
+#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
priv->trans->ops->wowlan_suspend &&
device_can_wakeup(priv->trans->dev)) {
@@ -216,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->wiphy->wowlan.pattern_max_len =
IWLAGN_WOWLAN_MAX_PATTERN_LEN;
}
+#endif
if (iwlwifi_mod_params.power_save)
hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -223,8 +226,8 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
+ /* we create the 802.11 header and a max-length SSID element */
+ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 34;
/*
* We don't use all queues: 4 and 9 are unused and any
@@ -248,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+ iwl_leds_exit(priv);
return ret;
}
priv->mac80211_registered = 1;
@@ -792,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
switch (op) {
case ADD:
ret = iwlagn_mac_sta_add(hw, vif, sta);
+ if (ret)
+ break;
+ /*
+		 * Clear the in-progress flag; the AP station entry was added,
+		 * but we'll initialize LQ only once we've associated (which
+		 * would also clear the in-progress flag). This is necessary
+		 * in case we never initialize LQ because association fails.
+ */
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[iwl_sta_id(sta)].used &=
+ ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_bh(&priv->sta_lock);
break;
case REMOVE:
ret = iwlagn_mac_sta_remove(hw, vif, sta);
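The max_scan_ie_len change earlier in this file follows from simple frame accounting: the driver builds the 24-byte 802.11 management header itself and now reserves room for a maximum-length SSID element (2-byte IE header plus up to 32 bytes of SSID, i.e. 34 bytes), leaving only the remainder of the probe template for cfg80211-supplied IEs. Roughly:

	/* probe template budget:
	 *   24 bytes  802.11 mgmt header (built by the driver)
	 *   34 bytes  SSID IE: 2-byte header + up to 32-byte SSID
	 * everything left over is available for user-supplied IEs
	 */
	max_scan_ie_len = max_probe_length - 24 - (2 + 32);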
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644
index f166955340fe..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include "iwl-debug.h"
-#include "iwl-dev.h"
-
-#include "iwl-phy-db.h"
-
-#define CHANNEL_NUM_SIZE	4	/* size of the num-of-channels field in calib_ch */
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
-{
- struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
- GFP_KERNEL);
-
- if (!phy_db)
- return phy_db;
-
- phy_db->dev = dev;
-
- /* TODO: add default values of the phy db. */
- return phy_db;
-}
-
-/*
- * get phy db section: returns a pointer to a phy db section specified by
- * type and channel group id.
- */
-static struct iwl_phy_db_entry *
-iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type,
- u16 chg_id)
-{
- if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
- return NULL;
-
- switch (type) {
- case IWL_PHY_DB_CFG:
- return &phy_db->cfg;
- case IWL_PHY_DB_CALIB_NCH:
- return &phy_db->calib_nch;
- case IWL_PHY_DB_CALIB_CH:
- return &phy_db->calib_ch;
- case IWL_PHY_DB_CALIB_CHG_PAPD:
- if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
- return NULL;
- return &phy_db->calib_ch_group_papd[chg_id];
- case IWL_PHY_DB_CALIB_CHG_TXP:
- if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
- return NULL;
- return &phy_db->calib_ch_group_txp[chg_id];
- default:
- return NULL;
- }
- return NULL;
-}
-
-static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type,
- u16 chg_id)
-{
- struct iwl_phy_db_entry *entry =
- iwl_phy_db_get_section(phy_db, type, chg_id);
- if (!entry)
- return;
-
- kfree(entry->data);
- entry->data = NULL;
- entry->size = 0;
-}
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db)
-{
- int i;
-
- if (!phy_db)
- return;
-
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
- for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
-
- kfree(phy_db);
-}
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 *data,
- u16 size, gfp_t alloc_ctx)
-{
- struct iwl_phy_db_entry *entry;
- u16 chg_id = 0;
-
- if (!phy_db)
- return -EINVAL;
-
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
- type == IWL_PHY_DB_CALIB_CHG_TXP)
- chg_id = le16_to_cpup((__le16 *)data);
-
- entry = iwl_phy_db_get_section(phy_db, type, chg_id);
- if (!entry)
- return -EINVAL;
-
- kfree(entry->data);
- entry->data = kmemdup(data, size, alloc_ctx);
- if (!entry->data) {
- entry->size = 0;
- return -ENOMEM;
- }
-
- entry->size = size;
-
- if (type == IWL_PHY_DB_CALIB_CH) {
- phy_db->channel_num = le32_to_cpup((__le32 *)data);
- phy_db->channel_size =
- (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
- }
-
- return 0;
-}
-
-static int is_valid_channel(u16 ch_id)
-{
- if (ch_id <= 14 ||
- (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
- (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
- (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
- return 1;
- return 0;
-}
-
-static u8 ch_id_to_ch_index(u16 ch_id)
-{
- if (WARN_ON(!is_valid_channel(ch_id)))
- return 0xff;
-
- if (ch_id <= 14)
- return ch_id - 1;
- if (ch_id <= 64)
- return (ch_id + 20) / 4;
- if (ch_id <= 140)
- return (ch_id - 12) / 4;
- return (ch_id - 13) / 4;
-}
-
-
-static u16 channel_id_to_papd(u16 ch_id)
-{
- if (WARN_ON(!is_valid_channel(ch_id)))
- return 0xff;
-
- if (1 <= ch_id && ch_id <= 14)
- return 0;
- if (36 <= ch_id && ch_id <= 64)
- return 1;
- if (100 <= ch_id && ch_id <= 140)
- return 2;
- return 3;
-}
-
-static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
-{
- struct iwl_phy_db_chg_txp *txp_chg;
- int i;
- u8 ch_index = ch_id_to_ch_index(ch_id);
- if (ch_index == 0xff)
- return 0xff;
-
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
- txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
- if (!txp_chg)
- return 0xff;
- /*
-		 * Look for the first channel group whose max channel index
-		 * is at or above the wanted channel index.
- */
- if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
- return i;
- }
- return 0xff;
-}
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 **data,
- u16 *size, u16 ch_id)
-{
- struct iwl_phy_db_entry *entry;
- u32 channel_num;
- u32 channel_size;
- u16 ch_group_id = 0;
- u16 index;
-
- if (!phy_db)
- return -EINVAL;
-
- /* find wanted channel group */
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
- ch_group_id = channel_id_to_papd(ch_id);
- else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
- ch_group_id = channel_id_to_txp(phy_db, ch_id);
-
- entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
- if (!entry)
- return -EINVAL;
-
- if (type == IWL_PHY_DB_CALIB_CH) {
- index = ch_id_to_ch_index(ch_id);
- channel_num = phy_db->channel_num;
- channel_size = phy_db->channel_size;
- if (index >= channel_num) {
- IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
- return -EINVAL;
- }
- *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
- *size = channel_size;
- } else {
- *data = entry->data;
- *size = entry->size;
- }
- return 0;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
deleted file mode 100644
index c34c6a9303ab..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#ifndef __IWL_PHYDB_H__
-#define __IWL_PHYDB_H__
-
-#include <linux/types.h>
-
-#define IWL_NUM_PAPD_CH_GROUPS 4
-#define IWL_NUM_TXP_CH_GROUPS 8
-
-struct iwl_phy_db_entry {
- u16 size;
- u8 *data;
-};
-
-struct iwl_shared;
-
-/**
- * struct iwl_phy_db - stores phy configuration and calibration data.
- *
- * @cfg: phy configuration.
- * @calib_nch: non channel specific calibration data.
- * @calib_ch: channel specific calibration data.
- * @calib_ch_group_papd: calibration data related to papd channel group.
- * @calib_ch_group_txp: calibration data related to tx power channel group.
- */
-struct iwl_phy_db {
- struct iwl_phy_db_entry cfg;
- struct iwl_phy_db_entry calib_nch;
- struct iwl_phy_db_entry calib_ch;
- struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
- struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
-
- u32 channel_num;
- u32 channel_size;
-
- /* for an access to the logger */
- struct device *dev;
-};
-
-enum iwl_phy_db_section_type {
- IWL_PHY_DB_CFG = 1,
- IWL_PHY_DB_CALIB_NCH,
- IWL_PHY_DB_CALIB_CH,
- IWL_PHY_DB_CALIB_CHG_PAPD,
- IWL_PHY_DB_CALIB_CHG_TXP,
- IWL_PHY_DB_MAX
-};
-
-/* for parsing of tx power channel group data that comes from the firmware */
-struct iwl_phy_db_chg_txp {
- __le32 space;
- __le16 max_channel_idx;
-} __packed;
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db);
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 *data,
- u16 size, gfp_t alloc_ctx);
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 **data,
- u16 *size, u16 ch_id);
-
-#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 8352265dbc4b..544ddf17f5bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -253,6 +253,8 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
skip, period);
+	/* The power level here is 0-4 (used as an array index), but the
+	 * user expects to see 1-5 (according to the spec). */
IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}
@@ -308,10 +310,12 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
priv->power_data.debug_sleep_level_override,
dtimper);
else {
+		/* Note that the user parameter is 1-5 (according to the spec),
+		 * but we pass 0-4 because it acts as an array index. */
if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 &&
- iwlwifi_mod_params.power_level <= IWL_POWER_INDEX_5)
+ iwlwifi_mod_params.power_level <= IWL_POWER_NUM)
iwl_static_sleep_cmd(priv, cmd,
- iwlwifi_mod_params.power_level, dtimper);
+ iwlwifi_mod_params.power_level - 1, dtimper);
else
iwl_static_sleep_cmd(priv, cmd,
IWL_POWER_INDEX_1, dtimper);
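The off-by-one handling that both comments above describe can be summed up in one conversion: the module parameter is given as 1-5 (per the spec), while the driver's sleep tables are indexed 0-4. A hypothetical helper illustrating the mapping the hunk now applies:

	/* user-visible power level (1..5) -> sleep-table index (0..4) */
	static int iwl_power_level_to_index(int user_level)
	{
		return user_level - 1;
	}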
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 3b1069290fa9..dfd54662e3e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -224,6 +224,7 @@
#define SCD_TXFACT (SCD_BASE + 0x10)
#define SCD_ACTIVE (SCD_BASE + 0x14)
#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
+#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
#define SCD_AGGR_SEL (SCD_BASE + 0x248)
#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a8437a6bc18e..031d8e21f82f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -52,6 +52,7 @@
#define IWL_PASSIVE_DWELL_TIME_52 (10)
#define IWL_PASSIVE_DWELL_BASE (100)
#define IWL_CHANNEL_TUNE_TIME 5
+#define MAX_SCAN_CHANNEL 50
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
@@ -616,7 +617,8 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
*/
static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
- const u8 *ies, int ie_len, int left)
+ const u8 *ies, int ie_len, const u8 *ssid,
+ u8 ssid_len, int left)
{
int len = 0;
u8 *pos = NULL;
@@ -638,14 +640,18 @@ static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
/* ...next IE... */
pos = &frame->u.probe_req.variable[0];
- /* fill in our indirect SSID IE */
- left -= 2;
+ /* fill in our SSID IE */
+ left -= ssid_len + 2;
if (left < 0)
return 0;
*pos++ = WLAN_EID_SSID;
- *pos++ = 0;
+ *pos++ = ssid_len;
+ if (ssid && ssid_len) {
+ memcpy(pos, ssid, ssid_len);
+ pos += ssid_len;
+ }
- len += 2;
+ len += ssid_len + 2;
if (WARN_ON(left < ie_len))
return len;
@@ -679,6 +685,15 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u8 active_chains;
u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
int ret;
+ int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
+ MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
+ priv->fw->ucode_capa.max_probe_length;
+ const u8 *ssid = NULL;
+ u8 ssid_len = 0;
+
+ if (WARN_ON_ONCE(priv->scan_request &&
+ priv->scan_request->n_channels > MAX_SCAN_CHANNEL))
+ return -EINVAL;
lockdep_assert_held(&priv->mutex);
@@ -686,8 +701,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
ctx = iwl_rxon_ctx_from_vif(vif);
if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
+ priv->scan_cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
if (!priv->scan_cmd) {
IWL_DEBUG_SCAN(priv,
"fail to allocate memory for scan\n");
@@ -695,7 +709,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
}
}
scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
+ memset(scan, 0, scan_cmd_size);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
@@ -746,10 +760,18 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
+ /*
+ * The highest priority SSID is inserted into the
+ * probe request template.
+ */
+ ssid_len = priv->scan_request->ssids[0].ssid_len;
+ ssid = priv->scan_request->ssids[0].ssid;
+
+ /*
+ * Invert the order of ssids, the firmware will invert
+ * it back.
+ */
+ for (i = priv->scan_request->n_ssids - 1; i >= 1; i--) {
scan->direct_scan[p].id = WLAN_EID_SSID;
scan->direct_scan[p].len =
priv->scan_request->ssids[i].ssid_len;
@@ -883,7 +905,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
vif->addr,
priv->scan_request->ie,
priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ ssid, ssid_len,
+ scan_cmd_size - sizeof(*scan));
break;
case IWL_SCAN_RADIO_RESET:
case IWL_SCAN_ROC:
@@ -891,7 +914,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
cmd_len = iwl_fill_probe_req(
(struct ieee80211_mgmt *)scan->data,
iwl_bcast_addr, NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ NULL, 0,
+ scan_cmd_size - sizeof(*scan));
break;
default:
BUG();
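
The iwl_fill_probe_req() change above writes a real SSID into the probe request template instead of the wildcard IE. A rough standalone sketch of that SSID-IE fill with the same bounds check (buffer layout simplified; WLAN_EID_SSID is 0 in 802.11):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WLAN_EID_SSID 0

/* Writes an SSID information element (ID, length, payload) into buf,
 * returning the number of bytes used, or 0 if it would not fit. */
static int fill_ssid_ie(uint8_t *buf, int left,
			const uint8_t *ssid, uint8_t ssid_len)
{
	if (left < ssid_len + 2)
		return 0;

	buf[0] = WLAN_EID_SSID;
	buf[1] = ssid_len;
	if (ssid && ssid_len)
		memcpy(&buf[2], ssid, ssid_len);

	return ssid_len + 2;
}

int main(void)
{
	uint8_t frame[64];
	int used = fill_ssid_ie(frame, sizeof(frame),
				(const uint8_t *)"example-ap", 10);

	printf("SSID IE uses %d bytes (a wildcard IE would use 2)\n", used);
	return 0;
}
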
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 6213c05a4b52..e959207c630a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -347,7 +347,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int index, enum dma_data_direction dma_dir);
+ enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 21a8a672fbb2..a8750238ee09 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -204,33 +204,39 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
for (i = 1; i < num_tbs; i++)
dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), dma_dir);
+
+ tfd->num_tbs = 0;
}
/**
* iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
* @txq - tx queue
- * @index - the index of the TFD to be freed
- *@dma_dir - the direction of the DMA mapping
+ * @dma_dir - the direction of the DMA mapping
*
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int index, enum dma_data_direction dma_dir)
+ enum dma_data_direction dma_dir)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
+ /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+ int rd_ptr = txq->q.read_ptr;
+ int idx = get_cmd_index(&txq->q, rd_ptr);
+
lockdep_assert_held(&txq->lock);
- iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
- &tfd_tmp[index], dma_dir);
+ /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+ iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
+ &tfd_tmp[rd_ptr], dma_dir);
/* free SKB */
if (txq->entries) {
struct sk_buff *skb;
- skb = txq->entries[index].skb;
+ skb = txq->entries[idx].skb;
/* Can be called from irqs-disabled context
* If skb is not NULL, it means that the whole queue is being
@@ -238,7 +244,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
*/
if (skb) {
iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[index].skb = NULL;
+ txq->entries[idx].skb = NULL;
}
}
}
@@ -973,7 +979,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
- iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+ iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
freed++;
}
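
The iwlagn_txq_free_tfd() change above keeps two indexes: rd_ptr addresses the full hardware TFD ring, while idx addresses the smaller entries[] window. A tiny ring-buffer sketch of that split (sizes and the power-of-two masking are assumptions modelled on get_cmd_index, not the exact driver code):

#include <stdio.h>

#define N_BD      256	/* hardware descriptors in the ring */
#define N_WINDOW   64	/* software entries actually allocated */

/* Map a ring position (0..N_BD-1) onto the smaller entries[] array. */
static int window_index(int read_ptr)
{
	return read_ptr & (N_WINDOW - 1);
}

int main(void)
{
	int read_ptr = 200;			/* valid for the TFD ring... */
	int idx = window_index(read_ptr);	/* ...but entries[] needs this */

	printf("tfd[%d] pairs with entries[%d]\n", read_ptr, idx);
	return 0;
}
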
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 2e57161854b9..79c6b91417f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -435,9 +435,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
- /* The read_ptr needs to bound by q->n_window */
- iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
- dma_dir);
+ iwlagn_txq_free_tfd(trans, txq, dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
spin_unlock_bh(&txq->lock);
@@ -1060,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
+ /* The chain extension of the SCD doesn't work well. This feature is
+ * enabled by default by the HW, so we need to disable it manually.
+ */
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 03c0c6b1372c..a0b7cfd34685 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -746,6 +746,11 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
hwsim_check_sta_magic(txi->control.sta);
ieee80211_tx_info_clear_status(txi);
+
+ /* frame was transmitted at most favorable rate at first attempt */
+ txi->control.rates[0].count = 1;
+ txi->control.rates[1].idx = -1;
+
if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
txi->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(hw, skb);
@@ -1550,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
hdr = (struct ieee80211_hdr *) skb->data;
mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
}
+ txi->flags |= IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(data2->hw, skb);
return 0;
@@ -1716,6 +1722,24 @@ static void hwsim_exit_netlink(void)
"unregister family %i\n", ret);
}
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb = {
+ .limits = hwsim_if_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+};
+
static int __init init_mac80211_hwsim(void)
{
int i, err = 0;
@@ -1777,6 +1801,9 @@ static int __init init_mac80211_hwsim(void)
hw->wiphy->n_addresses = 2;
hw->wiphy->addresses = data->addresses;
+ hw->wiphy->iface_combinations = &hwsim_if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
if (fake_hw_scan) {
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 9c44088054dd..900ee129e825 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
else
last_seq = priv->rx_seq[tid];
- if (last_seq >= new_node->start_win)
+ if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+ last_seq >= new_node->start_win)
new_node->start_win = last_seq + 1;
new_node->win_size = win_size;
@@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
- memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+ mwifiex_reset_11n_rx_seq_num(priv);
}
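
The new helper fills rx_seq with 0xff bytes so that every 16-bit element reads back as the MWIFIEX_DEF_11N_RX_SEQ_NUM sentinel. A minimal standalone sketch of why that works and how the guard in mwifiex_11n_create_rx_reorder_tbl() uses it (structures simplified):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define DEF_RX_SEQ_NUM 0xffff	/* stands in for MWIFIEX_DEF_11N_RX_SEQ_NUM */
#define MAX_TID 8

int main(void)
{
	uint16_t rx_seq[MAX_TID];

	/* Every byte 0xff => every u16 element reads back as 0xffff. */
	memset(rx_seq, 0xff, sizeof(rx_seq));

	uint16_t last_seq = rx_seq[0];
	uint16_t start_win = 10;

	/* Only trust last_seq if a sequence number was really recorded. */
	if (last_seq != DEF_RX_SEQ_NUM && last_seq >= start_win)
		start_win = last_seq + 1;

	printf("start_win stays %u because last_seq is unset (0x%04x)\n",
	       start_win, last_seq);
	return 0;
}
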
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index f1bffebabc60..6c9815a0f5d8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -37,6 +37,13 @@
#define ADDBA_RSP_STATUS_ACCEPT 0
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+ memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
u16 seqNum,
u16 tid, u8 *ta,
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 5c1a46bf1e11..3f66ebb0a630 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -29,6 +29,8 @@ mwifiex-y += scan.o
mwifiex-y += join.o
mwifiex-y += sta_ioctl.o
mwifiex-y += sta_cmd.o
+mwifiex-y += uap_cmd.o
+mwifiex-y += ie.o
mwifiex-y += sta_cmdresp.o
mwifiex-y += sta_event.o
mwifiex-y += sta_tx.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index c78ea873a63a..5c7fd185373c 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -20,6 +20,23 @@
#include "cfg80211.h"
#include "main.h"
+static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
+ {
+ .max = 1, .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1, .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+ .limits = mwifiex_ap_sta_limits,
+ .num_different_channels = 1,
+ .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+ .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+ .beacon_int_infra_match = true,
+};
+
/*
* This function maps the nl802.11 channel type into driver channel type.
*
@@ -67,7 +84,7 @@ mwifiex_is_alg_wep(u32 cipher)
/*
* This function retrieves the private structure from kernel wiphy structure.
*/
-static void *mwifiex_cfg80211_get_priv(struct wiphy *wiphy)
+static void *mwifiex_cfg80211_get_adapter(struct wiphy *wiphy)
{
return (void *) (*(unsigned long *) wiphy_priv(wiphy));
}
@@ -80,8 +97,10 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
- if (mwifiex_set_encode(priv, NULL, 0, key_index, 1)) {
+ if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) {
wiphy_err(wiphy, "deleting the crypto keys\n");
return -EFAULT;
}
@@ -98,7 +117,8 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
int mbm)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
struct mwifiex_power_cfg power_cfg;
int dbm = MBM_TO_DBM(mbm);
@@ -109,6 +129,8 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
power_cfg.is_power_auto = 1;
}
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+
return mwifiex_set_tx_power(priv, &power_cfg);
}
@@ -148,7 +170,7 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
if (!priv->sec_info.wep_enabled)
return 0;
- if (mwifiex_set_encode(priv, NULL, 0, key_index, 0)) {
+ if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
wiphy_err(wiphy, "set default Tx key index\n");
return -EFAULT;
}
@@ -165,9 +187,11 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
struct key_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
if (mwifiex_set_encode(priv, params->key, params->key_len,
- key_index, 0)) {
+ key_index, peer_mac, 0)) {
wiphy_err(wiphy, "crypto keys added\n");
return -EFAULT;
}
@@ -192,13 +216,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
struct mwifiex_802_11d_domain_reg *domain_info = &adapter->domain_reg;
/* Set country code */
- domain_info->country_code[0] = priv->country_code[0];
- domain_info->country_code[1] = priv->country_code[1];
+ domain_info->country_code[0] = adapter->country_code[0];
+ domain_info->country_code[1] = adapter->country_code[1];
domain_info->country_code[2] = ' ';
band = mwifiex_band_to_radio_type(adapter->config_bands);
@@ -250,6 +274,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
domain_info->no_of_triplet = no_of_triplet;
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
HostCmd_ACT_GEN_SET, 0, NULL)) {
wiphy_err(wiphy, "11D: setting domain info in FW\n");
@@ -272,12 +298,12 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
static int mwifiex_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
- wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for domain"
- " %c%c\n", request->alpha2[0], request->alpha2[1]);
+ wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
+ request->alpha2[0], request->alpha2[1]);
- memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
+ memcpy(adapter->country_code, request->alpha2, sizeof(request->alpha2));
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
@@ -361,33 +387,10 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
if (mwifiex_bss_set_channel(priv, &cfp))
return -EFAULT;
- return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
-}
-
-/*
- * CFG802.11 operation handler to set channel.
- *
- * This function can only be used when station is not connected.
- */
-static int
-mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- struct mwifiex_private *priv;
-
- if (dev)
- priv = mwifiex_netdev_get_priv(dev);
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
+ return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
else
- priv = mwifiex_cfg80211_get_priv(wiphy);
-
- if (priv->media_connected) {
- wiphy_err(wiphy, "This setting is valid only when station "
- "is not connected\n");
- return -EINVAL;
- }
-
- return mwifiex_set_rf_channel(priv, chan, channel_type);
+ return mwifiex_uap_set_channel(priv, cfp.channel);
}
/*
@@ -399,18 +402,13 @@ mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
static int
mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
{
- int ret;
-
if (frag_thr < MWIFIEX_FRAG_MIN_VALUE ||
frag_thr > MWIFIEX_FRAG_MAX_VALUE)
- return -EINVAL;
+ frag_thr = MWIFIEX_FRAG_MAX_VALUE;
- /* Send request to firmware */
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
- &frag_thr);
-
- return ret;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
+ &frag_thr);
}
/*
@@ -439,19 +437,85 @@ mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
static int
mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- int ret = 0;
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
+ struct mwifiex_uap_bss_param *bss_cfg;
+ int ret, bss_started, i;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+
+ switch (priv->bss_role) {
+ case MWIFIEX_BSS_ROLE_UAP:
+ bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param),
+ GFP_KERNEL);
+ if (!bss_cfg)
+ return -ENOMEM;
+
+ mwifiex_set_sys_config_invalid_data(bss_cfg);
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD)
+ bss_cfg->rts_threshold = wiphy->rts_threshold;
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
+ bss_cfg->frag_threshold = wiphy->frag_threshold;
+ if (changed & WIPHY_PARAM_RETRY_LONG)
+ bss_cfg->retry_limit = wiphy->retry_long;
+
+ bss_started = priv->bss_started;
+
+ ret = mwifiex_send_cmd_sync(priv,
+ HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0,
+ NULL);
+ if (ret) {
+ wiphy_err(wiphy, "Failed to stop the BSS\n");
+ kfree(bss_cfg);
+ return ret;
+ }
- if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- ret = mwifiex_set_rts(priv, wiphy->rts_threshold);
- if (ret)
- return ret;
- }
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg);
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
- ret = mwifiex_set_frag(priv, wiphy->frag_threshold);
+ kfree(bss_cfg);
- return ret;
+ if (ret) {
+ wiphy_err(wiphy, "Failed to set bss config\n");
+ return ret;
+ }
+
+ if (!bss_started)
+ break;
+
+ ret = mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0,
+ NULL);
+ if (ret) {
+ wiphy_err(wiphy, "Failed to start BSS\n");
+ return ret;
+ }
+
+ break;
+ case MWIFIEX_BSS_ROLE_STA:
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ ret = mwifiex_set_rts(priv,
+ wiphy->rts_threshold);
+ if (ret)
+ return ret;
+ }
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ ret = mwifiex_set_frag(priv,
+ wiphy->frag_threshold);
+ if (ret)
+ return ret;
+ }
+ break;
+ }
+ }
+
+ return 0;
}
/*
@@ -466,31 +530,59 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
int ret;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- if (priv->bss_mode == type) {
- wiphy_warn(wiphy, "already set to required type\n");
- return 0;
- }
-
- priv->bss_mode = type;
-
- switch (type) {
+ switch (dev->ieee80211_ptr->iftype) {
case NL80211_IFTYPE_ADHOC:
- dev->ieee80211_ptr->iftype = NL80211_IFTYPE_ADHOC;
- wiphy_dbg(wiphy, "info: setting interface type to adhoc\n");
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name);
+ case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
+ return 0;
+ case NL80211_IFTYPE_AP:
+ default:
+ wiphy_err(wiphy, "%s: changing to %d not supported\n",
+ dev->name, type);
+ return -EOPNOTSUPP;
+ }
break;
case NL80211_IFTYPE_STATION:
- dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
- wiphy_dbg(wiphy, "info: setting interface type to managed\n");
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
+ case NL80211_IFTYPE_STATION: /* This shouldn't happen */
+ return 0;
+ case NL80211_IFTYPE_AP:
+ default:
+ wiphy_err(wiphy, "%s: changing to %d not supported\n",
+ dev->name, type);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name);
+ case NL80211_IFTYPE_AP: /* This shouldn't happen */
+ return 0;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ default:
+ wiphy_err(wiphy, "%s: changing to %d not supported\n",
+ dev->name, type);
+ return -EOPNOTSUPP;
+ }
break;
- case NL80211_IFTYPE_UNSPECIFIED:
- dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
- wiphy_dbg(wiphy, "info: setting interface type to auto\n");
- return 0;
default:
- wiphy_err(wiphy, "unknown interface type: %d\n", type);
- return -EINVAL;
+ wiphy_err(wiphy, "%s: unknown iftype: %d\n",
+ dev->name, dev->ieee80211_ptr->iftype);
+ return -EOPNOTSUPP;
}
+ dev->ieee80211_ptr->iftype = type;
+ priv->bss_mode = type;
mwifiex_deauthenticate(priv, NULL);
priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
@@ -804,6 +896,104 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
return 0;
}
+/* cfg80211 operation handler for stop ap.
+ * Function stops BSS running at uAP interface.
+ */
+static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (mwifiex_del_mgmt_ies(priv))
+ wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL)) {
+ wiphy_err(wiphy, "Failed to stop the BSS\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* cfg80211 operation handler for start_ap.
+ * Function sets beacon period, DTIM period, SSID and security into
+ * AP config structure.
+ * AP is configured with these settings and BSS is started.
+ */
+static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ap_settings *params)
+{
+ struct mwifiex_uap_bss_param *bss_cfg;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP)
+ return -1;
+ if (mwifiex_set_mgmt_ies(priv, params))
+ return -1;
+
+ bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
+ if (!bss_cfg)
+ return -ENOMEM;
+
+ mwifiex_set_sys_config_invalid_data(bss_cfg);
+
+ if (params->beacon_interval)
+ bss_cfg->beacon_period = params->beacon_interval;
+ if (params->dtim_period)
+ bss_cfg->dtim_period = params->dtim_period;
+
+ if (params->ssid && params->ssid_len) {
+ memcpy(bss_cfg->ssid.ssid, params->ssid, params->ssid_len);
+ bss_cfg->ssid.ssid_len = params->ssid_len;
+ }
+
+ switch (params->hidden_ssid) {
+ case NL80211_HIDDEN_SSID_NOT_IN_USE:
+ bss_cfg->bcast_ssid_ctl = 1;
+ break;
+ case NL80211_HIDDEN_SSID_ZERO_LEN:
+ bss_cfg->bcast_ssid_ctl = 0;
+ break;
+ case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+ /* firmware doesn't support this type of hidden SSID */
+ default:
+ kfree(bss_cfg);
+ return -EINVAL;
+ }
+
+ if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
+ kfree(bss_cfg);
+ wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
+ return -1;
+ }
+
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL)) {
+ wiphy_err(wiphy, "Failed to stop the BSS\n");
+ kfree(bss_cfg);
+ return -1;
+ }
+
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg)) {
+ wiphy_err(wiphy, "Failed to set the SSID\n");
+ kfree(bss_cfg);
+ return -1;
+ }
+
+ kfree(bss_cfg);
+
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_BSS_START,
+ HostCmd_ACT_GEN_SET, 0, NULL)) {
+ wiphy_err(wiphy, "Failed to start the BSS\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* CFG802.11 operation handler for disconnection request.
*
@@ -923,7 +1113,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
priv->wep_key_curr_index = 0;
priv->sec_info.encryption_mode = 0;
priv->sec_info.is_authtype_auto = 0;
- ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);
+ ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
/* "privacy" is set only for ad-hoc mode */
@@ -971,7 +1161,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
" with key len %d\n", sme->key_len);
priv->wep_key_curr_index = sme->key_idx;
ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
- sme->key_idx, 0);
+ sme->key_idx, NULL, 0);
}
}
done:
@@ -1050,6 +1240,11 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
goto done;
}
+ if (priv->bss_mode == NL80211_IFTYPE_AP) {
+ wiphy_err(wiphy, "skip association request for AP interface\n");
+ goto done;
+ }
+
wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
(char *) sme->ssid, sme->bssid);
@@ -1283,28 +1478,34 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
u32 *flags,
struct vif_params *params)
{
- struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- struct mwifiex_adapter *adapter;
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
struct net_device *dev;
void *mdev_priv;
+ struct wireless_dev *wdev;
- if (!priv)
- return NULL;
-
- adapter = priv->adapter;
if (!adapter)
- return NULL;
+ return ERR_PTR(-EFAULT);
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
+ priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
if (priv->bss_mode) {
- wiphy_err(wiphy, "cannot create multiple"
- " station/adhoc interfaces\n");
- return NULL;
+ wiphy_err(wiphy,
+ "cannot create multiple sta/adhoc ifaces\n");
+ return ERR_PTR(-EINVAL);
}
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ wdev->wiphy = wiphy;
+ priv->wdev = wdev;
+ wdev->iftype = NL80211_IFTYPE_STATION;
+
if (type == NL80211_IFTYPE_UNSPECIFIED)
priv->bss_mode = NL80211_IFTYPE_STATION;
else
@@ -1312,23 +1513,58 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_type = MWIFIEX_BSS_TYPE_STA;
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = 0;
+ priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
priv->bss_num = 0;
break;
+ case NL80211_IFTYPE_AP:
+ priv = adapter->priv[MWIFIEX_BSS_TYPE_UAP];
+
+ if (priv->bss_mode) {
+ wiphy_err(wiphy, "Can't create multiple AP interfaces");
+ return ERR_PTR(-EINVAL);
+ }
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev)
+ return ERR_PTR(-ENOMEM);
+
+ priv->wdev = wdev;
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_AP;
+
+ priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
+ priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
+ priv->bss_priority = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_started = 0;
+ priv->bss_num = 0;
+ priv->bss_mode = type;
+
+ break;
default:
wiphy_err(wiphy, "type not supported\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
ether_setup, 1);
if (!dev) {
wiphy_err(wiphy, "no memory available for netdevice\n");
- goto error;
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(-ENOMEM);
}
+ mwifiex_init_priv_params(priv, dev);
+ priv->netdev = dev;
+
+ mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
+
+ if (adapter->config_bands & BAND_A)
+ mwifiex_setup_ht_caps(
+ &wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
+
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = priv->wdev;
dev->ieee80211_ptr->iftype = priv->bss_mode;
@@ -1343,15 +1579,14 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mdev_priv = netdev_priv(dev);
*((unsigned long *) mdev_priv) = (unsigned long) priv;
- priv->netdev = dev;
- mwifiex_init_priv_params(priv, dev);
-
SET_NETDEV_DEV(dev, adapter->dev);
/* Register network device */
if (register_netdevice(dev)) {
wiphy_err(wiphy, "cannot register virtual network device\n");
- goto error;
+ free_netdev(dev);
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(-EFAULT);
}
sema_init(&priv->async_sem, 1);
@@ -1363,12 +1598,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mwifiex_dev_debugfs_init(priv);
#endif
return dev;
-error:
- if (dev && (dev->reg_state == NETREG_UNREGISTERED))
- free_netdev(dev);
- priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
-
- return NULL;
}
EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
@@ -1417,7 +1646,6 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.get_station = mwifiex_cfg80211_get_station,
.dump_station = mwifiex_cfg80211_dump_station,
.set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
- .set_channel = mwifiex_cfg80211_set_channel,
.join_ibss = mwifiex_cfg80211_join_ibss,
.leave_ibss = mwifiex_cfg80211_leave_ibss,
.add_key = mwifiex_cfg80211_add_key,
@@ -1426,6 +1654,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
.set_tx_power = mwifiex_cfg80211_set_tx_power,
.set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
+ .start_ap = mwifiex_cfg80211_start_ap,
+ .stop_ap = mwifiex_cfg80211_stop_ap,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
};
@@ -1436,82 +1666,67 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
* default parameters and handler function pointers, and finally
* registers the device.
*/
-int mwifiex_register_cfg80211(struct mwifiex_private *priv)
+
+int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
{
int ret;
void *wdev_priv;
- struct wireless_dev *wdev;
- struct ieee80211_sta_ht_cap *ht_info;
+ struct wiphy *wiphy;
+ struct mwifiex_private *priv = adapter->priv[MWIFIEX_BSS_TYPE_STA];
u8 *country_code;
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- dev_err(priv->adapter->dev, "%s: allocating wireless device\n",
- __func__);
- return -ENOMEM;
- }
- wdev->wiphy =
- wiphy_new(&mwifiex_cfg80211_ops,
- sizeof(struct mwifiex_private *));
- if (!wdev->wiphy) {
- kfree(wdev);
+ /* create a new wiphy for use with cfg80211 */
+ wiphy = wiphy_new(&mwifiex_cfg80211_ops,
+ sizeof(struct mwifiex_adapter *));
+ if (!wiphy) {
+ dev_err(adapter->dev, "%s: creating new wiphy\n", __func__);
return -ENOMEM;
}
- wdev->iftype = NL80211_IFTYPE_STATION;
- wdev->wiphy->max_scan_ssids = 10;
- wdev->wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
-
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
- ht_info = &wdev->wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap;
- mwifiex_setup_ht_caps(ht_info, priv);
-
- if (priv->adapter->config_bands & BAND_A) {
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
- ht_info = &wdev->wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap;
- mwifiex_setup_ht_caps(ht_info, priv);
- } else {
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
- }
+ wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
+ wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+ if (adapter->config_bands & BAND_A)
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+ else
+ wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+ wiphy->n_iface_combinations = 1;
/* Initialize cipher suits */
- wdev->wiphy->cipher_suites = mwifiex_cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
+ wiphy->cipher_suites = mwifiex_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
- memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_CUSTOM_REGULATORY;
/* Reserve space for mwifiex specific private data for BSS */
- wdev->wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
-
- wdev->wiphy->reg_notifier = mwifiex_reg_notifier;
+ wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
- /* Set struct mwifiex_private pointer in wiphy_priv */
- wdev_priv = wiphy_priv(wdev->wiphy);
+ wiphy->reg_notifier = mwifiex_reg_notifier;
- *(unsigned long *) wdev_priv = (unsigned long) priv;
+ /* Set struct mwifiex_adapter pointer in wiphy_priv */
+ wdev_priv = wiphy_priv(wiphy);
+ *(unsigned long *)wdev_priv = (unsigned long)adapter;
- set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
+ set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev);
- ret = wiphy_register(wdev->wiphy);
+ ret = wiphy_register(wiphy);
if (ret < 0) {
- dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
- __func__);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
+ dev_err(adapter->dev,
+ "%s: wiphy_register failed: %d\n", __func__, ret);
+ wiphy_free(wiphy);
return ret;
- } else {
- dev_dbg(priv->adapter->dev,
- "info: successfully registered wiphy device\n");
}
-
country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
- if (country_code && regulatory_hint(wdev->wiphy, country_code))
- dev_err(priv->adapter->dev,
- "%s: regulatory_hint failed\n", __func__);
-
- priv->wdev = wdev;
+ if (country_code && regulatory_hint(wiphy, country_code))
+ dev_err(adapter->dev, "regulatory_hint() failed\n");
+ adapter->wiphy = wiphy;
return ret;
}
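
For the uAP role, the reworked set_wiphy_params handler above applies wiphy changes as a stop/configure/restart sequence. A minimal sketch of that ordering with stubbed-out firmware commands (function names are hypothetical, not the driver's):

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for HostCmd_CMD_UAP_BSS_STOP / UAP_SYS_CONFIG / UAP_BSS_START. */
static int uap_bss_stop(void)      { puts("BSS_STOP");  return 0; }
static int uap_sys_config(int rts) { printf("SYS_CONFIG rts=%d\n", rts); return 0; }
static int uap_bss_start(void)     { puts("BSS_START"); return 0; }

/* Mirror of the driver's order: stop the BSS, push the new config, and
 * only restart it if it was running to begin with. */
static int update_uap_params(bool bss_started, int rts_threshold)
{
	int ret;

	ret = uap_bss_stop();
	if (ret)
		return ret;

	ret = uap_sys_config(rts_threshold);
	if (ret)
		return ret;

	if (!bss_started)
		return 0;

	return uap_bss_start();
}

int main(void)
{
	update_uap_params(true, 2347);
	return 0;
}
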
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index 76c76c60438b..c5848934f111 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -24,6 +24,6 @@
#include "main.h"
-int mwifiex_register_cfg80211(struct mwifiex_private *);
+int mwifiex_register_cfg80211(struct mwifiex_adapter *);
#endif
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1710beffb93a..51e023ec1de4 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -440,6 +440,11 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
do_gettimeofday(&tstamp);
dev_dbg(adapter->dev, "event: %lu.%lu: cause: %#x\n",
tstamp.tv_sec, tstamp.tv_usec, eventcause);
+ } else {
+ /* Handle PS_SLEEP/AWAKE events on STA */
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+ if (!priv)
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
}
ret = mwifiex_process_sta_event(priv);
@@ -540,8 +545,20 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
/* Prepare command */
if (cmd_no) {
- ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action,
- cmd_oid, data_buf, cmd_ptr);
+ switch (cmd_no) {
+ case HostCmd_CMD_UAP_SYS_CONFIG:
+ case HostCmd_CMD_UAP_BSS_START:
+ case HostCmd_CMD_UAP_BSS_STOP:
+ ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
+ cmd_oid, data_buf,
+ cmd_ptr);
+ break;
+ default:
+ ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action,
+ cmd_oid, data_buf,
+ cmd_ptr);
+ break;
+ }
} else {
ret = mwifiex_cmd_host_cmd(priv, cmd_ptr, data_buf);
cmd_node->cmd_flag |= CMD_F_HOSTCMD;
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index d04aba4131dc..f918f66e5e27 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -28,7 +28,7 @@
#include <linux/ieee80211.h>
-#define MWIFIEX_MAX_BSS_NUM (1)
+#define MWIFIEX_MAX_BSS_NUM (2)
#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
* + 4 byte alignment
@@ -55,11 +55,17 @@
#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024)
#define MWIFIEX_RX_CMD_BUF_SIZE (2 * 1024)
+#define MAX_BEACON_PERIOD (4000)
+#define MIN_BEACON_PERIOD (50)
+#define MAX_DTIM_PERIOD (100)
+#define MIN_DTIM_PERIOD (1)
+
#define MWIFIEX_RTS_MIN_VALUE (0)
#define MWIFIEX_RTS_MAX_VALUE (2347)
#define MWIFIEX_FRAG_MIN_VALUE (256)
#define MWIFIEX_FRAG_MAX_VALUE (2346)
+#define MWIFIEX_RETRY_LIMIT 14
#define MWIFIEX_SDIO_BLOCK_SIZE 256
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
@@ -92,6 +98,11 @@ struct mwifiex_fw_image {
u32 fw_len;
};
+struct mwifiex_802_11_ssid {
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+};
+
struct mwifiex_wait_queue {
wait_queue_head_t wait;
int status;
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 5f6adeb9b950..561452a5c818 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -93,6 +93,20 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define CAL_SNR(RSSI, NF) ((s16)((s16)(RSSI)-(s16)(NF)))
+#define UAP_BSS_PARAMS_I 0
+#define UAP_CUSTOM_IE_I 1
+#define MWIFIEX_AUTO_IDX_MASK 0xffff
+#define MWIFIEX_DELETE_MASK 0x0000
+#define MGMT_MASK_ASSOC_REQ 0x01
+#define MGMT_MASK_REASSOC_REQ 0x04
+#define MGMT_MASK_ASSOC_RESP 0x02
+#define MGMT_MASK_REASSOC_RESP 0x08
+#define MGMT_MASK_PROBE_REQ 0x10
+#define MGMT_MASK_PROBE_RESP 0x20
+#define MGMT_MASK_BEACON 0x100
+
+#define TLV_TYPE_UAP_SSID 0x0000
+
#define PROPRIETARY_TLV_BASE_ID 0x0100
#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1)
@@ -104,14 +118,27 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
+#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
+#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
+#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
+#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
+#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
+#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
+#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
+#define TLV_TYPE_UAP_AKMP (PROPRIETARY_TLV_BASE_ID + 65)
+#define TLV_TYPE_UAP_FRAG_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 70)
#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
+#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
+#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105)
#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
+#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
+#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -209,6 +236,9 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_RSSI_INFO 0x00a4
#define HostCmd_CMD_FUNC_INIT 0x00a9
#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa
+#define HostCmd_CMD_UAP_SYS_CONFIG 0x00b0
+#define HostCmd_CMD_UAP_BSS_START 0x00b1
+#define HostCmd_CMD_UAP_BSS_STOP 0x00b2
#define HostCmd_CMD_11N_CFG 0x00cd
#define HostCmd_CMD_11N_ADDBA_REQ 0x00ce
#define HostCmd_CMD_11N_ADDBA_RSP 0x00cf
@@ -223,6 +253,19 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define PROTOCOL_NO_SECURITY 0x01
+#define PROTOCOL_STATIC_WEP 0x02
+#define PROTOCOL_WPA 0x08
+#define PROTOCOL_WPA2 0x20
+#define PROTOCOL_WPA2_MIXED 0x28
+#define PROTOCOL_EAP 0x40
+#define KEY_MGMT_NONE 0x04
+#define KEY_MGMT_PSK 0x02
+#define KEY_MGMT_EAP 0x01
+#define CIPHER_TKIP 0x04
+#define CIPHER_AES_CCMP 0x08
+#define VALID_CIPHER_BITMAP 0x0c
+
enum ENH_PS_MODES {
EN_PS = 1,
DIS_PS = 2,
@@ -313,15 +356,20 @@ enum ENH_PS_MODES {
#define EVENT_DATA_SNR_HIGH 0x00000027
#define EVENT_LINK_QUALITY 0x00000028
#define EVENT_PORT_RELEASE 0x0000002b
+#define EVENT_UAP_STA_DEAUTH 0x0000002c
+#define EVENT_UAP_STA_ASSOC 0x0000002d
+#define EVENT_UAP_BSS_START 0x0000002e
#define EVENT_PRE_BEACON_LOST 0x00000031
#define EVENT_ADDBA 0x00000033
#define EVENT_DELBA 0x00000034
#define EVENT_BA_STREAM_TIEMOUT 0x00000037
#define EVENT_AMSDU_AGGR_CTRL 0x00000042
+#define EVENT_UAP_BSS_IDLE 0x00000043
+#define EVENT_UAP_BSS_ACTIVE 0x00000044
#define EVENT_WEP_ICV_ERR 0x00000046
#define EVENT_HS_ACT_REQ 0x00000047
#define EVENT_BW_CHANGE 0x00000048
-
+#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_ID_MASK 0xffff
@@ -1103,6 +1151,106 @@ struct host_cmd_ds_802_11_eeprom_access {
u8 value;
} __packed;
+struct host_cmd_tlv {
+ __le16 type;
+ __le16 len;
+} __packed;
+
+struct mwifiex_assoc_event {
+ u8 sta_addr[ETH_ALEN];
+ __le16 type;
+ __le16 len;
+ __le16 frame_control;
+ __le16 cap_info;
+ __le16 listen_interval;
+ u8 data[0];
+} __packed;
+
+struct host_cmd_ds_sys_config {
+ __le16 action;
+ u8 tlv[0];
+};
+
+struct host_cmd_tlv_akmp {
+ struct host_cmd_tlv tlv;
+ __le16 key_mgmt;
+ __le16 key_mgmt_operation;
+} __packed;
+
+struct host_cmd_tlv_pwk_cipher {
+ struct host_cmd_tlv tlv;
+ __le16 proto;
+ u8 cipher;
+ u8 reserved;
+} __packed;
+
+struct host_cmd_tlv_gwk_cipher {
+ struct host_cmd_tlv tlv;
+ u8 cipher;
+ u8 reserved;
+} __packed;
+
+struct host_cmd_tlv_passphrase {
+ struct host_cmd_tlv tlv;
+ u8 passphrase[0];
+} __packed;
+
+struct host_cmd_tlv_auth_type {
+ struct host_cmd_tlv tlv;
+ u8 auth_type;
+} __packed;
+
+struct host_cmd_tlv_encrypt_protocol {
+ struct host_cmd_tlv tlv;
+ __le16 proto;
+} __packed;
+
+struct host_cmd_tlv_ssid {
+ struct host_cmd_tlv tlv;
+ u8 ssid[0];
+} __packed;
+
+struct host_cmd_tlv_bcast_ssid {
+ struct host_cmd_tlv tlv;
+ u8 bcast_ctl;
+} __packed;
+
+struct host_cmd_tlv_beacon_period {
+ struct host_cmd_tlv tlv;
+ __le16 period;
+} __packed;
+
+struct host_cmd_tlv_dtim_period {
+ struct host_cmd_tlv tlv;
+ u8 period;
+} __packed;
+
+struct host_cmd_tlv_frag_threshold {
+ struct host_cmd_tlv tlv;
+ __le16 frag_thr;
+} __packed;
+
+struct host_cmd_tlv_rts_threshold {
+ struct host_cmd_tlv tlv;
+ __le16 rts_thr;
+} __packed;
+
+struct host_cmd_tlv_retry_limit {
+ struct host_cmd_tlv tlv;
+ u8 limit;
+} __packed;
+
+struct host_cmd_tlv_mac_addr {
+ struct host_cmd_tlv tlv;
+ u8 mac_addr[ETH_ALEN];
+} __packed;
+
+struct host_cmd_tlv_channel_band {
+ struct host_cmd_tlv tlv;
+ u8 band_config;
+ u8 channel;
+} __packed;
+
struct host_cmd_ds_802_11_rf_channel {
__le16 action;
__le16 current_channel;
@@ -1167,6 +1315,20 @@ struct host_cmd_ds_802_11_subsc_evt {
__le16 events;
} __packed;
+struct mwifiex_ie {
+ __le16 ie_index;
+ __le16 mgmt_subtype_mask;
+ __le16 ie_length;
+ u8 ie_buffer[IEEE_MAX_IE_SIZE];
+} __packed;
+
+#define MAX_MGMT_IE_INDEX 16
+struct mwifiex_ie_list {
+ __le16 type;
+ __le16 len;
+ struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX];
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -1217,6 +1379,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_pcie_details pcie_host_spec;
struct host_cmd_ds_802_11_eeprom_access eeprom;
struct host_cmd_ds_802_11_subsc_evt subsc_evt;
+ struct host_cmd_ds_sys_config uap_sys_config;
} params;
} __packed;
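
The uAP configuration command is assembled from type/length TLVs built on host_cmd_tlv above. A simplified, host-endian sketch of appending such TLVs to a command buffer (0x012c/0x012d happen to match the beacon/DTIM TLV IDs defined earlier, but endianness handling and the driver's helpers are omitted):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

/* Append a TLV (header + payload) at offset *used in buf and advance it. */
static void append_tlv(uint8_t *buf, size_t *used, uint16_t type,
		       const void *payload, uint16_t len)
{
	struct tlv_hdr hdr = { .type = type, .len = len };

	memcpy(buf + *used, &hdr, sizeof(hdr));
	memcpy(buf + *used + sizeof(hdr), payload, len);
	*used += sizeof(hdr) + len;
}

int main(void)
{
	uint8_t cmd[128];
	size_t used = 0;
	uint16_t beacon_period = 100;
	uint8_t dtim_period = 1;

	append_tlv(cmd, &used, 0x012c, &beacon_period, sizeof(beacon_period));
	append_tlv(cmd, &used, 0x012d, &dtim_period, sizeof(dtim_period));

	printf("command body carries %zu TLV bytes\n", used);
	return 0;
}
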
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
new file mode 100644
index 000000000000..383820a52beb
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -0,0 +1,397 @@
+/*
+ * Marvell Wireless LAN device driver: management IE handling - setting and
+ * deleting IEs.
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+
+/* This function checks if the current IE index is used by any other interface.
+ * Return: -1: yes, current IE index is used by someone else.
+ * 0: no, current IE index is NOT used by other interface.
+ */
+static int
+mwifiex_ie_index_used_by_other_intf(struct mwifiex_private *priv, u16 idx)
+{
+ int i;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_ie *ie;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i] != priv) {
+ ie = &adapter->priv[i]->mgmt_ie[idx];
+ if (ie->mgmt_subtype_mask && ie->ie_length)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Get unused IE index. This index will be used for setting new IE */
+static int
+mwifiex_ie_get_autoidx(struct mwifiex_private *priv, u16 subtype_mask,
+ struct mwifiex_ie *ie, u16 *index)
+{
+ u16 mask, len, i;
+
+ for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) {
+ mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask);
+ len = le16_to_cpu(priv->mgmt_ie[i].ie_length) +
+ le16_to_cpu(ie->ie_length);
+
+ if (mask == MWIFIEX_AUTO_IDX_MASK)
+ continue;
+
+ if (mask == subtype_mask) {
+ if (len > IEEE_MAX_IE_SIZE)
+ continue;
+
+ *index = i;
+ return 0;
+ }
+
+ if (!priv->mgmt_ie[i].ie_length) {
+ if (mwifiex_ie_index_used_by_other_intf(priv, i))
+ continue;
+
+ *index = i;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+/* This function prepares IE data buffer for command to be sent to FW */
+static int
+mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
+ struct mwifiex_ie_list *ie_list)
+{
+ u16 travel_len, index, mask;
+ s16 input_len;
+ struct mwifiex_ie *ie;
+ u8 *tmp;
+
+ input_len = le16_to_cpu(ie_list->len);
+ travel_len = sizeof(struct host_cmd_tlv);
+
+ ie_list->len = 0;
+
+ while (input_len > 0) {
+ ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len);
+ input_len -= le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE;
+ travel_len += le16_to_cpu(ie->ie_length) + MWIFIEX_IE_HDR_SIZE;
+
+ index = le16_to_cpu(ie->ie_index);
+ mask = le16_to_cpu(ie->mgmt_subtype_mask);
+
+ if (index == MWIFIEX_AUTO_IDX_MASK) {
+ /* automatic addition */
+ if (mwifiex_ie_get_autoidx(priv, mask, ie, &index))
+ return -1;
+ if (index == MWIFIEX_AUTO_IDX_MASK)
+ return -1;
+
+ tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
+ tmp += le16_to_cpu(priv->mgmt_ie[index].ie_length);
+ memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
+ le16_add_cpu(&priv->mgmt_ie[index].ie_length,
+ le16_to_cpu(ie->ie_length));
+ priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
+ priv->mgmt_ie[index].mgmt_subtype_mask =
+ cpu_to_le16(mask);
+
+ ie->ie_index = cpu_to_le16(index);
+ ie->ie_length = priv->mgmt_ie[index].ie_length;
+ memcpy(&ie->ie_buffer, &priv->mgmt_ie[index].ie_buffer,
+ le16_to_cpu(priv->mgmt_ie[index].ie_length));
+ } else {
+ if (mask != MWIFIEX_DELETE_MASK)
+ return -1;
+ /*
+ * Check if this index is being used on any
+ * other interface.
+ */
+ if (mwifiex_ie_index_used_by_other_intf(priv, index))
+ return -1;
+
+ ie->ie_length = 0;
+ memcpy(&priv->mgmt_ie[index], ie,
+ sizeof(struct mwifiex_ie));
+ }
+
+ le16_add_cpu(&ie_list->len,
+ le16_to_cpu(priv->mgmt_ie[index].ie_length) +
+ MWIFIEX_IE_HDR_SIZE);
+ }
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
+ return mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_CUSTOM_IE_I, ie_list);
+
+ return 0;
+}
+
+/* Copy individual custom IEs for beacon, probe response and assoc response
+ * and prepare single structure for IE setting.
+ * This function also updates allocated IE indices from driver.
+ */
+static int
+mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
+ struct mwifiex_ie *beacon_ie, u16 *beacon_idx,
+ struct mwifiex_ie *pr_ie, u16 *probe_idx,
+ struct mwifiex_ie *ar_ie, u16 *assoc_idx)
+{
+ struct mwifiex_ie_list *ap_custom_ie;
+ u8 *pos;
+ u16 len;
+ int ret;
+
+ ap_custom_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!ap_custom_ie)
+ return -ENOMEM;
+
+ ap_custom_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
+ pos = (u8 *)ap_custom_ie->ie_list;
+
+ if (beacon_ie) {
+ len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(beacon_ie->ie_length);
+ memcpy(pos, beacon_ie, len);
+ pos += len;
+ le16_add_cpu(&ap_custom_ie->len, len);
+ }
+ if (pr_ie) {
+ len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(pr_ie->ie_length);
+ memcpy(pos, pr_ie, len);
+ pos += len;
+ le16_add_cpu(&ap_custom_ie->len, len);
+ }
+ if (ar_ie) {
+ len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(ar_ie->ie_length);
+ memcpy(pos, ar_ie, len);
+ pos += len;
+ le16_add_cpu(&ap_custom_ie->len, len);
+ }
+
+ ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie);
+
+ pos = (u8 *)(&ap_custom_ie->ie_list[0].ie_index);
+ if (beacon_ie && *beacon_idx == MWIFIEX_AUTO_IDX_MASK) {
+ /* save beacon ie index after auto-indexing */
+ *beacon_idx = le16_to_cpu(ap_custom_ie->ie_list[0].ie_index);
+ len = sizeof(*beacon_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(beacon_ie->ie_length);
+ pos += len;
+ }
+ if (pr_ie && le16_to_cpu(pr_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK) {
+ /* save probe resp ie index after auto-indexing */
+ *probe_idx = *((u16 *)pos);
+ len = sizeof(*pr_ie) - IEEE_MAX_IE_SIZE +
+ le16_to_cpu(pr_ie->ie_length);
+ pos += len;
+ }
+ if (ar_ie && le16_to_cpu(ar_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK)
+ /* save assoc resp ie index after auto-indexing */
+ *assoc_idx = *((u16 *)pos);
+
+ kfree(ap_custom_ie);
+ return ret;
+}
+
+/* This function parses different IEs - tail IEs, beacon IEs, probe response IEs
+ * and association response IEs - from cfg80211_ap_settings and sets these IEs
+ * in the FW.
+ */
+int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
+ struct cfg80211_ap_settings *params)
+{
+ struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
+ struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL;
+ struct ieee_types_header *ie = NULL;
+ u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
+ u16 ar_idx = MWIFIEX_AUTO_IDX_MASK, rsn_idx = MWIFIEX_AUTO_IDX_MASK;
+ u16 mask;
+ int ret = 0;
+
+ if (params->beacon.tail && params->beacon.tail_len) {
+ ie = (void *)cfg80211_find_ie(WLAN_EID_RSN, params->beacon.tail,
+ params->beacon.tail_len);
+ if (ie) {
+ rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!rsn_ie)
+ return -ENOMEM;
+
+ rsn_ie->ie_index = cpu_to_le16(rsn_idx);
+ mask = MGMT_MASK_BEACON | MGMT_MASK_PROBE_RESP |
+ MGMT_MASK_ASSOC_RESP;
+ rsn_ie->mgmt_subtype_mask = cpu_to_le16(mask);
+ rsn_ie->ie_length = cpu_to_le16(ie->len + 2);
+ memcpy(rsn_ie->ie_buffer, ie, ie->len + 2);
+
+ if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &rsn_idx,
+ NULL, NULL,
+ NULL, NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ priv->rsn_idx = rsn_idx;
+ }
+ }
+
+ if (params->beacon.beacon_ies && params->beacon.beacon_ies_len) {
+ beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!beacon_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ beacon_ie->ie_index = cpu_to_le16(beacon_idx);
+ beacon_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON);
+ beacon_ie->ie_length =
+ cpu_to_le16(params->beacon.beacon_ies_len);
+ memcpy(beacon_ie->ie_buffer, params->beacon.beacon_ies,
+ params->beacon.beacon_ies_len);
+ }
+
+ if (params->beacon.proberesp_ies && params->beacon.proberesp_ies_len) {
+ pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!pr_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ pr_ie->ie_index = cpu_to_le16(pr_idx);
+ pr_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_PROBE_RESP);
+ pr_ie->ie_length =
+ cpu_to_le16(params->beacon.proberesp_ies_len);
+ memcpy(pr_ie->ie_buffer, params->beacon.proberesp_ies,
+ params->beacon.proberesp_ies_len);
+ }
+
+ if (params->beacon.assocresp_ies && params->beacon.assocresp_ies_len) {
+ ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!ar_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ar_ie->ie_index = cpu_to_le16(ar_idx);
+ mask = MGMT_MASK_ASSOC_RESP | MGMT_MASK_REASSOC_RESP;
+ ar_ie->mgmt_subtype_mask = cpu_to_le16(mask);
+ ar_ie->ie_length =
+ cpu_to_le16(params->beacon.assocresp_ies_len);
+ memcpy(ar_ie->ie_buffer, params->beacon.assocresp_ies,
+ params->beacon.assocresp_ies_len);
+ }
+
+ if (beacon_ie || pr_ie || ar_ie) {
+ ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
+ &beacon_idx, pr_ie,
+ &pr_idx, ar_ie, &ar_idx);
+ if (ret)
+ goto done;
+ }
+
+ priv->beacon_idx = beacon_idx;
+ priv->proberesp_idx = pr_idx;
+ priv->assocresp_idx = ar_idx;
+
+done:
+ kfree(beacon_ie);
+ kfree(pr_ie);
+ kfree(ar_ie);
+ kfree(rsn_ie);
+
+ return ret;
+}
+
+/* This function removes management IE set */
+int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
+{
+ struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
+ struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL;
+ int ret = 0;
+
+ if (priv->rsn_idx != MWIFIEX_AUTO_IDX_MASK) {
+ rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!rsn_ie)
+ return -ENOMEM;
+
+ rsn_ie->ie_index = cpu_to_le16(priv->rsn_idx);
+ rsn_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ rsn_ie->ie_length = 0;
+ if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &priv->rsn_idx,
+ NULL, &priv->proberesp_idx,
+ NULL, &priv->assocresp_idx)) {
+ ret = -1;
+ goto done;
+ }
+
+ priv->rsn_idx = MWIFIEX_AUTO_IDX_MASK;
+ }
+
+ if (priv->beacon_idx != MWIFIEX_AUTO_IDX_MASK) {
+ beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!beacon_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ beacon_ie->ie_index = cpu_to_le16(priv->beacon_idx);
+ beacon_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ beacon_ie->ie_length = 0;
+ }
+ if (priv->proberesp_idx != MWIFIEX_AUTO_IDX_MASK) {
+ pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!pr_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ pr_ie->ie_index = cpu_to_le16(priv->proberesp_idx);
+ pr_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ pr_ie->ie_length = 0;
+ }
+ if (priv->assocresp_idx != MWIFIEX_AUTO_IDX_MASK) {
+ ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!ar_ie) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ ar_ie->ie_index = cpu_to_le16(priv->assocresp_idx);
+ ar_ie->mgmt_subtype_mask = cpu_to_le16(MWIFIEX_DELETE_MASK);
+ ar_ie->ie_length = 0;
+ }
+
+ if (beacon_ie || pr_ie || ar_ie)
+ ret = mwifiex_update_uap_custom_ie(priv,
+ beacon_ie, &priv->beacon_idx,
+ pr_ie, &priv->proberesp_idx,
+ ar_ie, &priv->assocresp_idx);
+
+done:
+ kfree(beacon_ie);
+ kfree(pr_ie);
+ kfree(ar_ie);
+ kfree(rsn_ie);
+
+ return ret;
+}
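
mwifiex_update_uap_custom_ie() above repeatedly copies sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE + ie_length bytes, i.e. the fixed header plus only the bytes an IE actually uses. A small standalone sketch of that size calculation (structure and sample IE are illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define IEEE_MAX_IE_SIZE 256

struct demo_ie {
	uint16_t ie_index;
	uint16_t mgmt_subtype_mask;
	uint16_t ie_length;
	uint8_t  ie_buffer[IEEE_MAX_IE_SIZE];
};

int main(void)
{
	struct demo_ie ie = { .ie_index = 0, .mgmt_subtype_mask = 0x100 };
	const uint8_t rsn[] = { 0x30, 0x02, 0x01, 0x00 };	/* fake RSN IE */

	ie.ie_length = sizeof(rsn);
	memcpy(ie.ie_buffer, rsn, sizeof(rsn));

	/* Only header + used bytes go into the firmware buffer, not the
	 * full 256-byte ie_buffer. */
	size_t copy_len = sizeof(struct demo_ie) - IEEE_MAX_IE_SIZE +
			  ie.ie_length;

	printf("copying %zu of %zu bytes\n", copy_len, sizeof(struct demo_ie));
	return 0;
}
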
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index d440c3eb640b..c1cb004db913 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -279,6 +279,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
adapter->channel_type = NL80211_CHAN_HT20;
+ adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
}
/*
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index f0f95524e96b..e6be6ee75951 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -62,6 +62,36 @@ enum {
BAND_AN = 16,
};
+#define MWIFIEX_WPA_PASSHPHRASE_LEN 64
+struct wpa_param {
+ u8 pairwise_cipher_wpa;
+ u8 pairwise_cipher_wpa2;
+ u8 group_cipher;
+ u32 length;
+ u8 passphrase[MWIFIEX_WPA_PASSHPHRASE_LEN];
+};
+
+#define KEY_MGMT_ON_HOST 0x03
+#define MWIFIEX_AUTH_MODE_AUTO 0xFF
+#define BAND_CONFIG_MANUAL 0x00
+struct mwifiex_uap_bss_param {
+ u8 channel;
+ u8 band_cfg;
+ u16 rts_threshold;
+ u16 frag_threshold;
+ u8 retry_limit;
+ struct mwifiex_802_11_ssid ssid;
+ u8 bcast_ssid_ctl;
+ u8 radio_ctl;
+ u8 dtim_period;
+ u16 beacon_period;
+ u16 auth_mode;
+ u16 protocol;
+ u16 key_mgmt;
+ u16 key_mgmt_operation;
+ struct wpa_param wpa_cfg;
+};
+
enum {
ADHOC_IDLE,
ADHOC_STARTED,
@@ -269,6 +299,8 @@ struct mwifiex_ds_read_eeprom {
#define IEEE_MAX_IE_SIZE 256
+#define MWIFIEX_IE_HDR_SIZE (sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE)
+
struct mwifiex_ds_misc_gen_ie {
u32 type;
u32 len;
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 8a390982463e..d6b4fb04011f 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1374,22 +1374,28 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
*
* In case of infra made, it sends deauthentication request, and
* in case of ad-hoc mode, a stop network request is sent to the firmware.
+ * In AP mode, a command to stop bss is sent to firmware.
*/
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
{
- int ret = 0;
+ if (!priv->media_connected)
+ return 0;
- if (priv->media_connected) {
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
- ret = mwifiex_deauthenticate_infra(priv, mac);
- } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- ret = mwifiex_send_cmd_sync(priv,
- HostCmd_CMD_802_11_AD_HOC_STOP,
- HostCmd_ACT_GEN_SET, 0, NULL);
- }
+ switch (priv->bss_mode) {
+ case NL80211_IFTYPE_STATION:
+ return mwifiex_deauthenticate_infra(priv, mac);
+ case NL80211_IFTYPE_ADHOC:
+ return mwifiex_send_cmd_sync(priv,
+ HostCmd_CMD_802_11_AD_HOC_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL);
+ case NL80211_IFTYPE_AP:
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
+ HostCmd_ACT_GEN_SET, 0, NULL);
+ default:
+ break;
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index be0f0e583f75..3192855c31c0 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -64,17 +64,17 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
adapter->priv_num = 0;
- /* Allocate memory for private structure */
- adapter->priv[0] = kzalloc(sizeof(struct mwifiex_private), GFP_KERNEL);
- if (!adapter->priv[0]) {
- dev_err(adapter->dev,
- "%s: failed to alloc priv[0]\n", __func__);
- goto error;
- }
-
- adapter->priv_num++;
+ for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) {
+ /* Allocate memory for private structure */
+ adapter->priv[i] =
+ kzalloc(sizeof(struct mwifiex_private), GFP_KERNEL);
+ if (!adapter->priv[i])
+ goto error;
- adapter->priv[0]->adapter = adapter;
+ adapter->priv[i]->adapter = adapter;
+ adapter->priv[i]->bss_priority = i;
+ adapter->priv_num++;
+ }
mwifiex_init_lock_list(adapter);
init_timer(&adapter->cmd_timer);
@@ -349,19 +349,26 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
goto done;
- priv = adapter->priv[0];
- if (mwifiex_register_cfg80211(priv) != 0) {
+ priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
+ if (mwifiex_register_cfg80211(adapter)) {
dev_err(adapter->dev, "cannot register with cfg80211\n");
goto err_init_fw;
}
rtnl_lock();
/* Create station interface by default */
- if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
+ if (!mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d",
NL80211_IFTYPE_STATION, NULL, NULL)) {
dev_err(adapter->dev, "cannot create default STA interface\n");
goto err_add_intf;
}
+
+ /* Create AP interface by default */
+ if (!mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
+ NL80211_IFTYPE_AP, NULL, NULL)) {
+ dev_err(adapter->dev, "cannot create default AP interface\n");
+ goto err_add_intf;
+ }
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -369,7 +376,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
- mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->netdev);
rtnl_unlock();
err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
@@ -633,6 +640,12 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
priv->current_key_index = 0;
priv->media_connected = false;
memset(&priv->nick_name, 0, sizeof(priv->nick_name));
+ memset(priv->mgmt_ie, 0,
+ sizeof(struct mwifiex_ie) * MAX_MGMT_IE_INDEX);
+ priv->beacon_idx = MWIFIEX_AUTO_IDX_MASK;
+ priv->proberesp_idx = MWIFIEX_AUTO_IDX_MASK;
+ priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK;
+ priv->rsn_idx = MWIFIEX_AUTO_IDX_MASK;
priv->num_tx_timeout = 0;
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
}
@@ -830,19 +843,21 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
rtnl_lock();
if (priv->wdev && priv->netdev)
- mwifiex_del_virtual_intf(priv->wdev->wiphy,
- priv->netdev);
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->netdev);
rtnl_unlock();
}
priv = adapter->priv[0];
- if (!priv)
+ if (!priv || !priv->wdev)
goto exit_remove;
- if (priv->wdev) {
- wiphy_unregister(priv->wdev->wiphy);
- wiphy_free(priv->wdev->wiphy);
- kfree(priv->wdev);
+ wiphy_unregister(priv->wdev->wiphy);
+ wiphy_free(priv->wdev->wiphy);
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv)
+ kfree(priv->wdev);
}
mwifiex_terminate_workqueue(adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 324ad390cacd..bd3b0bf94b9e 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -116,6 +116,7 @@ enum {
#define MAX_FREQUENCY_BAND_BG 2484
#define MWIFIEX_EVENT_HEADER_LEN 4
+#define MWIFIEX_UAP_EVENT_EXTRA_HEADER 2
#define MWIFIEX_TYPE_LEN 4
#define MWIFIEX_USB_TYPE_CMD 0xF00DFACE
@@ -370,6 +371,7 @@ struct mwifiex_private {
u8 bss_role;
u8 bss_priority;
u8 bss_num;
+ u8 bss_started;
u8 frame_type;
u8 curr_addr[ETH_ALEN];
u8 media_connected;
@@ -470,12 +472,16 @@ struct mwifiex_private {
struct cfg80211_scan_request *scan_request;
struct mwifiex_user_scan_cfg *user_scan_cfg;
u8 cfg_bssid[6];
- u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct wps wps;
u8 scan_block;
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
u8 subsc_evt_rssi_state;
+ struct mwifiex_ie mgmt_ie[MAX_MGMT_IE_INDEX];
+ u16 beacon_idx;
+ u16 proberesp_idx;
+ u16 assocresp_idx;
+ u16 rsn_idx;
};
enum mwifiex_ba_status {
@@ -571,6 +577,7 @@ struct mwifiex_adapter {
char fw_name[32];
int winner;
struct device *dev;
+ struct wiphy *wiphy;
bool surprise_removed;
u32 fw_release_number;
u16 init_wait_q_woken;
@@ -677,6 +684,8 @@ struct mwifiex_adapter {
struct cmd_ctrl_node *cmd_queued;
spinlock_t queue_lock; /* lock for tx queues */
struct completion fw_load;
+ u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
+ u16 max_mgmt_ie_index;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -760,6 +769,9 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no,
u16 cmd_action, u32 cmd_oid,
void *data_buf, void *cmd_buf);
+int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
+ u16 cmd_action, u32 cmd_oid,
+ void *data_buf, void *cmd_buf);
int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
struct host_cmd_ds_command *resp);
int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
@@ -820,6 +832,9 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
int is_command_pending(struct mwifiex_adapter *adapter);
void mwifiex_init_priv_params(struct mwifiex_private *priv,
struct net_device *dev);
+int mwifiex_set_secure_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_config,
+ struct cfg80211_ap_settings *params);
/*
* This function checks if the queuing is RA based or not.
@@ -933,7 +948,8 @@ int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel);
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
- int key_len, u8 key_index, int disable);
+ int key_len, u8 key_index, const u8 *mac_addr,
+ int disable);
int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
@@ -969,6 +985,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
int mwifiex_main_process(struct mwifiex_adapter *);
+int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel);
int mwifiex_bss_set_channel(struct mwifiex_private *,
struct mwifiex_chan_freq_power *cfp);
int mwifiex_get_bss_info(struct mwifiex_private *,
@@ -986,6 +1003,11 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
u32 *flags, struct vif_params *params);
int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
+void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config);
+
+int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
+ struct cfg80211_ap_settings *params);
+int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
u8 *mwifiex_11d_code_2_region(u8 code);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index e0377473282f..fc8a9bfa1248 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
adapter->event_cause = *(u32 *) skb->data;
- skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
- memcpy(adapter->event_body, skb->data, skb->len);
+ memcpy(adapter->event_body,
+ skb->data + MWIFIEX_EVENT_HEADER_LEN,
+ skb->len);
/* event cause has been saved to adapter->event_cause */
adapter->event_received = true;
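
With this change the 4-byte event header stays in the skb: the cause is read in place and only the body is copied out at an offset, so the same buffer can later be handed back to the card. A minimal sketch of the resulting layout, assuming the MWIFIEX_EVENT_HEADER_LEN and MAX_EVENT_SIZE definitions from the driver headers (the helper name is made up):

/*   skb->data:  [ 4-byte event cause ][ event body ... ]   */
static void mwifiex_example_parse_event(struct mwifiex_adapter *adapter,
					struct sk_buff *skb)
{
	u16 body_len;

	if (skb->len < MWIFIEX_EVENT_HEADER_LEN)
		return;

	adapter->event_cause = *(u32 *)skb->data;
	body_len = skb->len - MWIFIEX_EVENT_HEADER_LEN;
	if (body_len < MAX_EVENT_SIZE)
		memcpy(adapter->event_body,
		       skb->data + MWIFIEX_EVENT_HEADER_LEN, body_len);
	adapter->event_received = true;
}
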
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 87ed2a1f6cd9..40e025da6bc2 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -498,7 +498,8 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
{
struct host_cmd_ds_802_11_key_material *key_material =
&cmd->params.key_material;
- u16 key_param_len = 0;
+ struct host_cmd_tlv_mac_addr *tlv_mac;
+ u16 key_param_len = 0, cmd_size;
int ret = 0;
const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -614,11 +615,26 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
cpu_to_le16((u16) enc_key->key_len +
KEYPARAMSET_FIXED_LEN);
- key_param_len = (u16) (enc_key->key_len + KEYPARAMSET_FIXED_LEN)
+ key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN)
+ sizeof(struct mwifiex_ie_types_header);
cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN
+ key_param_len);
+
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
+ tlv_mac = (void *)((u8 *)&key_material->key_param_set +
+ key_param_len);
+ tlv_mac->tlv.type = cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
+ tlv_mac->tlv.len = cpu_to_le16(ETH_ALEN);
+ memcpy(tlv_mac->mac_addr, enc_key->mac_addr, ETH_ALEN);
+ cmd_size = key_param_len + S_DS_GEN +
+ sizeof(key_material->action) +
+ sizeof(struct host_cmd_tlv_mac_addr);
+ } else {
+ cmd_size = key_param_len + S_DS_GEN +
+ sizeof(key_material->action);
+ }
+ cmd->size = cpu_to_le16(cmd_size);
}
return ret;
@@ -1248,13 +1264,15 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- /* Enable IEEE PS by default */
- priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_PS_MODE_ENH,
- EN_AUTO_PS, BITMAP_STA_PS, NULL);
- if (ret)
- return -1;
+ if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+ /* Enable IEEE PS by default */
+ priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
+ ret = mwifiex_send_cmd_async(
+ priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ EN_AUTO_PS, BITMAP_STA_PS, NULL);
+ if (ret)
+ return -1;
+ }
}
/* get tx rate */
@@ -1270,12 +1288,14 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- /* set ibss coalescing_status */
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
- HostCmd_ACT_GEN_SET, 0, &enable);
- if (ret)
- return -1;
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
+ /* set ibss coalescing_status */
+ ret = mwifiex_send_cmd_async(
+ priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
+ HostCmd_ACT_GEN_SET, 0, &enable);
+ if (ret)
+ return -1;
+ }
memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
amsdu_aggr_ctrl.enable = true;
@@ -1293,7 +1313,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
return -1;
- if (first_sta && (priv->adapter->iface_type != MWIFIEX_USB)) {
+ if (first_sta && priv->adapter->iface_type != MWIFIEX_USB &&
+ priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
/* Enable auto deep sleep */
auto_ds.auto_ds = DEEP_SLEEP_ON;
auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
@@ -1305,12 +1326,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
return -1;
}
- /* Send cmd to FW to enable/disable 11D function */
- state_11d = ENABLE_11D;
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB,
- HostCmd_ACT_GEN_SET, DOT11D_I, &state_11d);
- if (ret)
- dev_err(priv->adapter->dev, "11D: failed to enable 11D\n");
+ if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+ /* Send cmd to FW to enable/disable 11D function */
+ state_11d = ENABLE_11D;
+ ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SNMP_MIB,
+ HostCmd_ACT_GEN_SET, DOT11D_I,
+ &state_11d);
+ if (ret)
+ dev_err(priv->adapter->dev,
+ "11D: failed to enable 11D\n");
+ }
/* Send cmd to FW to configure 11n specific configuration
* (Short GI, Channel BW, Green field support etc.) for transmit
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 3aa54243dea9..a79ed9bd9695 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -944,6 +944,14 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
ret = mwifiex_ret_subsc_evt(priv, resp, data_buf);
break;
+ case HostCmd_CMD_UAP_SYS_CONFIG:
+ break;
+ case HostCmd_CMD_UAP_BSS_START:
+ priv->bss_started = 1;
+ break;
+ case HostCmd_CMD_UAP_BSS_STOP:
+ priv->bss_started = 0;
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f6bbb9307f86..11e731f3581c 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -184,8 +184,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
int mwifiex_process_sta_event(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
- int ret = 0;
+ int len, ret = 0;
u32 eventcause = adapter->event_cause;
+ struct station_info sinfo;
+ struct mwifiex_assoc_event *event;
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -402,6 +404,52 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_HOSTWAKE_STAIE:
dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
break;
+
+ case EVENT_UAP_STA_ASSOC:
+ memset(&sinfo, 0, sizeof(sinfo));
+ event = (struct mwifiex_assoc_event *)
+ (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
+ if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
+ len = -1;
+
+ if (ieee80211_is_assoc_req(event->frame_control))
+ len = 0;
+ else if (ieee80211_is_reassoc_req(event->frame_control))
+ /* There will be ETH_ALEN bytes of
+ * current_ap_addr before the re-assoc ies.
+ */
+ len = ETH_ALEN;
+
+ if (len != -1) {
+ sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
+ sinfo.assoc_req_ies = (u8 *)&event->data[len];
+ len = (u8 *)sinfo.assoc_req_ies -
+ (u8 *)&event->frame_control;
+ sinfo.assoc_req_ies_len =
+ le16_to_cpu(event->len) - (u16)len;
+ }
+ }
+ cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo,
+ GFP_KERNEL);
+ break;
+ case EVENT_UAP_STA_DEAUTH:
+ cfg80211_del_sta(priv->netdev, adapter->event_body +
+ MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
+ break;
+ case EVENT_UAP_BSS_IDLE:
+ priv->media_connected = false;
+ break;
+ case EVENT_UAP_BSS_ACTIVE:
+ priv->media_connected = true;
+ break;
+ case EVENT_UAP_BSS_START:
+ dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN);
+ break;
+ case EVENT_UAP_MIC_COUNTERMEASURES:
+ /* For future development */
+ dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ break;
default:
dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 58970e0f7d13..106c449477b2 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -462,7 +462,7 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
info->bss_chan = bss_desc->channel;
- memcpy(info->country_code, priv->country_code,
+ memcpy(info->country_code, adapter->country_code,
IEEE80211_COUNTRY_STRING_LEN);
info->media_connected = priv->media_connected;
@@ -1219,7 +1219,8 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
* with requisite parameters and calls the IOCTL handler.
*/
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
- int key_len, u8 key_index, int disable)
+ int key_len, u8 key_index,
+ const u8 *mac_addr, int disable)
{
struct mwifiex_ds_encrypt_key encrypt_key;
@@ -1229,8 +1230,12 @@ int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
encrypt_key.key_index = key_index;
if (key_len)
memcpy(encrypt_key.key_material, key, key_len);
+ if (mac_addr)
+ memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
} else {
encrypt_key.key_disable = true;
+ if (mac_addr)
+ memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
}
return mwifiex_sec_ioctl_encrypt_key(priv, &encrypt_key);
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index e2faec4db108..cecb27283196 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -161,15 +161,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
goto done;
for (i = 0; i < adapter->priv_num; i++) {
-
tpriv = adapter->priv[i];
- if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) &&
- (tpriv->media_connected)) {
- if (netif_queue_stopped(tpriv->netdev))
- mwifiex_wake_up_net_dev_queue(tpriv->netdev,
- adapter);
- }
+ if (tpriv->media_connected &&
+ netif_queue_stopped(tpriv->netdev))
+ mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter);
}
done:
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
new file mode 100644
index 000000000000..89f9a2a45de3
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -0,0 +1,453 @@
+/*
+ * Marvell Wireless LAN device driver: AP specific command handling
+ *
+ * Copyright (C) 2012, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+
+/* This function parses security-related parameters from cfg80211_ap_settings
+ * and sets them in the FW-understandable bss_config structure.
+ */
+int mwifiex_set_secure_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_config,
+ struct cfg80211_ap_settings *params)
+{
+ int i;
+
+ if (!params->privacy) {
+ bss_config->protocol = PROTOCOL_NO_SECURITY;
+ bss_config->key_mgmt = KEY_MGMT_NONE;
+ bss_config->wpa_cfg.length = 0;
+ priv->sec_info.wep_enabled = 0;
+ priv->sec_info.wpa_enabled = 0;
+ priv->sec_info.wpa2_enabled = 0;
+
+ return 0;
+ }
+
+ switch (params->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ bss_config->auth_mode = WLAN_AUTH_OPEN;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ bss_config->auth_mode = WLAN_AUTH_SHARED_KEY;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ bss_config->auth_mode = WLAN_AUTH_LEAP;
+ break;
+ default:
+ bss_config->auth_mode = MWIFIEX_AUTH_MODE_AUTO;
+ break;
+ }
+
+ bss_config->key_mgmt_operation |= KEY_MGMT_ON_HOST;
+
+ for (i = 0; i < params->crypto.n_akm_suites; i++) {
+ switch (params->crypto.akm_suites[i]) {
+ case WLAN_AKM_SUITE_8021X:
+ if (params->crypto.wpa_versions &
+ NL80211_WPA_VERSION_1) {
+ bss_config->protocol = PROTOCOL_WPA;
+ bss_config->key_mgmt = KEY_MGMT_EAP;
+ }
+ if (params->crypto.wpa_versions &
+ NL80211_WPA_VERSION_2) {
+ bss_config->protocol = PROTOCOL_WPA2;
+ bss_config->key_mgmt = KEY_MGMT_EAP;
+ }
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ if (params->crypto.wpa_versions &
+ NL80211_WPA_VERSION_1) {
+ bss_config->protocol = PROTOCOL_WPA;
+ bss_config->key_mgmt = KEY_MGMT_PSK;
+ }
+ if (params->crypto.wpa_versions &
+ NL80211_WPA_VERSION_2) {
+ bss_config->protocol = PROTOCOL_WPA2;
+ bss_config->key_mgmt = KEY_MGMT_PSK;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ for (i = 0; i < params->crypto.n_ciphers_pairwise; i++) {
+ switch (params->crypto.ciphers_pairwise[i]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ bss_config->wpa_cfg.pairwise_cipher_wpa = CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ bss_config->wpa_cfg.pairwise_cipher_wpa2 =
+ CIPHER_AES_CCMP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ switch (params->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ bss_config->wpa_cfg.group_cipher = CIPHER_AES_CCMP;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* This function initializes some of the mwifiex_uap_bss_param variables.
+ * This helps the FW ignore invalid values. These values may or may not
+ * be updated to valid ones at a later stage.
+ */
+void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
+{
+ config->bcast_ssid_ctl = 0x7F;
+ config->radio_ctl = 0x7F;
+ config->dtim_period = 0x7F;
+ config->beacon_period = 0x7FFF;
+ config->auth_mode = 0x7F;
+ config->rts_threshold = 0x7FFF;
+ config->frag_threshold = 0x7FFF;
+ config->retry_limit = 0x7F;
+}
+
+/* This function parses BSS-related parameters from the given structure
+ * and prepares TLVs. These TLVs are appended to the command buffer.
+ */
+static int
+mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
+{
+ struct host_cmd_tlv_dtim_period *dtim_period;
+ struct host_cmd_tlv_beacon_period *beacon_period;
+ struct host_cmd_tlv_ssid *ssid;
+ struct host_cmd_tlv_bcast_ssid *bcast_ssid;
+ struct host_cmd_tlv_channel_band *chan_band;
+ struct host_cmd_tlv_frag_threshold *frag_threshold;
+ struct host_cmd_tlv_rts_threshold *rts_threshold;
+ struct host_cmd_tlv_retry_limit *retry_limit;
+ struct host_cmd_tlv_pwk_cipher *pwk_cipher;
+ struct host_cmd_tlv_gwk_cipher *gwk_cipher;
+ struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
+ struct host_cmd_tlv_auth_type *auth_type;
+ struct host_cmd_tlv_passphrase *passphrase;
+ struct host_cmd_tlv_akmp *tlv_akmp;
+ struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+ u16 cmd_size = *param_size;
+
+ if (bss_cfg->ssid.ssid_len) {
+ ssid = (struct host_cmd_tlv_ssid *)tlv;
+ ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+ ssid->tlv.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
+ memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
+ cmd_size += sizeof(struct host_cmd_tlv) +
+ bss_cfg->ssid.ssid_len;
+ tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+
+ bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
+ bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+ bcast_ssid->tlv.len =
+ cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
+ bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
+ cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
+ tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
+ }
+ if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
+ chan_band = (struct host_cmd_tlv_channel_band *)tlv;
+ chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
+ chan_band->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
+ sizeof(struct host_cmd_tlv));
+ chan_band->band_config = bss_cfg->band_cfg;
+ chan_band->channel = bss_cfg->channel;
+ cmd_size += sizeof(struct host_cmd_tlv_channel_band);
+ tlv += sizeof(struct host_cmd_tlv_channel_band);
+ }
+ if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
+ bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
+ beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
+ beacon_period->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
+ beacon_period->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
+ sizeof(struct host_cmd_tlv));
+ beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
+ cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
+ tlv += sizeof(struct host_cmd_tlv_beacon_period);
+ }
+ if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
+ bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
+ dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
+ dtim_period->tlv.type = cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
+ dtim_period->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
+ sizeof(struct host_cmd_tlv));
+ dtim_period->period = bss_cfg->dtim_period;
+ cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
+ tlv += sizeof(struct host_cmd_tlv_dtim_period);
+ }
+ if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
+ rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
+ rts_threshold->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
+ rts_threshold->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
+ sizeof(struct host_cmd_tlv));
+ rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
+ cmd_size += sizeof(struct host_cmd_tlv_rts_threshold);
+ tlv += sizeof(struct host_cmd_tlv_rts_threshold);
+ }
+ if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
+ (bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
+ frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
+ frag_threshold->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
+ frag_threshold->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
+ sizeof(struct host_cmd_tlv));
+ frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
+ cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
+ tlv += sizeof(struct host_cmd_tlv_frag_threshold);
+ }
+ if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
+ retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
+ retry_limit->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
+ retry_limit->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
+ sizeof(struct host_cmd_tlv));
+ retry_limit->limit = (u8)bss_cfg->retry_limit;
+ cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
+ tlv += sizeof(struct host_cmd_tlv_retry_limit);
+ }
+ if ((bss_cfg->protocol & PROTOCOL_WPA) ||
+ (bss_cfg->protocol & PROTOCOL_WPA2) ||
+ (bss_cfg->protocol & PROTOCOL_EAP)) {
+ tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
+ tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
+ tlv_akmp->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
+ sizeof(struct host_cmd_tlv));
+ tlv_akmp->key_mgmt_operation =
+ cpu_to_le16(bss_cfg->key_mgmt_operation);
+ tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
+ cmd_size += sizeof(struct host_cmd_tlv_akmp);
+ tlv += sizeof(struct host_cmd_tlv_akmp);
+
+ if (bss_cfg->wpa_cfg.pairwise_cipher_wpa &
+ VALID_CIPHER_BITMAP) {
+ pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
+ pwk_cipher->tlv.type =
+ cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->tlv.len = cpu_to_le16(
+ sizeof(struct host_cmd_tlv_pwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
+ pwk_cipher->cipher =
+ bss_cfg->wpa_cfg.pairwise_cipher_wpa;
+ cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
+ }
+ if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 &
+ VALID_CIPHER_BITMAP) {
+ pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
+ pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->tlv.len = cpu_to_le16(
+ sizeof(struct host_cmd_tlv_pwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
+ pwk_cipher->cipher =
+ bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
+ cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
+ }
+ if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
+ gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
+ gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
+ gwk_cipher->tlv.len = cpu_to_le16(
+ sizeof(struct host_cmd_tlv_gwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
+ cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
+ }
+ if (bss_cfg->wpa_cfg.length) {
+ passphrase = (struct host_cmd_tlv_passphrase *)tlv;
+ passphrase->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
+ passphrase->tlv.len =
+ cpu_to_le16(bss_cfg->wpa_cfg.length);
+ memcpy(passphrase->passphrase,
+ bss_cfg->wpa_cfg.passphrase,
+ bss_cfg->wpa_cfg.length);
+ cmd_size += sizeof(struct host_cmd_tlv) +
+ bss_cfg->wpa_cfg.length;
+ tlv += sizeof(struct host_cmd_tlv) +
+ bss_cfg->wpa_cfg.length;
+ }
+ }
+ if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
+ (bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
+ auth_type = (struct host_cmd_tlv_auth_type *)tlv;
+ auth_type->tlv.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+ auth_type->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
+ sizeof(struct host_cmd_tlv));
+ auth_type->auth_type = (u8)bss_cfg->auth_mode;
+ cmd_size += sizeof(struct host_cmd_tlv_auth_type);
+ tlv += sizeof(struct host_cmd_tlv_auth_type);
+ }
+ if (bss_cfg->protocol) {
+ encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
+ encrypt_protocol->tlv.type =
+ cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
+ encrypt_protocol->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
+ - sizeof(struct host_cmd_tlv));
+ encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
+ cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
+ tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
+ }
+
+ *param_size = cmd_size;
+
+ return 0;
+}
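
Every block in mwifiex_uap_bss_param_prepare() follows the same TLV idiom: cast the running cursor to the TLV struct, fill type and len (len covers only the payload, not the struct host_cmd_tlv header), then advance both the cursor and the running command size by the full TLV size. A stripped-down sketch of that idiom, using the RTS-threshold TLV as the example payload (the helper name is hypothetical):

static u8 *mwifiex_example_append_rts_tlv(u8 *tlv, u16 *cmd_size, u16 rts)
{
	struct host_cmd_tlv_rts_threshold *t = (void *)tlv;

	t->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
	/* length excludes the generic type/len header */
	t->tlv.len = cpu_to_le16(sizeof(*t) - sizeof(struct host_cmd_tlv));
	t->rts_thr = cpu_to_le16(rts);

	*cmd_size += sizeof(*t);
	return tlv + sizeof(*t);
}
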
+
+/* This function parses custom IEs from IE list and prepares command buffer */
+static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
+{
+ struct mwifiex_ie_list *ap_ie = cmd_buf;
+ struct host_cmd_tlv *tlv_ie = (struct host_cmd_tlv *)tlv;
+
+ if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
+ return -1;
+
+ *ie_size += le16_to_cpu(ap_ie->len) + sizeof(struct host_cmd_tlv);
+
+ tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
+ tlv_ie->len = ap_ie->len;
+ tlv += sizeof(struct host_cmd_tlv);
+
+ memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
+
+ return 0;
+}
+
+/* Parse AP config structure and prepare TLV based command structure
+ * to be sent to FW for uAP configuration
+ */
+static int
+mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
+ u32 type, void *cmd_buf)
+{
+ u8 *tlv;
+ u16 cmd_size, param_size, ie_size;
+ struct host_cmd_ds_sys_config *sys_cfg;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_UAP_SYS_CONFIG);
+ cmd_size = (u16)(sizeof(struct host_cmd_ds_sys_config) + S_DS_GEN);
+ sys_cfg = (struct host_cmd_ds_sys_config *)&cmd->params.uap_sys_config;
+ sys_cfg->action = cpu_to_le16(cmd_action);
+ tlv = sys_cfg->tlv;
+
+ switch (type) {
+ case UAP_BSS_PARAMS_I:
+ param_size = cmd_size;
+ if (mwifiex_uap_bss_param_prepare(tlv, cmd_buf, &param_size))
+ return -1;
+ cmd->size = cpu_to_le16(param_size);
+ break;
+ case UAP_CUSTOM_IE_I:
+ ie_size = cmd_size;
+ if (mwifiex_uap_custom_ie_prepare(tlv, cmd_buf, &ie_size))
+ return -1;
+ cmd->size = cpu_to_le16(ie_size);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/* This function prepares the AP specific commands before sending them
+ * to the firmware.
+ * This is a generic function which calls specific command preparation
+ * routines based upon the command number.
+ */
+int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
+ u16 cmd_action, u32 type,
+ void *data_buf, void *cmd_buf)
+{
+ struct host_cmd_ds_command *cmd = cmd_buf;
+
+ switch (cmd_no) {
+ case HostCmd_CMD_UAP_SYS_CONFIG:
+ if (mwifiex_cmd_uap_sys_config(cmd, cmd_action, type, data_buf))
+ return -1;
+ break;
+ case HostCmd_CMD_UAP_BSS_START:
+ case HostCmd_CMD_UAP_BSS_STOP:
+ cmd->command = cpu_to_le16(cmd_no);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ break;
+ default:
+ dev_err(priv->adapter->dev,
+ "PREP_CMD: unknown cmd %#x\n", cmd_no);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* This function sets the RF channel for AP.
+ *
+ * This function populates the channel information in the AP config
+ * structure and sends a command to the firmware to configure it.
+ */
+int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
+{
+ struct mwifiex_uap_bss_param *bss_cfg;
+ struct wiphy *wiphy = priv->wdev->wiphy;
+
+ bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
+ if (!bss_cfg)
+ return -ENOMEM;
+
+ mwifiex_set_sys_config_invalid_data(bss_cfg);
+ bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
+ bss_cfg->channel = channel;
+
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+ HostCmd_ACT_GEN_SET,
+ UAP_BSS_PARAMS_I, bss_cfg)) {
+ wiphy_err(wiphy, "Failed to set the uAP channel\n");
+ kfree(bss_cfg);
+ return -1;
+ }
+
+ kfree(bss_cfg);
+ return 0;
+}
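
For illustration only, a hypothetical cfg80211 channel handler showing how mwifiex_uap_set_channel() is meant to be driven; the real glue lives in cfg80211.c, which is outside this excerpt, so the callback below is a sketch rather than the driver's actual handler.

static int mwifiex_example_set_ap_channel(struct wiphy *wiphy,
					  struct net_device *dev,
					  struct ieee80211_channel *chan)
{
	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);

	if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
		return -EOPNOTSUPP;

	return mwifiex_uap_set_channel(priv,
			ieee80211_frequency_to_channel(chan->center_freq));
}
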
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 49ebf20c56eb..22a5916564b8 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
struct device *dev = adapter->dev;
u32 recv_type;
__le32 tmp;
+ int ret;
if (adapter->hs_activated)
mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
case MWIFIEX_USB_TYPE_CMD:
if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
dev_err(dev, "CMD: skb->len too large\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
} else if (!adapter->curr_cmd) {
dev_dbg(dev, "CMD: no curr_cmd\n");
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(
adapter, skb->data,
skb->len);
- return 0;
+ ret = 0;
+ goto exit_restore_skb;
}
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
case MWIFIEX_USB_TYPE_EVENT:
if (skb->len < sizeof(u32)) {
dev_err(dev, "EVENT: skb->len too small\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
adapter->event_cause = le32_to_cpu(tmp);
- skb_pull(skb, sizeof(u32));
dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
if (skb->len > MAX_EVENT_SIZE) {
dev_err(dev, "EVENT: event body too large\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
- skb_copy_from_linear_data(skb, adapter->event_body,
- skb->len);
+ memcpy(adapter->event_body, skb->data +
+ MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
adapter->event_received = true;
adapter->event_skb = skb;
break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
}
return -EINPROGRESS;
+
+exit_restore_skb:
+ /* The buffer will be reused for further cmds/events */
+ skb_push(skb, INTF_HEADER_LEN);
+
+ return ret;
}
static void mwifiex_usb_rx_complete(struct urb *urb)
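
The new exit_restore_skb label undoes the interface-header pull so the URB's skb keeps its original layout and can be resubmitted for further commands and events. A minimal sketch of that pull/push pairing, assuming INTF_HEADER_LEN from the driver headers (the function name is made up):

static void mwifiex_example_peek_then_recycle(struct sk_buff *skb)
{
	skb_pull(skb, INTF_HEADER_LEN);		/* expose cmd/event payload */
	/* ... inspect recv_type and payload without consuming the skb ... */
	skb_push(skb, INTF_HEADER_LEN);		/* restore layout for reuse */
}
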
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 429a1dee2d26..3fa4d4176993 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
+ mwifiex_reset_11n_rx_seq_num(priv);
+
atomic_set(&priv->wmm.tx_pkts_queued, 0);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
@@ -885,6 +887,10 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
tid_ptr = &(priv_tmp)->wmm.
tid_tbl_ptr[tos_to_tid[i]];
+ /* For non-STA ra_list_curr may be NULL */
+ if (!tid_ptr->ra_list_curr)
+ continue;
+
spin_lock_irqsave(&tid_ptr->tid_tbl_lock,
flags);
is_list_empty =
@@ -1217,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
if (!ptr->is_11n_enabled ||
mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+ priv->wps.session_enable ||
((priv->sec_info.wpa_enabled ||
priv->sec_info.wpa2_enabled) &&
!priv->wpa_is_gtk_set)) {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index c5404eb82b2f..dfcd02ab6cae 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -505,9 +505,6 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
-static int rndis_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan, enum nl80211_channel_type channel_type);
-
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr,
struct key_params *params);
@@ -549,7 +546,6 @@ static const struct cfg80211_ops rndis_config_ops = {
.disconnect = rndis_disconnect,
.join_ibss = rndis_join_ibss,
.leave_ibss = rndis_leave_ibss,
- .set_channel = rndis_set_channel,
.add_key = rndis_add_key,
.del_key = rndis_del_key,
.set_default_key = rndis_set_default_key,
@@ -2114,7 +2110,7 @@ resize_buf:
while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
matched) {
- if (!ether_addr_equal(bssid->mac, match_bssid))
+ if (ether_addr_equal(bssid->mac, match_bssid))
*matched = true;
}
@@ -2398,16 +2394,6 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
return deauthenticate(usbdev);
}
-static int rndis_set_channel(struct wiphy *wiphy, struct net_device *netdev,
- struct ieee80211_channel *chan, enum nl80211_channel_type channel_type)
-{
- struct rndis_wlan_private *priv = wiphy_priv(wiphy);
- struct usbnet *usbdev = priv->usbdev;
-
- return set_channel(usbdev,
- ieee80211_frequency_to_channel(chan->center_freq));
-}
-
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr,
struct key_params *params)
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 931331d95217..cad25bfebd7a 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -1192,6 +1192,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x5390) },
{ PCI_DEVICE(0x1814, 0x5392) },
{ PCI_DEVICE(0x1814, 0x539a) },
+ { PCI_DEVICE(0x1814, 0x539b) },
{ PCI_DEVICE(0x1814, 0x539f) },
#endif
{ 0, }
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ca36cccaba31..8f754025b06e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -396,8 +396,7 @@ struct rt2x00_intf {
* for hardware which doesn't support hardware
* sequence counting.
*/
- spinlock_t seqlock;
- u16 seqno;
+ atomic_t seqno;
};
static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index b49773ef72f2..dd24b2663b5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
else
rt2x00dev->intf_sta_count++;
- spin_lock_init(&intf->seqlock);
mutex_init(&intf->beacon_skb_mutex);
intf->beacon = entry;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4c662eccf53c..2fd830103415 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
+ u16 seqno;
if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
return;
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
* sequence counting per-frame, since those will override the
* sequence counter given by mac80211.
*/
- spin_lock(&intf->seqlock);
-
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
- intf->seqno += 0x10;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
-
- spin_unlock(&intf->seqlock);
+ seqno = atomic_add_return(0x10, &intf->seqno);
+ else
+ seqno = atomic_read(&intf->seqno);
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seqno);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
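
The per-interface seqlock and u16 counter are folded into a single atomic_t: the first fragment of an MSDU bumps the counter by 0x10 (the sequence number occupies bits 4..15 of seq_ctrl) and later fragments reuse the current value. A self-contained sketch of the same technique:

#include <linux/atomic.h>
#include <linux/ieee80211.h>

static void example_assign_seqno(atomic_t *counter, struct ieee80211_hdr *hdr,
				 bool first_fragment)
{
	u16 seqno;

	if (first_fragment)
		seqno = atomic_add_return(0x10, counter);	/* new MSDU */
	else
		seqno = atomic_read(counter);			/* same MSDU */

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(seqno);
}
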
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index d357d1ed92f6..74ecc33fdd90 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -436,8 +436,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
case QID_RX:
if (!rt2x00queue_full(queue))
rt2x00queue_for_each_entry(queue,
- Q_INDEX_DONE,
Q_INDEX,
+ Q_INDEX_DONE,
NULL,
rt2x00usb_kick_rx_entry);
break;
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 2e0de2f5f0f9..c2d5b495c179 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
radio_on = true;
} else if (radio_on) {
radio_on = false;
- cancel_delayed_work_sync(&priv->led_on);
+ cancel_delayed_work(&priv->led_on);
ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
}
} else if (radio_on) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index d228358e6a40..9970c2b1b199 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
{RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
{RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
{RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index ad87a1ac6462..db6430c1a084 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -869,7 +869,7 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
}
*mactime = tsf_info->current_tsf_lsb |
- (tsf_info->current_tsf_msb << 31);
+ ((u64)tsf_info->current_tsf_msb << 32);
out:
kfree(tsf_info);
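
The old expression shifted the 32-bit MSB by 31 in 32-bit arithmetic, which is both the wrong shift count and discards nearly all of the high word before the result reaches the 64-bit mactime. A standalone illustration of the corrected combine:

#include <stdint.h>

/* Cast before shifting so the arithmetic happens in 64 bits. */
static inline uint64_t example_tsf(uint32_t tsf_msb, uint32_t tsf_lsb)
{
	return ((uint64_t)tsf_msb << 32) | tsf_lsb;
}
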
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 9f15ccaf8f05..5ec50a476a69 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -76,8 +76,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
}
}
- if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID &&
- wl->station_mode != STATION_ACTIVE_MODE) {
+ if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
/* indicate to the stack, that beacons have been lost */
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 1b851f650e07..e2750a12c6f1 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
}
if (wl->irq) {
+ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
}
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
- disable_irq(wl->irq);
wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c354fc5c..567660cd2fcd 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -73,6 +73,8 @@ static void wl1251_spi_reset(struct wl1251 *wl)
spi_sync(wl_to_spi(wl), &m);
wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+
+ kfree(cmd);
}
static void wl1251_spi_wake(struct wl1251 *wl)
@@ -127,6 +129,8 @@ static void wl1251_spi_wake(struct wl1251 *wl)
spi_sync(wl_to_spi(wl), &m);
wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+
+ kfree(cmd);
}
static void wl1251_spi_reset_wake(struct wl1251 *wl)
@@ -281,6 +285,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
wl->use_eeprom = pdata->use_eeprom;
+ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +294,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
- disable_irq(wl->irq);
-
ret = wl1251_init_ieee80211(wl);
if (ret)
goto out_irq;
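
Both wl1251 probe paths now mark the interrupt IRQ_NOAUTOEN before request_irq(), instead of requesting it and immediately calling disable_irq(); this closes the window in which a freshly requested (and auto-enabled) IRQ could fire before the driver is ready. A minimal sketch of the pattern with a made-up handler and device name:

#include <linux/interrupt.h>
#include <linux/irq.h>

static int example_request_quiet_irq(unsigned int irq, irq_handler_t handler,
				     void *dev_id)
{
	int ret;

	irq_set_status_flags(irq, IRQ_NOAUTOEN);	/* keep it masked */
	ret = request_irq(irq, handler, 0, "example", dev_id);
	if (ret)
		return ret;

	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
	/* enable_irq(irq) once the rest of the device setup is done */
	return 0;
}
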
diff --git a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig
index 5b92329122c4..c2183594655a 100644
--- a/drivers/net/wireless/ti/wl12xx/Kconfig
+++ b/drivers/net/wireless/ti/wl12xx/Kconfig
@@ -1,5 +1,6 @@
config WL12XX
tristate "TI wl12xx support"
+ depends on MAC80211
select WLCORE
---help---
This module adds support for wireless adapters based on TI wl1271,
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index 9d04c38938bc..d7b907e67170 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -1,7 +1,6 @@
config WLCORE
tristate "TI wlcore support"
- depends on WL_TI && GENERIC_HARDIRQS
- depends on INET
+ depends on WL_TI && GENERIC_HARDIRQS && MAC80211
select FW_LOADER
---help---
This module contains the main code for TI WLAN chips. It abstracts
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 5912541a925e..f3d6fa508269 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1714,3 +1714,85 @@ out:
return ret;
}
+
+#ifdef CONFIG_PM
+/* Set the global behaviour of RX filters - On/Off + default action */
+int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
+ enum rx_filter_action action)
+{
+ struct acx_default_rx_filter *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx default rx filter en: %d act: %d",
+ enable, action);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->enable = enable;
+ acx->default_action = action;
+
+ ret = wl1271_cmd_configure(wl, ACX_ENABLE_RX_DATA_FILTER, acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx default rx filter enable failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+/* Configure or disable a specific RX filter pattern */
+int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
+ struct wl12xx_rx_filter *filter)
+{
+ struct acx_rx_filter_cfg *acx;
+ int fields_size = 0;
+ int acx_size;
+ int ret;
+
+ WARN_ON(enable && !filter);
+ WARN_ON(index >= WL1271_MAX_RX_FILTERS);
+
+ wl1271_debug(DEBUG_ACX,
+ "acx set rx filter idx: %d enable: %d filter: %p",
+ index, enable, filter);
+
+ if (enable) {
+ fields_size = wl1271_rx_filter_get_fields_size(filter);
+
+ wl1271_debug(DEBUG_ACX, "act: %d num_fields: %d field_size: %d",
+ filter->action, filter->num_fields, fields_size);
+ }
+
+ acx_size = ALIGN(sizeof(*acx) + fields_size, 4);
+ acx = kzalloc(acx_size, GFP_KERNEL);
+
+ if (!acx)
+ return -ENOMEM;
+
+ acx->enable = enable;
+ acx->index = index;
+
+ if (enable) {
+ acx->num_fields = filter->num_fields;
+ acx->action = filter->action;
+ wl1271_rx_filter_flatten_fields(filter, acx->fields);
+ }
+
+ wl1271_dump(DEBUG_ACX, "RX_FILTER: ", acx, acx_size);
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_RX_DATA_FILTER, acx, acx_size);
+ if (ret < 0) {
+ wl1271_warning("setting rx filter failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index b2f88831b7a9..e6a74869a5ff 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1147,6 +1147,32 @@ struct wl12xx_acx_config_hangover {
u8 padding[2];
} __packed;
+
+struct acx_default_rx_filter {
+ struct acx_header header;
+ u8 enable;
+
+ /* action of type FILTER_XXX */
+ u8 default_action;
+
+ u8 pad[2];
+} __packed;
+
+
+struct acx_rx_filter_cfg {
+ struct acx_header header;
+
+ u8 enable;
+
+ /* 0 - WL1271_MAX_RX_FILTERS-1 */
+ u8 index;
+
+ u8 action;
+
+ u8 num_fields;
+ u8 fields[0];
+} __packed;
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0000,
ACX_MEM_CFG = 0x0001,
@@ -1305,4 +1331,10 @@ int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
int wl12xx_acx_config_hangover(struct wl1271 *wl);
+#ifdef CONFIG_PM
+int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
+ enum rx_filter_action action);
+int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
+ struct wl12xx_rx_filter *filter);
+#endif /* CONFIG_PM */
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 3a2207db5405..9b98230f84ce 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -72,7 +72,7 @@ static int wlcore_boot_fw_version(struct wl1271 *wl)
struct wl1271_static_data *static_data;
int ret;
- static_data = kmalloc(sizeof(*static_data), GFP_DMA);
+ static_data = kmalloc(sizeof(*static_data), GFP_KERNEL | GFP_DMA);
if (!static_data) {
wl1271_error("Couldn't allocate memory for static data!");
return -ENOMEM;
@@ -413,6 +413,7 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
/* unmask required mbox events */
wl->event_mask = BSS_LOSE_EVENT_ID |
+ REGAINED_BSS_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
ROLE_STOP_COMPLETE_EVENT_ID |
RSSI_SNR_TRIGGER_0_EVENT_ID |
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 5c4716c6f040..5b128a971449 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -123,7 +123,9 @@ static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
unsigned long timeout;
int ret = 0;
- events_vector = kmalloc(sizeof(*events_vector), GFP_DMA);
+ events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA);
+ if (!events_vector)
+ return -ENOMEM;
timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
@@ -1034,7 +1036,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);
tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl));
- memset(tmpl, 0, sizeof(tmpl));
+ memset(tmpl, 0, sizeof(*tmpl));
/* llc layer */
memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
@@ -1083,7 +1085,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* mac80211 header */
hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr));
- memset(hdr, 0, sizeof(hdr));
+ memset(hdr, 0, sizeof(*hdr));
fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
if (wlvif->sta.qos)
fc |= IEEE80211_STYPE_QOS_DATA;
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 292632ddf890..28e2a633c3be 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -103,7 +103,6 @@ static int wl1271_event_process(struct wl1271 *wl)
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
u32 vector;
- bool beacon_loss = false;
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
@@ -141,20 +140,23 @@ static int wl1271_event_process(struct wl1271 *wl)
mbox->soft_gemini_sense_info);
/*
- * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
- * filtering) is enabled. Without PSM, the stack will receive all
- * beacons and can detect beacon loss by itself.
- *
- * As there's possibility that the driver disables PSM before receiving
- * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
- *
+ * The driver acts as the HW connection monitor: on beacon loss, queue
+ * the connection loss work and cancel it on a REGAINED event.
*/
if (vector & BSS_LOSE_EVENT_ID) {
/* TODO: check for multi-role */
+ int delay = wl->conf.conn.synch_fail_thold *
+ wl->conf.conn.bss_lose_timeout;
wl1271_info("Beacon loss detected.");
+ cancel_delayed_work_sync(&wl->connection_loss_work);
+ ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
+ msecs_to_jiffies(delay));
+ }
- /* indicate to the stack, that beacons have been lost */
- beacon_loss = true;
+ if (vector & REGAINED_BSS_EVENT_ID) {
+ /* TODO: check for multi-role */
+ wl1271_info("Beacon regained.");
+ cancel_delayed_work_sync(&wl->connection_loss_work);
}
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
@@ -257,13 +259,6 @@ static int wl1271_event_process(struct wl1271 *wl)
rcu_read_unlock();
}
}
-
- if (beacon_loss)
- wl12xx_for_each_wlvif_sta(wl, wlvif) {
- vif = wl12xx_wlvif_to_vif(wlvif);
- ieee80211_connection_loss(vif);
- }
-
return 0;
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 2b0f987660c6..acef93390d3d 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1120,6 +1120,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
cancel_work_sync(&wl->recovery_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
+ cancel_delayed_work_sync(&wl->connection_loss_work);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
@@ -1261,8 +1262,270 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
#ifdef CONFIG_PM
+static int
+wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
+{
+ int num_fields = 0, in_field = 0, fields_size = 0;
+ int i, pattern_len = 0;
+
+ if (!p->mask) {
+ wl1271_warning("No mask in WoWLAN pattern");
+ return -EINVAL;
+ }
+
+ /*
+ * The pattern is broken up into segments of bytes at different offsets
+ * that need to be checked by the FW filter. Each segment is called
+ * a field in the FW API. We verify that the total number of fields
+ * required for this pattern won't exceed the FW limit (8)
+ * and that the total fields buffer won't exceed the FW size limit.
+ * Note that if there's a pattern which crosses Ethernet/IP header
+ * boundary a new field is required.
+ */
+ for (i = 0; i < p->pattern_len; i++) {
+ if (test_bit(i, (unsigned long *)p->mask)) {
+ if (!in_field) {
+ in_field = 1;
+ pattern_len = 1;
+ } else {
+ if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
+ num_fields++;
+ fields_size += pattern_len +
+ RX_FILTER_FIELD_OVERHEAD;
+ pattern_len = 1;
+ } else
+ pattern_len++;
+ }
+ } else {
+ if (in_field) {
+ in_field = 0;
+ fields_size += pattern_len +
+ RX_FILTER_FIELD_OVERHEAD;
+ num_fields++;
+ }
+ }
+ }
+
+ if (in_field) {
+ fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
+ num_fields++;
+ }
+
+ if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
+ wl1271_warning("RX Filter too complex. Too many segments");
+ return -EINVAL;
+ }
+
+ if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
+ wl1271_warning("RX filter pattern is too big");
+ return -E2BIG;
+ }
+
+ return 0;
+}
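
As a worked example of the accounting above: assuming WL1271_RX_FILTER_ETH_HEADER_SIZE is 14, a pattern whose mask selects bytes 0-5 (inside the Ethernet header) and bytes 14-17 (the start of the IP header) produces two fields, so num_fields is 2 and fields_size is (6 + RX_FILTER_FIELD_OVERHEAD) + (4 + RX_FILTER_FIELD_OVERHEAD); both totals must stay within WL1271_RX_FILTER_MAX_FIELDS and WL1271_RX_FILTER_MAX_FIELDS_SIZE respectively.
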
+
+struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
+{
+ return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
+}
+
+void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
+{
+ int i;
+
+ if (filter == NULL)
+ return;
+
+ for (i = 0; i < filter->num_fields; i++)
+ kfree(filter->fields[i].pattern);
+
+ kfree(filter);
+}
+
+int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
+ u16 offset, u8 flags,
+ u8 *pattern, u8 len)
+{
+ struct wl12xx_rx_filter_field *field;
+
+ if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
+ wl1271_warning("Max fields per RX filter. can't alloc another");
+ return -EINVAL;
+ }
+
+ field = &filter->fields[filter->num_fields];
+
+ field->pattern = kzalloc(len, GFP_KERNEL);
+ if (!field->pattern) {
+ wl1271_warning("Failed to allocate RX filter pattern");
+ return -ENOMEM;
+ }
+
+ filter->num_fields++;
+
+ field->offset = cpu_to_le16(offset);
+ field->flags = flags;
+ field->len = len;
+ memcpy(field->pattern, pattern, len);
+
+ return 0;
+}
+
+int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
+{
+ int i, fields_size = 0;
+
+ for (i = 0; i < filter->num_fields; i++)
+ fields_size += filter->fields[i].len +
+ sizeof(struct wl12xx_rx_filter_field) -
+ sizeof(u8 *);
+
+ return fields_size;
+}
+
+void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
+ u8 *buf)
+{
+ int i;
+ struct wl12xx_rx_filter_field *field;
+
+ for (i = 0; i < filter->num_fields; i++) {
+ field = (struct wl12xx_rx_filter_field *)buf;
+
+ field->offset = filter->fields[i].offset;
+ field->flags = filter->fields[i].flags;
+ field->len = filter->fields[i].len;
+
+ memcpy(&field->pattern, filter->fields[i].pattern, field->len);
+ buf += sizeof(struct wl12xx_rx_filter_field) -
+ sizeof(u8 *) + field->len;
+ }
+}
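
The arithmetic behind the flattening above, as a hedged sketch: the 4-byte serialized header is inferred from the packed struct in wl12xx.h, i.e. RX_FILTER_FIELD_OVERHEAD is what remains of wl12xx_rx_filter_field once the pattern pointer is dropped. Each field is written as that header immediately followed by its pattern bytes, so a 6-byte field plus a 1-byte field flatten to (4 + 6) + (4 + 1) = 15 bytes, which is what wl1271_rx_filter_get_fields_size() reports. FIELD_HDR and flattened_size() below are hypothetical names.

#define FIELD_HDR 4	/* serialized header: offset (2) + len (1) + flags (1) */

/* Hypothetical stand-alone mirror of wl1271_rx_filter_get_fields_size(). */
static int flattened_size(const unsigned char *field_lens, int num_fields)
{
	int i, size = 0;

	for (i = 0; i < num_fields; i++)
		size += FIELD_HDR + field_lens[i];

	return size;	/* e.g. lengths {6, 1} -> 15 bytes */
}
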
+
+/*
+ * Allocates an RX filter and returns it through f.
+ * The caller must free it with wl1271_rx_filter_free().
+ */
+static int wl1271_convert_wowlan_pattern_to_rx_filter(
+ struct cfg80211_wowlan_trig_pkt_pattern *p,
+ struct wl12xx_rx_filter **f)
+{
+ int i, j, ret = 0;
+ struct wl12xx_rx_filter *filter;
+ u16 offset;
+ u8 flags, len;
+
+ filter = wl1271_rx_filter_alloc();
+ if (!filter) {
+ wl1271_warning("Failed to alloc rx filter");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i = 0;
+ while (i < p->pattern_len) {
+ if (!test_bit(i, (unsigned long *)p->mask)) {
+ i++;
+ continue;
+ }
+
+ for (j = i; j < p->pattern_len; j++) {
+ if (!test_bit(j, (unsigned long *)p->mask))
+ break;
+
+ if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
+ j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
+ break;
+ }
+
+ if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
+ offset = i;
+ flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
+ } else {
+ offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
+ flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
+ }
+
+ len = j - i;
+
+ ret = wl1271_rx_filter_alloc_field(filter,
+ offset,
+ flags,
+ &p->pattern[i], len);
+ if (ret)
+ goto err;
+
+ i = j;
+ }
+
+ filter->action = FILTER_SIGNAL;
+
+ *f = filter;
+ return 0;
+
+err:
+ wl1271_rx_filter_free(filter);
+ *f = NULL;
+
+ return ret;
+}
+
+static int wl1271_configure_wowlan(struct wl1271 *wl,
+ struct cfg80211_wowlan *wow)
+{
+ int i, ret;
+
+ if (!wow || wow->any || !wow->n_patterns) {
+ wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
+ wl1271_rx_filter_clear_all(wl);
+ return 0;
+ }
+
+ if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
+ return -EINVAL;
+
+ /* Validate all incoming patterns before clearing current FW state */
+ for (i = 0; i < wow->n_patterns; i++) {
+ ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
+ if (ret) {
+ wl1271_warning("Bad wowlan pattern %d", i);
+ return ret;
+ }
+ }
+
+ wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
+ wl1271_rx_filter_clear_all(wl);
+
+ /* Translate WoWLAN patterns into filters */
+ for (i = 0; i < wow->n_patterns; i++) {
+ struct cfg80211_wowlan_trig_pkt_pattern *p;
+ struct wl12xx_rx_filter *filter = NULL;
+
+ p = &wow->patterns[i];
+
+ ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
+ if (ret) {
+ wl1271_warning("Failed to create an RX filter from "
+ "wowlan pattern %d", i);
+ goto out;
+ }
+
+ ret = wl1271_rx_filter_enable(wl, i, 1, filter);
+
+ wl1271_rx_filter_free(filter);
+ if (ret)
+ goto out;
+ }
+
+ ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
+
+out:
+ return ret;
+}
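
For illustration, a hedged sketch of a cfg80211 pattern this path would accept (values and array names are hypothetical, and the declarations are assumed to come from net/cfg80211.h): a 24-byte pattern whose mask selects the 6-byte Ethernet destination address and the IP protocol byte at absolute offset 23. wl1271_convert_wowlan_pattern_to_rx_filter() would turn it into one ETHERNET_HEADER field (offset 0, len 6) and one IP_HEADER field (offset 23 - 14 = 9, len 1).

/* Sketch only: illustrative values, not taken from the driver. */
static u8 wake_pattern[24] = {
	0x00, 0x11, 0x22, 0x33, 0x44, 0x55,	/* Ethernet destination MAC */
	[23] = 0x11,				/* IP protocol byte: UDP */
};
static u8 wake_mask[3] = { 0x3f, 0x00, 0x80 };	/* one bit per byte: bits 0-5 and 23 */

static struct cfg80211_wowlan_trig_pkt_pattern wake = {
	.pattern	= wake_pattern,
	.mask		= wake_mask,
	.pattern_len	= sizeof(wake_pattern),
};
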
+
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_wowlan *wow)
{
int ret = 0;
@@ -1273,6 +1536,7 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (ret < 0)
goto out;
+ wl1271_configure_wowlan(wl, wow);
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.suspend_wake_up_event,
wl->conf.conn.suspend_listen_interval);
@@ -1308,10 +1572,11 @@ out:
}
static int wl1271_configure_suspend(struct wl1271 *wl,
- struct wl12xx_vif *wlvif)
+ struct wl12xx_vif *wlvif,
+ struct cfg80211_wowlan *wow)
{
if (wlvif->bss_type == BSS_TYPE_STA_BSS)
- return wl1271_configure_suspend_sta(wl, wlvif);
+ return wl1271_configure_suspend_sta(wl, wlvif, wow);
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
return wl1271_configure_suspend_ap(wl, wlvif);
return 0;
@@ -1332,6 +1597,8 @@ static void wl1271_configure_resume(struct wl1271 *wl,
return;
if (is_sta) {
+ wl1271_configure_wowlan(wl, NULL);
+
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.wake_up_event,
wl->conf.conn.listen_interval);
@@ -1355,15 +1622,16 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
- WARN_ON(!wow || !wow->any);
+ WARN_ON(!wow);
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) {
- ret = wl1271_configure_suspend(wl, wlvif);
+ ret = wl1271_configure_suspend(wl, wlvif, wow);
if (ret < 0) {
+ mutex_unlock(&wl->mutex);
wl1271_warning("couldn't prepare device to suspend");
return ret;
}
@@ -1487,6 +1755,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->elp_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
+ cancel_delayed_work_sync(&wl->connection_loss_work);
/* let's notify MAC80211 about the remaining pending TX frames */
wl12xx_tx_reset(wl, true);
@@ -3439,6 +3708,9 @@ sta_not_found:
do_join = true;
set_assoc = true;
+ /* Cancel connection_loss_work */
+ cancel_delayed_work_sync(&wl->connection_loss_work);
+
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
@@ -4549,6 +4821,34 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
+static void wl1271_connection_loss_work(struct work_struct *work)
+{
+ struct delayed_work *dwork;
+ struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+
+ dwork = container_of(work, struct delayed_work, work);
+ wl = container_of(dwork, struct wl1271, connection_loss_work);
+
+ wl1271_info("Connection loss work.");
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ /* Call mac80211 connection loss */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ goto out;
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_connection_loss(vif);
+ }
+out:
+ mutex_unlock(&wl->mutex);
+}
+
static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
u32 oui, u32 nic, int n)
{
@@ -4804,6 +5104,8 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
+ INIT_DELAYED_WORK(&wl->connection_loss_work,
+ wl1271_connection_loss_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -4861,7 +5163,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
goto err_dummy_packet;
}
- wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_DMA);
+ wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_KERNEL | GFP_DMA);
if (!wl->mbox) {
ret = -ENOMEM;
goto err_fwlog;
@@ -5003,9 +5305,14 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
if (!ret) {
wl->irq_wake_enabled = true;
device_init_wakeup(wl->dev, 1);
- if (pdata->pwr_in_suspend)
+ if (pdata->pwr_in_suspend) {
wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
-
+ wl->hw->wiphy->wowlan.n_patterns =
+ WL1271_MAX_RX_FILTERS;
+ wl->hw->wiphy->wowlan.pattern_min_len = 1;
+ wl->hw->wiphy->wowlan.pattern_max_len =
+ WL1271_RX_FILTER_MAX_PATTERN_SIZE;
+ }
}
disable_irq(wl->irq);
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 89bd9385e90b..d6a3c6b07827 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -278,3 +278,41 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
wl12xx_rearm_rx_streaming(wl, active_hlids);
}
+
+#ifdef CONFIG_PM
+int wl1271_rx_filter_enable(struct wl1271 *wl,
+ int index, bool enable,
+ struct wl12xx_rx_filter *filter)
+{
+ int ret;
+
+ if (wl->rx_filter_enabled[index] == enable) {
+ wl1271_warning("Request to enable an already "
+ "enabled rx filter %d", index);
+ return 0;
+ }
+
+ ret = wl1271_acx_set_rx_filter(wl, index, enable, filter);
+
+ if (ret) {
+ wl1271_error("Failed to %s rx data filter %d (err=%d)",
+ enable ? "enable" : "disable", index, ret);
+ return ret;
+ }
+
+ wl->rx_filter_enabled[index] = enable;
+
+ return 0;
+}
+
+void wl1271_rx_filter_clear_all(struct wl1271 *wl)
+{
+ int i;
+
+ for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
+ if (!wl->rx_filter_enabled[i])
+ continue;
+ wl1271_rx_filter_enable(wl, i, 0, NULL);
+ }
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 6e129e2a8546..e9a162a864ca 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -138,5 +138,9 @@ struct wl1271_rx_descriptor {
void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+int wl1271_rx_filter_enable(struct wl1271 *wl,
+ int index, bool enable,
+ struct wl12xx_rx_filter *filter);
+void wl1271_rx_filter_clear_all(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx.h b/drivers/net/wireless/ti/wlcore/wl12xx.h
index a9b220c43e54..f12bdf745180 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx.h
@@ -279,6 +279,39 @@ struct wl1271_link {
u8 ba_bitmap;
};
+#define WL1271_MAX_RX_FILTERS 5
+#define WL1271_RX_FILTER_MAX_FIELDS 8
+
+#define WL1271_RX_FILTER_ETH_HEADER_SIZE 14
+#define WL1271_RX_FILTER_MAX_FIELDS_SIZE 95
+#define RX_FILTER_FIELD_OVERHEAD \
+ (sizeof(struct wl12xx_rx_filter_field) - sizeof(u8 *))
+#define WL1271_RX_FILTER_MAX_PATTERN_SIZE \
+ (WL1271_RX_FILTER_MAX_FIELDS_SIZE - RX_FILTER_FIELD_OVERHEAD)
+
+#define WL1271_RX_FILTER_FLAG_MASK BIT(0)
+#define WL1271_RX_FILTER_FLAG_IP_HEADER 0
+#define WL1271_RX_FILTER_FLAG_ETHERNET_HEADER BIT(1)
+
+enum rx_filter_action {
+ FILTER_DROP = 0,
+ FILTER_SIGNAL = 1,
+ FILTER_FW_HANDLE = 2
+};
+
+struct wl12xx_rx_filter_field {
+ __le16 offset;
+ u8 len;
+ u8 flags;
+ u8 *pattern;
+} __packed;
+
+struct wl12xx_rx_filter {
+ u8 action;
+ int num_fields;
+ struct wl12xx_rx_filter_field fields[WL1271_RX_FILTER_MAX_FIELDS];
+};
+
struct wl1271_station {
u8 hlid;
};
@@ -439,6 +472,14 @@ int wl1271_plt_stop(struct wl1271 *wl);
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
+int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
+ u16 offset, u8 flags,
+ u8 *pattern, u8 len);
+void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter);
+struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void);
+int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter);
+void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
+ u8 *buf);
#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 39f9fadfebd9..0b3f0b586f4b 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -243,6 +243,9 @@ struct wl1271 {
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
+ /* Connection loss work */
+ struct delayed_work connection_loss_work;
+
bool sched_scanning;
/* The current band */
@@ -349,6 +352,9 @@ struct wl1271 {
/* size of the private FW status data */
size_t fw_status_priv_len;
+
+ /* RX Data filter rule state - enabled/disabled */
+ bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
};
int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2596401308a8..f4a6fcaeffb1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -325,8 +325,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
unsigned int count;
int i, copy_off;
- count = DIV_ROUND_UP(
- offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+ count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
copy_off = skb_headlen(skb) % PAGE_SIZE;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 0ebbb1906c30..30899901aef5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1935,14 +1935,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
- unregister_netdev(info->netdev);
-
xennet_disconnect_backend(info);
- del_timer_sync(&info->rx_refill_timer);
-
xennet_sysfs_delif(info->netdev);
+ unregister_netdev(info->netdev);
+
+ del_timer_sync(&info->rx_refill_timer);
+
free_percpu(info->stats);
free_netdev(info->netdev);
@@ -1962,9 +1962,6 @@ static int __init netif_init(void)
if (!xen_domain())
return -ENODEV;
- if (xen_initial_domain())
- return 0;
-
if (xen_hvm_domain() && !xen_platform_pci_unplug)
return -ENODEV;
@@ -1977,9 +1974,6 @@ module_init(netif_init);
static void __exit netif_exit(void)
{
- if (xen_initial_domain())
- return;
-
xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 5af959274d4e..3b20b73ee649 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -17,6 +17,19 @@ config PN544_NFC
To compile this driver as a module, choose m here. The module will
be called pn544.
+config PN544_HCI_NFC
+ tristate "HCI PN544 NFC driver"
+ depends on I2C && NFC_SHDLC
+ select CRC_CCITT
+ default n
+ ---help---
+ NXP PN544 i2c driver.
+ This is a driver based on the SHDLC and HCI NFC kernel layers and
+ will thus not work with the NXP libnfc library.
+
+ To compile this driver as a module, choose m here. The module will
+ be called pn544_hci.
+
config NFC_PN533
tristate "NXP PN533 USB driver"
depends on USB
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index ab99e8572f02..473e44cef612 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_PN544_NFC) += pn544.o
+obj-$(CONFIG_PN544_HCI_NFC) += pn544_hci.o
obj-$(CONFIG_NFC_PN533) += pn533.o
obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index e6ec16d92e65..19110f0eb15f 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -394,9 +394,6 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
struct pn533_frame *in_frame;
int rc;
- if (dev == NULL)
- return;
-
in_frame = dev->wq_in_frame;
if (dev->wq_in_error)
@@ -1194,8 +1191,8 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
return rc;
}
-static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
- u32 protocol)
+static int pn533_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, u32 protocol)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
@@ -1243,7 +1240,8 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
return 0;
}
-static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
+static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
u8 tg;
@@ -1351,7 +1349,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
return 0;
}
-static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 comm_mode, u8* gb, size_t gb_len)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
@@ -1552,10 +1550,9 @@ error:
return 0;
}
-static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
- struct sk_buff *skb,
- data_exchange_cb_t cb,
- void *cb_context)
+static int pn533_data_exchange(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_frame *out_frame, *in_frame;
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
new file mode 100644
index 000000000000..281f18c2fb82
--- /dev/null
+++ b/drivers/nfc/pn544_hci.c
@@ -0,0 +1,947 @@
+/*
+ * HCI based Driver for NXP PN544 NFC Chip
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/crc-ccitt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/shdlc.h>
+
+#include <linux/nfc/pn544.h>
+
+#define DRIVER_DESC "HCI NFC driver for PN544"
+
+#define PN544_HCI_DRIVER_NAME "pn544_hci"
+
+/* Timing restrictions (ms) */
+#define PN544_HCI_RESETVEN_TIME 30
+
+static struct i2c_device_id pn544_hci_id_table[] = {
+ {"pn544", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pn544_hci_id_table);
+
+#define HCI_MODE 0
+#define FW_MODE 1
+
+/* framing in HCI mode */
+#define PN544_HCI_LLC_LEN 1
+#define PN544_HCI_LLC_CRC 2
+#define PN544_HCI_LLC_LEN_CRC (PN544_HCI_LLC_LEN + PN544_HCI_LLC_CRC)
+#define PN544_HCI_LLC_MIN_SIZE (1 + PN544_HCI_LLC_LEN_CRC)
+#define PN544_HCI_LLC_MAX_PAYLOAD 29
+#define PN544_HCI_LLC_MAX_SIZE (PN544_HCI_LLC_LEN_CRC + 1 + \
+ PN544_HCI_LLC_MAX_PAYLOAD)
+
+enum pn544_state {
+ PN544_ST_COLD,
+ PN544_ST_FW_READY,
+ PN544_ST_READY,
+};
+
+#define FULL_VERSION_LEN 11
+
+/* Proprietary commands */
+#define PN544_WRITE 0x3f
+
+/* Proprietary gates, events, commands and registers */
+
+/* NFC_HCI_RF_READER_A_GATE additional registers and commands */
+#define PN544_RF_READER_A_AUTO_ACTIVATION 0x10
+#define PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION 0x12
+#define PN544_MIFARE_CMD 0x21
+
+/* Commands that apply to all RF readers */
+#define PN544_RF_READER_CMD_PRESENCE_CHECK 0x30
+#define PN544_RF_READER_CMD_ACTIVATE_NEXT 0x32
+
+/* NFC_HCI_ID_MGMT_GATE additional registers */
+#define PN544_ID_MGMT_FULL_VERSION_SW 0x10
+
+#define PN544_RF_READER_ISO15693_GATE 0x12
+
+#define PN544_RF_READER_F_GATE 0x14
+#define PN544_FELICA_ID 0x04
+#define PN544_FELICA_RAW 0x20
+
+#define PN544_RF_READER_JEWEL_GATE 0x15
+#define PN544_JEWEL_RAW_CMD 0x23
+
+#define PN544_RF_READER_NFCIP1_INITIATOR_GATE 0x30
+#define PN544_RF_READER_NFCIP1_TARGET_GATE 0x31
+
+#define PN544_SYS_MGMT_GATE 0x90
+#define PN544_SYS_MGMT_INFO_NOTIFICATION 0x02
+
+#define PN544_POLLING_LOOP_MGMT_GATE 0x94
+#define PN544_PL_RDPHASES 0x06
+#define PN544_PL_EMULATION 0x07
+#define PN544_PL_NFCT_DEACTIVATED 0x09
+
+#define PN544_SWP_MGMT_GATE 0xA0
+
+#define PN544_NFC_WI_MGMT_GATE 0xA1
+
+static u8 pn544_custom_gates[] = {
+ PN544_SYS_MGMT_GATE,
+ PN544_SWP_MGMT_GATE,
+ PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_NFC_WI_MGMT_GATE,
+ PN544_RF_READER_F_GATE,
+ PN544_RF_READER_JEWEL_GATE,
+ PN544_RF_READER_ISO15693_GATE,
+ PN544_RF_READER_NFCIP1_INITIATOR_GATE,
+ PN544_RF_READER_NFCIP1_TARGET_GATE
+};
+
+/* Largest headroom needed for outgoing custom commands */
+#define PN544_CMDS_HEADROOM 2
+
+struct pn544_hci_info {
+ struct i2c_client *i2c_dev;
+ struct nfc_shdlc *shdlc;
+
+ enum pn544_state state;
+
+ struct mutex info_lock;
+
+ unsigned int gpio_en;
+ unsigned int gpio_irq;
+ unsigned int gpio_fw;
+ unsigned int en_polarity;
+
+ int hard_fault; /*
+ * < 0 if a hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+};
+
+static void pn544_hci_platform_init(struct pn544_hci_info *info)
+{
+ int polarity, retry, ret;
+ char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
+ int count = sizeof(rset_cmd);
+
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+ dev_info(&info->i2c_dev->dev, "Detecting nfc_en polarity\n");
+
+ /* Disable fw download */
+ gpio_set_value(info->gpio_fw, 0);
+
+ for (polarity = 0; polarity < 2; polarity++) {
+ info->en_polarity = polarity;
+ retry = 3;
+ while (retry--) {
+ /* power off */
+ gpio_set_value(info->gpio_en, !info->en_polarity);
+ usleep_range(10000, 15000);
+
+ /* power on */
+ gpio_set_value(info->gpio_en, info->en_polarity);
+ usleep_range(10000, 15000);
+
+ /* send reset */
+ dev_dbg(&info->i2c_dev->dev, "Sending reset cmd\n");
+ ret = i2c_master_send(info->i2c_dev, rset_cmd, count);
+ if (ret == count) {
+ dev_info(&info->i2c_dev->dev,
+ "nfc_en polarity : active %s\n",
+ (polarity == 0 ? "low" : "high"));
+ goto out;
+ }
+ }
+ }
+
+ dev_err(&info->i2c_dev->dev,
+ "Could not detect nfc_en polarity, fallback to active high\n");
+
+out:
+ gpio_set_value(info->gpio_en, !info->en_polarity);
+}
+
+static int pn544_hci_enable(struct pn544_hci_info *info, int mode)
+{
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+
+ gpio_set_value(info->gpio_fw, 0);
+ gpio_set_value(info->gpio_en, info->en_polarity);
+ usleep_range(10000, 15000);
+
+ return 0;
+}
+
+static void pn544_hci_disable(struct pn544_hci_info *info)
+{
+ pr_info(DRIVER_DESC ": %s\n", __func__);
+
+ gpio_set_value(info->gpio_fw, 0);
+ gpio_set_value(info->gpio_en, !info->en_polarity);
+ usleep_range(10000, 15000);
+
+ gpio_set_value(info->gpio_en, info->en_polarity);
+ usleep_range(10000, 15000);
+
+ gpio_set_value(info->gpio_en, !info->en_polarity);
+ usleep_range(10000, 15000);
+}
+
+static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
+{
+ int r;
+
+ usleep_range(3000, 6000);
+
+ r = i2c_master_send(client, buf, len);
+
+ if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ usleep_range(6000, 10000);
+ r = i2c_master_send(client, buf, len);
+ }
+
+ if (r >= 0 && r != len)
+ r = -EREMOTEIO;
+
+ return r;
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+ int len;
+ u16 crc;
+
+ len = buf[0] + 1;
+ crc = crc_ccitt(0xffff, buf, len - 2);
+ crc = ~crc;
+
+ if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
+ pr_err(PN544_HCI_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
+ crc, buf[len - 1], buf[len - 2]);
+
+ pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+ print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+ 16, 2, buf, buflen, false);
+ return -EPERM;
+ }
+ return 0;
+}
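
For symmetry with check_crc() above, a hypothetical helper (not part of this driver) showing how a frame would be finished so that the check passes: CRC-CCITT is computed over the length byte and the payload, bit-inverted, and appended low byte first.

#include <linux/crc-ccitt.h>

/* Hypothetical sketch: buf[1..payload_len] already holds the payload. */
static int pn544_frame_finish(u8 *buf, int payload_len)
{
	int len = 1 + payload_len;	/* length byte + payload */
	u16 crc;

	buf[0] = payload_len + 2;	/* bytes following buf[0]: payload + 2 CRC bytes */
	crc = ~crc_ccitt(0xffff, buf, len);
	buf[len] = crc & 0xff;		/* low byte first, as check_crc() expects */
	buf[len + 1] = crc >> 8;

	return len + 2;			/* total frame size on the wire */
}
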
+
+/*
+ * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
+ * that the i2c bus will be flushed and that the next read will start on a
+ * new frame. The returned skb contains only the LLC header and payload.
+ * Returns:
+ * -EREMOTEIO : i2c read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * -ENOMEM : cannot allocate skb, frame dropped
+ */
+static int pn544_hci_i2c_read(struct i2c_client *client, struct sk_buff **skb)
+{
+ int r;
+ u8 len;
+ u8 tmp[PN544_HCI_LLC_MAX_SIZE - 1];
+
+ r = i2c_master_recv(client, &len, 1);
+ if (r != 1) {
+ dev_err(&client->dev, "cannot read len byte\n");
+ return -EREMOTEIO;
+ }
+
+ if ((len < (PN544_HCI_LLC_MIN_SIZE - 1)) ||
+ (len > (PN544_HCI_LLC_MAX_SIZE - 1))) {
+ dev_err(&client->dev, "invalid len byte\n");
+ r = -EBADMSG;
+ goto flush;
+ }
+
+ *skb = alloc_skb(1 + len, GFP_KERNEL);
+ if (*skb == NULL) {
+ r = -ENOMEM;
+ goto flush;
+ }
+
+ *skb_put(*skb, 1) = len;
+
+ r = i2c_master_recv(client, skb_put(*skb, len), len);
+ if (r != len) {
+ kfree_skb(*skb);
+ return -EREMOTEIO;
+ }
+
+ r = check_crc((*skb)->data, (*skb)->len);
+ if (r != 0) {
+ kfree_skb(*skb);
+ r = -EBADMSG;
+ goto flush;
+ }
+
+ skb_pull(*skb, 1);
+ skb_trim(*skb, (*skb)->len - 2);
+
+ usleep_range(3000, 6000);
+
+ return 0;
+
+flush:
+ if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
+ r = -EREMOTEIO;
+
+ usleep_range(3000, 6000);
+
+ return r;
+}
+
+/*
+ * Reads an shdlc frame from the chip. This is not as straightforward as it
+ * seems. There are cases where we could lose the frame start synchronization.
+ * The frame format is len-data-crc, and corruption can occur anywhere while
+ * the frame is in transit on the i2c bus, such that we could read an invalid len.
+ * In order to recover synchronization with the next frame, we must be sure
+ * to read the real amount of data without using the len byte. We do this by
+ * assuming the following:
+ * - the chip will always present only a single complete frame on the bus
+ * before triggering the interrupt
+ * - the chip will not present a new frame until we have completely read
+ * the previous one (or until we have handled the interrupt).
+ * The tricky case is when we read a corrupted len that is less than the real
+ * len. We must detect this here in order to determine that we need to flush
+ * the bus. This is the reason why we check the crc here.
+ */
+static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
+{
+ struct pn544_hci_info *info = dev_id;
+ struct i2c_client *client = info->i2c_dev;
+ struct sk_buff *skb = NULL;
+ int r;
+
+ BUG_ON(!info);
+ BUG_ON(irq != info->i2c_dev->irq);
+
+ dev_dbg(&client->dev, "IRQ\n");
+
+ if (info->hard_fault != 0)
+ return IRQ_HANDLED;
+
+ r = pn544_hci_i2c_read(client, &skb);
+ if (r == -EREMOTEIO) {
+ info->hard_fault = r;
+
+ nfc_shdlc_recv_frame(info->shdlc, NULL);
+
+ return IRQ_HANDLED;
+ } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+ return IRQ_HANDLED;
+ }
+
+ nfc_shdlc_recv_frame(info->shdlc, skb);
+
+ return IRQ_HANDLED;
+}
+
+static int pn544_hci_open(struct nfc_shdlc *shdlc)
+{
+ struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+ int r = 0;
+
+ mutex_lock(&info->info_lock);
+
+ if (info->state != PN544_ST_COLD) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ r = pn544_hci_enable(info, HCI_MODE);
+
+out:
+ mutex_unlock(&info->info_lock);
+ return r;
+}
+
+static void pn544_hci_close(struct nfc_shdlc *shdlc)
+{
+ struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+
+ mutex_lock(&info->info_lock);
+
+ if (info->state == PN544_ST_COLD)
+ goto out;
+
+ pn544_hci_disable(info);
+
+out:
+ mutex_unlock(&info->info_lock);
+}
+
+static int pn544_hci_ready(struct nfc_shdlc *shdlc)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+ struct sk_buff *skb;
+ static struct hw_config {
+ u8 adr[2];
+ u8 value;
+ } hw_config[] = {
+ {{0x9f, 0x9a}, 0x00},
+
+ {{0x98, 0x10}, 0xbc},
+
+ {{0x9e, 0x71}, 0x00},
+
+ {{0x98, 0x09}, 0x00},
+
+ {{0x9e, 0xb4}, 0x00},
+
+ {{0x9e, 0xd9}, 0xff},
+ {{0x9e, 0xda}, 0xff},
+ {{0x9e, 0xdb}, 0x23},
+ {{0x9e, 0xdc}, 0x21},
+ {{0x9e, 0xdd}, 0x22},
+ {{0x9e, 0xde}, 0x24},
+
+ {{0x9c, 0x01}, 0x08},
+
+ {{0x9e, 0xaa}, 0x01},
+
+ {{0x9b, 0xd1}, 0x0d},
+ {{0x9b, 0xd2}, 0x24},
+ {{0x9b, 0xd3}, 0x0a},
+ {{0x9b, 0xd4}, 0x22},
+ {{0x9b, 0xd5}, 0x08},
+ {{0x9b, 0xd6}, 0x1e},
+ {{0x9b, 0xdd}, 0x1c},
+
+ {{0x9b, 0x84}, 0x13},
+ {{0x99, 0x81}, 0x7f},
+ {{0x99, 0x31}, 0x70},
+
+ {{0x98, 0x00}, 0x3f},
+
+ {{0x9f, 0x09}, 0x00},
+
+ {{0x9f, 0x0a}, 0x05},
+
+ {{0x9e, 0xd1}, 0xa1},
+ {{0x99, 0x23}, 0x00},
+
+ {{0x9e, 0x74}, 0x80},
+
+ {{0x9f, 0x28}, 0x10},
+
+ {{0x9f, 0x35}, 0x14},
+
+ {{0x9f, 0x36}, 0x60},
+
+ {{0x9c, 0x31}, 0x00},
+
+ {{0x9c, 0x32}, 0xc8},
+
+ {{0x9c, 0x19}, 0x40},
+
+ {{0x9c, 0x1a}, 0x40},
+
+ {{0x9c, 0x0c}, 0x00},
+
+ {{0x9c, 0x0d}, 0x00},
+
+ {{0x9c, 0x12}, 0x00},
+
+ {{0x9c, 0x13}, 0x00},
+
+ {{0x98, 0xa2}, 0x0e},
+
+ {{0x98, 0x93}, 0x40},
+
+ {{0x98, 0x7d}, 0x02},
+ {{0x98, 0x7e}, 0x00},
+ {{0x9f, 0xc8}, 0x01},
+ };
+ struct hw_config *p = hw_config;
+ int count = ARRAY_SIZE(hw_config);
+ struct sk_buff *res_skb;
+ u8 param[4];
+ int r;
+
+ param[0] = 0;
+ while (count--) {
+ param[1] = p->adr[0];
+ param[2] = p->adr[1];
+ param[3] = p->value;
+
+ r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_WRITE,
+ param, 4, &res_skb);
+ if (r < 0)
+ return r;
+
+ if (res_skb->len != 1) {
+ kfree_skb(res_skb);
+ return -EPROTO;
+ }
+
+ if (res_skb->data[0] != p->value) {
+ kfree_skb(res_skb);
+ return -EIO;
+ }
+
+ kfree_skb(res_skb);
+
+ p++;
+ }
+
+ param[0] = NFC_HCI_UICC_HOST_ID;
+ r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_WHITELIST, param, 1);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x3d;
+ r = nfc_hci_set_param(hdev, PN544_SYS_MGMT_GATE,
+ PN544_SYS_MGMT_INFO_NOTIFICATION, param, 1);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x0;
+ r = nfc_hci_set_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ PN544_RF_READER_A_AUTO_ACTIVATION, param, 1);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x1;
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_NFCT_DEACTIVATED, param, 1);
+ if (r < 0)
+ return r;
+
+ param[0] = 0x0;
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_RDPHASES, param, 1);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+ PN544_ID_MGMT_FULL_VERSION_SW, &skb);
+ if (r < 0)
+ return r;
+
+ if (skb->len != FULL_VERSION_LEN) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ skb->data, FULL_VERSION_LEN, false);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
+{
+ struct pn544_hci_info *info = nfc_shdlc_get_clientdata(shdlc);
+ struct i2c_client *client = info->i2c_dev;
+
+ if (info->hard_fault != 0)
+ return info->hard_fault;
+
+ return pn544_hci_i2c_write(client, skb->data, skb->len);
+}
+
+static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+ u8 phases = 0;
+ int r;
+ u8 duration[2];
+ u8 activated;
+
+ pr_info(DRIVER_DESC ": %s protocols = %d\n", __func__, protocols);
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ return r;
+
+ duration[0] = 0x18;
+ duration[1] = 0x6a;
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_EMULATION, duration, 2);
+ if (r < 0)
+ return r;
+
+ activated = 0;
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_NFCT_DEACTIVATED, &activated, 1);
+ if (r < 0)
+ return r;
+
+ if (protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
+ NFC_PROTO_JEWEL_MASK))
+ phases |= 1; /* Type A */
+ if (protocols & NFC_PROTO_FELICA_MASK) {
+ phases |= (1 << 2); /* Type F 212 */
+ phases |= (1 << 3); /* Type F 424 */
+ }
+
+ phases |= (1 << 5); /* NFC active */
+
+ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE,
+ PN544_PL_RDPHASES, &phases, 1);
+ if (r < 0)
+ return r;
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
+ if (r < 0)
+ nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+
+ return r;
+}
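
A tiny worked example of the phases bitmask computed above (hypothetical, just checking the arithmetic): polling for ISO14443 plus FeliCa selects Type A, both Type F rates and NFC active.

#include <assert.h>

int main(void)
{
	unsigned int phases = 0;

	phases |= 1;		/* Type A (ISO14443 / MIFARE / Jewel) */
	phases |= 1 << 2;	/* Type F 212 kbps */
	phases |= 1 << 3;	/* Type F 424 kbps */
	phases |= 1 << 5;	/* NFC active */

	assert(phases == 0x2d);
	return 0;
}
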
+
+static int pn544_hci_target_from_gate(struct nfc_shdlc *shdlc, u8 gate,
+ struct nfc_target *target)
+{
+ switch (gate) {
+ case PN544_RF_READER_F_GATE:
+ target->supported_protocols = NFC_PROTO_FELICA_MASK;
+ break;
+ case PN544_RF_READER_JEWEL_GATE:
+ target->supported_protocols = NFC_PROTO_JEWEL_MASK;
+ target->sens_res = 0x0c00;
+ break;
+ default:
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int pn544_hci_complete_target_discovered(struct nfc_shdlc *shdlc,
+ u8 gate,
+ struct nfc_target *target)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+ struct sk_buff *uid_skb;
+ int r = 0;
+
+ if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+ if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
+ target->nfcid1_len != 10)
+ return -EPROTO;
+
+ r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ PN544_RF_READER_CMD_ACTIVATE_NEXT,
+ target->nfcid1, target->nfcid1_len, NULL);
+ } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) {
+ r = nfc_hci_get_param(hdev, PN544_RF_READER_F_GATE,
+ PN544_FELICA_ID, &uid_skb);
+ if (r < 0)
+ return r;
+
+ if (uid_skb->len != 8) {
+ kfree_skb(uid_skb);
+ return -EPROTO;
+ }
+
+ r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
+ PN544_RF_READER_CMD_ACTIVATE_NEXT,
+ uid_skb->data, uid_skb->len, NULL);
+ kfree_skb(uid_skb);
+ } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) {
+ /*
+ * TODO: maybe other ISO 14443 targets require some kind of continue
+ * activation, but for now we've only seen the one below.
+ */
+ if (target->sens_res == 0x4403) /* Type 4 Mifare DESFire */
+ r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION,
+ NULL, 0, NULL);
+ }
+
+ return r;
+}
+
+#define MIFARE_CMD_AUTH_KEY_A 0x60
+#define MIFARE_CMD_AUTH_KEY_B 0x61
+#define MIFARE_CMD_HEADER 2
+#define MIFARE_UID_LEN 4
+#define MIFARE_KEY_LEN 6
+#define MIFARE_CMD_LEN 12
+/*
+ * Returns:
+ * <= 0: driver handled the data exchange
+ * 1: the driver does not handle this case specially; fall back to standard processing
+ */
+static int pn544_hci_data_exchange(struct nfc_shdlc *shdlc,
+ struct nfc_target *target,
+ struct sk_buff *skb,
+ struct sk_buff **res_skb)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+ int r;
+
+ pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__,
+ target->hci_reader_gate);
+
+ switch (target->hci_reader_gate) {
+ case NFC_HCI_RF_READER_A_GATE:
+ if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
+ /*
+ * It seems that pn544 is inverting key and UID for
+ * MIFARE authentication commands.
+ */
+ if (skb->len == MIFARE_CMD_LEN &&
+ (skb->data[0] == MIFARE_CMD_AUTH_KEY_A ||
+ skb->data[0] == MIFARE_CMD_AUTH_KEY_B)) {
+ u8 uid[MIFARE_UID_LEN];
+ u8 *data = skb->data + MIFARE_CMD_HEADER;
+
+ memcpy(uid, data + MIFARE_KEY_LEN,
+ MIFARE_UID_LEN);
+ memmove(data + MIFARE_UID_LEN, data,
+ MIFARE_KEY_LEN);
+ memcpy(data, uid, MIFARE_UID_LEN);
+ }
+
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_MIFARE_CMD,
+ skb->data, skb->len, res_skb);
+ } else
+ return 1;
+ case PN544_RF_READER_F_GATE:
+ *skb_push(skb, 1) = 0;
+ *skb_push(skb, 1) = 0;
+
+ r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_FELICA_RAW,
+ skb->data, skb->len, res_skb);
+ if (r == 0)
+ skb_pull(*res_skb, 1);
+ return r;
+ case PN544_RF_READER_JEWEL_GATE:
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_JEWEL_RAW_CMD,
+ skb->data, skb->len, res_skb);
+ default:
+ return 1;
+ }
+}
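
To make the MIFARE reordering above concrete, a hypothetical stand-alone copy of the same swap: the NFC core hands the driver cmd, block, key[6], uid[4], while the pn544 expects cmd, block, uid[4], key[6].

#include <string.h>

/* Hypothetical stand-alone mirror of the memcpy/memmove sequence above. */
static void mifare_auth_swap(unsigned char frame[MIFARE_CMD_LEN])
{
	unsigned char uid[MIFARE_UID_LEN];
	unsigned char *data = frame + MIFARE_CMD_HEADER;

	memcpy(uid, data + MIFARE_KEY_LEN, MIFARE_UID_LEN);	/* save the trailing UID */
	memmove(data + MIFARE_UID_LEN, data, MIFARE_KEY_LEN);	/* shift the key up */
	memcpy(data, uid, MIFARE_UID_LEN);			/* put the UID first */
}
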
+
+static int pn544_hci_check_presence(struct nfc_shdlc *shdlc,
+ struct nfc_target *target)
+{
+ struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
+
+ return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
+ PN544_RF_READER_CMD_PRESENCE_CHECK,
+ NULL, 0, NULL);
+}
+
+static struct nfc_shdlc_ops pn544_shdlc_ops = {
+ .open = pn544_hci_open,
+ .close = pn544_hci_close,
+ .hci_ready = pn544_hci_ready,
+ .xmit = pn544_hci_xmit,
+ .start_poll = pn544_hci_start_poll,
+ .target_from_gate = pn544_hci_target_from_gate,
+ .complete_target_discovered = pn544_hci_complete_target_discovered,
+ .data_exchange = pn544_hci_data_exchange,
+ .check_presence = pn544_hci_check_presence,
+};
+
+static int __devinit pn544_hci_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pn544_hci_info *info;
+ struct pn544_nfc_platform_data *pdata;
+ int r = 0;
+ u32 protocols;
+ struct nfc_hci_init_data init_data;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ return -ENODEV;
+ }
+
+ info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&client->dev,
+ "Cannot allocate memory for pn544_hci_info.\n");
+ r = -ENOMEM;
+ goto err_info_alloc;
+ }
+
+ info->i2c_dev = client;
+ info->state = PN544_ST_COLD;
+ mutex_init(&info->info_lock);
+ i2c_set_clientdata(client, info);
+
+ pdata = client->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
+ r = -EINVAL;
+ goto err_pdata;
+ }
+
+ if (pdata->request_resources == NULL) {
+ dev_err(&client->dev, "request_resources() missing\n");
+ r = -EINVAL;
+ goto err_pdata;
+ }
+
+ r = pdata->request_resources(client);
+ if (r) {
+ dev_err(&client->dev, "Cannot get platform resources\n");
+ goto err_pdata;
+ }
+
+ info->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
+ info->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
+ info->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
+
+ pn544_hci_platform_init(info);
+
+ r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
+ IRQF_TRIGGER_RISING, PN544_HCI_DRIVER_NAME,
+ info);
+ if (r < 0) {
+ dev_err(&client->dev, "Unable to register IRQ handler\n");
+ goto err_rti;
+ }
+
+ init_data.gate_count = ARRAY_SIZE(pn544_custom_gates);
+
+ memcpy(init_data.gates, pn544_custom_gates,
+ ARRAY_SIZE(pn544_custom_gates));
+
+ /*
+ * TODO: Session id must include the driver name + some bus addr
+ * persistent info to discriminate 2 identical chips
+ */
+ strcpy(init_data.session_id, "ID544HCI");
+
+ protocols = NFC_PROTO_JEWEL_MASK |
+ NFC_PROTO_MIFARE_MASK |
+ NFC_PROTO_FELICA_MASK |
+ NFC_PROTO_ISO14443_MASK |
+ NFC_PROTO_NFC_DEP_MASK;
+
+ info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops,
+ &init_data, protocols,
+ PN544_CMDS_HEADROOM, 0,
+ PN544_HCI_LLC_MAX_PAYLOAD,
+ dev_name(&client->dev));
+ if (!info->shdlc) {
+ dev_err(&client->dev, "Cannot allocate nfc shdlc.\n");
+ r = -ENOMEM;
+ goto err_allocshdlc;
+ }
+
+ nfc_shdlc_set_clientdata(info->shdlc, info);
+
+ return 0;
+
+err_allocshdlc:
+ free_irq(client->irq, info);
+
+err_rti:
+ if (pdata->free_resources != NULL)
+ pdata->free_resources();
+
+err_pdata:
+ kfree(info);
+
+err_info_alloc:
+ return r;
+}
+
+static __devexit int pn544_hci_remove(struct i2c_client *client)
+{
+ struct pn544_hci_info *info = i2c_get_clientdata(client);
+ struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ nfc_shdlc_free(info->shdlc);
+
+ if (info->state != PN544_ST_COLD) {
+ if (pdata->disable)
+ pdata->disable();
+ }
+
+ free_irq(client->irq, info);
+ if (pdata->free_resources)
+ pdata->free_resources();
+
+ kfree(info);
+
+ return 0;
+}
+
+static struct i2c_driver pn544_hci_driver = {
+ .driver = {
+ .name = PN544_HCI_DRIVER_NAME,
+ },
+ .probe = pn544_hci_probe,
+ .id_table = pn544_hci_id_table,
+ .remove = __devexit_p(pn544_hci_remove),
+};
+
+static int __init pn544_hci_init(void)
+{
+ int r;
+
+ pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+ r = i2c_add_driver(&pn544_hci_driver);
+ if (r) {
+ pr_err(PN544_HCI_DRIVER_NAME ": driver registration failed\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static void __exit pn544_hci_exit(void)
+{
+ i2c_del_driver(&pn544_hci_driver);
+}
+
+module_init(pn544_hci_init);
+module_exit(pn544_hci_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8e84ce9765a9..dfba3e64d595 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -51,12 +51,6 @@ config OF_IRQ
config OF_DEVICE
def_bool y
-config OF_GPIO
- def_bool y
- depends on GPIOLIB && !SPARC
- help
- OpenFirmware GPIO accessors
-
config OF_I2C
def_tristate I2C
depends on I2C && !SPARC
@@ -67,12 +61,6 @@ config OF_NET
depends on NETDEVICES
def_bool y
-config OF_SPI
- def_tristate SPI
- depends on SPI && !SPARC
- help
- OpenFirmware SPI accessors
-
config OF_MDIO
def_tristate PHYLIB
depends on PHYLIB
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index aa90e602c8a7..e027f444d10c 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -4,10 +4,8 @@ obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_DEVICE) += device.o platform.o
-obj-$(CONFIG_OF_GPIO) += gpio.o
obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_NET) += of_net.o
-obj-$(CONFIG_OF_SPI) += of_spi.o
obj-$(CONFIG_OF_SELFTEST) += selftest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index f37fbeb66a44..1e173f357674 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -90,8 +90,22 @@ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
if (!dev)
return NULL;
- return to_i2c_client(dev);
+ return i2c_verify_client(dev);
}
EXPORT_SYMBOL(of_find_i2c_device_by_node);
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, node,
+ of_dev_node_match);
+ if (!dev)
+ return NULL;
+
+ return i2c_verify_adapter(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+
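
A hedged caller-side sketch for the new helper (np is a hypothetical struct device_node * for an I2C controller); put_device() drops the reference taken by bus_find_device(), as the comment above requires.

struct i2c_adapter *adap = of_find_i2c_adapter_by_node(np);

if (adap) {
	/* ... look up or instantiate clients behind this adapter ... */
	put_device(&adap->dev);		/* release the bus_find_device() reference */
}
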
MODULE_LICENSE("GPL");
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 93125163dea2..677053813211 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -15,7 +15,7 @@
* PCI tree until a device node is found, at which point it will finish
* resolving using the OF tree walking.
*/
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
{
struct device_node *dn, *ppnode;
struct pci_dev *ppdev;
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
deleted file mode 100644
index 6dbc074e4876..000000000000
--- a/drivers/of/of_spi.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * SPI OF support routines
- * Copyright (C) 2008 Secret Lab Technologies Ltd.
- *
- * Support routines for deriving SPI device attachments from the device
- * tree.
- */
-
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/of_irq.h>
-#include <linux/of_spi.h>
-
-/**
- * of_register_spi_devices - Register child devices onto the SPI bus
- * @master: Pointer to spi_master device
- *
- * Registers an spi_device for each child node of master node which has a 'reg'
- * property.
- */
-void of_register_spi_devices(struct spi_master *master)
-{
- struct spi_device *spi;
- struct device_node *nc;
- const __be32 *prop;
- int rc;
- int len;
-
- if (!master->dev.of_node)
- return;
-
- for_each_child_of_node(master->dev.of_node, nc) {
- /* Alloc an spi_device */
- spi = spi_alloc_device(master);
- if (!spi) {
- dev_err(&master->dev, "spi_device alloc error for %s\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
-
- /* Select device driver */
- if (of_modalias_node(nc, spi->modalias,
- sizeof(spi->modalias)) < 0) {
- dev_err(&master->dev, "cannot find modalias for %s\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
-
- /* Device address */
- prop = of_get_property(nc, "reg", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'reg' property\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
- spi->chip_select = be32_to_cpup(prop);
-
- /* Mode (clock phase/polarity/etc.) */
- if (of_find_property(nc, "spi-cpha", NULL))
- spi->mode |= SPI_CPHA;
- if (of_find_property(nc, "spi-cpol", NULL))
- spi->mode |= SPI_CPOL;
- if (of_find_property(nc, "spi-cs-high", NULL))
- spi->mode |= SPI_CS_HIGH;
-
- /* Device speed */
- prop = of_get_property(nc, "spi-max-frequency", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
- nc->full_name);
- spi_dev_put(spi);
- continue;
- }
- spi->max_speed_hz = be32_to_cpup(prop);
-
- /* IRQ */
- spi->irq = irq_of_parse_and_map(nc, 0);
-
- /* Store a pointer to the node in the device structure */
- of_node_get(nc);
- spi->dev.of_node = nc;
-
- /* Register the new device */
- request_module(spi->modalias);
- rc = spi_add_device(spi);
- if (rc) {
- dev_err(&master->dev, "spi_device register error %s\n",
- nc->full_name);
- spi_dev_put(spi);
- }
-
- }
-}
-EXPORT_SYMBOL(of_register_spi_devices);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 343ad29e211c..e44f8c2d239d 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -317,10 +317,9 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
for(; lookup->compatible != NULL; lookup++) {
if (!of_device_is_compatible(np, lookup->compatible))
continue;
- if (of_address_to_resource(np, 0, &res))
- continue;
- if (res.start != lookup->phys_addr)
- continue;
+ if (!of_address_to_resource(np, 0, &res))
+ if (res.start != lookup->phys_addr)
+ continue;
pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
return lookup;
}
@@ -462,4 +461,5 @@ int of_platform_populate(struct device_node *root,
of_node_put(root);
return rc;
}
+EXPORT_SYMBOL_GPL(of_platform_populate);
#endif /* CONFIG_OF_ADDRESS */
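
Exporting of_platform_populate() lets modules and board code call it directly; a sketch of the common idiom (not part of this patch) is:

#include <linux/of_platform.h>

/* Sketch: create platform devices for every DT node matching the
 * default simple-bus style match table, starting at the tree root. */
static int __init board_populate_devices(void)
{
	return of_platform_populate(NULL, of_default_bus_match_table,
				    NULL, NULL);
}
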
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index da14432806c6..efc4b7f308cf 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);
static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static struct perf_event **perf_events[NR_CPUS];
static int num_counters;
/*
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index e3b76d409dee..5003458980d3 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -348,7 +348,7 @@ int superio_fixup_irq(struct pci_dev *pcidev)
BUG();
return -1;
}
- printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %p\n",
+ printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %pf\n",
pci_name(pcidev),
pcidev->vendor, pcidev->device,
__builtin_return_address(0));
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index bf0cee629b60..099f46cd8e87 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -748,6 +748,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_pm_set_unknown_state(pci_dev);
+ /*
+ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+ * hasn't been quiesced and tries to turn it off. If the controller
+ * is already in D3, this can hang or cause memory corruption.
+ *
+ * Since the value of the COMMAND register doesn't matter once the
+ * device has been suspended, we can safely set it to 0 here.
+ */
+ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
return 0;
}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index a55e248618cd..86c63fe45d11 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -27,6 +27,7 @@
#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
+#include <linux/vgaarb.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -417,6 +418,10 @@ static ssize_t
boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *vga_dev = vga_default_device();
+
+ if (vga_dev)
+ return sprintf(buf, "%u\n", (pdev == vga_dev));
return sprintf(buf, "%u\n",
!!(pdev->resource[PCI_ROM_RESOURCE].flags &
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8f169002dc7e..447e83472c01 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2370,7 +2370,7 @@ void pci_enable_acs(struct pci_dev *dev)
* number is always 0 (see the Implementation Note in section 2.2.8.1 of
* the PCI Express Base Specification, Revision 2.1)
*/
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
int slot;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index c3b331b74fa0..0cc053af70bd 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps);
list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
_i_ < _maps_node_->num_maps; \
- i++, _map_ = &_maps_node_->maps[_i_])
+ _i_++, _map_ = &_maps_node_->maps[_i_])
/**
* pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index f6e7c670906c..90c837f469a6 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -27,16 +27,16 @@
#include "core.h"
#include "pinctrl-imx.h"
-#define IMX_PMX_DUMP(info, p, m, c, n) \
-{ \
- int i, j; \
- printk("Format: Pin Mux Config\n"); \
- for (i = 0; i < n; i++) { \
- j = p[i]; \
- printk("%s %d 0x%lx\n", \
- info->pins[j].name, \
- m[i], c[i]); \
- } \
+#define IMX_PMX_DUMP(info, p, m, c, n) \
+{ \
+ int i, j; \
+ printk(KERN_DEBUG "Format: Pin Mux Config\n"); \
+ for (i = 0; i < n; i++) { \
+ j = p[i]; \
+ printk(KERN_DEBUG "%s %d 0x%lx\n", \
+ info->pins[j].name, \
+ m[i], c[i]); \
+ } \
}
/* The bits in the CONFIG cell defined in the binding doc */
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create mux map */
parent = of_get_parent(np);
- if (!parent)
+ if (!parent) {
+ kfree(new_map);
return -EINVAL;
+ }
new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
new_map[0].data.mux.function = parent->name;
new_map[0].data.mux.group = np->name;
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
}
dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
- new_map->data.mux.function, new_map->data.mux.group, map_num);
+ (*map)->data.mux.function, (*map)->data.mux.group, map_num);
return 0;
}
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
static void imx_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
- int i;
-
- for (i = 0; i < num_maps; i++)
- kfree(map);
+ kfree(map);
}
static struct pinctrl_ops imx_pctrl_ops = {
@@ -478,6 +477,7 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
#ifdef DEBUG
IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
#endif
+
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/pinctrl-imx6q.c
index 7737d4d71a3c..e9bf71fbedca 100644
--- a/drivers/pinctrl/pinctrl-imx6q.c
+++ b/drivers/pinctrl/pinctrl-imx6q.c
@@ -1950,6 +1950,8 @@ static struct imx_pin_reg imx6q_pin_regs[] = {
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
+ IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */
+ IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */
};
/* Pad names for the pinmux subsystem */
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index 556e45a213eb..4ba4636b6a4a 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
/* Compose group name */
group = kzalloc(length, GFP_KERNEL);
- if (!group)
- return -ENOMEM;
+ if (!group) {
+ ret = -ENOMEM;
+ goto free;
+ }
snprintf(group, length, "%s.%d", np->name, reg);
new_map[i].data.mux.group = group;
i++;
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
if (!pconfig) {
ret = -ENOMEM;
- goto free;
+ goto free_group;
}
new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
return 0;
+free_group:
+ if (!purecfg)
+ kfree(group);
free:
kfree(new_map);
return ret;
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
return 0;
err:
+ platform_set_drvdata(pdev, NULL);
iounmap(d->base);
return ret;
}
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev)
{
struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
pinctrl_unregister(d->pctl);
iounmap(d->base);
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index b8e01c3eaa95..3e7e47d6b385 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
@@ -672,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
* wakeup is anyhow controlled by the RIMSC and FIMSC registers.
*/
if (nmk_chip->sleepmode && on) {
- __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base,
+ __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
NMK_GPIO_SLPM_WAKEUP_ENABLE);
}
@@ -1245,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
ret = PTR_ERR(clk);
goto out_unmap;
}
+ clk_prepare(clk);
nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
if (!nmk_chip) {
@@ -1436,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
- /* Handle this special glitch on altfunction C */
+ /*
+ * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
+ * we may pass through an undesired state. In this case we take
+ * some extra care.
+ *
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers (since we have a shadow register in the
+ * nmk_chip we're using that as backup)
+ * - Set SLPM=0 for the IOs you want to switch and others to 1
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFSLA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers
+ * - Any spurious wake up event during switch sequence to be ignored
+ * and cleared
+ *
+ * We REALLY need to save ALL slpm registers, because the external
+ * IOFORCE will switch *all* ports to their sleepmode setting so as
+ * to avoid glitches. (Not just one port!)
+ */
glitch = (g->altsetting == NMK_GPIO_ALT_C);
if (glitch) {
@@ -1688,18 +1710,34 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
.owner = THIS_MODULE,
};
+static const struct of_device_id nmk_pinctrl_match[] = {
+ {
+ .compatible = "stericsson,nmk_pinctrl",
+ .data = (void *)PINCTRL_NMK_DB8500,
+ },
+ {},
+};
+
static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
{
const struct platform_device_id *platid = platform_get_device_id(pdev);
+ struct device_node *np = pdev->dev.of_node;
struct nmk_pinctrl *npct;
+ unsigned int version = 0;
int i;
npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
if (!npct)
return -ENOMEM;
+ if (platid)
+ version = platid->driver_data;
+ else if (np)
+ version = (unsigned int)
+ of_match_device(nmk_pinctrl_match, &pdev->dev)->data;
+
/* Poke in other ASIC variants here */
- if (platid->driver_data == PINCTRL_NMK_DB8500)
+ if (version == PINCTRL_NMK_DB8500)
nmk_pinctrl_db8500_init(&npct->soc);
/*
@@ -1758,6 +1796,7 @@ static struct platform_driver nmk_pinctrl_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "pinctrl-nomadik",
+ .of_match_table = nmk_pinctrl_match,
},
.probe = nmk_pinctrl_probe,
.id_table = nmk_pinctrl_id,
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index ba15b1a29e52..e9f8e7d11001 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1184,7 +1184,7 @@ out_no_gpio_remap:
return ret;
}
-static const struct of_device_id pinmux_ids[] = {
+static const struct of_device_id pinmux_ids[] __devinitconst = {
{ .compatible = "sirf,prima2-gpio-pinmux" },
{}
};
diff --git a/drivers/pinctrl/spear/Kconfig b/drivers/pinctrl/spear/Kconfig
index 6a2596b4f359..91558791e766 100644
--- a/drivers/pinctrl/spear/Kconfig
+++ b/drivers/pinctrl/spear/Kconfig
@@ -31,4 +31,14 @@ config PINCTRL_SPEAR320
depends on MACH_SPEAR320
select PINCTRL_SPEAR3XX
+config PINCTRL_SPEAR1310
+ bool "ST Microelectronics SPEAr1310 SoC pin controller driver"
+ depends on MACH_SPEAR1310
+ select PINCTRL_SPEAR
+
+config PINCTRL_SPEAR1340
+ bool "ST Microelectronics SPEAr1340 SoC pin controller driver"
+ depends on MACH_SPEAR1340
+ select PINCTRL_SPEAR
+
endif
diff --git a/drivers/pinctrl/spear/Makefile b/drivers/pinctrl/spear/Makefile
index 15dcb85da22d..b28a7ba22443 100644
--- a/drivers/pinctrl/spear/Makefile
+++ b/drivers/pinctrl/spear/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_PINCTRL_SPEAR3XX) += pinctrl-spear3xx.o
obj-$(CONFIG_PINCTRL_SPEAR300) += pinctrl-spear300.o
obj-$(CONFIG_PINCTRL_SPEAR310) += pinctrl-spear310.o
obj-$(CONFIG_PINCTRL_SPEAR320) += pinctrl-spear320.o
+obj-$(CONFIG_PINCTRL_SPEAR1310) += pinctrl-spear1310.o
+obj-$(CONFIG_PINCTRL_SPEAR1340) += pinctrl-spear1340.o
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 5ae50aadf885..b3f6b2873fdd 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* Inspired from:
* - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 47a6b5b72f90..d950eb78d939 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
* Driver header file for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -139,4 +139,255 @@ void __devinit pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
int __devinit spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata);
int __devexit spear_pinctrl_remove(struct platform_device *pdev);
+
+#define SPEAR_PIN_0_TO_101 \
+ PINCTRL_PIN(0, "PLGPIO0"), \
+ PINCTRL_PIN(1, "PLGPIO1"), \
+ PINCTRL_PIN(2, "PLGPIO2"), \
+ PINCTRL_PIN(3, "PLGPIO3"), \
+ PINCTRL_PIN(4, "PLGPIO4"), \
+ PINCTRL_PIN(5, "PLGPIO5"), \
+ PINCTRL_PIN(6, "PLGPIO6"), \
+ PINCTRL_PIN(7, "PLGPIO7"), \
+ PINCTRL_PIN(8, "PLGPIO8"), \
+ PINCTRL_PIN(9, "PLGPIO9"), \
+ PINCTRL_PIN(10, "PLGPIO10"), \
+ PINCTRL_PIN(11, "PLGPIO11"), \
+ PINCTRL_PIN(12, "PLGPIO12"), \
+ PINCTRL_PIN(13, "PLGPIO13"), \
+ PINCTRL_PIN(14, "PLGPIO14"), \
+ PINCTRL_PIN(15, "PLGPIO15"), \
+ PINCTRL_PIN(16, "PLGPIO16"), \
+ PINCTRL_PIN(17, "PLGPIO17"), \
+ PINCTRL_PIN(18, "PLGPIO18"), \
+ PINCTRL_PIN(19, "PLGPIO19"), \
+ PINCTRL_PIN(20, "PLGPIO20"), \
+ PINCTRL_PIN(21, "PLGPIO21"), \
+ PINCTRL_PIN(22, "PLGPIO22"), \
+ PINCTRL_PIN(23, "PLGPIO23"), \
+ PINCTRL_PIN(24, "PLGPIO24"), \
+ PINCTRL_PIN(25, "PLGPIO25"), \
+ PINCTRL_PIN(26, "PLGPIO26"), \
+ PINCTRL_PIN(27, "PLGPIO27"), \
+ PINCTRL_PIN(28, "PLGPIO28"), \
+ PINCTRL_PIN(29, "PLGPIO29"), \
+ PINCTRL_PIN(30, "PLGPIO30"), \
+ PINCTRL_PIN(31, "PLGPIO31"), \
+ PINCTRL_PIN(32, "PLGPIO32"), \
+ PINCTRL_PIN(33, "PLGPIO33"), \
+ PINCTRL_PIN(34, "PLGPIO34"), \
+ PINCTRL_PIN(35, "PLGPIO35"), \
+ PINCTRL_PIN(36, "PLGPIO36"), \
+ PINCTRL_PIN(37, "PLGPIO37"), \
+ PINCTRL_PIN(38, "PLGPIO38"), \
+ PINCTRL_PIN(39, "PLGPIO39"), \
+ PINCTRL_PIN(40, "PLGPIO40"), \
+ PINCTRL_PIN(41, "PLGPIO41"), \
+ PINCTRL_PIN(42, "PLGPIO42"), \
+ PINCTRL_PIN(43, "PLGPIO43"), \
+ PINCTRL_PIN(44, "PLGPIO44"), \
+ PINCTRL_PIN(45, "PLGPIO45"), \
+ PINCTRL_PIN(46, "PLGPIO46"), \
+ PINCTRL_PIN(47, "PLGPIO47"), \
+ PINCTRL_PIN(48, "PLGPIO48"), \
+ PINCTRL_PIN(49, "PLGPIO49"), \
+ PINCTRL_PIN(50, "PLGPIO50"), \
+ PINCTRL_PIN(51, "PLGPIO51"), \
+ PINCTRL_PIN(52, "PLGPIO52"), \
+ PINCTRL_PIN(53, "PLGPIO53"), \
+ PINCTRL_PIN(54, "PLGPIO54"), \
+ PINCTRL_PIN(55, "PLGPIO55"), \
+ PINCTRL_PIN(56, "PLGPIO56"), \
+ PINCTRL_PIN(57, "PLGPIO57"), \
+ PINCTRL_PIN(58, "PLGPIO58"), \
+ PINCTRL_PIN(59, "PLGPIO59"), \
+ PINCTRL_PIN(60, "PLGPIO60"), \
+ PINCTRL_PIN(61, "PLGPIO61"), \
+ PINCTRL_PIN(62, "PLGPIO62"), \
+ PINCTRL_PIN(63, "PLGPIO63"), \
+ PINCTRL_PIN(64, "PLGPIO64"), \
+ PINCTRL_PIN(65, "PLGPIO65"), \
+ PINCTRL_PIN(66, "PLGPIO66"), \
+ PINCTRL_PIN(67, "PLGPIO67"), \
+ PINCTRL_PIN(68, "PLGPIO68"), \
+ PINCTRL_PIN(69, "PLGPIO69"), \
+ PINCTRL_PIN(70, "PLGPIO70"), \
+ PINCTRL_PIN(71, "PLGPIO71"), \
+ PINCTRL_PIN(72, "PLGPIO72"), \
+ PINCTRL_PIN(73, "PLGPIO73"), \
+ PINCTRL_PIN(74, "PLGPIO74"), \
+ PINCTRL_PIN(75, "PLGPIO75"), \
+ PINCTRL_PIN(76, "PLGPIO76"), \
+ PINCTRL_PIN(77, "PLGPIO77"), \
+ PINCTRL_PIN(78, "PLGPIO78"), \
+ PINCTRL_PIN(79, "PLGPIO79"), \
+ PINCTRL_PIN(80, "PLGPIO80"), \
+ PINCTRL_PIN(81, "PLGPIO81"), \
+ PINCTRL_PIN(82, "PLGPIO82"), \
+ PINCTRL_PIN(83, "PLGPIO83"), \
+ PINCTRL_PIN(84, "PLGPIO84"), \
+ PINCTRL_PIN(85, "PLGPIO85"), \
+ PINCTRL_PIN(86, "PLGPIO86"), \
+ PINCTRL_PIN(87, "PLGPIO87"), \
+ PINCTRL_PIN(88, "PLGPIO88"), \
+ PINCTRL_PIN(89, "PLGPIO89"), \
+ PINCTRL_PIN(90, "PLGPIO90"), \
+ PINCTRL_PIN(91, "PLGPIO91"), \
+ PINCTRL_PIN(92, "PLGPIO92"), \
+ PINCTRL_PIN(93, "PLGPIO93"), \
+ PINCTRL_PIN(94, "PLGPIO94"), \
+ PINCTRL_PIN(95, "PLGPIO95"), \
+ PINCTRL_PIN(96, "PLGPIO96"), \
+ PINCTRL_PIN(97, "PLGPIO97"), \
+ PINCTRL_PIN(98, "PLGPIO98"), \
+ PINCTRL_PIN(99, "PLGPIO99"), \
+ PINCTRL_PIN(100, "PLGPIO100"), \
+ PINCTRL_PIN(101, "PLGPIO101")
+
+#define SPEAR_PIN_102_TO_245 \
+ PINCTRL_PIN(102, "PLGPIO102"), \
+ PINCTRL_PIN(103, "PLGPIO103"), \
+ PINCTRL_PIN(104, "PLGPIO104"), \
+ PINCTRL_PIN(105, "PLGPIO105"), \
+ PINCTRL_PIN(106, "PLGPIO106"), \
+ PINCTRL_PIN(107, "PLGPIO107"), \
+ PINCTRL_PIN(108, "PLGPIO108"), \
+ PINCTRL_PIN(109, "PLGPIO109"), \
+ PINCTRL_PIN(110, "PLGPIO110"), \
+ PINCTRL_PIN(111, "PLGPIO111"), \
+ PINCTRL_PIN(112, "PLGPIO112"), \
+ PINCTRL_PIN(113, "PLGPIO113"), \
+ PINCTRL_PIN(114, "PLGPIO114"), \
+ PINCTRL_PIN(115, "PLGPIO115"), \
+ PINCTRL_PIN(116, "PLGPIO116"), \
+ PINCTRL_PIN(117, "PLGPIO117"), \
+ PINCTRL_PIN(118, "PLGPIO118"), \
+ PINCTRL_PIN(119, "PLGPIO119"), \
+ PINCTRL_PIN(120, "PLGPIO120"), \
+ PINCTRL_PIN(121, "PLGPIO121"), \
+ PINCTRL_PIN(122, "PLGPIO122"), \
+ PINCTRL_PIN(123, "PLGPIO123"), \
+ PINCTRL_PIN(124, "PLGPIO124"), \
+ PINCTRL_PIN(125, "PLGPIO125"), \
+ PINCTRL_PIN(126, "PLGPIO126"), \
+ PINCTRL_PIN(127, "PLGPIO127"), \
+ PINCTRL_PIN(128, "PLGPIO128"), \
+ PINCTRL_PIN(129, "PLGPIO129"), \
+ PINCTRL_PIN(130, "PLGPIO130"), \
+ PINCTRL_PIN(131, "PLGPIO131"), \
+ PINCTRL_PIN(132, "PLGPIO132"), \
+ PINCTRL_PIN(133, "PLGPIO133"), \
+ PINCTRL_PIN(134, "PLGPIO134"), \
+ PINCTRL_PIN(135, "PLGPIO135"), \
+ PINCTRL_PIN(136, "PLGPIO136"), \
+ PINCTRL_PIN(137, "PLGPIO137"), \
+ PINCTRL_PIN(138, "PLGPIO138"), \
+ PINCTRL_PIN(139, "PLGPIO139"), \
+ PINCTRL_PIN(140, "PLGPIO140"), \
+ PINCTRL_PIN(141, "PLGPIO141"), \
+ PINCTRL_PIN(142, "PLGPIO142"), \
+ PINCTRL_PIN(143, "PLGPIO143"), \
+ PINCTRL_PIN(144, "PLGPIO144"), \
+ PINCTRL_PIN(145, "PLGPIO145"), \
+ PINCTRL_PIN(146, "PLGPIO146"), \
+ PINCTRL_PIN(147, "PLGPIO147"), \
+ PINCTRL_PIN(148, "PLGPIO148"), \
+ PINCTRL_PIN(149, "PLGPIO149"), \
+ PINCTRL_PIN(150, "PLGPIO150"), \
+ PINCTRL_PIN(151, "PLGPIO151"), \
+ PINCTRL_PIN(152, "PLGPIO152"), \
+ PINCTRL_PIN(153, "PLGPIO153"), \
+ PINCTRL_PIN(154, "PLGPIO154"), \
+ PINCTRL_PIN(155, "PLGPIO155"), \
+ PINCTRL_PIN(156, "PLGPIO156"), \
+ PINCTRL_PIN(157, "PLGPIO157"), \
+ PINCTRL_PIN(158, "PLGPIO158"), \
+ PINCTRL_PIN(159, "PLGPIO159"), \
+ PINCTRL_PIN(160, "PLGPIO160"), \
+ PINCTRL_PIN(161, "PLGPIO161"), \
+ PINCTRL_PIN(162, "PLGPIO162"), \
+ PINCTRL_PIN(163, "PLGPIO163"), \
+ PINCTRL_PIN(164, "PLGPIO164"), \
+ PINCTRL_PIN(165, "PLGPIO165"), \
+ PINCTRL_PIN(166, "PLGPIO166"), \
+ PINCTRL_PIN(167, "PLGPIO167"), \
+ PINCTRL_PIN(168, "PLGPIO168"), \
+ PINCTRL_PIN(169, "PLGPIO169"), \
+ PINCTRL_PIN(170, "PLGPIO170"), \
+ PINCTRL_PIN(171, "PLGPIO171"), \
+ PINCTRL_PIN(172, "PLGPIO172"), \
+ PINCTRL_PIN(173, "PLGPIO173"), \
+ PINCTRL_PIN(174, "PLGPIO174"), \
+ PINCTRL_PIN(175, "PLGPIO175"), \
+ PINCTRL_PIN(176, "PLGPIO176"), \
+ PINCTRL_PIN(177, "PLGPIO177"), \
+ PINCTRL_PIN(178, "PLGPIO178"), \
+ PINCTRL_PIN(179, "PLGPIO179"), \
+ PINCTRL_PIN(180, "PLGPIO180"), \
+ PINCTRL_PIN(181, "PLGPIO181"), \
+ PINCTRL_PIN(182, "PLGPIO182"), \
+ PINCTRL_PIN(183, "PLGPIO183"), \
+ PINCTRL_PIN(184, "PLGPIO184"), \
+ PINCTRL_PIN(185, "PLGPIO185"), \
+ PINCTRL_PIN(186, "PLGPIO186"), \
+ PINCTRL_PIN(187, "PLGPIO187"), \
+ PINCTRL_PIN(188, "PLGPIO188"), \
+ PINCTRL_PIN(189, "PLGPIO189"), \
+ PINCTRL_PIN(190, "PLGPIO190"), \
+ PINCTRL_PIN(191, "PLGPIO191"), \
+ PINCTRL_PIN(192, "PLGPIO192"), \
+ PINCTRL_PIN(193, "PLGPIO193"), \
+ PINCTRL_PIN(194, "PLGPIO194"), \
+ PINCTRL_PIN(195, "PLGPIO195"), \
+ PINCTRL_PIN(196, "PLGPIO196"), \
+ PINCTRL_PIN(197, "PLGPIO197"), \
+ PINCTRL_PIN(198, "PLGPIO198"), \
+ PINCTRL_PIN(199, "PLGPIO199"), \
+ PINCTRL_PIN(200, "PLGPIO200"), \
+ PINCTRL_PIN(201, "PLGPIO201"), \
+ PINCTRL_PIN(202, "PLGPIO202"), \
+ PINCTRL_PIN(203, "PLGPIO203"), \
+ PINCTRL_PIN(204, "PLGPIO204"), \
+ PINCTRL_PIN(205, "PLGPIO205"), \
+ PINCTRL_PIN(206, "PLGPIO206"), \
+ PINCTRL_PIN(207, "PLGPIO207"), \
+ PINCTRL_PIN(208, "PLGPIO208"), \
+ PINCTRL_PIN(209, "PLGPIO209"), \
+ PINCTRL_PIN(210, "PLGPIO210"), \
+ PINCTRL_PIN(211, "PLGPIO211"), \
+ PINCTRL_PIN(212, "PLGPIO212"), \
+ PINCTRL_PIN(213, "PLGPIO213"), \
+ PINCTRL_PIN(214, "PLGPIO214"), \
+ PINCTRL_PIN(215, "PLGPIO215"), \
+ PINCTRL_PIN(216, "PLGPIO216"), \
+ PINCTRL_PIN(217, "PLGPIO217"), \
+ PINCTRL_PIN(218, "PLGPIO218"), \
+ PINCTRL_PIN(219, "PLGPIO219"), \
+ PINCTRL_PIN(220, "PLGPIO220"), \
+ PINCTRL_PIN(221, "PLGPIO221"), \
+ PINCTRL_PIN(222, "PLGPIO222"), \
+ PINCTRL_PIN(223, "PLGPIO223"), \
+ PINCTRL_PIN(224, "PLGPIO224"), \
+ PINCTRL_PIN(225, "PLGPIO225"), \
+ PINCTRL_PIN(226, "PLGPIO226"), \
+ PINCTRL_PIN(227, "PLGPIO227"), \
+ PINCTRL_PIN(228, "PLGPIO228"), \
+ PINCTRL_PIN(229, "PLGPIO229"), \
+ PINCTRL_PIN(230, "PLGPIO230"), \
+ PINCTRL_PIN(231, "PLGPIO231"), \
+ PINCTRL_PIN(232, "PLGPIO232"), \
+ PINCTRL_PIN(233, "PLGPIO233"), \
+ PINCTRL_PIN(234, "PLGPIO234"), \
+ PINCTRL_PIN(235, "PLGPIO235"), \
+ PINCTRL_PIN(236, "PLGPIO236"), \
+ PINCTRL_PIN(237, "PLGPIO237"), \
+ PINCTRL_PIN(238, "PLGPIO238"), \
+ PINCTRL_PIN(239, "PLGPIO239"), \
+ PINCTRL_PIN(240, "PLGPIO240"), \
+ PINCTRL_PIN(241, "PLGPIO241"), \
+ PINCTRL_PIN(242, "PLGPIO242"), \
+ PINCTRL_PIN(243, "PLGPIO243"), \
+ PINCTRL_PIN(244, "PLGPIO244"), \
+ PINCTRL_PIN(245, "PLGPIO245")
+
#endif /* __PINMUX_SPEAR_H__ */
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
new file mode 100644
index 000000000000..d6cca8c81b92
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -0,0 +1,2198 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1310 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1310-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1310_pins[] = {
+ SPEAR_PIN_0_TO_101,
+ SPEAR_PIN_102_TO_245,
+};
+
+/* registers */
+#define PERIP_CFG 0x32C
+ #define MCIF_SEL_SHIFT 3
+ #define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_MASK (0x3 << MCIF_SEL_SHIFT)
+
+#define PCIE_SATA_CFG 0x3A4
+ #define PCIE_SATA2_SEL_PCIE (0 << 31)
+ #define PCIE_SATA1_SEL_PCIE (0 << 30)
+ #define PCIE_SATA0_SEL_PCIE (0 << 29)
+ #define PCIE_SATA2_SEL_SATA (1 << 31)
+ #define PCIE_SATA1_SEL_SATA (1 << 30)
+ #define PCIE_SATA0_SEL_SATA (1 << 29)
+ #define SATA2_CFG_TX_CLK_EN (1 << 27)
+ #define SATA2_CFG_RX_CLK_EN (1 << 26)
+ #define SATA2_CFG_POWERUP_RESET (1 << 25)
+ #define SATA2_CFG_PM_CLK_EN (1 << 24)
+ #define SATA1_CFG_TX_CLK_EN (1 << 23)
+ #define SATA1_CFG_RX_CLK_EN (1 << 22)
+ #define SATA1_CFG_POWERUP_RESET (1 << 21)
+ #define SATA1_CFG_PM_CLK_EN (1 << 20)
+ #define SATA0_CFG_TX_CLK_EN (1 << 19)
+ #define SATA0_CFG_RX_CLK_EN (1 << 18)
+ #define SATA0_CFG_POWERUP_RESET (1 << 17)
+ #define SATA0_CFG_PM_CLK_EN (1 << 16)
+ #define PCIE2_CFG_DEVICE_PRESENT (1 << 11)
+ #define PCIE2_CFG_POWERUP_RESET (1 << 10)
+ #define PCIE2_CFG_CORE_CLK_EN (1 << 9)
+ #define PCIE2_CFG_AUX_CLK_EN (1 << 8)
+ #define PCIE1_CFG_DEVICE_PRESENT (1 << 7)
+ #define PCIE1_CFG_POWERUP_RESET (1 << 6)
+ #define PCIE1_CFG_CORE_CLK_EN (1 << 5)
+ #define PCIE1_CFG_AUX_CLK_EN (1 << 4)
+ #define PCIE0_CFG_DEVICE_PRESENT (1 << 3)
+ #define PCIE0_CFG_POWERUP_RESET (1 << 2)
+ #define PCIE0_CFG_CORE_CLK_EN (1 << 1)
+ #define PCIE0_CFG_AUX_CLK_EN (1 << 0)
+
+#define PAD_FUNCTION_EN_0 0x650
+ #define PMX_UART0_MASK (1 << 1)
+ #define PMX_I2C0_MASK (1 << 2)
+ #define PMX_I2S0_MASK (1 << 3)
+ #define PMX_SSP0_MASK (1 << 4)
+ #define PMX_CLCD1_MASK (1 << 5)
+ #define PMX_EGPIO00_MASK (1 << 6)
+ #define PMX_EGPIO01_MASK (1 << 7)
+ #define PMX_EGPIO02_MASK (1 << 8)
+ #define PMX_EGPIO03_MASK (1 << 9)
+ #define PMX_EGPIO04_MASK (1 << 10)
+ #define PMX_EGPIO05_MASK (1 << 11)
+ #define PMX_EGPIO06_MASK (1 << 12)
+ #define PMX_EGPIO07_MASK (1 << 13)
+ #define PMX_EGPIO08_MASK (1 << 14)
+ #define PMX_EGPIO09_MASK (1 << 15)
+ #define PMX_SMI_MASK (1 << 16)
+ #define PMX_NAND8_MASK (1 << 17)
+ #define PMX_GMIICLK_MASK (1 << 18)
+ #define PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK (1 << 19)
+ #define PMX_RXCLK_RDV_TXEN_D03_MASK (1 << 20)
+ #define PMX_GMIID47_MASK (1 << 21)
+ #define PMX_MDC_MDIO_MASK (1 << 22)
+ #define PMX_MCI_DATA8_15_MASK (1 << 23)
+ #define PMX_NFAD23_MASK (1 << 24)
+ #define PMX_NFAD24_MASK (1 << 25)
+ #define PMX_NFAD25_MASK (1 << 26)
+ #define PMX_NFCE3_MASK (1 << 27)
+ #define PMX_NFWPRT3_MASK (1 << 28)
+ #define PMX_NFRSTPWDWN0_MASK (1 << 29)
+ #define PMX_NFRSTPWDWN1_MASK (1 << 30)
+ #define PMX_NFRSTPWDWN2_MASK (1 << 31)
+
+#define PAD_FUNCTION_EN_1 0x654
+ #define PMX_NFRSTPWDWN3_MASK (1 << 0)
+ #define PMX_SMINCS2_MASK (1 << 1)
+ #define PMX_SMINCS3_MASK (1 << 2)
+ #define PMX_CLCD2_MASK (1 << 3)
+ #define PMX_KBD_ROWCOL68_MASK (1 << 4)
+ #define PMX_EGPIO10_MASK (1 << 5)
+ #define PMX_EGPIO11_MASK (1 << 6)
+ #define PMX_EGPIO12_MASK (1 << 7)
+ #define PMX_EGPIO13_MASK (1 << 8)
+ #define PMX_EGPIO14_MASK (1 << 9)
+ #define PMX_EGPIO15_MASK (1 << 10)
+ #define PMX_UART0_MODEM_MASK (1 << 11)
+ #define PMX_GPT0_TMR0_MASK (1 << 12)
+ #define PMX_GPT0_TMR1_MASK (1 << 13)
+ #define PMX_GPT1_TMR0_MASK (1 << 14)
+ #define PMX_GPT1_TMR1_MASK (1 << 15)
+ #define PMX_I2S1_MASK (1 << 16)
+ #define PMX_KBD_ROWCOL25_MASK (1 << 17)
+ #define PMX_NFIO8_15_MASK (1 << 18)
+ #define PMX_KBD_COL1_MASK (1 << 19)
+ #define PMX_NFCE1_MASK (1 << 20)
+ #define PMX_KBD_COL0_MASK (1 << 21)
+ #define PMX_NFCE2_MASK (1 << 22)
+ #define PMX_KBD_ROW1_MASK (1 << 23)
+ #define PMX_NFWPRT1_MASK (1 << 24)
+ #define PMX_KBD_ROW0_MASK (1 << 25)
+ #define PMX_NFWPRT2_MASK (1 << 26)
+ #define PMX_MCIDATA0_MASK (1 << 27)
+ #define PMX_MCIDATA1_MASK (1 << 28)
+ #define PMX_MCIDATA2_MASK (1 << 29)
+ #define PMX_MCIDATA3_MASK (1 << 30)
+ #define PMX_MCIDATA4_MASK (1 << 31)
+
+#define PAD_FUNCTION_EN_2 0x658
+ #define PMX_MCIDATA5_MASK (1 << 0)
+ #define PMX_MCIDATA6_MASK (1 << 1)
+ #define PMX_MCIDATA7_MASK (1 << 2)
+ #define PMX_MCIDATA1SD_MASK (1 << 3)
+ #define PMX_MCIDATA2SD_MASK (1 << 4)
+ #define PMX_MCIDATA3SD_MASK (1 << 5)
+ #define PMX_MCIADDR0ALE_MASK (1 << 6)
+ #define PMX_MCIADDR1CLECLK_MASK (1 << 7)
+ #define PMX_MCIADDR2_MASK (1 << 8)
+ #define PMX_MCICECF_MASK (1 << 9)
+ #define PMX_MCICEXD_MASK (1 << 10)
+ #define PMX_MCICESDMMC_MASK (1 << 11)
+ #define PMX_MCICDCF1_MASK (1 << 12)
+ #define PMX_MCICDCF2_MASK (1 << 13)
+ #define PMX_MCICDXD_MASK (1 << 14)
+ #define PMX_MCICDSDMMC_MASK (1 << 15)
+ #define PMX_MCIDATADIR_MASK (1 << 16)
+ #define PMX_MCIDMARQWP_MASK (1 << 17)
+ #define PMX_MCIIORDRE_MASK (1 << 18)
+ #define PMX_MCIIOWRWE_MASK (1 << 19)
+ #define PMX_MCIRESETCF_MASK (1 << 20)
+ #define PMX_MCICS0CE_MASK (1 << 21)
+ #define PMX_MCICFINTR_MASK (1 << 22)
+ #define PMX_MCIIORDY_MASK (1 << 23)
+ #define PMX_MCICS1_MASK (1 << 24)
+ #define PMX_MCIDMAACK_MASK (1 << 25)
+ #define PMX_MCISDCMD_MASK (1 << 26)
+ #define PMX_MCILEDS_MASK (1 << 27)
+ #define PMX_TOUCH_XY_MASK (1 << 28)
+ #define PMX_SSP0_CS0_MASK (1 << 29)
+ #define PMX_SSP0_CS1_2_MASK (1 << 30)
+
+/* combined macros */
+#define PMX_GMII_MASK (PMX_GMIICLK_MASK | \
+ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
+ PMX_RXCLK_RDV_TXEN_D03_MASK | \
+ PMX_GMIID47_MASK | PMX_MDC_MDIO_MASK)
+
+#define PMX_EGPIO_0_GRP_MASK (PMX_EGPIO00_MASK | PMX_EGPIO01_MASK | \
+ PMX_EGPIO02_MASK | \
+ PMX_EGPIO03_MASK | PMX_EGPIO04_MASK | \
+ PMX_EGPIO05_MASK | PMX_EGPIO06_MASK | \
+ PMX_EGPIO07_MASK | PMX_EGPIO08_MASK | \
+ PMX_EGPIO09_MASK)
+#define PMX_EGPIO_1_GRP_MASK (PMX_EGPIO10_MASK | PMX_EGPIO11_MASK | \
+ PMX_EGPIO12_MASK | PMX_EGPIO13_MASK | \
+ PMX_EGPIO14_MASK | PMX_EGPIO15_MASK)
+
+#define PMX_KEYBOARD_6X6_MASK (PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+ PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL0_MASK | \
+ PMX_KBD_COL1_MASK)
+
+#define PMX_NAND8BIT_0_MASK (PMX_NAND8_MASK | PMX_NFAD23_MASK | \
+ PMX_NFAD24_MASK | PMX_NFAD25_MASK | \
+ PMX_NFWPRT3_MASK | PMX_NFRSTPWDWN0_MASK | \
+ PMX_NFRSTPWDWN1_MASK | PMX_NFRSTPWDWN2_MASK | \
+ PMX_NFCE3_MASK)
+#define PMX_NAND8BIT_1_MASK PMX_NFRSTPWDWN3_MASK
+
+#define PMX_NAND16BIT_1_MASK (PMX_KBD_ROWCOL25_MASK | PMX_NFIO8_15_MASK)
+#define PMX_NAND_4CHIPS_MASK (PMX_NFCE1_MASK | PMX_NFCE2_MASK | \
+ PMX_NFWPRT1_MASK | PMX_NFWPRT2_MASK | \
+ PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+ PMX_KBD_COL0_MASK | PMX_KBD_COL1_MASK)
+
+#define PMX_MCIFALL_1_MASK 0xF8000000
+#define PMX_MCIFALL_2_MASK 0x0FFFFFFF
+
+#define PMX_PCI_REG1_MASK (PMX_SMINCS2_MASK | PMX_SMINCS3_MASK | \
+ PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK | \
+ PMX_EGPIO_1_GRP_MASK | PMX_GPT0_TMR0_MASK | \
+ PMX_GPT0_TMR1_MASK | PMX_GPT1_TMR0_MASK | \
+ PMX_GPT1_TMR1_MASK | PMX_I2S1_MASK | \
+ PMX_NFCE2_MASK)
+#define PMX_PCI_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+ PMX_SSP0_CS1_2_MASK)
+
+#define PMX_SMII_0_1_2_MASK (PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK)
+#define PMX_RGMII_REG0_MASK (PMX_MCI_DATA8_15_MASK | \
+ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
+ PMX_GMIID47_MASK)
+#define PMX_RGMII_REG1_MASK (PMX_KBD_ROWCOL68_MASK | PMX_EGPIO_1_GRP_MASK |\
+ PMX_KBD_ROW1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_KBD_ROW0_MASK | PMX_NFWPRT2_MASK)
+#define PMX_RGMII_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+ PMX_SSP0_CS1_2_MASK)
+
+#define PCIE_CFG_VAL(x) (PCIE_SATA##x##_SEL_PCIE | \
+ PCIE##x##_CFG_AUX_CLK_EN | \
+ PCIE##x##_CFG_CORE_CLK_EN | \
+ PCIE##x##_CFG_POWERUP_RESET | \
+ PCIE##x##_CFG_DEVICE_PRESENT)
+#define SATA_CFG_VAL(x) (PCIE_SATA##x##_SEL_SATA | \
+ SATA##x##_CFG_PM_CLK_EN | \
+ SATA##x##_CFG_POWERUP_RESET | \
+ SATA##x##_CFG_RX_CLK_EN | \
+ SATA##x##_CFG_TX_CLK_EN)
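As a worked expansion, derived purely from the definitions in this hunk, instance 0 of the two token-pasting macros resolves to:

	/*
	 * PCIE_CFG_VAL(0) == PCIE_SATA0_SEL_PCIE | PCIE0_CFG_AUX_CLK_EN |
	 *                    PCIE0_CFG_CORE_CLK_EN | PCIE0_CFG_POWERUP_RESET |
	 *                    PCIE0_CFG_DEVICE_PRESENT
	 *                 == (0 << 29) | (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)
	 *                 == 0x0000000f
	 *
	 * SATA_CFG_VAL(0) == PCIE_SATA0_SEL_SATA | SATA0_CFG_PM_CLK_EN |
	 *                    SATA0_CFG_POWERUP_RESET | SATA0_CFG_RX_CLK_EN |
	 *                    SATA0_CFG_TX_CLK_EN
	 *                 == (1 << 29) | (1 << 16) | (1 << 17) | (1 << 18) | (1 << 19)
	 *                 == 0x200f0000
	 */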
+
+/* Pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 102, 103 };
+static struct spear_muxreg i2c0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2C0_MASK,
+ .val = PMX_I2C0_MASK,
+ },
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+ {
+ .muxregs = i2c0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .npins = ARRAY_SIZE(i2c0_pins),
+ .modemuxs = i2c0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+ .name = "i2c0",
+ .groups = i2c0_grps,
+ .ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* Pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 109, 110, 111, 112 };
+static struct spear_muxreg ssp0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SSP0_MASK,
+ .val = PMX_SSP0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+ {
+ .muxregs = ssp0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+ .name = "ssp0_grp",
+ .pins = ssp0_pins,
+ .npins = ARRAY_SIZE(ssp0_pins),
+ .modemuxs = ssp0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* Pad multiplexing for ssp0_cs0 device */
+static const unsigned ssp0_cs0_pins[] = { 96 };
+static struct spear_muxreg ssp0_cs0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_SSP0_CS0_MASK,
+ .val = PMX_SSP0_CS0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs0_modemux[] = {
+ {
+ .muxregs = ssp0_cs0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs0_pingroup = {
+ .name = "ssp0_cs0_grp",
+ .pins = ssp0_cs0_pins,
+ .npins = ARRAY_SIZE(ssp0_cs0_pins),
+ .modemuxs = ssp0_cs0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs0_modemux),
+};
+
+/* ssp0_cs1_2 device */
+static const unsigned ssp0_cs1_2_pins[] = { 94, 95 };
+static struct spear_muxreg ssp0_cs1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_SSP0_CS1_2_MASK,
+ .val = PMX_SSP0_CS1_2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs1_2_modemux[] = {
+ {
+ .muxregs = ssp0_cs1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs1_2_pingroup = {
+ .name = "ssp0_cs1_2_grp",
+ .pins = ssp0_cs1_2_pins,
+ .npins = ARRAY_SIZE(ssp0_cs1_2_pins),
+ .modemuxs = ssp0_cs1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs1_2_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs0_grp",
+ "ssp0_cs1_2_grp" };
+static struct spear_function ssp0_function = {
+ .name = "ssp0",
+ .groups = ssp0_grps,
+ .ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* Pad multiplexing for i2s0 device */
+static const unsigned i2s0_pins[] = { 104, 105, 106, 107, 108 };
+static struct spear_muxreg i2s0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK,
+ .val = PMX_I2S0_MASK,
+ },
+};
+
+static struct spear_modemux i2s0_modemux[] = {
+ {
+ .muxregs = i2s0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s0_pingroup = {
+ .name = "i2s0_grp",
+ .pins = i2s0_pins,
+ .npins = ARRAY_SIZE(i2s0_pins),
+ .modemuxs = i2s0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s0_modemux),
+};
+
+static const char *const i2s0_grps[] = { "i2s0_grp" };
+static struct spear_function i2s0_function = {
+ .name = "i2s0",
+ .groups = i2s0_grps,
+ .ngroups = ARRAY_SIZE(i2s0_grps),
+};
+
+/* Pad multiplexing for i2s1 device */
+static const unsigned i2s1_pins[] = { 0, 1, 2, 3 };
+static struct spear_muxreg i2s1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_I2S1_MASK,
+ .val = PMX_I2S1_MASK,
+ },
+};
+
+static struct spear_modemux i2s1_modemux[] = {
+ {
+ .muxregs = i2s1_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s1_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s1_pingroup = {
+ .name = "i2s1_grp",
+ .pins = i2s1_pins,
+ .npins = ARRAY_SIZE(i2s1_pins),
+ .modemuxs = i2s1_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s1_modemux),
+};
+
+static const char *const i2s1_grps[] = { "i2s1_grp" };
+static struct spear_function i2s1_function = {
+ .name = "i2s1",
+ .groups = i2s1_grps,
+ .ngroups = ARRAY_SIZE(i2s1_grps),
+};
+
+/* Pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142 };
+static struct spear_muxreg clcd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
+ },
+};
+
+static struct spear_modemux clcd_modemux[] = {
+ {
+ .muxregs = clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_pingroup = {
+ .name = "clcd_grp",
+ .pins = clcd_pins,
+ .npins = ARRAY_SIZE(clcd_pins),
+ .modemuxs = clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const unsigned clcd_high_res_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53 };
+static struct spear_muxreg clcd_high_res_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_CLCD2_MASK,
+ .val = PMX_CLCD2_MASK,
+ },
+};
+
+static struct spear_modemux clcd_high_res_modemux[] = {
+ {
+ .muxregs = clcd_high_res_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_high_res_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_high_res_pingroup = {
+ .name = "clcd_high_res_grp",
+ .pins = clcd_high_res_pins,
+ .npins = ARRAY_SIZE(clcd_high_res_pins),
+ .modemuxs = clcd_high_res_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res_grp" };
+static struct spear_function clcd_function = {
+ .name = "clcd",
+ .groups = clcd_grps,
+ .ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+static const unsigned arm_gpio_pins[] = { 18, 19, 20, 21, 22, 23, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152 };
+static struct spear_muxreg arm_gpio_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_EGPIO_0_GRP_MASK,
+ .val = PMX_EGPIO_0_GRP_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_EGPIO_1_GRP_MASK,
+ .val = PMX_EGPIO_1_GRP_MASK,
+ },
+};
+
+static struct spear_modemux arm_gpio_modemux[] = {
+ {
+ .muxregs = arm_gpio_muxreg,
+ .nmuxregs = ARRAY_SIZE(arm_gpio_muxreg),
+ },
+};
+
+static struct spear_pingroup arm_gpio_pingroup = {
+ .name = "arm_gpio_grp",
+ .pins = arm_gpio_pins,
+ .npins = ARRAY_SIZE(arm_gpio_pins),
+ .modemuxs = arm_gpio_modemux,
+ .nmodemuxs = ARRAY_SIZE(arm_gpio_modemux),
+};
+
+static const char *const arm_gpio_grps[] = { "arm_gpio_grp" };
+static struct spear_function arm_gpio_function = {
+ .name = "arm_gpio",
+ .groups = arm_gpio_grps,
+ .ngroups = ARRAY_SIZE(arm_gpio_grps),
+};
+
+/* Pad multiplexing for smi 2 chips device */
+static const unsigned smi_2_chips_pins[] = { 153, 154, 155, 156, 157 };
+static struct spear_muxreg smi_2_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
+ },
+};
+
+static struct spear_modemux smi_2_chips_modemux[] = {
+ {
+ .muxregs = smi_2_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_2_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_2_chips_pingroup = {
+ .name = "smi_2_chips_grp",
+ .pins = smi_2_chips_pins,
+ .npins = ARRAY_SIZE(smi_2_chips_pins),
+ .modemuxs = smi_2_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_2_chips_modemux),
+};
+
+static const unsigned smi_4_chips_pins[] = { 54, 55 };
+static struct spear_muxreg smi_4_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ },
+};
+
+static struct spear_modemux smi_4_chips_modemux[] = {
+ {
+ .muxregs = smi_4_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_4_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_4_chips_pingroup = {
+ .name = "smi_4_chips_grp",
+ .pins = smi_4_chips_pins,
+ .npins = ARRAY_SIZE(smi_4_chips_pins),
+ .modemuxs = smi_4_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_4_chips_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_2_chips_grp", "smi_4_chips_grp" };
+static struct spear_function smi_function = {
+ .name = "smi",
+ .groups = smi_grps,
+ .ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* Pad multiplexing for gmii device */
+static const unsigned gmii_pins[] = { 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg gmii_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_GMII_MASK,
+ .val = PMX_GMII_MASK,
+ },
+};
+
+static struct spear_modemux gmii_modemux[] = {
+ {
+ .muxregs = gmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(gmii_muxreg),
+ },
+};
+
+static struct spear_pingroup gmii_pingroup = {
+ .name = "gmii_grp",
+ .pins = gmii_pins,
+ .npins = ARRAY_SIZE(gmii_pins),
+ .modemuxs = gmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+static const char *const gmii_grps[] = { "gmii_grp" };
+static struct spear_function gmii_function = {
+ .name = "gmii",
+ .groups = gmii_grps,
+ .ngroups = ARRAY_SIZE(gmii_grps),
+};
+
+/* Pad multiplexing for rgmii device */
+static const unsigned rgmii_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 175,
+ 180, 181, 182, 183, 185, 188, 193, 194, 195, 196, 197, 198, 211, 212 };
+static struct spear_muxreg rgmii_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_RGMII_REG0_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_RGMII_REG1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_RGMII_REG2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+ {
+ .muxregs = rgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+ .name = "rgmii_grp",
+ .pins = rgmii_pins,
+ .npins = ARRAY_SIZE(rgmii_pins),
+ .modemuxs = rgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+static const char *const rgmii_grps[] = { "rgmii_grp" };
+static struct spear_function rgmii_function = {
+ .name = "rgmii",
+ .groups = rgmii_grps,
+ .ngroups = ARRAY_SIZE(rgmii_grps),
+};
+
+/* Pad multiplexing for smii_0_1_2 device */
+static const unsigned smii_0_1_2_pins[] = { 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55 };
+static struct spear_muxreg smii_0_1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_SMII_0_1_2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux smii_0_1_2_modemux[] = {
+ {
+ .muxregs = smii_0_1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(smii_0_1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup smii_0_1_2_pingroup = {
+ .name = "smii_0_1_2_grp",
+ .pins = smii_0_1_2_pins,
+ .npins = ARRAY_SIZE(smii_0_1_2_pins),
+ .modemuxs = smii_0_1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(smii_0_1_2_modemux),
+};
+
+static const char *const smii_0_1_2_grps[] = { "smii_0_1_2_grp" };
+static struct spear_function smii_0_1_2_function = {
+ .name = "smii_0_1_2",
+ .groups = smii_0_1_2_grps,
+ .ngroups = ARRAY_SIZE(smii_0_1_2_grps),
+};
+
+/* Pad multiplexing for ras_mii_txclk device */
+static const unsigned ras_mii_txclk_pins[] = { 98, 99 };
+static struct spear_muxreg ras_mii_txclk_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NFCE2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ras_mii_txclk_modemux[] = {
+ {
+ .muxregs = ras_mii_txclk_muxreg,
+ .nmuxregs = ARRAY_SIZE(ras_mii_txclk_muxreg),
+ },
+};
+
+static struct spear_pingroup ras_mii_txclk_pingroup = {
+ .name = "ras_mii_txclk_grp",
+ .pins = ras_mii_txclk_pins,
+ .npins = ARRAY_SIZE(ras_mii_txclk_pins),
+ .modemuxs = ras_mii_txclk_modemux,
+ .nmodemuxs = ARRAY_SIZE(ras_mii_txclk_modemux),
+};
+
+static const char *const ras_mii_txclk_grps[] = { "ras_mii_txclk_grp" };
+static struct spear_function ras_mii_txclk_function = {
+ .name = "ras_mii_txclk",
+ .groups = ras_mii_txclk_grps,
+ .ngroups = ARRAY_SIZE(ras_mii_txclk_grps),
+};
+
+/* Pad multiplexing for nand 8bit device (cs0 only) */
+static const unsigned nand_8bit_pins[] = { 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212 };
+static struct spear_muxreg nand_8bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_NAND8BIT_0_MASK,
+ .val = PMX_NAND8BIT_0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND8BIT_1_MASK,
+ .val = PMX_NAND8BIT_1_MASK,
+ },
+};
+
+static struct spear_modemux nand_8bit_modemux[] = {
+ {
+ .muxregs = nand_8bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_8bit_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_8bit_pingroup = {
+ .name = "nand_8bit_grp",
+ .pins = nand_8bit_pins,
+ .npins = ARRAY_SIZE(nand_8bit_pins),
+ .modemuxs = nand_8bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_8bit_modemux),
+};
+
+/* Pad multiplexing for nand 16bit device */
+static const unsigned nand_16bit_pins[] = { 201, 202, 203, 204, 207, 208, 209,
+ 210 };
+static struct spear_muxreg nand_16bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND16BIT_1_MASK,
+ .val = PMX_NAND16BIT_1_MASK,
+ },
+};
+
+static struct spear_modemux nand_16bit_modemux[] = {
+ {
+ .muxregs = nand_16bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_16bit_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_16bit_pingroup = {
+ .name = "nand_16bit_grp",
+ .pins = nand_16bit_pins,
+ .npins = ARRAY_SIZE(nand_16bit_pins),
+ .modemuxs = nand_16bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_16bit_modemux),
+};
+
+/* Pad multiplexing for nand 4 chips */
+static const unsigned nand_4_chips_pins[] = { 205, 206, 211, 212 };
+static struct spear_muxreg nand_4_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND_4CHIPS_MASK,
+ .val = PMX_NAND_4CHIPS_MASK,
+ },
+};
+
+static struct spear_modemux nand_4_chips_modemux[] = {
+ {
+ .muxregs = nand_4_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_4_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_4_chips_pingroup = {
+ .name = "nand_4_chips_grp",
+ .pins = nand_4_chips_pins,
+ .npins = ARRAY_SIZE(nand_4_chips_pins),
+ .modemuxs = nand_4_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_4_chips_modemux),
+};
+
+static const char *const nand_grps[] = { "nand_8bit_grp", "nand_16bit_grp",
+ "nand_4_chips_grp" };
+static struct spear_function nand_function = {
+ .name = "nand",
+ .groups = nand_grps,
+ .ngroups = ARRAY_SIZE(nand_grps),
+};
+
+/* Pad multiplexing for keyboard_6x6 device */
+static const unsigned keyboard_6x6_pins[] = { 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212 };
+static struct spear_muxreg keyboard_6x6_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KEYBOARD_6X6_MASK | PMX_NFIO8_15_MASK |
+ PMX_NFCE1_MASK | PMX_NFCE2_MASK | PMX_NFWPRT1_MASK |
+ PMX_NFWPRT2_MASK,
+ .val = PMX_KEYBOARD_6X6_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_6x6_modemux[] = {
+ {
+ .muxregs = keyboard_6x6_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_6x6_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_6x6_pingroup = {
+ .name = "keyboard_6x6_grp",
+ .pins = keyboard_6x6_pins,
+ .npins = ARRAY_SIZE(keyboard_6x6_pins),
+ .modemuxs = keyboard_6x6_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_6x6_modemux),
+};
+
+/* Pad multiplexing for keyboard_rowcol6_8 device */
+static const unsigned keyboard_rowcol6_8_pins[] = { 24, 25, 26, 27, 28, 29 };
+static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL68_MASK,
+ .val = PMX_KBD_ROWCOL68_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_rowcol6_8_modemux[] = {
+ {
+ .muxregs = keyboard_rowcol6_8_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_rowcol6_8_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_rowcol6_8_pingroup = {
+ .name = "keyboard_rowcol6_8_grp",
+ .pins = keyboard_rowcol6_8_pins,
+ .npins = ARRAY_SIZE(keyboard_rowcol6_8_pins),
+ .modemuxs = keyboard_rowcol6_8_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_rowcol6_8_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_6x6_grp",
+ "keyboard_rowcol6_8_grp" };
+static struct spear_function keyboard_function = {
+ .name = "keyboard",
+ .groups = keyboard_grps,
+ .ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* Pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 100, 101 };
+static struct spear_muxreg uart0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_UART0_MASK,
+ .val = PMX_UART0_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modemux[] = {
+ {
+ .muxregs = uart0_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_pingroup = {
+ .name = "uart0_grp",
+ .pins = uart0_pins,
+ .npins = ARRAY_SIZE(uart0_pins),
+ .modemuxs = uart0_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* Pad multiplexing for uart0_modem device */
+static const unsigned uart0_modem_pins[] = { 12, 13, 14, 15, 16, 17 };
+static struct spear_muxreg uart0_modem_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_UART0_MODEM_MASK,
+ .val = PMX_UART0_MODEM_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modem_modemux[] = {
+ {
+ .muxregs = uart0_modem_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_modem_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_modem_pingroup = {
+ .name = "uart0_modem_grp",
+ .pins = uart0_modem_pins,
+ .npins = ARRAY_SIZE(uart0_modem_pins),
+ .modemuxs = uart0_modem_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modem_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_modem_grp" };
+static struct spear_function uart0_function = {
+ .name = "uart0",
+ .groups = uart0_grps,
+ .ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* Pad multiplexing for gpt0_tmr0 device */
+static const unsigned gpt0_tmr0_pins[] = { 10, 11 };
+static struct spear_muxreg gpt0_tmr0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT0_TMR0_MASK,
+ .val = PMX_GPT0_TMR0_MASK,
+ },
+};
+
+static struct spear_modemux gpt0_tmr0_modemux[] = {
+ {
+ .muxregs = gpt0_tmr0_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt0_tmr0_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt0_tmr0_pingroup = {
+ .name = "gpt0_tmr0_grp",
+ .pins = gpt0_tmr0_pins,
+ .npins = ARRAY_SIZE(gpt0_tmr0_pins),
+ .modemuxs = gpt0_tmr0_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt0_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt0_tmr1 device */
+static const unsigned gpt0_tmr1_pins[] = { 8, 9 };
+static struct spear_muxreg gpt0_tmr1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT0_TMR1_MASK,
+ .val = PMX_GPT0_TMR1_MASK,
+ },
+};
+
+static struct spear_modemux gpt0_tmr1_modemux[] = {
+ {
+ .muxregs = gpt0_tmr1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt0_tmr1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt0_tmr1_pingroup = {
+ .name = "gpt0_tmr1_grp",
+ .pins = gpt0_tmr1_pins,
+ .npins = ARRAY_SIZE(gpt0_tmr1_pins),
+ .modemuxs = gpt0_tmr1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt0_tmr1_modemux),
+};
+
+static const char *const gpt0_grps[] = { "gpt0_tmr0_grp", "gpt0_tmr1_grp" };
+static struct spear_function gpt0_function = {
+ .name = "gpt0",
+ .groups = gpt0_grps,
+ .ngroups = ARRAY_SIZE(gpt0_grps),
+};
+
+/* Pad multiplexing for gpt1_tmr0 device */
+static const unsigned gpt1_tmr0_pins[] = { 6, 7 };
+static struct spear_muxreg gpt1_tmr0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT1_TMR0_MASK,
+ .val = PMX_GPT1_TMR0_MASK,
+ },
+};
+
+static struct spear_modemux gpt1_tmr0_modemux[] = {
+ {
+ .muxregs = gpt1_tmr0_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt1_tmr0_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt1_tmr0_pingroup = {
+ .name = "gpt1_tmr0_grp",
+ .pins = gpt1_tmr0_pins,
+ .npins = ARRAY_SIZE(gpt1_tmr0_pins),
+ .modemuxs = gpt1_tmr0_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt1_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt1_tmr1 device */
+static const unsigned gpt1_tmr1_pins[] = { 4, 5 };
+static struct spear_muxreg gpt1_tmr1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT1_TMR1_MASK,
+ .val = PMX_GPT1_TMR1_MASK,
+ },
+};
+
+static struct spear_modemux gpt1_tmr1_modemux[] = {
+ {
+ .muxregs = gpt1_tmr1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt1_tmr1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt1_tmr1_pingroup = {
+ .name = "gpt1_tmr1_grp",
+ .pins = gpt1_tmr1_pins,
+ .npins = ARRAY_SIZE(gpt1_tmr1_pins),
+ .modemuxs = gpt1_tmr1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt1_tmr1_modemux),
+};
+
+static const char *const gpt1_grps[] = { "gpt1_tmr1_grp", "gpt1_tmr0_grp" };
+static struct spear_function gpt1_function = {
+ .name = "gpt1",
+ .groups = gpt1_grps,
+ .ngroups = ARRAY_SIZE(gpt1_grps),
+};
+
+/* Pad multiplexing for mcif device */
+static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245 };
+#define MCIF_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_0, \
+ .mask = PMX_MCI_DATA8_15_MASK, \
+ .val = PMX_MCI_DATA8_15_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_1, \
+ .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_NFWPRT2_MASK, \
+ .val = PMX_MCIFALL_1_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_2, \
+ .mask = PMX_MCIFALL_2_MASK, \
+ .val = PMX_MCIFALL_2_MASK, \
+ }
+
+/* sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_SD,
+ },
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+ {
+ .muxregs = sdhci_muxreg,
+ .nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+ },
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+ .name = "sdhci_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = sdhci_modemux,
+ .nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+ .name = "sdhci",
+ .groups = sdhci_grps,
+ .ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* cf device */
+static struct spear_muxreg cf_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_CF,
+ },
+};
+
+static struct spear_modemux cf_modemux[] = {
+ {
+ .muxregs = cf_muxreg,
+ .nmuxregs = ARRAY_SIZE(cf_muxreg),
+ },
+};
+
+static struct spear_pingroup cf_pingroup = {
+ .name = "cf_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = cf_modemux,
+ .nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+ .name = "cf",
+ .groups = cf_grps,
+ .ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* xd device */
+static struct spear_muxreg xd_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_XD,
+ },
+};
+
+static struct spear_modemux xd_modemux[] = {
+ {
+ .muxregs = xd_muxreg,
+ .nmuxregs = ARRAY_SIZE(xd_muxreg),
+ },
+};
+
+static struct spear_pingroup xd_pingroup = {
+ .name = "xd_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = xd_modemux,
+ .nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+ .name = "xd",
+ .groups = xd_grps,
+ .ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* Pad multiplexing for touch_xy device */
+static const unsigned touch_xy_pins[] = { 97 };
+static struct spear_muxreg touch_xy_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_TOUCH_XY_MASK,
+ .val = PMX_TOUCH_XY_MASK,
+ },
+};
+
+static struct spear_modemux touch_xy_modemux[] = {
+ {
+ .muxregs = touch_xy_muxreg,
+ .nmuxregs = ARRAY_SIZE(touch_xy_muxreg),
+ },
+};
+
+static struct spear_pingroup touch_xy_pingroup = {
+ .name = "touch_xy_grp",
+ .pins = touch_xy_pins,
+ .npins = ARRAY_SIZE(touch_xy_pins),
+ .modemuxs = touch_xy_modemux,
+ .nmodemuxs = ARRAY_SIZE(touch_xy_modemux),
+};
+
+static const char *const touch_xy_grps[] = { "touch_xy_grp" };
+static struct spear_function touch_xy_function = {
+ .name = "touchscreen",
+ .groups = touch_xy_grps,
+ .ngroups = ARRAY_SIZE(touch_xy_grps),
+};
+
+/* Pad multiplexing for uart1 device */
+/* Muxed with I2C */
+static const unsigned uart1_dis_i2c_pins[] = { 102, 103 };
+static struct spear_muxreg uart1_dis_i2c_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2C0_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart1_dis_i2c_modemux[] = {
+ {
+ .muxregs = uart1_dis_i2c_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_dis_i2c_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_1_dis_i2c_pingroup = {
+ .name = "uart1_disable_i2c_grp",
+ .pins = uart1_dis_i2c_pins,
+ .npins = ARRAY_SIZE(uart1_dis_i2c_pins),
+ .modemuxs = uart1_dis_i2c_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_dis_i2c_modemux),
+};
+
+/* Muxed with SD/MMC */
+static const unsigned uart1_dis_sd_pins[] = { 214, 215 };
+static struct spear_muxreg uart1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_MCIDATA1_MASK |
+ PMX_MCIDATA2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart1_dis_sd_modemux[] = {
+ {
+ .muxregs = uart1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_1_dis_sd_pingroup = {
+ .name = "uart1_disable_sd_grp",
+ .pins = uart1_dis_sd_pins,
+ .npins = ARRAY_SIZE(uart1_dis_sd_pins),
+ .modemuxs = uart1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_dis_sd_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_disable_i2c_grp",
+ "uart1_disable_sd_grp" };
+static struct spear_function uart1_function = {
+ .name = "uart1",
+ .groups = uart1_grps,
+ .ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* Pad multiplexing for uart2_3 device */
+static const unsigned uart2_3_pins[] = { 104, 105, 106, 107 };
+static struct spear_muxreg uart2_3_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart2_3_modemux[] = {
+ {
+ .muxregs = uart2_3_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart2_3_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_2_3_pingroup = {
+ .name = "uart2_3_grp",
+ .pins = uart2_3_pins,
+ .npins = ARRAY_SIZE(uart2_3_pins),
+ .modemuxs = uart2_3_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart2_3_modemux),
+};
+
+static const char *const uart2_3_grps[] = { "uart2_3_grp" };
+static struct spear_function uart2_3_function = {
+ .name = "uart2_3",
+ .groups = uart2_3_grps,
+ .ngroups = ARRAY_SIZE(uart2_3_grps),
+};
+
+/* Pad multiplexing for uart4 device */
+static const unsigned uart4_pins[] = { 108, 113 };
+static struct spear_muxreg uart4_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart4_modemux[] = {
+ {
+ .muxregs = uart4_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart4_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_4_pingroup = {
+ .name = "uart4_grp",
+ .pins = uart4_pins,
+ .npins = ARRAY_SIZE(uart4_pins),
+ .modemuxs = uart4_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart4_modemux),
+};
+
+static const char *const uart4_grps[] = { "uart4_grp" };
+static struct spear_function uart4_function = {
+ .name = "uart4",
+ .groups = uart4_grps,
+ .ngroups = ARRAY_SIZE(uart4_grps),
+};
+
+/* Pad multiplexing for uart5 device */
+static const unsigned uart5_pins[] = { 114, 115 };
+static struct spear_muxreg uart5_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart5_modemux[] = {
+ {
+ .muxregs = uart5_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart5_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_5_pingroup = {
+ .name = "uart5_grp",
+ .pins = uart5_pins,
+ .npins = ARRAY_SIZE(uart5_pins),
+ .modemuxs = uart5_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart5_modemux),
+};
+
+static const char *const uart5_grps[] = { "uart5_grp" };
+static struct spear_function uart5_function = {
+ .name = "uart5",
+ .groups = uart5_grps,
+ .ngroups = ARRAY_SIZE(uart5_grps),
+};
+
+/* Pad multiplexing for rs485_0_1_tdm_0_1 device */
+static const unsigned rs485_0_1_tdm_0_1_pins[] = { 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137 };
+static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux rs485_0_1_tdm_0_1_modemux[] = {
+ {
+ .muxregs = rs485_0_1_tdm_0_1_muxreg,
+ .nmuxregs = ARRAY_SIZE(rs485_0_1_tdm_0_1_muxreg),
+ },
+};
+
+static struct spear_pingroup rs485_0_1_tdm_0_1_pingroup = {
+ .name = "rs485_0_1_tdm_0_1_grp",
+ .pins = rs485_0_1_tdm_0_1_pins,
+ .npins = ARRAY_SIZE(rs485_0_1_tdm_0_1_pins),
+ .modemuxs = rs485_0_1_tdm_0_1_modemux,
+ .nmodemuxs = ARRAY_SIZE(rs485_0_1_tdm_0_1_modemux),
+};
+
+static const char *const rs485_0_1_tdm_0_1_grps[] = { "rs485_0_1_tdm_0_1_grp" };
+static struct spear_function rs485_0_1_tdm_0_1_function = {
+ .name = "rs485_0_1_tdm_0_1",
+ .groups = rs485_0_1_tdm_0_1_grps,
+ .ngroups = ARRAY_SIZE(rs485_0_1_tdm_0_1_grps),
+};
+
+/* Pad multiplexing for i2c_1_2 device */
+static const unsigned i2c_1_2_pins[] = { 138, 139, 140, 141 };
+static struct spear_muxreg i2c_1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_1_2_modemux[] = {
+ {
+ .muxregs = i2c_1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_1_2_pingroup = {
+ .name = "i2c_1_2_grp",
+ .pins = i2c_1_2_pins,
+ .npins = ARRAY_SIZE(i2c_1_2_pins),
+ .modemuxs = i2c_1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_1_2_modemux),
+};
+
+static const char *const i2c_1_2_grps[] = { "i2c_1_2_grp" };
+static struct spear_function i2c_1_2_function = {
+ .name = "i2c_1_2",
+ .groups = i2c_1_2_grps,
+ .ngroups = ARRAY_SIZE(i2c_1_2_grps),
+};
+
+/* Pad multiplexing for i2c3_dis_smi_clcd device */
+/* Muxed with SMI & CLCD */
+static const unsigned i2c3_dis_smi_clcd_pins[] = { 142, 153 };
+static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c3_dis_smi_clcd_modemux[] = {
+ {
+ .muxregs = i2c3_dis_smi_clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c3_dis_smi_clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c3_dis_smi_clcd_pingroup = {
+ .name = "i2c3_dis_smi_clcd_grp",
+ .pins = i2c3_dis_smi_clcd_pins,
+ .npins = ARRAY_SIZE(i2c3_dis_smi_clcd_pins),
+ .modemuxs = i2c3_dis_smi_clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c3_dis_smi_clcd_modemux),
+};
+
+/* Pad multiplexing for i2c3_dis_sd_i2s0 device */
+/* Muxed with SD/MMC & I2S1 */
+static const unsigned i2c3_dis_sd_i2s0_pins[] = { 0, 216 };
+static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c3_dis_sd_i2s0_modemux[] = {
+ {
+ .muxregs = i2c3_dis_sd_i2s0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c3_dis_sd_i2s0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c3_dis_sd_i2s0_pingroup = {
+ .name = "i2c3_dis_sd_i2s0_grp",
+ .pins = i2c3_dis_sd_i2s0_pins,
+ .npins = ARRAY_SIZE(i2c3_dis_sd_i2s0_pins),
+ .modemuxs = i2c3_dis_sd_i2s0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c3_dis_sd_i2s0_modemux),
+};
+
+static const char *const i2c3_grps[] = { "i2c3_dis_smi_clcd_grp",
+ "i2c3_dis_sd_i2s0_grp" };
+static struct spear_function i2c3_unction = {
+ .name = "i2c3_i2s1",
+ .groups = i2c3_grps,
+ .ngroups = ARRAY_SIZE(i2c3_grps),
+};
+
+/* Pad multiplexing for i2c_4_5_dis_smi device */
+/* Muxed with SMI */
+static const unsigned i2c_4_5_dis_smi_pins[] = { 154, 155, 156, 157 };
+static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_4_5_dis_smi_modemux[] = {
+ {
+ .muxregs = i2c_4_5_dis_smi_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_4_5_dis_smi_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_4_5_dis_smi_pingroup = {
+ .name = "i2c_4_5_dis_smi_grp",
+ .pins = i2c_4_5_dis_smi_pins,
+ .npins = ARRAY_SIZE(i2c_4_5_dis_smi_pins),
+ .modemuxs = i2c_4_5_dis_smi_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_4_5_dis_smi_modemux),
+};
+
+/* Pad multiplexing for i2c4_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c4_dis_sd_pins[] = { 217, 218 };
+static struct spear_muxreg i2c4_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_MCIDATA4_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIDATA5_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c4_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c4_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c4_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c4_dis_sd_pingroup = {
+ .name = "i2c4_dis_sd_grp",
+ .pins = i2c4_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c4_dis_sd_pins),
+ .modemuxs = i2c4_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c4_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c5_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c5_dis_sd_pins[] = { 219, 220 };
+static struct spear_muxreg i2c5_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIDATA6_MASK |
+ PMX_MCIDATA7_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c5_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c5_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c5_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c5_dis_sd_pingroup = {
+ .name = "i2c5_dis_sd_grp",
+ .pins = i2c5_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c5_dis_sd_pins),
+ .modemuxs = i2c5_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c5_dis_sd_modemux),
+};
+
+static const char *const i2c_4_5_grps[] = { "i2c5_dis_sd_grp",
+ "i2c4_dis_sd_grp", "i2c_4_5_dis_smi_grp" };
+static struct spear_function i2c_4_5_function = {
+ .name = "i2c_4_5",
+ .groups = i2c_4_5_grps,
+ .ngroups = ARRAY_SIZE(i2c_4_5_grps),
+};
+
+/* Pad multiplexing for i2c_6_7_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned i2c_6_7_dis_kbd_pins[] = { 207, 208, 209, 210 };
+static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_6_7_dis_kbd_modemux[] = {
+ {
+ .muxregs = i2c_6_7_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_6_7_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_6_7_dis_kbd_pingroup = {
+ .name = "i2c_6_7_dis_kbd_grp",
+ .pins = i2c_6_7_dis_kbd_pins,
+ .npins = ARRAY_SIZE(i2c_6_7_dis_kbd_pins),
+ .modemuxs = i2c_6_7_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_6_7_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for i2c6_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c6_dis_sd_pins[] = { 236, 237 };
+static struct spear_muxreg i2c6_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIIORDRE_MASK |
+ PMX_MCIIOWRWE_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c6_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c6_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c6_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c6_dis_sd_pingroup = {
+ .name = "i2c6_dis_sd_grp",
+ .pins = i2c6_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c6_dis_sd_pins),
+ .modemuxs = i2c6_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c6_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c7_dis_sd device */
+static const unsigned i2c7_dis_sd_pins[] = { 238, 239 };
+static struct spear_muxreg i2c7_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIRESETCF_MASK |
+ PMX_MCICS0CE_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c7_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c7_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c7_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c7_dis_sd_pingroup = {
+ .name = "i2c7_dis_sd_grp",
+ .pins = i2c7_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c7_dis_sd_pins),
+ .modemuxs = i2c7_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c7_dis_sd_modemux),
+};
+
+static const char *const i2c_6_7_grps[] = { "i2c6_dis_sd_grp",
+ "i2c7_dis_sd_grp", "i2c_6_7_dis_kbd_grp" };
+static struct spear_function i2c_6_7_function = {
+ .name = "i2c_6_7",
+ .groups = i2c_6_7_grps,
+ .ngroups = ARRAY_SIZE(i2c_6_7_grps),
+};
+
+/* Pad multiplexing for can0_dis_nor device */
+/* Muxed with NOR */
+static const unsigned can0_dis_nor_pins[] = { 56, 57 };
+static struct spear_muxreg can0_dis_nor_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_NFRSTPWDWN2_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NFRSTPWDWN3_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can0_dis_nor_modemux[] = {
+ {
+ .muxregs = can0_dis_nor_muxreg,
+ .nmuxregs = ARRAY_SIZE(can0_dis_nor_muxreg),
+ },
+};
+
+static struct spear_pingroup can0_dis_nor_pingroup = {
+ .name = "can0_dis_nor_grp",
+ .pins = can0_dis_nor_pins,
+ .npins = ARRAY_SIZE(can0_dis_nor_pins),
+ .modemuxs = can0_dis_nor_modemux,
+ .nmodemuxs = ARRAY_SIZE(can0_dis_nor_modemux),
+};
+
+/* Pad multiplexing for can0_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can0_dis_sd_pins[] = { 240, 241 };
+static struct spear_muxreg can0_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can0_dis_sd_modemux[] = {
+ {
+ .muxregs = can0_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can0_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup can0_dis_sd_pingroup = {
+ .name = "can0_dis_sd_grp",
+ .pins = can0_dis_sd_pins,
+ .npins = ARRAY_SIZE(can0_dis_sd_pins),
+ .modemuxs = can0_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can0_dis_sd_modemux),
+};
+
+static const char *const can0_grps[] = { "can0_dis_nor_grp", "can0_dis_sd_grp"
+};
+static struct spear_function can0_function = {
+ .name = "can0",
+ .groups = can0_grps,
+ .ngroups = ARRAY_SIZE(can0_grps),
+};
+
+/* Pad multiplexing for can1_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can1_dis_sd_pins[] = { 242, 243 };
+static struct spear_muxreg can1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can1_dis_sd_modemux[] = {
+ {
+ .muxregs = can1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup can1_dis_sd_pingroup = {
+ .name = "can1_dis_sd_grp",
+ .pins = can1_dis_sd_pins,
+ .npins = ARRAY_SIZE(can1_dis_sd_pins),
+ .modemuxs = can1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can1_dis_sd_modemux),
+};
+
+/* Pad multiplexing for can1_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned can1_dis_kbd_pins[] = { 201, 202 };
+static struct spear_muxreg can1_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can1_dis_kbd_modemux[] = {
+ {
+ .muxregs = can1_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can1_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup can1_dis_kbd_pingroup = {
+ .name = "can1_dis_kbd_grp",
+ .pins = can1_dis_kbd_pins,
+ .npins = ARRAY_SIZE(can1_dis_kbd_pins),
+ .modemuxs = can1_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can1_dis_kbd_modemux),
+};
+
+static const char *const can1_grps[] = { "can1_dis_sd_grp", "can1_dis_kbd_grp"
+};
+static struct spear_function can1_function = {
+ .name = "can1",
+ .groups = can1_grps,
+ .ngroups = ARRAY_SIZE(can1_grps),
+};
+
+/* Pad multiplexing for pci device */
+static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
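+/*
+ * Common pad function entries shared by the pcie0/1/2 and sata0/1/2 groups
+ * below. Each group appends its own PCIE_SATA_CFG entry to route the shared
+ * pads to the selected PCIe or SATA controller.
+ */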
+#define PCI_SATA_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_0, \
+ .mask = PMX_MCI_DATA8_15_MASK, \
+ .val = 0, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_1, \
+ .mask = PMX_PCI_REG1_MASK, \
+ .val = 0, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_2, \
+ .mask = PMX_PCI_REG2_MASK, \
+ .val = 0, \
+ }
+
+/* pad multiplexing for pcie0 device */
+static struct spear_muxreg pcie0_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(0),
+ .val = PCIE_CFG_VAL(0),
+ },
+};
+
+static struct spear_modemux pcie0_modemux[] = {
+ {
+ .muxregs = pcie0_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie0_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie0_pingroup = {
+ .name = "pcie0_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie0_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie0_modemux),
+};
+
+/* pad multiplexing for pcie1 device */
+static struct spear_muxreg pcie1_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(1),
+ .val = PCIE_CFG_VAL(1),
+ },
+};
+
+static struct spear_modemux pcie1_modemux[] = {
+ {
+ .muxregs = pcie1_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie1_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie1_pingroup = {
+ .name = "pcie1_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie1_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie1_modemux),
+};
+
+/* pad multiplexing for pcie2 device */
+static struct spear_muxreg pcie2_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(2),
+ .val = PCIE_CFG_VAL(2),
+ },
+};
+
+static struct spear_modemux pcie2_modemux[] = {
+ {
+ .muxregs = pcie2_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie2_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie2_pingroup = {
+ .name = "pcie2_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie2_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie2_modemux),
+};
+
+static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
+static struct spear_function pci_function = {
+ .name = "pci",
+ .groups = pci_grps,
+ .ngroups = ARRAY_SIZE(pci_grps),
+};
+
+/* pad multiplexing for sata0 device */
+static struct spear_muxreg sata0_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(0),
+ .val = SATA_CFG_VAL(0),
+ },
+};
+
+static struct spear_modemux sata0_modemux[] = {
+ {
+ .muxregs = sata0_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata0_muxreg),
+ },
+};
+
+static struct spear_pingroup sata0_pingroup = {
+ .name = "sata0_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata0_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata0_modemux),
+};
+
+/* pad multiplexing for sata1 device */
+static struct spear_muxreg sata1_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(1),
+ .val = SATA_CFG_VAL(1),
+ },
+};
+
+static struct spear_modemux sata1_modemux[] = {
+ {
+ .muxregs = sata1_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata1_muxreg),
+ },
+};
+
+static struct spear_pingroup sata1_pingroup = {
+ .name = "sata1_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata1_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata1_modemux),
+};
+
+/* pad multiplexing for sata2 device */
+static struct spear_muxreg sata2_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(2),
+ .val = SATA_CFG_VAL(2),
+ },
+};
+
+static struct spear_modemux sata2_modemux[] = {
+ {
+ .muxregs = sata2_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata2_muxreg),
+ },
+};
+
+static struct spear_pingroup sata2_pingroup = {
+ .name = "sata2_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata2_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata2_modemux),
+};
+
+static const char *const sata_grps[] = { "sata0_grp", "sata1_grp", "sata2_grp"
+};
+static struct spear_function sata_function = {
+ .name = "sata",
+ .groups = sata_grps,
+ .ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* Pad multiplexing for ssp1_dis_kbd device */
+static const unsigned ssp1_dis_kbd_pins[] = { 203, 204, 205, 206 };
+static struct spear_muxreg ssp1_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+ PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+ PMX_NFCE2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ssp1_dis_kbd_modemux[] = {
+ {
+ .muxregs = ssp1_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp1_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp1_dis_kbd_pingroup = {
+ .name = "ssp1_dis_kbd_grp",
+ .pins = ssp1_dis_kbd_pins,
+ .npins = ARRAY_SIZE(ssp1_dis_kbd_pins),
+ .modemuxs = ssp1_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp1_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for ssp1_dis_sd device */
+static const unsigned ssp1_dis_sd_pins[] = { 224, 226, 227, 228 };
+static struct spear_muxreg ssp1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+ PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ssp1_dis_sd_modemux[] = {
+ {
+ .muxregs = ssp1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp1_dis_sd_pingroup = {
+ .name = "ssp1_dis_sd_grp",
+ .pins = ssp1_dis_sd_pins,
+ .npins = ARRAY_SIZE(ssp1_dis_sd_pins),
+ .modemuxs = ssp1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp1_dis_sd_modemux),
+};
+
+static const char *const ssp1_grps[] = { "ssp1_dis_kbd_grp",
+ "ssp1_dis_sd_grp" };
+static struct spear_function ssp1_function = {
+ .name = "ssp1",
+ .groups = ssp1_grps,
+ .ngroups = ARRAY_SIZE(ssp1_grps),
+};
+
+/* Pad multiplexing for gpt64 device */
+static const unsigned gpt64_pins[] = { 230, 231, 232, 245 };
+static struct spear_muxreg gpt64_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+ | PMX_MCILEDS_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux gpt64_modemux[] = {
+ {
+ .muxregs = gpt64_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt64_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt64_pingroup = {
+ .name = "gpt64_grp",
+ .pins = gpt64_pins,
+ .npins = ARRAY_SIZE(gpt64_pins),
+ .modemuxs = gpt64_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt64_modemux),
+};
+
+static const char *const gpt64_grps[] = { "gpt64_grp" };
+static struct spear_function gpt64_function = {
+ .name = "gpt64",
+ .groups = gpt64_grps,
+ .ngroups = ARRAY_SIZE(gpt64_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1310_pingroups[] = {
+ &i2c0_pingroup,
+ &ssp0_pingroup,
+ &i2s0_pingroup,
+ &i2s1_pingroup,
+ &clcd_pingroup,
+ &clcd_high_res_pingroup,
+ &arm_gpio_pingroup,
+ &smi_2_chips_pingroup,
+ &smi_4_chips_pingroup,
+ &gmii_pingroup,
+ &rgmii_pingroup,
+ &smii_0_1_2_pingroup,
+ &ras_mii_txclk_pingroup,
+ &nand_8bit_pingroup,
+ &nand_16bit_pingroup,
+ &nand_4_chips_pingroup,
+ &keyboard_6x6_pingroup,
+ &keyboard_rowcol6_8_pingroup,
+ &uart0_pingroup,
+ &uart0_modem_pingroup,
+ &gpt0_tmr0_pingroup,
+ &gpt0_tmr1_pingroup,
+ &gpt1_tmr0_pingroup,
+ &gpt1_tmr1_pingroup,
+ &sdhci_pingroup,
+ &cf_pingroup,
+ &xd_pingroup,
+ &touch_xy_pingroup,
+ &ssp0_cs0_pingroup,
+ &ssp0_cs1_2_pingroup,
+ &uart_1_dis_i2c_pingroup,
+ &uart_1_dis_sd_pingroup,
+ &uart_2_3_pingroup,
+ &uart_4_pingroup,
+ &uart_5_pingroup,
+ &rs485_0_1_tdm_0_1_pingroup,
+ &i2c_1_2_pingroup,
+ &i2c3_dis_smi_clcd_pingroup,
+ &i2c3_dis_sd_i2s0_pingroup,
+ &i2c_4_5_dis_smi_pingroup,
+ &i2c4_dis_sd_pingroup,
+ &i2c5_dis_sd_pingroup,
+ &i2c_6_7_dis_kbd_pingroup,
+ &i2c6_dis_sd_pingroup,
+ &i2c7_dis_sd_pingroup,
+ &can0_dis_nor_pingroup,
+ &can0_dis_sd_pingroup,
+ &can1_dis_sd_pingroup,
+ &can1_dis_kbd_pingroup,
+ &pcie0_pingroup,
+ &pcie1_pingroup,
+ &pcie2_pingroup,
+ &sata0_pingroup,
+ &sata1_pingroup,
+ &sata2_pingroup,
+ &ssp1_dis_kbd_pingroup,
+ &ssp1_dis_sd_pingroup,
+ &gpt64_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1310_functions[] = {
+ &i2c0_function,
+ &ssp0_function,
+ &i2s0_function,
+ &i2s1_function,
+ &clcd_function,
+ &arm_gpio_function,
+ &smi_function,
+ &gmii_function,
+ &rgmii_function,
+ &smii_0_1_2_function,
+ &ras_mii_txclk_function,
+ &nand_function,
+ &keyboard_function,
+ &uart0_function,
+ &gpt0_function,
+ &gpt1_function,
+ &sdhci_function,
+ &cf_function,
+ &xd_function,
+ &touch_xy_function,
+ &uart1_function,
+ &uart2_3_function,
+ &uart4_function,
+ &uart5_function,
+ &rs485_0_1_tdm_0_1_function,
+ &i2c_1_2_function,
+ &i2c3_unction,
+ &i2c_4_5_function,
+ &i2c_6_7_function,
+ &can0_function,
+ &can1_function,
+ &pci_function,
+ &sata_function,
+ &ssp1_function,
+ &gpt64_function,
+};
+
+static struct spear_pinctrl_machdata spear1310_machdata = {
+ .pins = spear1310_pins,
+ .npins = ARRAY_SIZE(spear1310_pins),
+ .groups = spear1310_pingroups,
+ .ngroups = ARRAY_SIZE(spear1310_pingroups),
+ .functions = spear1310_functions,
+ .nfunctions = ARRAY_SIZE(spear1310_functions),
+ .modes_supported = false,
+};
+
+static struct of_device_id spear1310_pinctrl_of_match[] __devinitdata = {
+ {
+ .compatible = "st,spear1310-pinmux",
+ },
+ {},
+};
+
+static int __devinit spear1310_pinctrl_probe(struct platform_device *pdev)
+{
+ return spear_pinctrl_probe(pdev, &spear1310_machdata);
+}
+
+static int __devexit spear1310_pinctrl_remove(struct platform_device *pdev)
+{
+ return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1310_pinctrl_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = spear1310_pinctrl_of_match,
+ },
+ .probe = spear1310_pinctrl_probe,
+ .remove = __devexit_p(spear1310_pinctrl_remove),
+};
+
+static int __init spear1310_pinctrl_init(void)
+{
+ return platform_driver_register(&spear1310_pinctrl_driver);
+}
+arch_initcall(spear1310_pinctrl_init);
+
+static void __exit spear1310_pinctrl_exit(void)
+{
+ platform_driver_unregister(&spear1310_pinctrl_driver);
+}
+module_exit(spear1310_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
new file mode 100644
index 000000000000..a0eb057e55bd
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -0,0 +1,1989 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1340 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1340-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1340_pins[] = {
+ SPEAR_PIN_0_TO_101,
+ SPEAR_PIN_102_TO_245,
+ PINCTRL_PIN(246, "PLGPIO246"),
+ PINCTRL_PIN(247, "PLGPIO247"),
+ PINCTRL_PIN(248, "PLGPIO248"),
+ PINCTRL_PIN(249, "PLGPIO249"),
+ PINCTRL_PIN(250, "PLGPIO250"),
+ PINCTRL_PIN(251, "PLGPIO251"),
+};
+
+/* In SPEAr1340 there are two levels of pad muxing */
+/* - pads as gpio OR peripherals */
+#define PAD_FUNCTION_EN_1 0x668
+#define PAD_FUNCTION_EN_2 0x66C
+#define PAD_FUNCTION_EN_3 0x670
+#define PAD_FUNCTION_EN_4 0x674
+#define PAD_FUNCTION_EN_5 0x690
+#define PAD_FUNCTION_EN_6 0x694
+#define PAD_FUNCTION_EN_7 0x698
+#define PAD_FUNCTION_EN_8 0x69C
+
+/* - If peripherals, then primary OR alternate peripheral */
+#define PAD_SHARED_IP_EN_1 0x6A0
+#define PAD_SHARED_IP_EN_2 0x6A4
+
+/*
+ * Macros for the first level of pmx - pads as gpio OR peripherals. There are
+ * 8 registers with 32 bits each for handling gpio pads; register 8 has only
+ * 26 relevant bits.
+ */
+/* macros for making pads as gpios */
+#define PADS_AS_GPIO_REG0_MASK 0xFFFFFFFE
+#define PADS_AS_GPIO_REGS_MASK 0xFFFFFFFF
+#define PADS_AS_GPIO_REG7_MASK 0x07FFFFFF
+
+/* macros for making pads as peripherals */
+#define FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK 0x00000FFE
+#define UART0_ENH_AND_GPT_REG0_MASK 0x0003F000
+#define PWM1_AND_KBD_COL5_REG0_MASK 0x00040000
+#define I2C1_REG0_MASK 0x01080000
+#define SPDIF_IN_REG0_MASK 0x00100000
+#define PWM2_AND_GPT0_TMR0_CPT_REG0_MASK 0x00400000
+#define PWM3_AND_GPT0_TMR1_CLK_REG0_MASK 0x00800000
+#define PWM0_AND_SSP0_CS1_REG0_MASK 0x02000000
+#define VIP_AND_CAM3_REG0_MASK 0xFC200000
+#define VIP_AND_CAM3_REG1_MASK 0x0000000F
+#define VIP_REG1_MASK 0x00001EF0
+#define VIP_AND_CAM2_REG1_MASK 0x007FE100
+#define VIP_AND_CAM1_REG1_MASK 0xFF800000
+#define VIP_AND_CAM1_REG2_MASK 0x00000003
+#define VIP_AND_CAM0_REG2_MASK 0x00001FFC
+#define SMI_REG2_MASK 0x0021E000
+#define SSP0_REG2_MASK 0x001E0000
+#define TS_AND_SSP0_CS2_REG2_MASK 0x00400000
+#define UART0_REG2_MASK 0x01800000
+#define UART1_REG2_MASK 0x06000000
+#define I2S_IN_REG2_MASK 0xF8000000
+#define DEVS_GRP_AND_MIPHY_DBG_REG3_MASK 0x000001FE
+#define I2S_OUT_REG3_MASK 0x000001EF
+#define I2S_IN_REG3_MASK 0x00000010
+#define GMAC_REG3_MASK 0xFFFFFE00
+#define GMAC_REG4_MASK 0x0000001F
+#define DEVS_GRP_AND_MIPHY_DBG_REG4_MASK 0x7FFFFF20
+#define SSP0_CS3_REG4_MASK 0x00000020
+#define I2C0_REG4_MASK 0x000000C0
+#define CEC0_REG4_MASK 0x00000100
+#define CEC1_REG4_MASK 0x00000200
+#define SPDIF_OUT_REG4_MASK 0x00000400
+#define CLCD_REG4_MASK 0x7FFFF800
+#define CLCD_AND_ARM_TRACE_REG4_MASK 0x80000000
+#define CLCD_AND_ARM_TRACE_REG5_MASK 0xFFFFFFFF
+#define CLCD_AND_ARM_TRACE_REG6_MASK 0x00000001
+#define FSMC_PNOR_AND_MCIF_REG6_MASK 0x073FFFFE
+#define MCIF_REG6_MASK 0xF8C00000
+#define MCIF_REG7_MASK 0x000043FF
+#define FSMC_8BIT_REG7_MASK 0x07FFBC00
+
+/* other registers */
+#define PERIP_CFG 0x42C
+ /* PERIP_CFG register masks */
+ #define SSP_CS_CTL_HW 0
+ #define SSP_CS_CTL_SW 1
+ #define SSP_CS_CTL_MASK 1
+ #define SSP_CS_CTL_SHIFT 21
+ #define SSP_CS_VAL_MASK 1
+ #define SSP_CS_VAL_SHIFT 20
+ #define SSP_CS_SEL_CS0 0
+ #define SSP_CS_SEL_CS1 1
+ #define SSP_CS_SEL_CS2 2
+ #define SSP_CS_SEL_MASK 3
+ #define SSP_CS_SEL_SHIFT 18
+
+ #define I2S_CHNL_2_0 (0)
+ #define I2S_CHNL_3_1 (1)
+ #define I2S_CHNL_5_1 (2)
+ #define I2S_CHNL_7_1 (3)
+ #define I2S_CHNL_PLAY_SHIFT (4)
+ #define I2S_CHNL_PLAY_MASK (3 << 4)
+ #define I2S_CHNL_REC_SHIFT (6)
+ #define I2S_CHNL_REC_MASK (3 << 6)
+
+ #define SPDIF_OUT_ENB_MASK (1 << 2)
+ #define SPDIF_OUT_ENB_SHIFT 2
+
+ #define MCIF_SEL_SD 1
+ #define MCIF_SEL_CF 2
+ #define MCIF_SEL_XD 3
+ #define MCIF_SEL_MASK 3
+ #define MCIF_SEL_SHIFT 0
+
+#define GMAC_CLK_CFG 0x248
+ #define GMAC_PHY_IF_GMII_VAL (0 << 3)
+ #define GMAC_PHY_IF_RGMII_VAL (1 << 3)
+ #define GMAC_PHY_IF_SGMII_VAL (2 << 3)
+ #define GMAC_PHY_IF_RMII_VAL (4 << 3)
+ #define GMAC_PHY_IF_SEL_MASK (7 << 3)
+ #define GMAC_PHY_INPUT_ENB_VAL 0
+ #define GMAC_PHY_SYNT_ENB_VAL 1
+ #define GMAC_PHY_CLK_MASK 1
+ #define GMAC_PHY_CLK_SHIFT 2
+ #define GMAC_PHY_125M_PAD_VAL 0
+ #define GMAC_PHY_PLL2_VAL 1
+ #define GMAC_PHY_OSC3_VAL 2
+ #define GMAC_PHY_INPUT_CLK_MASK 3
+ #define GMAC_PHY_INPUT_CLK_SHIFT 0
+
+#define PCIE_SATA_CFG 0x424
+ /* PCIE CFG masks */
+ #define PCIE_CFG_DEVICE_PRESENT (1 << 11)
+ #define PCIE_CFG_POWERUP_RESET (1 << 10)
+ #define PCIE_CFG_CORE_CLK_EN (1 << 9)
+ #define PCIE_CFG_AUX_CLK_EN (1 << 8)
+ #define SATA_CFG_TX_CLK_EN (1 << 4)
+ #define SATA_CFG_RX_CLK_EN (1 << 3)
+ #define SATA_CFG_POWERUP_RESET (1 << 2)
+ #define SATA_CFG_PM_CLK_EN (1 << 1)
+ #define PCIE_SATA_SEL_PCIE (0)
+ #define PCIE_SATA_SEL_SATA (1)
+ #define SATA_PCIE_CFG_MASK 0xF1F
+ #define PCIE_CFG_VAL (PCIE_SATA_SEL_PCIE | PCIE_CFG_AUX_CLK_EN | \
+ PCIE_CFG_CORE_CLK_EN | PCIE_CFG_POWERUP_RESET |\
+ PCIE_CFG_DEVICE_PRESENT)
+ #define SATA_CFG_VAL (PCIE_SATA_SEL_SATA | SATA_CFG_PM_CLK_EN | \
+ SATA_CFG_POWERUP_RESET | SATA_CFG_RX_CLK_EN | \
+ SATA_CFG_TX_CLK_EN)
+
+/* Macros for second level of pmx - pads as primary OR alternate peripheral */
+/* Write 0 to enable FSMC_16_BIT */
+#define KBD_ROW_COL_MASK (1 << 0)
+
+/* Write 0 to enable UART0_ENH */
+#define GPT_MASK (1 << 1) /* Only clk & cpt */
+
+/* Write 0 to enable PWM1 */
+#define KBD_COL5_MASK (1 << 2)
+
+/* Write 0 to enable PWM2 */
+#define GPT0_TMR0_CPT_MASK (1 << 3) /* Only clk & cpt */
+
+/* Write 0 to enable PWM3 */
+#define GPT0_TMR1_CLK_MASK (1 << 4) /* Only clk & cpt */
+
+/* Write 0 to enable PWM0 */
+#define SSP0_CS1_MASK (1 << 5)
+
+/* Write 0 to enable VIP */
+#define CAM3_MASK (1 << 6)
+
+/* Write 0 to enable VIP */
+#define CAM2_MASK (1 << 7)
+
+/* Write 0 to enable VIP */
+#define CAM1_MASK (1 << 8)
+
+/* Write 0 to enable VIP */
+#define CAM0_MASK (1 << 9)
+
+/* Write 0 to enable TS */
+#define SSP0_CS2_MASK (1 << 10)
+
+/* Write 0 to enable FSMC PNOR */
+#define MCIF_MASK (1 << 11)
+
+/* Write 0 to enable CLCD */
+#define ARM_TRACE_MASK (1 << 12)
+
+/* Write 0 to enable I2S, SSP0_CS2, CEC0, 1, SPDIF out, CLCD */
+#define MIPHY_DBG_MASK (1 << 13)
+
+/*
+ * Pad multiplexing for making all pads as gpios. This is done to override the
+ * values passed from the bootloader and start from scratch.
+ */
+static const unsigned pads_as_gpio_pins[] = { 251 };
+static struct spear_muxreg pads_as_gpio_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PADS_AS_GPIO_REG0_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_8,
+ .mask = PADS_AS_GPIO_REG7_MASK,
+ .val = 0x0,
+ },
+};
+
+static struct spear_modemux pads_as_gpio_modemux[] = {
+ {
+ .muxregs = pads_as_gpio_muxreg,
+ .nmuxregs = ARRAY_SIZE(pads_as_gpio_muxreg),
+ },
+};
+
+static struct spear_pingroup pads_as_gpio_pingroup = {
+ .name = "pads_as_gpio_grp",
+ .pins = pads_as_gpio_pins,
+ .npins = ARRAY_SIZE(pads_as_gpio_pins),
+ .modemuxs = pads_as_gpio_modemux,
+ .nmodemuxs = ARRAY_SIZE(pads_as_gpio_modemux),
+};
+
+static const char *const pads_as_gpio_grps[] = { "pads_as_gpio_grp" };
+static struct spear_function pads_as_gpio_function = {
+ .name = "pads_as_gpio",
+ .groups = pads_as_gpio_grps,
+ .ngroups = ARRAY_SIZE(pads_as_gpio_grps),
+};
+
+/* Pad multiplexing for fsmc_8bit device */
+static const unsigned fsmc_8bit_pins[] = { 233, 234, 235, 236, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249 };
+static struct spear_muxreg fsmc_8bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_8,
+ .mask = FSMC_8BIT_REG7_MASK,
+ .val = FSMC_8BIT_REG7_MASK,
+ }
+};
+
+static struct spear_modemux fsmc_8bit_modemux[] = {
+ {
+ .muxregs = fsmc_8bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_8bit_pingroup = {
+ .name = "fsmc_8bit_grp",
+ .pins = fsmc_8bit_pins,
+ .npins = ARRAY_SIZE(fsmc_8bit_pins),
+ .modemuxs = fsmc_8bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_8bit_modemux),
+};
+
+/* Pad multiplexing for fsmc_16bit device */
+static const unsigned fsmc_16bit_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+static struct spear_muxreg fsmc_16bit_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_ROW_COL_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ .val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ },
+};
+
+static struct spear_modemux fsmc_16bit_modemux[] = {
+ {
+ .muxregs = fsmc_16bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_16bit_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_16bit_pingroup = {
+ .name = "fsmc_16bit_grp",
+ .pins = fsmc_16bit_pins,
+ .npins = ARRAY_SIZE(fsmc_16bit_pins),
+ .modemuxs = fsmc_16bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_16bit_modemux),
+};
+
+/* pad multiplexing for fsmc_pnor device */
+static const unsigned fsmc_pnor_pins[] = { 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 215, 216, 217 };
+static struct spear_muxreg fsmc_pnor_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = MCIF_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = FSMC_PNOR_AND_MCIF_REG6_MASK,
+ .val = FSMC_PNOR_AND_MCIF_REG6_MASK,
+ },
+};
+
+static struct spear_modemux fsmc_pnor_modemux[] = {
+ {
+ .muxregs = fsmc_pnor_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_pnor_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_pnor_pingroup = {
+ .name = "fsmc_pnor_grp",
+ .pins = fsmc_pnor_pins,
+ .npins = ARRAY_SIZE(fsmc_pnor_pins),
+ .modemuxs = fsmc_pnor_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_pnor_modemux),
+};
+
+static const char *const fsmc_grps[] = { "fsmc_8bit_grp", "fsmc_16bit_grp",
+ "fsmc_pnor_grp" };
+static struct spear_function fsmc_function = {
+ .name = "fsmc",
+ .groups = fsmc_grps,
+ .ngroups = ARRAY_SIZE(fsmc_grps),
+};
+
+/* pad multiplexing for keyboard rows-cols device */
+static const unsigned keyboard_row_col_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10 };
+static struct spear_muxreg keyboard_row_col_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_ROW_COL_MASK,
+ .val = KBD_ROW_COL_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ .val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_row_col_modemux[] = {
+ {
+ .muxregs = keyboard_row_col_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_row_col_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_row_col_pingroup = {
+ .name = "keyboard_row_col_grp",
+ .pins = keyboard_row_col_pins,
+ .npins = ARRAY_SIZE(keyboard_row_col_pins),
+ .modemuxs = keyboard_row_col_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_row_col_modemux),
+};
+
+/* pad multiplexing for keyboard col5 device */
+static const unsigned keyboard_col5_pins[] = { 17 };
+static struct spear_muxreg keyboard_col5_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_COL5_MASK,
+ .val = KBD_COL5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM1_AND_KBD_COL5_REG0_MASK,
+ .val = PWM1_AND_KBD_COL5_REG0_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_col5_modemux[] = {
+ {
+ .muxregs = keyboard_col5_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_col5_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_col5_pingroup = {
+ .name = "keyboard_col5_grp",
+ .pins = keyboard_col5_pins,
+ .npins = ARRAY_SIZE(keyboard_col5_pins),
+ .modemuxs = keyboard_col5_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_col5_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_row_col_grp",
+ "keyboard_col5_grp" };
+static struct spear_function keyboard_function = {
+ .name = "keyboard",
+ .groups = keyboard_grps,
+ .ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* pad multiplexing for spdif_in device */
+static const unsigned spdif_in_pins[] = { 19 };
+static struct spear_muxreg spdif_in_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = SPDIF_IN_REG0_MASK,
+ .val = SPDIF_IN_REG0_MASK,
+ },
+};
+
+static struct spear_modemux spdif_in_modemux[] = {
+ {
+ .muxregs = spdif_in_muxreg,
+ .nmuxregs = ARRAY_SIZE(spdif_in_muxreg),
+ },
+};
+
+static struct spear_pingroup spdif_in_pingroup = {
+ .name = "spdif_in_grp",
+ .pins = spdif_in_pins,
+ .npins = ARRAY_SIZE(spdif_in_pins),
+ .modemuxs = spdif_in_modemux,
+ .nmodemuxs = ARRAY_SIZE(spdif_in_modemux),
+};
+
+static const char *const spdif_in_grps[] = { "spdif_in_grp" };
+static struct spear_function spdif_in_function = {
+ .name = "spdif_in",
+ .groups = spdif_in_grps,
+ .ngroups = ARRAY_SIZE(spdif_in_grps),
+};
+
+/* pad multiplexing for spdif_out device */
+static const unsigned spdif_out_pins[] = { 137 };
+static struct spear_muxreg spdif_out_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = SPDIF_OUT_REG4_MASK,
+ .val = SPDIF_OUT_REG4_MASK,
+ }, {
+ .reg = PERIP_CFG,
+ .mask = SPDIF_OUT_ENB_MASK,
+ .val = SPDIF_OUT_ENB_MASK,
+ }
+};
+
+static struct spear_modemux spdif_out_modemux[] = {
+ {
+ .muxregs = spdif_out_muxreg,
+ .nmuxregs = ARRAY_SIZE(spdif_out_muxreg),
+ },
+};
+
+static struct spear_pingroup spdif_out_pingroup = {
+ .name = "spdif_out_grp",
+ .pins = spdif_out_pins,
+ .npins = ARRAY_SIZE(spdif_out_pins),
+ .modemuxs = spdif_out_modemux,
+ .nmodemuxs = ARRAY_SIZE(spdif_out_modemux),
+};
+
+static const char *const spdif_out_grps[] = { "spdif_out_grp" };
+static struct spear_function spdif_out_function = {
+ .name = "spdif_out",
+ .groups = spdif_out_grps,
+ .ngroups = ARRAY_SIZE(spdif_out_grps),
+};
+
+/* pad multiplexing for gpt_0_1 device */
+static const unsigned gpt_0_1_pins[] = { 11, 12, 13, 14, 15, 16, 21, 22 };
+static struct spear_muxreg gpt_0_1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+ .val = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = UART0_ENH_AND_GPT_REG0_MASK |
+ PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+ PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ .val = UART0_ENH_AND_GPT_REG0_MASK |
+ PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+ PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ },
+};
+
+static struct spear_modemux gpt_0_1_modemux[] = {
+ {
+ .muxregs = gpt_0_1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt_0_1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt_0_1_pingroup = {
+ .name = "gpt_0_1_grp",
+ .pins = gpt_0_1_pins,
+ .npins = ARRAY_SIZE(gpt_0_1_pins),
+ .modemuxs = gpt_0_1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt_0_1_modemux),
+};
+
+static const char *const gpt_0_1_grps[] = { "gpt_0_1_grp" };
+static struct spear_function gpt_0_1_function = {
+ .name = "gpt_0_1",
+ .groups = gpt_0_1_grps,
+ .ngroups = ARRAY_SIZE(gpt_0_1_grps),
+};
+
+/* pad multiplexing for pwm0 device */
+static const unsigned pwm0_pins[] = { 24 };
+static struct spear_muxreg pwm0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+ .val = PWM0_AND_SSP0_CS1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm0_modemux[] = {
+ {
+ .muxregs = pwm0_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm0_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm0_pingroup = {
+ .name = "pwm0_grp",
+ .pins = pwm0_pins,
+ .npins = ARRAY_SIZE(pwm0_pins),
+ .modemuxs = pwm0_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm0_modemux),
+};
+
+/* pad multiplexing for pwm1 device */
+static const unsigned pwm1_pins[] = { 17 };
+static struct spear_muxreg pwm1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_COL5_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM1_AND_KBD_COL5_REG0_MASK,
+ .val = PWM1_AND_KBD_COL5_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm1_modemux[] = {
+ {
+ .muxregs = pwm1_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm1_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm1_pingroup = {
+ .name = "pwm1_grp",
+ .pins = pwm1_pins,
+ .npins = ARRAY_SIZE(pwm1_pins),
+ .modemuxs = pwm1_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm1_modemux),
+};
+
+/* pad multiplexing for pwm2 device */
+static const unsigned pwm2_pins[] = { 21 };
+static struct spear_muxreg pwm2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT0_TMR0_CPT_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+ .val = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm2_modemux[] = {
+ {
+ .muxregs = pwm2_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm2_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm2_pingroup = {
+ .name = "pwm2_grp",
+ .pins = pwm2_pins,
+ .npins = ARRAY_SIZE(pwm2_pins),
+ .modemuxs = pwm2_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm2_modemux),
+};
+
+/* pad multiplexing for pwm3 device */
+static const unsigned pwm3_pins[] = { 22 };
+static struct spear_muxreg pwm3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT0_TMR1_CLK_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ .val = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm3_modemux[] = {
+ {
+ .muxregs = pwm3_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm3_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm3_pingroup = {
+ .name = "pwm3_grp",
+ .pins = pwm3_pins,
+ .npins = ARRAY_SIZE(pwm3_pins),
+ .modemuxs = pwm3_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm3_modemux),
+};
+
+static const char *const pwm_grps[] = { "pwm0_grp", "pwm1_grp", "pwm2_grp",
+ "pwm3_grp" };
+static struct spear_function pwm_function = {
+ .name = "pwm",
+ .groups = pwm_grps,
+ .ngroups = ARRAY_SIZE(pwm_grps),
+};
+
+/* pad multiplexing for vip_mux device */
+static const unsigned vip_mux_pins[] = { 35, 36, 37, 38, 40, 41, 42, 43 };
+static struct spear_muxreg vip_mux_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_REG1_MASK,
+ .val = VIP_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_modemux[] = {
+ {
+ .muxregs = vip_mux_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_pingroup = {
+ .name = "vip_mux_grp",
+ .pins = vip_mux_pins,
+ .npins = ARRAY_SIZE(vip_mux_pins),
+ .modemuxs = vip_mux_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam0 (disables cam0) device */
+static const unsigned vip_mux_cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75 };
+static struct spear_muxreg vip_mux_cam0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM0_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM0_REG2_MASK,
+ .val = VIP_AND_CAM0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam0_modemux[] = {
+ {
+ .muxregs = vip_mux_cam0_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam0_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam0_pingroup = {
+ .name = "vip_mux_cam0_grp",
+ .pins = vip_mux_cam0_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam0_pins),
+ .modemuxs = vip_mux_cam0_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam0_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam1 (disables cam1) device */
+static const unsigned vip_mux_cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64 };
+static struct spear_muxreg vip_mux_cam1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM1_REG1_MASK,
+ .val = VIP_AND_CAM1_REG1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM1_REG2_MASK,
+ .val = VIP_AND_CAM1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam1_modemux[] = {
+ {
+ .muxregs = vip_mux_cam1_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam1_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam1_pingroup = {
+ .name = "vip_mux_cam1_grp",
+ .pins = vip_mux_cam1_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam1_pins),
+ .modemuxs = vip_mux_cam1_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam1_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam2 (disables cam2) device */
+static const unsigned vip_mux_cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53 };
+static struct spear_muxreg vip_mux_cam2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM2_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM2_REG1_MASK,
+ .val = VIP_AND_CAM2_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam2_modemux[] = {
+ {
+ .muxregs = vip_mux_cam2_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam2_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam2_pingroup = {
+ .name = "vip_mux_cam2_grp",
+ .pins = vip_mux_cam2_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam2_pins),
+ .modemuxs = vip_mux_cam2_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam2_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam3 (disables cam3) device */
+static const unsigned vip_mux_cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34 };
+static struct spear_muxreg vip_mux_cam3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM3_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = VIP_AND_CAM3_REG0_MASK,
+ .val = VIP_AND_CAM3_REG0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM3_REG1_MASK,
+ .val = VIP_AND_CAM3_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam3_modemux[] = {
+ {
+ .muxregs = vip_mux_cam3_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam3_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam3_pingroup = {
+ .name = "vip_mux_cam3_grp",
+ .pins = vip_mux_cam3_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam3_pins),
+ .modemuxs = vip_mux_cam3_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam3_modemux),
+};
+
+static const char *const vip_grps[] = { "vip_mux_grp", "vip_mux_cam0_grp",
+ "vip_mux_cam1_grp", "vip_mux_cam2_grp", "vip_mux_cam3_grp" };
+static struct spear_function vip_function = {
+ .name = "vip",
+ .groups = vip_grps,
+ .ngroups = ARRAY_SIZE(vip_grps),
+};
+
+/* pad multiplexing for cam0 device */
+static const unsigned cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75
+};
+static struct spear_muxreg cam0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM0_MASK,
+ .val = CAM0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM0_REG2_MASK,
+ .val = VIP_AND_CAM0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux cam0_modemux[] = {
+ {
+ .muxregs = cam0_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam0_muxreg),
+ },
+};
+
+static struct spear_pingroup cam0_pingroup = {
+ .name = "cam0_grp",
+ .pins = cam0_pins,
+ .npins = ARRAY_SIZE(cam0_pins),
+ .modemuxs = cam0_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam0_modemux),
+};
+
+static const char *const cam0_grps[] = { "cam0_grp" };
+static struct spear_function cam0_function = {
+ .name = "cam0",
+ .groups = cam0_grps,
+ .ngroups = ARRAY_SIZE(cam0_grps),
+};
+
+/* pad multiplexing for cam1 device */
+static const unsigned cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+static struct spear_muxreg cam1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM1_MASK,
+ .val = CAM1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM1_REG1_MASK,
+ .val = VIP_AND_CAM1_REG1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM1_REG2_MASK,
+ .val = VIP_AND_CAM1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux cam1_modemux[] = {
+ {
+ .muxregs = cam1_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam1_muxreg),
+ },
+};
+
+static struct spear_pingroup cam1_pingroup = {
+ .name = "cam1_grp",
+ .pins = cam1_pins,
+ .npins = ARRAY_SIZE(cam1_pins),
+ .modemuxs = cam1_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam1_modemux),
+};
+
+static const char *const cam1_grps[] = { "cam1_grp" };
+static struct spear_function cam1_function = {
+ .name = "cam1",
+ .groups = cam1_grps,
+ .ngroups = ARRAY_SIZE(cam1_grps),
+};
+
+/* pad multiplexing for cam2 device */
+static const unsigned cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53
+};
+static struct spear_muxreg cam2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM2_MASK,
+ .val = CAM2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM2_REG1_MASK,
+ .val = VIP_AND_CAM2_REG1_MASK,
+ },
+};
+
+static struct spear_modemux cam2_modemux[] = {
+ {
+ .muxregs = cam2_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam2_muxreg),
+ },
+};
+
+static struct spear_pingroup cam2_pingroup = {
+ .name = "cam2_grp",
+ .pins = cam2_pins,
+ .npins = ARRAY_SIZE(cam2_pins),
+ .modemuxs = cam2_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam2_modemux),
+};
+
+static const char *const cam2_grps[] = { "cam2_grp" };
+static struct spear_function cam2_function = {
+ .name = "cam2",
+ .groups = cam2_grps,
+ .ngroups = ARRAY_SIZE(cam2_grps),
+};
+
+/* pad multiplexing for cam3 device */
+static const unsigned cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+};
+static struct spear_muxreg cam3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM3_MASK,
+ .val = CAM3_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = VIP_AND_CAM3_REG0_MASK,
+ .val = VIP_AND_CAM3_REG0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM3_REG1_MASK,
+ .val = VIP_AND_CAM3_REG1_MASK,
+ },
+};
+
+static struct spear_modemux cam3_modemux[] = {
+ {
+ .muxregs = cam3_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam3_muxreg),
+ },
+};
+
+static struct spear_pingroup cam3_pingroup = {
+ .name = "cam3_grp",
+ .pins = cam3_pins,
+ .npins = ARRAY_SIZE(cam3_pins),
+ .modemuxs = cam3_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam3_modemux),
+};
+
+static const char *const cam3_grps[] = { "cam3_grp" };
+static struct spear_function cam3_function = {
+ .name = "cam3",
+ .groups = cam3_grps,
+ .ngroups = ARRAY_SIZE(cam3_grps),
+};
+
+/* pad multiplexing for smi device */
+static const unsigned smi_pins[] = { 76, 77, 78, 79, 84 };
+static struct spear_muxreg smi_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = SMI_REG2_MASK,
+ .val = SMI_REG2_MASK,
+ },
+};
+
+static struct spear_modemux smi_modemux[] = {
+ {
+ .muxregs = smi_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_pingroup = {
+ .name = "smi_grp",
+ .pins = smi_pins,
+ .npins = ARRAY_SIZE(smi_pins),
+ .modemuxs = smi_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_grp" };
+static struct spear_function smi_function = {
+ .name = "smi",
+ .groups = smi_grps,
+ .ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 80, 81, 82, 83 };
+static struct spear_muxreg ssp0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = SSP0_REG2_MASK,
+ .val = SSP0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+ {
+ .muxregs = ssp0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+ .name = "ssp0_grp",
+ .pins = ssp0_pins,
+ .npins = ARRAY_SIZE(ssp0_pins),
+ .modemuxs = ssp0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* pad multiplexing for ssp0_cs1 device */
+static const unsigned ssp0_cs1_pins[] = { 24 };
+static struct spear_muxreg ssp0_cs1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS1_MASK,
+ .val = SSP0_CS1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+ .val = PWM0_AND_SSP0_CS1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs1_modemux[] = {
+ {
+ .muxregs = ssp0_cs1_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs1_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs1_pingroup = {
+ .name = "ssp0_cs1_grp",
+ .pins = ssp0_cs1_pins,
+ .npins = ARRAY_SIZE(ssp0_cs1_pins),
+ .modemuxs = ssp0_cs1_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs1_modemux),
+};
+
+/* pad multiplexing for ssp0_cs2 device */
+static const unsigned ssp0_cs2_pins[] = { 85 };
+static struct spear_muxreg ssp0_cs2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS2_MASK,
+ .val = SSP0_CS2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = TS_AND_SSP0_CS2_REG2_MASK,
+ .val = TS_AND_SSP0_CS2_REG2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs2_modemux[] = {
+ {
+ .muxregs = ssp0_cs2_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs2_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs2_pingroup = {
+ .name = "ssp0_cs2_grp",
+ .pins = ssp0_cs2_pins,
+ .npins = ARRAY_SIZE(ssp0_cs2_pins),
+ .modemuxs = ssp0_cs2_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs2_modemux),
+};
+
+/* pad multiplexing for ssp0_cs3 device */
+static const unsigned ssp0_cs3_pins[] = { 132 };
+static struct spear_muxreg ssp0_cs3_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = SSP0_CS3_REG4_MASK,
+ .val = SSP0_CS3_REG4_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs3_modemux[] = {
+ {
+ .muxregs = ssp0_cs3_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs3_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs3_pingroup = {
+ .name = "ssp0_cs3_grp",
+ .pins = ssp0_cs3_pins,
+ .npins = ARRAY_SIZE(ssp0_cs3_pins),
+ .modemuxs = ssp0_cs3_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs3_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs1_grp",
+ "ssp0_cs2_grp", "ssp0_cs3_grp" };
+static struct spear_function ssp0_function = {
+ .name = "ssp0",
+ .groups = ssp0_grps,
+ .ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 86, 87 };
+static struct spear_muxreg uart0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = UART0_REG2_MASK,
+ .val = UART0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modemux[] = {
+ {
+ .muxregs = uart0_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_pingroup = {
+ .name = "uart0_grp",
+ .pins = uart0_pins,
+ .npins = ARRAY_SIZE(uart0_pins),
+ .modemuxs = uart0_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* pad multiplexing for uart0_enh device */
+static const unsigned uart0_enh_pins[] = { 11, 12, 13, 14, 15, 16 };
+static struct spear_muxreg uart0_enh_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = UART0_ENH_AND_GPT_REG0_MASK,
+ .val = UART0_ENH_AND_GPT_REG0_MASK,
+ },
+};
+
+static struct spear_modemux uart0_enh_modemux[] = {
+ {
+ .muxregs = uart0_enh_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_enh_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_enh_pingroup = {
+ .name = "uart0_enh_grp",
+ .pins = uart0_enh_pins,
+ .npins = ARRAY_SIZE(uart0_enh_pins),
+ .modemuxs = uart0_enh_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_enh_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_enh_grp" };
+static struct spear_function uart0_function = {
+ .name = "uart0",
+ .groups = uart0_grps,
+ .ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* pad multiplexing for uart1 device */
+static const unsigned uart1_pins[] = { 88, 89 };
+static struct spear_muxreg uart1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = UART1_REG2_MASK,
+ .val = UART1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux uart1_modemux[] = {
+ {
+ .muxregs = uart1_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_muxreg),
+ },
+};
+
+static struct spear_pingroup uart1_pingroup = {
+ .name = "uart1_grp",
+ .pins = uart1_pins,
+ .npins = ARRAY_SIZE(uart1_pins),
+ .modemuxs = uart1_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_grp" };
+static struct spear_function uart1_function = {
+ .name = "uart1",
+ .groups = uart1_grps,
+ .ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* pad multiplexing for i2s_in device */
+static const unsigned i2s_in_pins[] = { 90, 91, 92, 93, 94, 99 };
+static struct spear_muxreg i2s_in_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = I2S_IN_REG2_MASK,
+ .val = I2S_IN_REG2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = I2S_IN_REG3_MASK,
+ .val = I2S_IN_REG3_MASK,
+ },
+};
+
+static struct spear_modemux i2s_in_modemux[] = {
+ {
+ .muxregs = i2s_in_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s_in_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s_in_pingroup = {
+ .name = "i2s_in_grp",
+ .pins = i2s_in_pins,
+ .npins = ARRAY_SIZE(i2s_in_pins),
+ .modemuxs = i2s_in_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s_in_modemux),
+};
+
+/* pad multiplexing for i2s_out device */
+static const unsigned i2s_out_pins[] = { 95, 96, 97, 98, 100, 101, 102, 103 };
+static struct spear_muxreg i2s_out_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = I2S_OUT_REG3_MASK,
+ .val = I2S_OUT_REG3_MASK,
+ },
+};
+
+static struct spear_modemux i2s_out_modemux[] = {
+ {
+ .muxregs = i2s_out_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s_out_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s_out_pingroup = {
+ .name = "i2s_out_grp",
+ .pins = i2s_out_pins,
+ .npins = ARRAY_SIZE(i2s_out_pins),
+ .modemuxs = i2s_out_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s_out_modemux),
+};
+
+static const char *const i2s_grps[] = { "i2s_in_grp", "i2s_out_grp" };
+static struct spear_function i2s_function = {
+ .name = "i2s",
+ .groups = i2s_grps,
+ .ngroups = ARRAY_SIZE(i2s_grps),
+};
+
+/* pad multiplexing for gmac device */
+static const unsigned gmac_pins[] = { 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131 };
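+/*
+ * Common pad function entries shared by the gmii/rgmii/rmii/sgmii groups
+ * below. Each group appends a GMAC_CLK_CFG entry to select the PHY
+ * interface type.
+ */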
+#define GMAC_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_4, \
+ .mask = GMAC_REG3_MASK, \
+ .val = GMAC_REG3_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_5, \
+ .mask = GMAC_REG4_MASK, \
+ .val = GMAC_REG4_MASK, \
+ }
+
+/* pad multiplexing for gmii device */
+static struct spear_muxreg gmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_GMII_VAL,
+ },
+};
+
+static struct spear_modemux gmii_modemux[] = {
+ {
+ .muxregs = gmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(gmii_muxreg),
+ },
+};
+
+static struct spear_pingroup gmii_pingroup = {
+ .name = "gmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = gmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+/* pad multiplexing for rgmii device */
+static struct spear_muxreg rgmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_RGMII_VAL,
+ },
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+ {
+ .muxregs = rgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+ .name = "rgmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = rgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+/* pad multiplexing for rmii device */
+static struct spear_muxreg rmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_RMII_VAL,
+ },
+};
+
+static struct spear_modemux rmii_modemux[] = {
+ {
+ .muxregs = rmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rmii_pingroup = {
+ .name = "rmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = rmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rmii_modemux),
+};
+
+/* pad multiplexing for sgmii device */
+static struct spear_muxreg sgmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_SGMII_VAL,
+ },
+};
+
+static struct spear_modemux sgmii_modemux[] = {
+ {
+ .muxregs = sgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(sgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup sgmii_pingroup = {
+ .name = "sgmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = sgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(sgmii_modemux),
+};
+
+static const char *const gmac_grps[] = { "gmii_grp", "rgmii_grp", "rmii_grp",
+ "sgmii_grp" };
+static struct spear_function gmac_function = {
+ .name = "gmac",
+ .groups = gmac_grps,
+ .ngroups = ARRAY_SIZE(gmac_grps),
+};
+
+/* pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 133, 134 };
+static struct spear_muxreg i2c0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = I2C0_REG4_MASK,
+ .val = I2C0_REG4_MASK,
+ },
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+ {
+ .muxregs = i2c0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .npins = ARRAY_SIZE(i2c0_pins),
+ .modemuxs = i2c0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+ .name = "i2c0",
+ .groups = i2c0_grps,
+ .ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* pad multiplexing for i2c1 device */
+static const unsigned i2c1_pins[] = { 18, 23 };
+static struct spear_muxreg i2c1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = I2C1_REG0_MASK,
+ .val = I2C1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux i2c1_modemux[] = {
+ {
+ .muxregs = i2c1_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c1_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c1_pingroup = {
+ .name = "i2c1_grp",
+ .pins = i2c1_pins,
+ .npins = ARRAY_SIZE(i2c1_pins),
+ .modemuxs = i2c1_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c1_modemux),
+};
+
+static const char *const i2c1_grps[] = { "i2c1_grp" };
+static struct spear_function i2c1_function = {
+ .name = "i2c1",
+ .groups = i2c1_grps,
+ .ngroups = ARRAY_SIZE(i2c1_grps),
+};
+
+/* pad multiplexing for cec0 device */
+static const unsigned cec0_pins[] = { 135 };
+static struct spear_muxreg cec0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CEC0_REG4_MASK,
+ .val = CEC0_REG4_MASK,
+ },
+};
+
+static struct spear_modemux cec0_modemux[] = {
+ {
+ .muxregs = cec0_muxreg,
+ .nmuxregs = ARRAY_SIZE(cec0_muxreg),
+ },
+};
+
+static struct spear_pingroup cec0_pingroup = {
+ .name = "cec0_grp",
+ .pins = cec0_pins,
+ .npins = ARRAY_SIZE(cec0_pins),
+ .modemuxs = cec0_modemux,
+ .nmodemuxs = ARRAY_SIZE(cec0_modemux),
+};
+
+static const char *const cec0_grps[] = { "cec0_grp" };
+static struct spear_function cec0_function = {
+ .name = "cec0",
+ .groups = cec0_grps,
+ .ngroups = ARRAY_SIZE(cec0_grps),
+};
+
+/* pad multiplexing for cec1 device */
+static const unsigned cec1_pins[] = { 136 };
+static struct spear_muxreg cec1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CEC1_REG4_MASK,
+ .val = CEC1_REG4_MASK,
+ },
+};
+
+static struct spear_modemux cec1_modemux[] = {
+ {
+ .muxregs = cec1_muxreg,
+ .nmuxregs = ARRAY_SIZE(cec1_muxreg),
+ },
+};
+
+static struct spear_pingroup cec1_pingroup = {
+ .name = "cec1_grp",
+ .pins = cec1_pins,
+ .npins = ARRAY_SIZE(cec1_pins),
+ .modemuxs = cec1_modemux,
+ .nmodemuxs = ARRAY_SIZE(cec1_modemux),
+};
+
+static const char *const cec1_grps[] = { "cec1_grp" };
+static struct spear_function cec1_function = {
+ .name = "cec1",
+ .groups = cec1_grps,
+ .ngroups = ARRAY_SIZE(cec1_grps),
+};
+
+/* pad multiplexing for mcif devices */
+static const unsigned mcif_pins[] = { 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 237 };
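+/*
+ * Common pad function entries shared by the sdhci, cf and xd groups below.
+ * Each group appends a PERIP_CFG entry to select SD, CF or XD operation on
+ * the shared MCIF pads.
+ */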
+#define MCIF_MUXREG \
+ { \
+ .reg = PAD_SHARED_IP_EN_1, \
+ .mask = MCIF_MASK, \
+ .val = MCIF_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_7, \
+ .mask = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \
+ .val = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_8, \
+ .mask = MCIF_REG7_MASK, \
+ .val = MCIF_REG7_MASK, \
+ }
+
+/* Pad multiplexing for sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_SD,
+ },
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+ {
+ .muxregs = sdhci_muxreg,
+ .nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+ },
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+ .name = "sdhci_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = sdhci_modemux,
+ .nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+ .name = "sdhci",
+ .groups = sdhci_grps,
+ .ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* Pad multiplexing for cf device */
+static struct spear_muxreg cf_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_CF,
+ },
+};
+
+static struct spear_modemux cf_modemux[] = {
+ {
+ .muxregs = cf_muxreg,
+ .nmuxregs = ARRAY_SIZE(cf_muxreg),
+ },
+};
+
+static struct spear_pingroup cf_pingroup = {
+ .name = "cf_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = cf_modemux,
+ .nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+ .name = "cf",
+ .groups = cf_grps,
+ .ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* Pad multiplexing for xd device */
+static struct spear_muxreg xd_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_XD,
+ },
+};
+
+static struct spear_modemux xd_modemux[] = {
+ {
+ .muxregs = xd_muxreg,
+ .nmuxregs = ARRAY_SIZE(xd_muxreg),
+ },
+};
+
+static struct spear_pingroup xd_pingroup = {
+ .name = "xd_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = xd_modemux,
+ .nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+ .name = "xd",
+ .groups = xd_grps,
+ .ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191 };
+static struct spear_muxreg clcd_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+ .val = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG6_MASK,
+ },
+};
+
+static struct spear_modemux clcd_modemux[] = {
+ {
+ .muxregs = clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_pingroup = {
+ .name = "clcd_grp",
+ .pins = clcd_pins,
+ .npins = ARRAY_SIZE(clcd_pins),
+ .modemuxs = clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp" };
+static struct spear_function clcd_function = {
+ .name = "clcd",
+ .groups = clcd_grps,
+ .ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+/* pad multiplexing for arm_trace device */
+static const unsigned arm_trace_pins[] = { 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg arm_trace_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = ARM_TRACE_MASK,
+ .val = ARM_TRACE_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CLCD_AND_ARM_TRACE_REG4_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG4_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG6_MASK,
+ },
+};
+
+static struct spear_modemux arm_trace_modemux[] = {
+ {
+ .muxregs = arm_trace_muxreg,
+ .nmuxregs = ARRAY_SIZE(arm_trace_muxreg),
+ },
+};
+
+static struct spear_pingroup arm_trace_pingroup = {
+ .name = "arm_trace_grp",
+ .pins = arm_trace_pins,
+ .npins = ARRAY_SIZE(arm_trace_pins),
+ .modemuxs = arm_trace_modemux,
+ .nmodemuxs = ARRAY_SIZE(arm_trace_modemux),
+};
+
+static const char *const arm_trace_grps[] = { "arm_trace_grp" };
+static struct spear_function arm_trace_function = {
+ .name = "arm_trace",
+ .groups = arm_trace_grps,
+ .ngroups = ARRAY_SIZE(arm_trace_grps),
+};
+
+/* pad multiplexing for miphy_dbg device */
+static const unsigned miphy_dbg_pins[] = { 96, 97, 98, 99, 100, 101, 102, 103,
+ 132, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157 };
+static struct spear_muxreg miphy_dbg_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = MIPHY_DBG_MASK,
+ .val = MIPHY_DBG_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+ .val = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+ },
+};
+
+static struct spear_modemux miphy_dbg_modemux[] = {
+ {
+ .muxregs = miphy_dbg_muxreg,
+ .nmuxregs = ARRAY_SIZE(miphy_dbg_muxreg),
+ },
+};
+
+static struct spear_pingroup miphy_dbg_pingroup = {
+ .name = "miphy_dbg_grp",
+ .pins = miphy_dbg_pins,
+ .npins = ARRAY_SIZE(miphy_dbg_pins),
+ .modemuxs = miphy_dbg_modemux,
+ .nmodemuxs = ARRAY_SIZE(miphy_dbg_modemux),
+};
+
+static const char *const miphy_dbg_grps[] = { "miphy_dbg_grp" };
+static struct spear_function miphy_dbg_function = {
+ .name = "miphy_dbg",
+ .groups = miphy_dbg_grps,
+ .ngroups = ARRAY_SIZE(miphy_dbg_grps),
+};
+
+/* pad multiplexing for pcie device */
+static const unsigned pcie_pins[] = { 250 };
+static struct spear_muxreg pcie_muxreg[] = {
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_PCIE_CFG_MASK,
+ .val = PCIE_CFG_VAL,
+ },
+};
+
+static struct spear_modemux pcie_modemux[] = {
+ {
+ .muxregs = pcie_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie_pingroup = {
+ .name = "pcie_grp",
+ .pins = pcie_pins,
+ .npins = ARRAY_SIZE(pcie_pins),
+ .modemuxs = pcie_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie_modemux),
+};
+
+static const char *const pcie_grps[] = { "pcie_grp" };
+static struct spear_function pcie_function = {
+ .name = "pcie",
+ .groups = pcie_grps,
+ .ngroups = ARRAY_SIZE(pcie_grps),
+};
+
+/* pad multiplexing for sata device */
+static const unsigned sata_pins[] = { 250 };
+static struct spear_muxreg sata_muxreg[] = {
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_PCIE_CFG_MASK,
+ .val = SATA_CFG_VAL,
+ },
+};
+
+static struct spear_modemux sata_modemux[] = {
+ {
+ .muxregs = sata_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata_muxreg),
+ },
+};
+
+static struct spear_pingroup sata_pingroup = {
+ .name = "sata_grp",
+ .pins = sata_pins,
+ .npins = ARRAY_SIZE(sata_pins),
+ .modemuxs = sata_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata_modemux),
+};
+
+static const char *const sata_grps[] = { "sata_grp" };
+static struct spear_function sata_function = {
+ .name = "sata",
+ .groups = sata_grps,
+ .ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1340_pingroups[] = {
+ &pads_as_gpio_pingroup,
+ &fsmc_8bit_pingroup,
+ &fsmc_16bit_pingroup,
+ &fsmc_pnor_pingroup,
+ &keyboard_row_col_pingroup,
+ &keyboard_col5_pingroup,
+ &spdif_in_pingroup,
+ &spdif_out_pingroup,
+ &gpt_0_1_pingroup,
+ &pwm0_pingroup,
+ &pwm1_pingroup,
+ &pwm2_pingroup,
+ &pwm3_pingroup,
+ &vip_mux_pingroup,
+ &vip_mux_cam0_pingroup,
+ &vip_mux_cam1_pingroup,
+ &vip_mux_cam2_pingroup,
+ &vip_mux_cam3_pingroup,
+ &cam0_pingroup,
+ &cam1_pingroup,
+ &cam2_pingroup,
+ &cam3_pingroup,
+ &smi_pingroup,
+ &ssp0_pingroup,
+ &ssp0_cs1_pingroup,
+ &ssp0_cs2_pingroup,
+ &ssp0_cs3_pingroup,
+ &uart0_pingroup,
+ &uart0_enh_pingroup,
+ &uart1_pingroup,
+ &i2s_in_pingroup,
+ &i2s_out_pingroup,
+ &gmii_pingroup,
+ &rgmii_pingroup,
+ &rmii_pingroup,
+ &sgmii_pingroup,
+ &i2c0_pingroup,
+ &i2c1_pingroup,
+ &cec0_pingroup,
+ &cec1_pingroup,
+ &sdhci_pingroup,
+ &cf_pingroup,
+ &xd_pingroup,
+ &clcd_pingroup,
+ &arm_trace_pingroup,
+ &miphy_dbg_pingroup,
+ &pcie_pingroup,
+ &sata_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1340_functions[] = {
+ &pads_as_gpio_function,
+ &fsmc_function,
+ &keyboard_function,
+ &spdif_in_function,
+ &spdif_out_function,
+ &gpt_0_1_function,
+ &pwm_function,
+ &vip_function,
+ &cam0_function,
+ &cam1_function,
+ &cam2_function,
+ &cam3_function,
+ &smi_function,
+ &ssp0_function,
+ &uart0_function,
+ &uart1_function,
+ &i2s_function,
+ &gmac_function,
+ &i2c0_function,
+ &i2c1_function,
+ &cec0_function,
+ &cec1_function,
+ &sdhci_function,
+ &cf_function,
+ &xd_function,
+ &clcd_function,
+ &arm_trace_function,
+ &miphy_dbg_function,
+ &pcie_function,
+ &sata_function,
+};
+
+static struct spear_pinctrl_machdata spear1340_machdata = {
+ .pins = spear1340_pins,
+ .npins = ARRAY_SIZE(spear1340_pins),
+ .groups = spear1340_pingroups,
+ .ngroups = ARRAY_SIZE(spear1340_pingroups),
+ .functions = spear1340_functions,
+ .nfunctions = ARRAY_SIZE(spear1340_functions),
+ .modes_supported = false,
+};
+
+static struct of_device_id spear1340_pinctrl_of_match[] __devinitdata = {
+ {
+ .compatible = "st,spear1340-pinmux",
+ },
+ {},
+};
+
+static int __devinit spear1340_pinctrl_probe(struct platform_device *pdev)
+{
+ return spear_pinctrl_probe(pdev, &spear1340_machdata);
+}
+
+static int __devexit spear1340_pinctrl_remove(struct platform_device *pdev)
+{
+ return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1340_pinctrl_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = spear1340_pinctrl_of_match,
+ },
+ .probe = spear1340_pinctrl_probe,
+ .remove = __devexit_p(spear1340_pinctrl_remove),
+};
+
+static int __init spear1340_pinctrl_init(void)
+{
+ return platform_driver_register(&spear1340_pinctrl_driver);
+}
+arch_initcall(spear1340_pinctrl_init);
+
+static void __exit spear1340_pinctrl_exit(void)
+{
+ platform_driver_unregister(&spear1340_pinctrl_driver);
+}
+module_exit(spear1340_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
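
The sdhci, cf, xd, clcd, arm_trace, miphy_dbg, pcie and sata tables above all follow the same four-level pattern: a spear_muxreg array lists the register writes, a spear_modemux wraps those writes, a spear_pingroup binds them to a set of pads, and a spear_function exposes the group to the pinctrl core. A minimal sketch of that pattern for a hypothetical "foo" peripheral (FOO_CFG, FOO_SEL_MASK, FOO_SEL_VAL and the pad numbers are illustrative, not taken from this patch):

	/* Hypothetical example, not part of the patch. */
	static const unsigned foo_pins[] = { 10, 11, 12, 13 };

	static struct spear_muxreg foo_muxreg[] = {
		{
			.reg = FOO_CFG,		/* hypothetical mux register */
			.mask = FOO_SEL_MASK,	/* bits steering these pads */
			.val = FOO_SEL_VAL,	/* value routing them to "foo" */
		},
	};

	static struct spear_modemux foo_modemux[] = {
		{
			.muxregs = foo_muxreg,
			.nmuxregs = ARRAY_SIZE(foo_muxreg),
		},
	};

	static struct spear_pingroup foo_pingroup = {
		.name = "foo_grp",
		.pins = foo_pins,
		.npins = ARRAY_SIZE(foo_pins),
		.modemuxs = foo_modemux,
		.nmodemuxs = ARRAY_SIZE(foo_modemux),
	};

	static const char *const foo_grps[] = { "foo_grp" };
	static struct spear_function foo_function = {
		.name = "foo",
		.groups = foo_grps,
		.ngroups = ARRAY_SIZE(foo_grps),
	};

The new pingroup and function would then be appended to spear1340_pingroups[] and spear1340_functions[] so that spear_pinctrl_probe() registers them with the pinctrl core.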
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 9c82a35e4e78..4dfc2849b172 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr300 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void)
}
module_exit(spear300_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 1a9707605125..96883693fb7e 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void)
}
module_exit(spear310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index de726e6c283a..020b1e0bdb3e 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr320 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void)
}
module_exit(spear320_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 832049a8b1c9..0242378f7cb8 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -15,108 +15,7 @@
/* pins */
static const struct pinctrl_pin_desc spear3xx_pins[] = {
- PINCTRL_PIN(0, "PLGPIO0"),
- PINCTRL_PIN(1, "PLGPIO1"),
- PINCTRL_PIN(2, "PLGPIO2"),
- PINCTRL_PIN(3, "PLGPIO3"),
- PINCTRL_PIN(4, "PLGPIO4"),
- PINCTRL_PIN(5, "PLGPIO5"),
- PINCTRL_PIN(6, "PLGPIO6"),
- PINCTRL_PIN(7, "PLGPIO7"),
- PINCTRL_PIN(8, "PLGPIO8"),
- PINCTRL_PIN(9, "PLGPIO9"),
- PINCTRL_PIN(10, "PLGPIO10"),
- PINCTRL_PIN(11, "PLGPIO11"),
- PINCTRL_PIN(12, "PLGPIO12"),
- PINCTRL_PIN(13, "PLGPIO13"),
- PINCTRL_PIN(14, "PLGPIO14"),
- PINCTRL_PIN(15, "PLGPIO15"),
- PINCTRL_PIN(16, "PLGPIO16"),
- PINCTRL_PIN(17, "PLGPIO17"),
- PINCTRL_PIN(18, "PLGPIO18"),
- PINCTRL_PIN(19, "PLGPIO19"),
- PINCTRL_PIN(20, "PLGPIO20"),
- PINCTRL_PIN(21, "PLGPIO21"),
- PINCTRL_PIN(22, "PLGPIO22"),
- PINCTRL_PIN(23, "PLGPIO23"),
- PINCTRL_PIN(24, "PLGPIO24"),
- PINCTRL_PIN(25, "PLGPIO25"),
- PINCTRL_PIN(26, "PLGPIO26"),
- PINCTRL_PIN(27, "PLGPIO27"),
- PINCTRL_PIN(28, "PLGPIO28"),
- PINCTRL_PIN(29, "PLGPIO29"),
- PINCTRL_PIN(30, "PLGPIO30"),
- PINCTRL_PIN(31, "PLGPIO31"),
- PINCTRL_PIN(32, "PLGPIO32"),
- PINCTRL_PIN(33, "PLGPIO33"),
- PINCTRL_PIN(34, "PLGPIO34"),
- PINCTRL_PIN(35, "PLGPIO35"),
- PINCTRL_PIN(36, "PLGPIO36"),
- PINCTRL_PIN(37, "PLGPIO37"),
- PINCTRL_PIN(38, "PLGPIO38"),
- PINCTRL_PIN(39, "PLGPIO39"),
- PINCTRL_PIN(40, "PLGPIO40"),
- PINCTRL_PIN(41, "PLGPIO41"),
- PINCTRL_PIN(42, "PLGPIO42"),
- PINCTRL_PIN(43, "PLGPIO43"),
- PINCTRL_PIN(44, "PLGPIO44"),
- PINCTRL_PIN(45, "PLGPIO45"),
- PINCTRL_PIN(46, "PLGPIO46"),
- PINCTRL_PIN(47, "PLGPIO47"),
- PINCTRL_PIN(48, "PLGPIO48"),
- PINCTRL_PIN(49, "PLGPIO49"),
- PINCTRL_PIN(50, "PLGPIO50"),
- PINCTRL_PIN(51, "PLGPIO51"),
- PINCTRL_PIN(52, "PLGPIO52"),
- PINCTRL_PIN(53, "PLGPIO53"),
- PINCTRL_PIN(54, "PLGPIO54"),
- PINCTRL_PIN(55, "PLGPIO55"),
- PINCTRL_PIN(56, "PLGPIO56"),
- PINCTRL_PIN(57, "PLGPIO57"),
- PINCTRL_PIN(58, "PLGPIO58"),
- PINCTRL_PIN(59, "PLGPIO59"),
- PINCTRL_PIN(60, "PLGPIO60"),
- PINCTRL_PIN(61, "PLGPIO61"),
- PINCTRL_PIN(62, "PLGPIO62"),
- PINCTRL_PIN(63, "PLGPIO63"),
- PINCTRL_PIN(64, "PLGPIO64"),
- PINCTRL_PIN(65, "PLGPIO65"),
- PINCTRL_PIN(66, "PLGPIO66"),
- PINCTRL_PIN(67, "PLGPIO67"),
- PINCTRL_PIN(68, "PLGPIO68"),
- PINCTRL_PIN(69, "PLGPIO69"),
- PINCTRL_PIN(70, "PLGPIO70"),
- PINCTRL_PIN(71, "PLGPIO71"),
- PINCTRL_PIN(72, "PLGPIO72"),
- PINCTRL_PIN(73, "PLGPIO73"),
- PINCTRL_PIN(74, "PLGPIO74"),
- PINCTRL_PIN(75, "PLGPIO75"),
- PINCTRL_PIN(76, "PLGPIO76"),
- PINCTRL_PIN(77, "PLGPIO77"),
- PINCTRL_PIN(78, "PLGPIO78"),
- PINCTRL_PIN(79, "PLGPIO79"),
- PINCTRL_PIN(80, "PLGPIO80"),
- PINCTRL_PIN(81, "PLGPIO81"),
- PINCTRL_PIN(82, "PLGPIO82"),
- PINCTRL_PIN(83, "PLGPIO83"),
- PINCTRL_PIN(84, "PLGPIO84"),
- PINCTRL_PIN(85, "PLGPIO85"),
- PINCTRL_PIN(86, "PLGPIO86"),
- PINCTRL_PIN(87, "PLGPIO87"),
- PINCTRL_PIN(88, "PLGPIO88"),
- PINCTRL_PIN(89, "PLGPIO89"),
- PINCTRL_PIN(90, "PLGPIO90"),
- PINCTRL_PIN(91, "PLGPIO91"),
- PINCTRL_PIN(92, "PLGPIO92"),
- PINCTRL_PIN(93, "PLGPIO93"),
- PINCTRL_PIN(94, "PLGPIO94"),
- PINCTRL_PIN(95, "PLGPIO95"),
- PINCTRL_PIN(96, "PLGPIO96"),
- PINCTRL_PIN(97, "PLGPIO97"),
- PINCTRL_PIN(98, "PLGPIO98"),
- PINCTRL_PIN(99, "PLGPIO99"),
- PINCTRL_PIN(100, "PLGPIO100"),
- PINCTRL_PIN(101, "PLGPIO101"),
+ SPEAR_PIN_0_TO_101,
};
/* firda_pins */
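
The 102 PLGPIO pin descriptors removed in this hunk are replaced by the shared SPEAR_PIN_0_TO_101 macro. Its definition lives in pinctrl-spear.h and is not shown in these hunks; a sketch of its assumed shape:

	/* Assumed shape only; the real macro is defined in pinctrl-spear.h. */
	#define SPEAR_PIN_0_TO_101		\
		PINCTRL_PIN(0, "PLGPIO0"),	\
		PINCTRL_PIN(1, "PLGPIO1"),	\
		/* entries 2 through 100 elided */ \
		PINCTRL_PIN(101, "PLGPIO101")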
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 5d5fdd8df7b8..31f44347f17c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
* Header file for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c1a3fd8e1243..ce875dc365e5 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -523,6 +523,30 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
},
},
+ {
+ .callback = video_set_backlight_video_vendor,
+ .ident = "Acer Extensa 5235",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
+ },
+ },
+ {
+ .callback = video_set_backlight_video_vendor,
+ .ident = "Acer TravelMate 5760",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
+ },
+ },
+ {
+ .callback = video_set_backlight_video_vendor,
+ .ident = "Acer Aspire 5750",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+ },
+ },
{}
};
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 639db4d0aa76..2fd9d36acd15 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -5,7 +5,7 @@
*
* (C) 2009 - Peter Feuerer peter (a) piie.net
* http://piie.net
- * 2009 Borislav Petkov <petkovbb@gmail.com>
+ * 2009 Borislav Petkov bp (a) alien8.de
*
* Inspired by and many thanks to:
* o acerfand - Rachel Greenham
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 8a582bdfdc76..694a15a56230 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -87,6 +87,9 @@ static int gmux_update_status(struct backlight_device *bd)
struct apple_gmux_data *gmux_data = bl_get_data(bd);
u32 brightness = bd->props.brightness;
+ if (bd->props.state & BL_CORE_SUSPENDED)
+ return 0;
+
/*
* Older gmux versions require writing out lower bytes first then
* setting the upper byte to 0 to flush the values. Newer versions
@@ -102,6 +105,7 @@ static int gmux_update_status(struct backlight_device *bd)
}
static const struct backlight_ops gmux_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
.get_brightness = gmux_get_brightness,
.update_status = gmux_update_status,
};
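
Both gmux hunks rely on the backlight core's suspend handling: with BL_CORE_SUSPENDRESUME set in ops->options, the core marks BL_CORE_SUSPENDED in props.state across system sleep, so update_status() can refuse to touch the hardware while the device is down. A reduced sketch of the pattern, with example_* names standing in for driver-specific code:

	static int example_get_brightness(struct backlight_device *bd)
	{
		return bd->props.brightness;	/* placeholder: report the cached value */
	}

	static int example_update_status(struct backlight_device *bd)
	{
		/* Skip hardware writes while the backlight core has suspended us. */
		if (bd->props.state & BL_CORE_SUSPENDED)
			return 0;

		/* ... program the brightness registers here ... */
		return 0;
	}

	static const struct backlight_ops example_bl_ops = {
		.options	= BL_CORE_SUSPENDRESUME,
		.get_brightness	= example_get_brightness,
		.update_status	= example_update_status,
	};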
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index e6c08ee8d46c..5f78aac9b163 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -21,7 +21,6 @@
#include <linux/err.h>
#include <linux/dmi.h>
#include <linux/io.h>
-#include <linux/rfkill.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
#include <linux/mm.h>
@@ -90,11 +89,8 @@ static struct platform_driver platform_driver = {
static struct platform_device *platform_device;
static struct backlight_device *dell_backlight_device;
-static struct rfkill *wifi_rfkill;
-static struct rfkill *bluetooth_rfkill;
-static struct rfkill *wwan_rfkill;
-static const struct dmi_system_id __initdata dell_device_table[] = {
+static const struct dmi_system_id dell_device_table[] __initconst = {
{
.ident = "Dell laptop",
.matches = {
@@ -119,96 +115,94 @@ static const struct dmi_system_id __initdata dell_device_table[] = {
};
MODULE_DEVICE_TABLE(dmi, dell_device_table);
-static struct dmi_system_id __devinitdata dell_blacklist[] = {
- /* Supported by compal-laptop */
- {
- .ident = "Dell Mini 9",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"),
- },
- },
+static struct dmi_system_id __devinitdata dell_quirks[] = {
{
- .ident = "Dell Mini 10",
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V130",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
{
- .ident = "Dell Mini 10v",
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V131",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
{
- .ident = "Dell Mini 1012",
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3350",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
{
- .ident = "Dell Inspiron 11z",
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3555",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
{
- .ident = "Dell Mini 12",
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron N311z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
- {}
-};
-
-static struct dmi_system_id __devinitdata dell_quirks[] = {
{
.callback = dmi_matched,
- .ident = "Dell Vostro V130",
+ .ident = "Dell Inspiron M5110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
- .ident = "Dell Vostro V131",
+ .ident = "Dell Vostro 3360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
- .ident = "Dell Vostro 3555",
+ .ident = "Dell Vostro 3460",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
- .ident = "Dell Inspiron N311z",
+ .ident = "Dell Vostro 3560",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
- .ident = "Dell Inspiron M5110",
+ .ident = "Dell Vostro 3450",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
},
.driver_data = &quirk_dell_vostro_v130,
},
@@ -305,94 +299,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
-/* Derived from information in DellWirelessCtl.cpp:
- Class 17, select 11 is radio control. It returns an array of 32-bit values.
-
- Input byte 0 = 0: Wireless information
-
- result[0]: return code
- result[1]:
- Bit 0: Hardware switch supported
- Bit 1: Wifi locator supported
- Bit 2: Wifi is supported
- Bit 3: Bluetooth is supported
- Bit 4: WWAN is supported
- Bit 5: Wireless keyboard supported
- Bits 6-7: Reserved
- Bit 8: Wifi is installed
- Bit 9: Bluetooth is installed
- Bit 10: WWAN is installed
- Bits 11-15: Reserved
- Bit 16: Hardware switch is on
- Bit 17: Wifi is blocked
- Bit 18: Bluetooth is blocked
- Bit 19: WWAN is blocked
- Bits 20-31: Reserved
- result[2]: NVRAM size in bytes
- result[3]: NVRAM format version number
-
- Input byte 0 = 2: Wireless switch configuration
- result[0]: return code
- result[1]:
- Bit 0: Wifi controlled by switch
- Bit 1: Bluetooth controlled by switch
- Bit 2: WWAN controlled by switch
- Bits 3-6: Reserved
- Bit 7: Wireless switch config locked
- Bit 8: Wifi locator enabled
- Bits 9-14: Reserved
- Bit 15: Wifi locator setting locked
- Bits 16-31: Reserved
-*/
-
-static int dell_rfkill_set(void *data, bool blocked)
-{
- int disable = blocked ? 1 : 0;
- unsigned long radio = (unsigned long)data;
- int hwswitch_bit = (unsigned long)data - 1;
- int ret = 0;
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
-
- /* If the hardware switch controls this radio, and the hardware
- switch is disabled, don't allow changing the software state */
- if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16))) {
- ret = -EINVAL;
- goto out;
- }
-
- buffer->input[0] = (1 | (radio<<8) | (disable << 16));
- dell_send_request(buffer, 17, 11);
-
-out:
- release_buffer();
- return ret;
-}
-
-static void dell_rfkill_query(struct rfkill *rfkill, void *data)
-{
- int status;
- int bit = (unsigned long)data + 16;
- int hwswitch_bit = (unsigned long)data - 1;
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
- status = buffer->output[1];
- release_buffer();
-
- rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
-
- if (hwswitch_state & (BIT(hwswitch_bit)))
- rfkill_set_hw_state(rfkill, !(status & BIT(16)));
-}
-
-static const struct rfkill_ops dell_rfkill_ops = {
- .set_block = dell_rfkill_set,
- .query = dell_rfkill_query,
-};
-
static struct dentry *dell_laptop_dir;
static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -462,108 +368,6 @@ static const struct file_operations dell_debugfs_fops = {
.release = single_release,
};
-static void dell_update_rfkill(struct work_struct *ignored)
-{
- if (wifi_rfkill)
- dell_rfkill_query(wifi_rfkill, (void *)1);
- if (bluetooth_rfkill)
- dell_rfkill_query(bluetooth_rfkill, (void *)2);
- if (wwan_rfkill)
- dell_rfkill_query(wwan_rfkill, (void *)3);
-}
-static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
-
-
-static int __init dell_setup_rfkill(void)
-{
- int status;
- int ret;
-
- if (dmi_check_system(dell_blacklist)) {
- pr_info("Blacklisted hardware detected - not enabling rfkill\n");
- return 0;
- }
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
- status = buffer->output[1];
- buffer->input[0] = 0x2;
- dell_send_request(buffer, 17, 11);
- hwswitch_state = buffer->output[1];
- release_buffer();
-
- if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
- wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
- RFKILL_TYPE_WLAN,
- &dell_rfkill_ops, (void *) 1);
- if (!wifi_rfkill) {
- ret = -ENOMEM;
- goto err_wifi;
- }
- ret = rfkill_register(wifi_rfkill);
- if (ret)
- goto err_wifi;
- }
-
- if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
- bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
- &platform_device->dev,
- RFKILL_TYPE_BLUETOOTH,
- &dell_rfkill_ops, (void *) 2);
- if (!bluetooth_rfkill) {
- ret = -ENOMEM;
- goto err_bluetooth;
- }
- ret = rfkill_register(bluetooth_rfkill);
- if (ret)
- goto err_bluetooth;
- }
-
- if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
- wwan_rfkill = rfkill_alloc("dell-wwan",
- &platform_device->dev,
- RFKILL_TYPE_WWAN,
- &dell_rfkill_ops, (void *) 3);
- if (!wwan_rfkill) {
- ret = -ENOMEM;
- goto err_wwan;
- }
- ret = rfkill_register(wwan_rfkill);
- if (ret)
- goto err_wwan;
- }
-
- return 0;
-err_wwan:
- rfkill_destroy(wwan_rfkill);
- if (bluetooth_rfkill)
- rfkill_unregister(bluetooth_rfkill);
-err_bluetooth:
- rfkill_destroy(bluetooth_rfkill);
- if (wifi_rfkill)
- rfkill_unregister(wifi_rfkill);
-err_wifi:
- rfkill_destroy(wifi_rfkill);
-
- return ret;
-}
-
-static void dell_cleanup_rfkill(void)
-{
- if (wifi_rfkill) {
- rfkill_unregister(wifi_rfkill);
- rfkill_destroy(wifi_rfkill);
- }
- if (bluetooth_rfkill) {
- rfkill_unregister(bluetooth_rfkill);
- rfkill_destroy(bluetooth_rfkill);
- }
- if (wwan_rfkill) {
- rfkill_unregister(wwan_rfkill);
- rfkill_destroy(wwan_rfkill);
- }
-}
-
static int dell_send_intensity(struct backlight_device *bd)
{
int ret = 0;
@@ -655,30 +459,6 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
-{
- static bool extended;
-
- if (str & 0x20)
- return false;
-
- if (unlikely(data == 0xe0)) {
- extended = true;
- return false;
- } else if (unlikely(extended)) {
- switch (data) {
- case 0x8:
- schedule_delayed_work(&dell_rfkill_work,
- round_jiffies_relative(HZ));
- break;
- }
- extended = false;
- }
-
- return false;
-}
-
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -720,26 +500,10 @@ static int __init dell_init(void)
goto fail_buffer;
buffer = page_address(bufferpage);
- ret = dell_setup_rfkill();
-
- if (ret) {
- pr_warn("Unable to setup rfkill\n");
- goto fail_rfkill;
- }
-
- ret = i8042_install_filter(dell_laptop_i8042_filter);
- if (ret) {
- pr_warn("Unable to install key filter\n");
- goto fail_filter;
- }
-
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
- if (dell_laptop_dir != NULL)
- debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
- &dell_debugfs_fops);
#ifdef CONFIG_ACPI
/* In the event of an ACPI backlight being available, don't
@@ -782,11 +546,6 @@ static int __init dell_init(void)
return 0;
fail_backlight:
- i8042_remove_filter(dell_laptop_i8042_filter);
- cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
- dell_cleanup_rfkill();
-fail_rfkill:
free_page((unsigned long)bufferpage);
fail_buffer:
platform_device_del(platform_device);
@@ -804,10 +563,7 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
- i8042_remove_filter(dell_laptop_i8042_filter);
- cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
- dell_cleanup_rfkill();
if (platform_device) {
platform_device_unregister(platform_device);
platform_driver_unregister(&platform_driver);
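
The reworked dell_quirks[] table depends on a dmi_matched() callback and a global quirks pointer whose definitions sit outside these hunks (struct quirk_entry and quirk_dell_vostro_v130 are declared earlier in dell-laptop.c). A sketch of the assumed plumbing:

	static struct quirk_entry *quirks;	/* set by the DMI callback below */

	static int __init dmi_matched(const struct dmi_system_id *dmi)
	{
		quirks = dmi->driver_data;	/* e.g. &quirk_dell_vostro_v130 */
		return 1;			/* one match is enough */
	}

	/* In dell_init(), ahead of the touchpad LED setup kept by this patch: */
	dmi_check_system(dell_quirks);
	if (quirks && quirks->touchpad_led)
		touchpad_led_init(&platform_device->dev);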
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 6b26666b37f2..c4c1a5444b38 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1,7 +1,7 @@
/*-*-linux-c-*-*/
/*
- Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
+ Copyright (C) 2007,2008 Jonathan Woithe <jwoithe@just42.net>
Copyright (C) 2008 Peter Gruber <nokos@gmx.net>
Copyright (C) 2008 Tony Vroon <tony@linx.net>
Based on earlier work:
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 580d80a73c3a..da267eae8ba8 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -16,6 +16,8 @@
* 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -34,7 +36,8 @@
#define ACPI_FUJITSU_CLASS "fujitsu"
#define INVERT_TABLET_MODE_BIT 0x01
-#define FORCE_TABLET_MODE_IF_UNDOCK 0x02
+#define INVERT_DOCK_STATE_BIT 0x02
+#define FORCE_TABLET_MODE_IF_UNDOCK 0x04
#define KEYMAP_LEN 16
@@ -161,6 +164,8 @@ static void fujitsu_send_state(void)
state = fujitsu_read_register(0xdd);
dock = state & 0x02;
+ if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
+ dock = !dock;
if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
tablet_mode = 1;
@@ -221,9 +226,6 @@ static int __devinit input_fujitsu_setup(struct device *parent,
input_set_capability(idev, EV_SW, SW_DOCK);
input_set_capability(idev, EV_SW, SW_TABLET_MODE);
- input_set_capability(idev, EV_SW, SW_DOCK);
- input_set_capability(idev, EV_SW, SW_TABLET_MODE);
-
error = input_register_device(idev);
if (error) {
input_free_device(idev);
@@ -275,25 +277,31 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi)
+static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi)
{
- printk(KERN_INFO MODULENAME ": %s\n", dmi->ident);
+ pr_info("%s\n", dmi->ident);
memcpy(fujitsu.config.keymap, dmi->driver_data,
sizeof(fujitsu.config.keymap));
+}
+
+static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
+{
+ fujitsu_dmi_common(dmi);
+ fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
return 1;
}
static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
{
- fujitsu_dmi_default(dmi);
+ fujitsu_dmi_common(dmi);
fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
- fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
+ fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT;
return 1;
}
static struct dmi_system_id dmi_ids[] __initconst = {
{
- .callback = fujitsu_dmi_default,
+ .callback = fujitsu_dmi_lifebook,
.ident = "Fujitsu Siemens P/T Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -302,7 +310,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
.driver_data = keymap_Lifebook_Tseries
},
{
- .callback = fujitsu_dmi_default,
+ .callback = fujitsu_dmi_lifebook,
.ident = "Fujitsu Lifebook T Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -320,7 +328,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
.driver_data = keymap_Stylistic_Tseries
},
{
- .callback = fujitsu_dmi_default,
+ .callback = fujitsu_dmi_lifebook,
.ident = "Fujitsu LifeBook U810",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -347,7 +355,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
.driver_data = keymap_Stylistic_ST5xxx
},
{
- .callback = fujitsu_dmi_default,
+ .callback = fujitsu_dmi_lifebook,
.ident = "Unknown (using defaults)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, ""),
@@ -473,6 +481,6 @@ module_exit(fujitsu_module_exit);
MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION("2.4");
+MODULE_VERSION("2.5");
MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
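
With the quirk bits split as above, the dock sense can be inverted independently of the tablet-mode sense. A condensed sketch of the decode in fujitsu_send_state() after this patch; only the dock handling and the FORCE_TABLET_MODE_IF_UNDOCK path appear in the hunks, the remaining lines are assumed context:

	state = fujitsu_read_register(0xdd);

	dock = state & 0x02;				/* bit 1: docked */
	if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
		dock = !dock;				/* Stylistic units report it inverted */

	if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && !dock)
		tablet_mode = 1;			/* undocked Stylistic behaves as a tablet */
	else
		tablet_mode = state & 0x01;		/* bit 0: tablet mode (assumed) */
	if (fujitsu.config.quirks & INVERT_TABLET_MODE_BIT)
		tablet_mode = !tablet_mode;		/* Lifebook quirk (assumed placement) */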
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 7387f97a2941..24a3ae065f1b 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -2,7 +2,7 @@
* hdaps.c - driver for IBM's Hard Drive Active Protection System
*
* Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
*
* The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
* starting with the R40, T41, and X40. It provides a basic two-axis
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e2faa3cbb792..387183a2d6dd 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -634,6 +634,8 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
RFKILL_TYPE_WLAN,
&hp_wmi_rfkill_ops,
(void *) HPWMI_WIFI);
+ if (!wifi_rfkill)
+ return -ENOMEM;
rfkill_init_sw_state(wifi_rfkill,
hp_wmi_get_sw_state(HPWMI_WIFI));
rfkill_set_hw_state(wifi_rfkill,
@@ -648,6 +650,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
RFKILL_TYPE_BLUETOOTH,
&hp_wmi_rfkill_ops,
(void *) HPWMI_BLUETOOTH);
+ if (!bluetooth_rfkill) {
+ err = -ENOMEM;
+ goto register_wifi_error;
+ }
rfkill_init_sw_state(bluetooth_rfkill,
hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
rfkill_set_hw_state(bluetooth_rfkill,
@@ -662,6 +668,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
RFKILL_TYPE_WWAN,
&hp_wmi_rfkill_ops,
(void *) HPWMI_WWAN);
+ if (!wwan_rfkill) {
+ err = -ENOMEM;
+ goto register_bluetooth_error;
+ }
rfkill_init_sw_state(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN));
rfkill_set_hw_state(wwan_rfkill,
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ac902f7a9baa..17f6dfd8dbfb 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -194,7 +194,6 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
/*
* debugfs
*/
-#define DEBUGFS_EVENT_LEN (4096)
static int debugfs_status_show(struct seq_file *s, void *data)
{
unsigned long value;
@@ -315,7 +314,7 @@ static int __devinit ideapad_debugfs_init(struct ideapad_private *priv)
node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
&debugfs_status_fops);
if (!node) {
- pr_err("failed to create event in debugfs");
+ pr_err("failed to create status in debugfs");
goto errout;
}
@@ -695,10 +694,10 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
int ret, i;
- unsigned long cfg;
+ int cfg;
struct ideapad_private *priv;
- if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
+ if (read_method_int(adevice->handle, "_CFG", &cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -722,7 +721,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
goto input_failed;
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
- if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(adevice, i);
else
priv->rfk[i] = NULL;
@@ -785,6 +784,10 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
case 9:
ideapad_sync_rfk_state(priv);
break;
+ case 13:
+ case 6:
+ ideapad_input_report(priv, vpc_bit);
+ break;
case 4:
ideapad_backlight_notify_brightness(priv);
break;
@@ -795,7 +798,7 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
ideapad_backlight_notify_power(priv);
break;
default:
- ideapad_input_report(priv, vpc_bit);
+ pr_info("Unknown event: %lu\n", vpc_bit);
}
}
}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 0ffdb3cde2bb..9af4257d4901 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -72,6 +72,7 @@
#include <linux/string.h>
#include <linux/tick.h>
#include <linux/timer.h>
+#include <linux/dmi.h>
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -1485,6 +1486,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
MODULE_DEVICE_TABLE(pci, ips_id_table);
+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+ pr_info("Blacklisted intel_ips for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+ {
+ .callback = ips_blacklist_callback,
+ .ident = "HP ProBook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+ },
+ },
+ { } /* terminating entry */
+};
+
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u64 platform_info;
@@ -1494,6 +1513,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
u16 htshi, trc, trc_required_mask;
u8 tse;
+ if (dmi_check_system(ips_blacklist))
+ return -ENODEV;
+
ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
if (!ips)
return -ENOMEM;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8a51795aa02a..d456ff0c73b7 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -141,6 +141,27 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
"(default: 0)");
static void sony_nc_kbd_backlight_resume(void);
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_battery_care_cleanup(struct platform_device *pd);
+
+static int sony_nc_thermal_setup(struct platform_device *pd);
+static void sony_nc_thermal_cleanup(struct platform_device *pd);
+static void sony_nc_thermal_resume(void);
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd);
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_touchpad_cleanup(struct platform_device *pd);
enum sony_nc_rfkill {
SONY_WIFI,
@@ -153,6 +174,9 @@ enum sony_nc_rfkill {
static int sony_rfkill_handle;
static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+ unsigned int handle);
+static void sony_nc_rfkill_cleanup(void);
static void sony_nc_rfkill_update(void);
/*********** Input Devices ***********/
@@ -691,59 +715,97 @@ static struct acpi_device *sony_nc_acpi_device = NULL;
/*
* acpi_evaluate_object wrappers
+ * all useful calls into SNC methods take one or zero parameters and return
+ * integers or arrays.
*/
-static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
+static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
+ u64 *value)
{
- struct acpi_buffer output;
- union acpi_object out_obj;
+ union acpi_object *result = NULL;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
+ if (value) {
+ struct acpi_object_list params;
+ union acpi_object in;
+ in.type = ACPI_TYPE_INTEGER;
+ in.integer.value = *value;
+ params.count = 1;
+ params.pointer = &in;
+ status = acpi_evaluate_object(handle, method, &params, &output);
+ dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method,
+ (unsigned int)(*value >> 32),
+ (unsigned int)*value & 0xffffffff);
+ } else {
+ status = acpi_evaluate_object(handle, method, NULL, &output);
+ dprintk("__call_snc_method: [%s]\n", method);
+ }
- status = acpi_evaluate_object(handle, name, NULL, &output);
- if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
- *result = out_obj.integer.value;
- return 0;
+ if (ACPI_FAILURE(status)) {
+ pr_err("Failed to evaluate [%s]\n", method);
+ return NULL;
}
- pr_warn("acpi_callreadfunc failed\n");
+ result = (union acpi_object *) output.pointer;
+ if (!result)
+ dprintk("No return object [%s]\n", method);
- return -1;
+ return result;
}
-static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
- int *result)
+static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
+ int *result)
{
- struct acpi_object_list params;
- union acpi_object in_obj;
- struct acpi_buffer output;
- union acpi_object out_obj;
- acpi_status status;
-
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = value;
+ union acpi_object *object = NULL;
+ if (value) {
+ u64 v = *value;
+ object = __call_snc_method(handle, name, &v);
+ } else
+ object = __call_snc_method(handle, name, NULL);
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
+ if (!object)
+ return -EINVAL;
- status = acpi_evaluate_object(handle, name, &params, &output);
- if (status == AE_OK) {
- if (result != NULL) {
- if (out_obj.type != ACPI_TYPE_INTEGER) {
- pr_warn("acpi_evaluate_object bad return type\n");
- return -1;
- }
- *result = out_obj.integer.value;
- }
- return 0;
+ if (object->type != ACPI_TYPE_INTEGER) {
+ pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+ ACPI_TYPE_INTEGER, object->type);
+ kfree(object);
+ return -EINVAL;
}
- pr_warn("acpi_evaluate_object failed\n");
+ if (result)
+ *result = object->integer.value;
- return -1;
+ kfree(object);
+ return 0;
+}
+
+#define MIN(a, b) ((a) > (b) ? (b) : (a))
+static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ void *buffer, size_t buflen)
+{
+ size_t len = 0;
+ union acpi_object *object = __call_snc_method(handle, name, value);
+
+ if (!object)
+ return -EINVAL;
+
+ if (object->type == ACPI_TYPE_BUFFER)
+ len = MIN(buflen, object->buffer.length);
+
+ else if (object->type == ACPI_TYPE_INTEGER)
+ len = MIN(buflen, sizeof(object->integer.value));
+
+ else {
+ pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+ ACPI_TYPE_BUFFER, object->type);
+ kfree(object);
+ return -EINVAL;
+ }
+
+ memcpy(buffer, object->buffer.pointer, len);
+ kfree(object);
+ return 0;
}
struct sony_nc_handles {
@@ -770,16 +832,17 @@ static ssize_t sony_nc_handles_show(struct device *dev,
static int sony_nc_handles_setup(struct platform_device *pd)
{
- int i;
- int result;
+ int i, r, result, arg;
handles = kzalloc(sizeof(*handles), GFP_KERNEL);
if (!handles)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
- if (!acpi_callsetfunc(sony_nc_acpi_handle,
- "SN00", i + 0x20, &result)) {
+ arg = i + 0x20;
+ r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg,
+ &result);
+ if (!r) {
dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
result, i);
handles->cap[i] = result;
@@ -819,8 +882,8 @@ static int sony_find_snc_handle(int handle)
int i;
/* not initialized yet, return early */
- if (!handles)
- return -1;
+ if (!handles || !handle)
+ return -EINVAL;
for (i = 0; i < 0x10; i++) {
if (handles->cap[i] == handle) {
@@ -830,21 +893,20 @@ static int sony_find_snc_handle(int handle)
}
}
dprintk("handle 0x%.4x not found\n", handle);
- return -1;
+ return -EINVAL;
}
static int sony_call_snc_handle(int handle, int argument, int *result)
{
- int ret = 0;
+ int arg, ret = 0;
int offset = sony_find_snc_handle(handle);
if (offset < 0)
- return -1;
+ return offset;
- ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument,
- result);
- dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument,
- *result);
+ arg = offset | argument;
+ ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
+ dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);
return ret;
}
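
The rewritten wrappers give three call shapes: sony_nc_int_call() for SNC methods that take and return plain integers, sony_call_snc_handle() for handle-indexed SN07 calls, and sony_nc_buffer_call() for methods such as SN06 that may return a buffer. A short usage sketch, as it might appear inside one of the setup helpers (the 0x0124 handle and the 0x0200 argument follow the rfkill code later in this patch):

	int arg, result;
	u64 offset;
	unsigned char buf[32] = { 0 };

	/* Plain integer method: SN00 with an argument, integer result. */
	arg = 0x20;
	if (sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &result))
		return -EIO;

	/* Handle-indexed call: the offset lookup is done by sony_find_snc_handle(). */
	if (sony_call_snc_handle(0x0124, 0x0200, &result))
		return -EIO;

	/* Buffer-returning method: SN06 fills buf with radio device codes. */
	offset = sony_find_snc_handle(0x0124);
	if (sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buf, sizeof(buf)))
		return -EIO;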
@@ -889,14 +951,16 @@ static int boolean_validate(const int direction, const int value)
static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
char *buffer)
{
- int value;
+ int value, ret = 0;
struct sony_nc_value *item =
container_of(attr, struct sony_nc_value, devattr);
if (!*item->acpiget)
return -EIO;
- if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0)
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
+ &value);
+ if (ret < 0)
return -EIO;
if (item->validate)
@@ -910,6 +974,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
const char *buffer, size_t count)
{
int value;
+ int ret = 0;
struct sony_nc_value *item =
container_of(attr, struct sony_nc_value, devattr);
@@ -919,7 +984,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
if (count > 31)
return -EINVAL;
- value = simple_strtoul(buffer, NULL, 10);
+ if (kstrtoint(buffer, 10, &value))
+ return -EINVAL;
if (item->validate)
value = item->validate(SNC_VALIDATE_IN, value);
@@ -927,8 +993,11 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
if (value < 0)
return value;
- if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0)
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+ &value, NULL);
+ if (ret < 0)
return -EIO;
+
item->value = value;
item->valid = 1;
return count;
@@ -941,6 +1010,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
struct sony_backlight_props {
struct backlight_device *dev;
int handle;
+ int cmd_base;
u8 offset;
u8 maxlvl;
};
@@ -948,15 +1018,15 @@ struct sony_backlight_props sony_bl_props;
static int sony_backlight_update_status(struct backlight_device *bd)
{
- return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
- bd->props.brightness + 1, NULL);
+ int arg = bd->props.brightness + 1;
+ return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
}
static int sony_backlight_get_brightness(struct backlight_device *bd)
{
int value;
- if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value))
+ if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
return 0;
/* brightness levels are 1-based, while backlight ones are 0-based */
return value - 1;
@@ -968,7 +1038,7 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
- sony_call_snc_handle(sdev->handle, 0x0200, &result);
+ sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result);
return (result & 0xff) - sdev->offset;
}
@@ -980,7 +1050,8 @@ static int sony_nc_update_status_ng(struct backlight_device *bd)
(struct sony_backlight_props *)bl_get_data(bd);
value = bd->props.brightness + sdev->offset;
- if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
+ if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10),
+ &result))
return -EIO;
return value;
@@ -1024,10 +1095,14 @@ static struct sony_nc_event sony_100_events[] = {
{ 0x06, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x87, SONYPI_EVENT_FNKEY_F7 },
{ 0x07, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x88, SONYPI_EVENT_FNKEY_F8 },
+ { 0x08, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x89, SONYPI_EVENT_FNKEY_F9 },
{ 0x09, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x8A, SONYPI_EVENT_FNKEY_F10 },
{ 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x8B, SONYPI_EVENT_FNKEY_F11 },
+ { 0x0B, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x8C, SONYPI_EVENT_FNKEY_F12 },
{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
@@ -1063,63 +1138,139 @@ static struct sony_nc_event sony_127_events[] = {
{ 0, 0 },
};
+static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
+{
+ int ret = -EINVAL;
+ unsigned int result = 0;
+ struct sony_nc_event *key_event;
+
+ if (sony_call_snc_handle(handle, 0x200, &result)) {
+ dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle,
+ event);
+ return -EINVAL;
+ }
+
+ result &= 0xFF;
+
+ if (handle == 0x0100)
+ key_event = sony_100_events;
+ else
+ key_event = sony_127_events;
+
+ for (; key_event->data; key_event++) {
+ if (key_event->data == result) {
+ ret = key_event->event;
+ break;
+ }
+ }
+
+ if (!key_event->data)
+ pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n",
+ event, result, handle);
+
+ return ret;
+}
+
/*
* ACPI callbacks
*/
+enum event_types {
+ HOTKEY = 1,
+ KILLSWITCH,
+ GFX_SWITCH
+};
static void sony_nc_notify(struct acpi_device *device, u32 event)
{
- u32 ev = event;
+ u32 real_ev = event;
+ u8 ev_type = 0;
+ dprintk("sony_nc_notify, event: 0x%.2x\n", event);
+
+ if (event >= 0x90) {
+ unsigned int result = 0;
+ unsigned int arg = 0;
+ unsigned int handle = 0;
+ unsigned int offset = event - 0x90;
+
+ if (offset >= ARRAY_SIZE(handles->cap)) {
+ pr_err("Event 0x%x outside of capabilities list\n",
+ event);
+ return;
+ }
+ handle = handles->cap[offset];
+
+ /* list of handles known for generating events */
+ switch (handle) {
+ /* hotkey event */
+ case 0x0100:
+ case 0x0127:
+ ev_type = HOTKEY;
+ real_ev = sony_nc_hotkeys_decode(event, handle);
+
+ if (real_ev > 0)
+ sony_laptop_report_input_event(real_ev);
+ else
+ /* restore the original event for reporting */
+ real_ev = event;
- if (ev >= 0x90) {
- /* New-style event */
- int result;
- int key_handle = 0;
- ev -= 0x90;
-
- if (sony_find_snc_handle(0x100) == ev)
- key_handle = 0x100;
- if (sony_find_snc_handle(0x127) == ev)
- key_handle = 0x127;
-
- if (key_handle) {
- struct sony_nc_event *key_event;
-
- if (sony_call_snc_handle(key_handle, 0x200, &result)) {
- dprintk("sony_nc_notify, unable to decode"
- " event 0x%.2x 0x%.2x\n", key_handle,
- ev);
- /* restore the original event */
- ev = event;
- } else {
- ev = result & 0xFF;
-
- if (key_handle == 0x100)
- key_event = sony_100_events;
- else
- key_event = sony_127_events;
-
- for (; key_event->data; key_event++) {
- if (key_event->data == ev) {
- ev = key_event->event;
- break;
- }
- }
+ break;
- if (!key_event->data)
- pr_info("Unknown event: 0x%x 0x%x\n",
- key_handle, ev);
- else
- sony_laptop_report_input_event(ev);
- }
- } else if (sony_find_snc_handle(sony_rfkill_handle) == ev) {
- sony_nc_rfkill_update();
- return;
+ /* wlan switch */
+ case 0x0124:
+ case 0x0135:
+ /* events on this handle are reported when the
+ * switch changes position or for battery
+ * events. We'll notify both of them but only
+ * update the rfkill device status when the
+ * switch is moved.
+ */
+ ev_type = KILLSWITCH;
+ sony_call_snc_handle(handle, 0x0100, &result);
+ real_ev = result & 0x03;
+
+ /* hw switch event */
+ if (real_ev == 1)
+ sony_nc_rfkill_update();
+
+ break;
+
+ case 0x0128:
+ case 0x0146:
+ /* Hybrid GFX switching */
+ sony_call_snc_handle(handle, 0x0000, &result);
+ dprintk("GFX switch event received (reason: %s)\n",
+ (result & 0x01) ?
+ "switch change" : "unknown");
+
+ /* verify the switch state
+ * 1: discrete GFX
+ * 0: integrated GFX
+ */
+ sony_call_snc_handle(handle, 0x0100, &result);
+
+ ev_type = GFX_SWITCH;
+ real_ev = result & 0xff;
+ break;
+
+ default:
+ dprintk("Unknown event 0x%x for handle 0x%x\n",
+ event, handle);
+ break;
}
- } else
- sony_laptop_report_input_event(ev);
- dprintk("sony_nc_notify, event: 0x%.2x\n", ev);
- acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
+ /* clear the event (and the event reason when present) */
+ arg = 1 << offset;
+ sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result);
+
+ } else {
+ /* old style event */
+ ev_type = HOTKEY;
+ sony_laptop_report_input_event(real_ev);
+ }
+
+ acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
+
+ acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
+ dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
}
static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
@@ -1140,20 +1291,190 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
/*
* ACPI device
*/
-static int sony_nc_function_setup(struct acpi_device *device)
+static void sony_nc_function_setup(struct acpi_device *device,
+ struct platform_device *pf_device)
{
- int result;
+ unsigned int i, result, bitmask, arg;
+
+ if (!handles)
+ return;
+
+ /* setup found handles here */
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+ unsigned int handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ dprintk("setting up handle 0x%.4x\n", handle);
+
+ switch (handle) {
+ case 0x0100:
+ case 0x0101:
+ case 0x0127:
+ /* setup hotkeys */
+ sony_call_snc_handle(handle, 0, &result);
+ break;
+ case 0x0102:
+ /* setup hotkeys */
+ sony_call_snc_handle(handle, 0x100, &result);
+ break;
+ case 0x0105:
+ case 0x0148:
+ /* touchpad enable/disable */
+ result = sony_nc_touchpad_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up touchpad control function (%d)\n",
+ result);
+ break;
+ case 0x0115:
+ case 0x0136:
+ case 0x013f:
+ result = sony_nc_battery_care_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up battery care function (%d)\n",
+ result);
+ break;
+ case 0x0119:
+ result = sony_nc_lid_resume_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up lid resume function (%d)\n",
+ result);
+ break;
+ case 0x0122:
+ result = sony_nc_thermal_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up thermal profile function (%d)\n",
+ result);
+ break;
+ case 0x0131:
+ result = sony_nc_highspeed_charging_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up high speed charging function (%d)\n",
+ result);
+ break;
+ case 0x0124:
+ case 0x0135:
+ result = sony_nc_rfkill_setup(device, handle);
+ if (result)
+ pr_err("couldn't set up rfkill support (%d)\n",
+ result);
+ break;
+ case 0x0137:
+ case 0x0143:
+ result = sony_nc_kbd_backlight_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up keyboard backlight function (%d)\n",
+ result);
+ break;
+ default:
+ continue;
+ }
+ }
/* Enable all events */
- acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result);
+ arg = 0x10;
+ if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+ sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+ &result);
+}
- /* Setup hotkeys */
- sony_call_snc_handle(0x0100, 0, &result);
- sony_call_snc_handle(0x0101, 0, &result);
- sony_call_snc_handle(0x0102, 0x100, &result);
- sony_call_snc_handle(0x0127, 0, &result);
+static void sony_nc_function_cleanup(struct platform_device *pd)
+{
+ unsigned int i, result, bitmask, handle;
- return 0;
+ /* get enabled events and disable them */
+ sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
+ sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
+
+ /* cleanup handles here */
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+
+ handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ switch (handle) {
+ case 0x0105:
+ case 0x0148:
+ sony_nc_touchpad_cleanup(pd);
+ break;
+ case 0x0115:
+ case 0x0136:
+ case 0x013f:
+ sony_nc_battery_care_cleanup(pd);
+ break;
+ case 0x0119:
+ sony_nc_lid_resume_cleanup(pd);
+ break;
+ case 0x0122:
+ sony_nc_thermal_cleanup(pd);
+ break;
+ case 0x0131:
+ sony_nc_highspeed_charging_cleanup(pd);
+ break;
+ case 0x0124:
+ case 0x0135:
+ sony_nc_rfkill_cleanup();
+ break;
+ case 0x0137:
+ case 0x0143:
+ sony_nc_kbd_backlight_cleanup(pd);
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* finally cleanup the handles list */
+ sony_nc_handles_cleanup(pd);
+}
+
+static void sony_nc_function_resume(void)
+{
+ unsigned int i, result, bitmask, arg;
+
+ dprintk("Resuming SNC device\n");
+
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+ unsigned int handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ switch (handle) {
+ case 0x0100:
+ case 0x0101:
+ case 0x0127:
+ /* re-enable hotkeys */
+ sony_call_snc_handle(handle, 0, &result);
+ break;
+ case 0x0102:
+ /* re-enable hotkeys */
+ sony_call_snc_handle(handle, 0x100, &result);
+ break;
+ case 0x0122:
+ sony_nc_thermal_resume();
+ break;
+ case 0x0124:
+ case 0x0135:
+ sony_nc_rfkill_update();
+ break;
+ case 0x0137:
+ case 0x0143:
+ sony_nc_kbd_backlight_resume();
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* Enable all events */
+ arg = 0x10;
+ if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+ sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+ &result);
}
static int sony_nc_resume(struct acpi_device *device)
@@ -1166,8 +1487,8 @@ static int sony_nc_resume(struct acpi_device *device)
if (!item->valid)
continue;
- ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
- item->value, NULL);
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+ &item->value, NULL);
if (ret < 0) {
pr_err("%s: %d\n", __func__, ret);
break;
@@ -1176,21 +1497,14 @@ static int sony_nc_resume(struct acpi_device *device)
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
&handle))) {
- if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+ int arg = 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
- &handle))) {
- dprintk("Doing SNC setup\n");
- sony_nc_function_setup(device);
- }
-
- /* re-read rfkill state */
- sony_nc_rfkill_update();
-
- /* restore kbd backlight states */
- sony_nc_kbd_backlight_resume();
+ &handle)))
+ sony_nc_function_resume();
return 0;
}
@@ -1213,7 +1527,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
int argument = sony_rfkill_address[(long) data] + 0x100;
if (!blocked)
- argument |= 0xff0000;
+ argument |= 0x030000;
return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
}
@@ -1230,7 +1544,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
enum rfkill_type type;
const char *name;
int result;
- bool hwblock;
+ bool hwblock, swblock;
switch (nc_type) {
case SONY_WIFI:
@@ -1258,8 +1572,21 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
if (!rfk)
return -ENOMEM;
- sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+ if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
+ rfkill_destroy(rfk);
+ return -1;
+ }
hwblock = !(result & 0x1);
+
+ if (sony_call_snc_handle(sony_rfkill_handle,
+ sony_rfkill_address[nc_type],
+ &result) < 0) {
+ rfkill_destroy(rfk);
+ return -1;
+ }
+ swblock = !(result & 0x2);
+
+ rfkill_init_sw_state(rfk, swblock);
rfkill_set_hw_state(rfk, hwblock);
err = rfkill_register(rfk);
@@ -1295,101 +1622,79 @@ static void sony_nc_rfkill_update(void)
sony_call_snc_handle(sony_rfkill_handle, argument, &result);
rfkill_set_states(sony_rfkill_devices[i],
- !(result & 0xf), false);
+ !(result & 0x2), false);
}
}
-static void sony_nc_rfkill_setup(struct acpi_device *device)
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+ unsigned int handle)
{
- int offset;
- u8 dev_code, i;
- acpi_status status;
- struct acpi_object_list params;
- union acpi_object in_obj;
- union acpi_object *device_enum;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
- offset = sony_find_snc_handle(0x124);
- if (offset == -1) {
- offset = sony_find_snc_handle(0x135);
- if (offset == -1)
- return;
- else
- sony_rfkill_handle = 0x135;
- } else
- sony_rfkill_handle = 0x124;
- dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);
-
- /* need to read the whole buffer returned by the acpi call to SN06
- * here otherwise we may miss some features
- */
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = offset;
- status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
- &buffer);
- if (ACPI_FAILURE(status)) {
- dprintk("Radio device enumeration failed\n");
- return;
- }
-
- device_enum = (union acpi_object *) buffer.pointer;
- if (!device_enum) {
- pr_err("No SN06 return object\n");
- goto out_no_enum;
- }
- if (device_enum->type != ACPI_TYPE_BUFFER) {
- pr_err("Invalid SN06 return object 0x%.2x\n",
- device_enum->type);
- goto out_no_enum;
- }
+ u64 offset;
+ int i;
+ unsigned char buffer[32] = { 0 };
- /* the buffer is filled with magic numbers describing the devices
- * available, 0xff terminates the enumeration
+ offset = sony_find_snc_handle(handle);
+ sony_rfkill_handle = handle;
+
+ i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+ 32);
+ if (i < 0)
+ return i;
+
+ /* The buffer is filled with magic numbers describing the devices
+ * available, 0xff terminates the enumeration.
+ * Known codes:
+ * 0x00 WLAN
+ * 0x10 BLUETOOTH
+ * 0x20 WWAN GPRS-EDGE
+ * 0x21 WWAN HSDPA
+ * 0x22 WWAN EV-DO
+ * 0x23 WWAN GPS
+ * 0x25 Gobi WWAN no GPS
+ * 0x26 Gobi WWAN + GPS
+ * 0x28 Gobi WWAN no GPS
+ * 0x29 Gobi WWAN + GPS
+ * 0x30 WIMAX
+ * 0x50 Gobi WWAN no GPS
+ * 0x51 Gobi WWAN + GPS
+ * 0x70 no SIM card slot
+ * 0x71 SIM card slot
*/
- for (i = 0; i < device_enum->buffer.length; i++) {
+ for (i = 0; i < ARRAY_SIZE(buffer); i++) {
- dev_code = *(device_enum->buffer.pointer + i);
- if (dev_code == 0xff)
+ if (buffer[i] == 0xff)
break;
- dprintk("Radio devices, looking at 0x%.2x\n", dev_code);
+ dprintk("Radio devices, found 0x%.2x\n", buffer[i]);
- if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
+ if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
sony_nc_setup_rfkill(device, SONY_WIFI);
- if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
+ if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
- if ((0xf0 & dev_code) == 0x20 &&
+ if (((0xf0 & buffer[i]) == 0x20 ||
+ (0xf0 & buffer[i]) == 0x50) &&
!sony_rfkill_devices[SONY_WWAN])
sony_nc_setup_rfkill(device, SONY_WWAN);
- if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
+ if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
sony_nc_setup_rfkill(device, SONY_WIMAX);
}
-
-out_no_enum:
- kfree(buffer.pointer);
- return;
+ return 0;
}
/* Keyboard backlight feature */
-#define KBDBL_HANDLER 0x137
-#define KBDBL_PRESENT 0xB00
-#define SET_MODE 0xC00
-#define SET_STATE 0xD00
-#define SET_TIMEOUT 0xE00
-
struct kbd_backlight {
- int mode;
- int timeout;
+ unsigned int handle;
+ unsigned int base;
+ unsigned int mode;
+ unsigned int timeout;
struct device_attribute mode_attr;
struct device_attribute timeout_attr;
};
-static struct kbd_backlight *kbdbl_handle;
+static struct kbd_backlight *kbdbl_ctl;
static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
{
@@ -1398,15 +1703,15 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
if (value > 1)
return -EINVAL;
- if (sony_call_snc_handle(KBDBL_HANDLER,
- (value << 0x10) | SET_MODE, &result))
+ if (sony_call_snc_handle(kbdbl_ctl->handle,
+ (value << 0x10) | (kbdbl_ctl->base), &result))
return -EIO;
/* Try to turn the light on/off immediately */
- sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
- &result);
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ (value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
- kbdbl_handle->mode = value;
+ kbdbl_ctl->mode = value;
return 0;
}
@@ -1421,7 +1726,7 @@ static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
if (count > 31)
return -EINVAL;
- if (strict_strtoul(buffer, 10, &value))
+ if (kstrtoul(buffer, 10, &value))
return -EINVAL;
ret = __sony_nc_kbd_backlight_mode_set(value);
@@ -1435,7 +1740,7 @@ static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
ssize_t count = 0;
- count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode);
+ count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);
return count;
}
@@ -1446,11 +1751,11 @@ static int __sony_nc_kbd_backlight_timeout_set(u8 value)
if (value > 3)
return -EINVAL;
- if (sony_call_snc_handle(KBDBL_HANDLER,
- (value << 0x10) | SET_TIMEOUT, &result))
+ if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
+ (kbdbl_ctl->base + 0x200), &result))
return -EIO;
- kbdbl_handle->timeout = value;
+ kbdbl_ctl->timeout = value;
return 0;
}
@@ -1465,7 +1770,7 @@ static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
if (count > 31)
return -EINVAL;
- if (strict_strtoul(buffer, 10, &value))
+ if (kstrtoul(buffer, 10, &value))
return -EINVAL;
ret = __sony_nc_kbd_backlight_timeout_set(value);
@@ -1479,39 +1784,58 @@ static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
ssize_t count = 0;
- count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout);
+ count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);
return count;
}
-static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+ unsigned int handle)
{
int result;
+ int ret = 0;
- if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
- return 0;
- if (!(result & 0x02))
+ /* verify the kbd backlight presence; these handles are not used
+ * exclusively for the keyboard backlight
+ */
+ ret = sony_call_snc_handle(handle, handle == 0x0137 ? 0x0B00 : 0x0100,
+ &result);
+ if (ret)
+ return ret;
+
+ if ((handle == 0x0137 && !(result & 0x02)) ||
+ !(result & 0x01)) {
+ dprintk("no backlight keyboard found\n");
return 0;
+ }
- kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL);
- if (!kbdbl_handle)
+ kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+ if (!kbdbl_ctl)
return -ENOMEM;
- sysfs_attr_init(&kbdbl_handle->mode_attr.attr);
- kbdbl_handle->mode_attr.attr.name = "kbd_backlight";
- kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
- kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show;
- kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store;
+ kbdbl_ctl->handle = handle;
+ if (handle == 0x0137)
+ kbdbl_ctl->base = 0x0C00;
+ else
+ kbdbl_ctl->base = 0x4000;
+
+ sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
+ kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
+ kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+ kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
+ kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;
- sysfs_attr_init(&kbdbl_handle->timeout_attr.attr);
- kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout";
- kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
- kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
- kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
+ sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
+ kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
+ kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
+ kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
+ kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
- if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr))
+ ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
+ if (ret)
goto outkzalloc;
- if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr))
+ ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
+ if (ret)
goto outmode;
__sony_nc_kbd_backlight_mode_set(kbd_backlight);
@@ -1520,113 +1844,708 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
return 0;
outmode:
- device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
+ device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
outkzalloc:
- kfree(kbdbl_handle);
- kbdbl_handle = NULL;
- return -1;
+ kfree(kbdbl_ctl);
+ kbdbl_ctl = NULL;
+ return ret;
}
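
A minimal sketch (not part of the patch, helper name invented) of the command layout the keyboard backlight code above assumes: handle 0x0137 keeps the legacy 0x0C00 base while the newer handle uses 0x4000, and the mode, state and timeout operations sit at base, base + 0x100 and base + 0x200.

static unsigned int kbdbl_cmd(unsigned int handle, unsigned int op,
			      unsigned int value)
{
	/* op: 0x000 = mode, 0x100 = state, 0x200 = timeout */
	unsigned int base = (handle == 0x0137) ? 0x0C00 : 0x4000;

	return (value << 0x10) | (base + op);
}
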
-static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
{
- if (kbdbl_handle) {
+ if (kbdbl_ctl) {
int result;
- device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
- device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+ device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
+ device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
/* restore the default hw behaviour */
- sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
- sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ kbdbl_ctl->base | 0x10000, &result);
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ kbdbl_ctl->base + 0x200, &result);
- kfree(kbdbl_handle);
+ kfree(kbdbl_ctl);
+ kbdbl_ctl = NULL;
}
- return 0;
}
static void sony_nc_kbd_backlight_resume(void)
{
int ignore = 0;
- if (!kbdbl_handle)
+ if (!kbdbl_ctl)
return;
- if (kbdbl_handle->mode == 0)
- sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
-
- if (kbdbl_handle->timeout != 0)
- sony_call_snc_handle(KBDBL_HANDLER,
- (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+ if (kbdbl_ctl->mode == 0)
+ sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
&ignore);
+
+ if (kbdbl_ctl->timeout != 0)
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ (kbdbl_ctl->base + 0x200) |
+ (kbdbl_ctl->timeout << 0x10), &ignore);
+}
+
+struct battery_care_control {
+ struct device_attribute attrs[2];
+ unsigned int handle;
+};
+static struct battery_care_control *bcare_ctl;
+
+static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result, cmd;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
+ /* limit values (2 bits):
+ * 00 - none
+ * 01 - 80%
+ * 10 - 50%
+ * 11 - 100%
+ *
+ * bit 0: 0 disable BCL, 1 enable BCL
+ * bit 1: 1 tell to store the battery limit (see bits 6,7) too
+ * bits 2,3: reserved
+ * bits 4,5: store the limit into the EC
+ * bits 6,7: store the limit into the battery
+ */
+ cmd = 0;
+
+ if (value > 0) {
+ if (value <= 50)
+ cmd = 0x20;
+
+ else if (value <= 80)
+ cmd = 0x10;
+
+ else if (value <= 100)
+ cmd = 0x30;
+
+ else
+ return -EINVAL;
+
+ /*
+ * handle 0x0115 should allow storing on battery too;
+ * handle 0x0136 same as 0x0115 + health status;
+ * handle 0x013f, same as 0x0136 but no storing on the battery
+ */
+ if (bcare_ctl->handle != 0x013f)
+ cmd = cmd | (cmd << 2);
+
+ cmd = (cmd | 0x1) << 0x10;
+ }
+
+ if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
+ return -EIO;
+
+ return count;
+}
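
A small illustrative sketch, not part of the patch: how the command word built in sony_nc_battery_care_limit_store() above encodes a percentage limit. The standalone helper and its name are assumptions for illustration only.

static unsigned int bcare_limit_cmd(unsigned long percent, unsigned int handle)
{
	unsigned int cmd;

	if (percent == 0)
		return 0;		/* disable the battery care limiter */

	if (percent <= 50)
		cmd = 0x20;		/* bits 4,5 = 10 -> 50% */
	else if (percent <= 80)
		cmd = 0x10;		/* bits 4,5 = 01 -> 80% */
	else
		cmd = 0x30;		/* bits 4,5 = 11 -> 100% (values > 100 are rejected) */

	if (handle != 0x013f)
		cmd |= cmd << 2;	/* mirror the limit into the battery bits 6,7 */

	return (cmd | 0x1) << 0x10;	/* bit 0 enables BCL, shifted into the SNC argument */
}

For example, a 50% limit on handle 0x0115 gives (0x20 | 0x80 | 0x1) << 16 = 0xa10000, which the caller then ORs with 0x0100 before the SNC call.
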
+
+static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result, status;
+
+ if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
+ return -EIO;
+
+ status = (result & 0x01) ? ((result & 0x30) >> 0x04) : 0;
+ switch (status) {
+ case 1:
+ status = 80;
+ break;
+ case 2:
+ status = 50;
+ break;
+ case 3:
+ status = 100;
+ break;
+ default:
+ status = 0;
+ break;
+ }
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t sony_nc_battery_care_health_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ ssize_t count = 0;
+ unsigned int health;
+
+ if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
+ return -EIO;
+
+ count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);
+
+ return count;
+}
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ int ret = 0;
+
+ bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
+ if (!bcare_ctl)
+ return -ENOMEM;
+
+ bcare_ctl->handle = handle;
+
+ sysfs_attr_init(&bcare_ctl->attrs[0].attr);
+ bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
+ bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+ bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
+ bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;
+
+ ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
+ if (ret)
+ goto outkzalloc;
+
+ /* 0x0115 is for models with no health reporting capability */
+ if (handle == 0x0115)
+ return 0;
+
+ sysfs_attr_init(&bcare_ctl->attrs[1].attr);
+ bcare_ctl->attrs[1].attr.name = "battery_care_health";
+ bcare_ctl->attrs[1].attr.mode = S_IRUGO;
+ bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;
+
+ ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
+ if (ret)
+ goto outlimiter;
+
+ return 0;
+
+outlimiter:
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+
+outkzalloc:
+ kfree(bcare_ctl);
+ bcare_ctl = NULL;
+
+ return ret;
+}
+
+static void sony_nc_battery_care_cleanup(struct platform_device *pd)
+{
+ if (bcare_ctl) {
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+ if (bcare_ctl->handle != 0x0115)
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);
+
+ kfree(bcare_ctl);
+ bcare_ctl = NULL;
+ }
+}
+
+struct snc_thermal_ctrl {
+ unsigned int mode;
+ unsigned int profiles;
+ struct device_attribute mode_attr;
+ struct device_attribute profiles_attr;
+};
+static struct snc_thermal_ctrl *th_handle;
+
+#define THM_PROFILE_MAX 3
+static const char * const snc_thermal_profiles[] = {
+ "balanced",
+ "silent",
+ "performance"
+};
+
+static int sony_nc_thermal_mode_set(unsigned short mode)
+{
+ unsigned int result;
+
+ /* the thermal profile seems to be a two bit bitmask:
+ * lsb -> silent
+ * msb -> performance
+ * no bit set is the normal operation and is always valid
+ * Some vaio models only have "balanced" and "performance"
+ */
+ if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result))
+ return -EIO;
+
+ th_handle->mode = mode;
+
+ return 0;
+}
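
A sketch, not from the patch, of how a profile name maps onto the two-bit mode validated by sony_nc_thermal_mode_set() above; the helper name is hypothetical and strcmp() is the ordinary string routine.

static int thermal_name_to_mode(const char *name, unsigned int supported)
{
	static const char * const names[] = { "balanced", "silent", "performance" };
	unsigned int mode;

	for (mode = 0; mode < 3; mode++)
		if (strcmp(name, names[mode]) == 0)
			break;

	if (mode >= 3)
		return -1;		/* unknown profile name */

	/* mode 0 (balanced) is always valid, others must be advertised */
	if (mode && !(supported & mode))
		return -1;

	return mode;
}
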
+
+static int sony_nc_thermal_mode_get(void)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0122, 0x0100, &result))
+ return -EIO;
+
+ return result & 0xff;
+}
+
+static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ short cnt;
+ size_t idx = 0;
+
+ for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
+ if (!cnt || (th_handle->profiles & cnt))
+ idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
+ snc_thermal_profiles[cnt]);
+ }
+ idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n");
+
+ return idx;
+}
+
+static ssize_t sony_nc_thermal_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned short cmd;
+ size_t len = count;
+
+ if (count == 0)
+ return -EINVAL;
+
+ /* skip the newline if present */
+ if (buffer[len - 1] == '\n')
+ len--;
+
+ for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++)
+ if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0)
+ break;
+
+ if (sony_nc_thermal_mode_set(cmd))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_thermal_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ ssize_t count = 0;
+ int mode = sony_nc_thermal_mode_get();
+
+ if (mode < 0)
+ return mode;
+
+ count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]);
+
+ return count;
+}
+
+static int sony_nc_thermal_setup(struct platform_device *pd)
+{
+ int ret = 0;
+ th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL);
+ if (!th_handle)
+ return -ENOMEM;
+
+ ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles);
+ if (ret) {
+ pr_warn("couldn't to read the thermal profiles\n");
+ goto outkzalloc;
+ }
+
+ ret = sony_nc_thermal_mode_get();
+ if (ret < 0) {
+ pr_warn("couldn't to read the current thermal profile");
+ goto outkzalloc;
+ }
+ th_handle->mode = ret;
+
+ sysfs_attr_init(&th_handle->profiles_attr.attr);
+ th_handle->profiles_attr.attr.name = "thermal_profiles";
+ th_handle->profiles_attr.attr.mode = S_IRUGO;
+ th_handle->profiles_attr.show = sony_nc_thermal_profiles_show;
+
+ sysfs_attr_init(&th_handle->mode_attr.attr);
+ th_handle->mode_attr.attr.name = "thermal_control";
+ th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+ th_handle->mode_attr.show = sony_nc_thermal_mode_show;
+ th_handle->mode_attr.store = sony_nc_thermal_mode_store;
+
+ ret = device_create_file(&pd->dev, &th_handle->profiles_attr);
+ if (ret)
+ goto outkzalloc;
+
+ ret = device_create_file(&pd->dev, &th_handle->mode_attr);
+ if (ret)
+ goto outprofiles;
+
+ return 0;
+
+outprofiles:
+ device_remove_file(&pd->dev, &th_handle->profiles_attr);
+outkzalloc:
+ kfree(th_handle);
+ th_handle = NULL;
+ return ret;
+}
+
+static void sony_nc_thermal_cleanup(struct platform_device *pd)
+{
+ if (th_handle) {
+ device_remove_file(&pd->dev, &th_handle->profiles_attr);
+ device_remove_file(&pd->dev, &th_handle->mode_attr);
+ kfree(th_handle);
+ th_handle = NULL;
+ }
+}
+
+static void sony_nc_thermal_resume(void)
+{
+ unsigned int status = sony_nc_thermal_mode_get();
+
+ if (status != th_handle->mode)
+ sony_nc_thermal_mode_set(th_handle->mode);
+}
+
+/* resume on LID open */
+struct snc_lid_resume_control {
+ struct device_attribute attrs[3];
+ unsigned int status;
+};
+static struct snc_lid_resume_control *lid_ctl;
+
+static ssize_t sony_nc_lid_resume_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result, pos;
+ unsigned long value;
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ /* the value we have to write to SNC is a bitmask:
+ * +--------------+
+ * | S3 | S4 | S5 |
+ * +--------------+
+ * 2 1 0
+ */
+ if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+ pos = 2;
+ else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+ pos = 1;
+ else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+ pos = 0;
+ else
+ return -EINVAL;
+
+ if (value)
+ value = lid_ctl->status | (1 << pos);
+ else
+ value = lid_ctl->status & ~(1 << pos);
+
+ if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
+ return -EIO;
+
+ lid_ctl->status = value;
+
+ return count;
+}
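
A minimal sketch (illustrative only, helper name made up) of the three-bit mask manipulation done in sony_nc_lid_resume_store() above; pos is 2 for S3, 1 for S4 and 0 for S5, and the result is written as (mask << 0x10) | 0x0100 to handle 0x0119.

static unsigned int lid_resume_mask(unsigned int mask, unsigned int pos,
				    unsigned long enable)
{
	if (enable)
		return mask | (1u << pos);

	return mask & ~(1u << pos);
}

Enabling resume-on-lid-open for S3 only, starting from an empty mask, gives 0x4, so the SNC call argument becomes 0x40100.
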
+
+static ssize_t sony_nc_lid_resume_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int pos;
+
+ if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+ pos = 2;
+ else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+ pos = 1;
+ else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+ pos = 0;
+ else
+ return -EINVAL;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n",
+ (lid_ctl->status >> pos) & 0x01);
+}
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd)
+{
+ unsigned int result;
+ int i;
+
+ if (sony_call_snc_handle(0x0119, 0x0000, &result))
+ return -EIO;
+
+ lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
+ if (!lid_ctl)
+ return -ENOMEM;
+
+ lid_ctl->status = result & 0x7;
+
+ sysfs_attr_init(&lid_ctl->attrs[0].attr);
+ lid_ctl->attrs[0].attr.name = "lid_resume_S3";
+ lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
+
+ sysfs_attr_init(&lid_ctl->attrs[1].attr);
+ lid_ctl->attrs[1].attr.name = "lid_resume_S4";
+ lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
+
+ sysfs_attr_init(&lid_ctl->attrs[2].attr);
+ lid_ctl->attrs[2].attr.name = "lid_resume_S5";
+ lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
+
+ for (i = 0; i < 3; i++) {
+ result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
+ if (result)
+ goto liderror;
+ }
+
+ return 0;
+
+liderror:
+ for (; i > 0; i--)
+ device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+ kfree(lid_ctl);
+ lid_ctl = NULL;
+
+ return result;
+}
+
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
+{
+ int i;
+
+ if (lid_ctl) {
+ for (i = 0; i < 3; i++)
+ device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+ kfree(lid_ctl);
+ lid_ctl = NULL;
+ }
+}
+
+/* High speed charging function */
+static struct device_attribute *hsc_handle;
+
+static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0131, 0x0100, &result))
+ return -EIO;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
+ /* some models advertise the handle but have no implementation
+ * for it
+ */
+ pr_info("No High Speed Charging capability found\n");
+ return 0;
+ }
+
+ hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!hsc_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&hsc_handle->attr);
+ hsc_handle->attr.name = "battery_highspeed_charging";
+ hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
+ hsc_handle->show = sony_nc_highspeed_charging_show;
+ hsc_handle->store = sony_nc_highspeed_charging_store;
+
+ result = device_create_file(&pd->dev, hsc_handle);
+ if (result) {
+ kfree(hsc_handle);
+ hsc_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
+{
+ if (hsc_handle) {
+ device_remove_file(&pd->dev, hsc_handle);
+ kfree(hsc_handle);
+ hsc_handle = NULL;
+ }
+}
+
+/* Touchpad enable/disable */
+struct touchpad_control {
+ struct device_attribute attr;
+ int handle;
+};
+static struct touchpad_control *tp_ctl;
+
+static ssize_t sony_nc_touchpad_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ /* sysfs: 0 disabled, 1 enabled
+ * EC: 0 enabled, 1 disabled
+ */
+ if (sony_call_snc_handle(tp_ctl->handle,
+ (!value << 0x10) | 0x100, &result))
+ return -EIO;
+
+ return count;
+}
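
A small sketch, not part of the patch, of the argument encoding used by sony_nc_touchpad_store() above; the sysfs polarity is inverted before reaching the EC and the low word 0x100 selects the set operation.

static unsigned int touchpad_set_arg(unsigned long sysfs_value)
{
	/* sysfs: 1 = enabled; EC: 0 = enabled, so invert before shifting */
	return ((unsigned int)!sysfs_value << 0x10) | 0x100;
}

Writing 1 to the attribute therefore sends 0x00100, writing 0 sends 0x10100.
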
+
+static ssize_t sony_nc_touchpad_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
+ return -EINVAL;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
+}
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ int ret = 0;
+
+ tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
+ if (!tp_ctl)
+ return -ENOMEM;
+
+ tp_ctl->handle = handle;
+
+ sysfs_attr_init(&tp_ctl->attr.attr);
+ tp_ctl->attr.attr.name = "touchpad";
+ tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
+ tp_ctl->attr.show = sony_nc_touchpad_show;
+ tp_ctl->attr.store = sony_nc_touchpad_store;
+
+ ret = device_create_file(&pd->dev, &tp_ctl->attr);
+ if (ret) {
+ kfree(tp_ctl);
+ tp_ctl = NULL;
+ }
+
+ return ret;
+}
+
+static void sony_nc_touchpad_cleanup(struct platform_device *pd)
+{
+ if (tp_ctl) {
+ device_remove_file(&pd->dev, &tp_ctl->attr);
+ kfree(tp_ctl);
+ tp_ctl = NULL;
+ }
}
static void sony_nc_backlight_ng_read_limits(int handle,
struct sony_backlight_props *props)
{
- int offset;
- acpi_status status;
- u8 brlvl, i;
+ u64 offset;
+ int i;
+ int lvl_table_len = 0;
u8 min = 0xff, max = 0x00;
- struct acpi_object_list params;
- union acpi_object in_obj;
- union acpi_object *lvl_enum;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ unsigned char buffer[32] = { 0 };
props->handle = handle;
props->offset = 0;
props->maxlvl = 0xff;
offset = sony_find_snc_handle(handle);
- if (offset < 0)
- return;
/* try to read the boundaries from ACPI tables, if we fail the above
* defaults should be reasonable
*/
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = offset;
- status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
- &buffer);
- if (ACPI_FAILURE(status))
+ i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+ 32);
+ if (i < 0)
return;
- lvl_enum = (union acpi_object *) buffer.pointer;
- if (!lvl_enum) {
- pr_err("No SN06 return object.");
- return;
- }
- if (lvl_enum->type != ACPI_TYPE_BUFFER) {
- pr_err("Invalid SN06 return object 0x%.2x\n",
- lvl_enum->type);
- goto out_invalid;
+ switch (handle) {
+ case 0x012f:
+ case 0x0137:
+ lvl_table_len = 9;
+ break;
+ case 0x143:
+ lvl_table_len = 16;
+ break;
}
/* the buffer lists brightness levels available, brightness levels are
- * from 0 to 8 in the array, other values are used by ALS control.
+ * from position 0 to 8 in the array, other values are used by ALS
+ * control.
*/
- for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
+ for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {
- brlvl = *(lvl_enum->buffer.pointer + i);
- dprintk("Brightness level: %d\n", brlvl);
+ dprintk("Brightness level: %d\n", buffer[i]);
- if (!brlvl)
+ if (!buffer[i])
break;
- if (brlvl > max)
- max = brlvl;
- if (brlvl < min)
- min = brlvl;
+ if (buffer[i] > max)
+ max = buffer[i];
+ if (buffer[i] < min)
+ min = buffer[i];
}
props->offset = min;
props->maxlvl = max;
dprintk("Brightness levels: min=%d max=%d\n", props->offset,
props->maxlvl);
-
-out_invalid:
- kfree(buffer.pointer);
- return;
}
static void sony_nc_backlight_setup(void)
@@ -1636,16 +2555,24 @@ static void sony_nc_backlight_setup(void)
const struct backlight_ops *ops = NULL;
struct backlight_properties props;
- if (sony_find_snc_handle(0x12f) != -1) {
+ if (sony_find_snc_handle(0x12f) >= 0) {
ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x0100;
sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
- } else if (sony_find_snc_handle(0x137) != -1) {
+ } else if (sony_find_snc_handle(0x137) >= 0) {
ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x0100;
sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+ } else if (sony_find_snc_handle(0x143) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
&unused))) {
ops = &sony_backlight_ops;
@@ -1713,32 +2640,29 @@ static int sony_nc_add(struct acpi_device *device)
}
}
+ result = sony_laptop_setup_input(device);
+ if (result) {
+ pr_err("Unable to create input devices\n");
+ goto outplatform;
+ }
+
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
&handle))) {
- if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+ int arg = 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
&handle))) {
dprintk("Doing SNC setup\n");
+ /* retrieve the available handles */
result = sony_nc_handles_setup(sony_pf_device);
- if (result)
- goto outpresent;
- result = sony_nc_kbd_backlight_setup(sony_pf_device);
- if (result)
- goto outsnc;
- sony_nc_function_setup(device);
- sony_nc_rfkill_setup(device);
+ if (!result)
+ sony_nc_function_setup(device, sony_pf_device);
}
/* setup input devices and helper fifo */
- result = sony_laptop_setup_input(device);
- if (result) {
- pr_err("Unable to create input devices\n");
- goto outkbdbacklight;
- }
-
if (acpi_video_backlight_support()) {
pr_info("brightness ignored, must be controlled by ACPI video driver\n");
} else {
@@ -1786,24 +2710,21 @@ static int sony_nc_add(struct acpi_device *device)
return 0;
- out_sysfs:
+out_sysfs:
for (item = sony_nc_values; item->name; ++item) {
device_remove_file(&sony_pf_device->dev, &item->devattr);
}
sony_nc_backlight_cleanup();
+ sony_nc_function_cleanup(sony_pf_device);
+ sony_nc_handles_cleanup(sony_pf_device);
+outplatform:
sony_laptop_remove_input();
- outkbdbacklight:
- sony_nc_kbd_backlight_cleanup(sony_pf_device);
-
- outsnc:
- sony_nc_handles_cleanup(sony_pf_device);
-
- outpresent:
+outpresent:
sony_pf_remove();
- outwalk:
+outwalk:
sony_nc_rfkill_cleanup();
return result;
}
@@ -1820,11 +2741,10 @@ static int sony_nc_remove(struct acpi_device *device, int type)
device_remove_file(&sony_pf_device->dev, &item->devattr);
}
- sony_nc_kbd_backlight_cleanup(sony_pf_device);
+ sony_nc_function_cleanup(sony_pf_device);
sony_nc_handles_cleanup(sony_pf_device);
sony_pf_remove();
sony_laptop_remove_input();
- sony_nc_rfkill_cleanup();
dprintk(SONY_NC_DRIVER_NAME " removed.\n");
return 0;
@@ -2437,7 +3357,9 @@ static ssize_t sony_pic_wwanpower_store(struct device *dev,
if (count > 31)
return -EINVAL;
- value = simple_strtoul(buffer, NULL, 10);
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
mutex_lock(&spic_dev.lock);
__sony_pic_set_wwanpower(value);
mutex_unlock(&spic_dev.lock);
@@ -2474,7 +3396,9 @@ static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
if (count > 31)
return -EINVAL;
- value = simple_strtoul(buffer, NULL, 10);
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
mutex_lock(&spic_dev.lock);
__sony_pic_set_bluetoothpower(value);
mutex_unlock(&spic_dev.lock);
@@ -2513,7 +3437,9 @@ static ssize_t sony_pic_fanspeed_store(struct device *dev,
if (count > 31)
return -EINVAL;
- value = simple_strtoul(buffer, NULL, 10);
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
if (sony_pic_set_fanspeed(value))
return -EIO;
@@ -2671,7 +3597,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
ret = -EIO;
break;
}
- if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) {
+ if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL,
+ &value)) {
ret = -EIO;
break;
}
@@ -2688,8 +3615,9 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
ret = -EFAULT;
break;
}
- if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
- (val8 >> 5) + 1, NULL)) {
+ value = (val8 >> 5) + 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value,
+ NULL)) {
ret = -EIO;
break;
}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d68c0002f4a2..8b5610d88418 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3402,7 +3402,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Do not issue duplicate brightness change events to
* userspace. tpacpi_detect_brightness_capabilities() must have
* been called before this point */
- if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
+ if (acpi_video_backlight_support()) {
pr_info("This ThinkPad has standard ACPI backlight "
"brightness control, supported by the ACPI "
"video driver\n");
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ee79ce64d9df..dab10f6edcd4 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -95,6 +95,7 @@ MODULE_LICENSE("GPL");
/* registers */
#define HCI_FAN 0x0004
+#define HCI_TR_BACKLIGHT 0x0005
#define HCI_SYSTEM_EVENT 0x0016
#define HCI_VIDEO_OUT 0x001c
#define HCI_HOTKEY_EVENT 0x001e
@@ -134,6 +135,7 @@ struct toshiba_acpi_dev {
unsigned int system_event_supported:1;
unsigned int ntfy_supported:1;
unsigned int info_supported:1;
+ unsigned int tr_backlight_supported:1;
struct mutex mutex;
};
@@ -478,34 +480,70 @@ static const struct rfkill_ops toshiba_rfk_ops = {
.poll = bt_rfkill_poll,
};
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+{
+ u32 hci_result;
+ u32 status;
+
+ hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
+ *enabled = !status;
+ return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+{
+ u32 hci_result;
+ u32 value = !enable;
+
+ hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
+ return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
-static int get_lcd(struct backlight_device *bd)
+static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
{
- struct toshiba_acpi_dev *dev = bl_get_data(bd);
u32 hci_result;
u32 value;
+ int brightness = 0;
+
+ if (dev->tr_backlight_supported) {
+ bool enabled;
+ int ret = get_tr_backlight_status(dev, &enabled);
+ if (ret)
+ return ret;
+ if (enabled)
+ return 0;
+ brightness++;
+ }
hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
if (hci_result == HCI_SUCCESS)
- return (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+ return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
return -EIO;
}
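
A sketch (an assumption for illustration, not the driver's API) of the brightness scale used by __get_lcd_brightness() above and set_lcd_brightness() below when the transflective backlight is supported: user level 0 means transflective mode on, and levels 1..N map to HCI levels 0..N-1.

static int user_level_to_hci(int value, int tr_supported, int *tr_enable)
{
	*tr_enable = 0;

	if (!tr_supported)
		return value;

	if (value == 0) {
		*tr_enable = 1;		/* level 0 switches to transflective mode */
		return 0;
	}

	return value - 1;		/* remaining levels shift down by one */
}
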
+static int get_lcd_brightness(struct backlight_device *bd)
+{
+ struct toshiba_acpi_dev *dev = bl_get_data(bd);
+ return __get_lcd_brightness(dev);
+}
+
static int lcd_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
int value;
+ int levels;
if (!dev->backlight_dev)
return -ENODEV;
- value = get_lcd(dev->backlight_dev);
+ levels = dev->backlight_dev->props.max_brightness + 1;
+ value = get_lcd_brightness(dev->backlight_dev);
if (value >= 0) {
seq_printf(m, "brightness: %d\n", value);
- seq_printf(m, "brightness_levels: %d\n",
- HCI_LCD_BRIGHTNESS_LEVELS);
+ seq_printf(m, "brightness_levels: %d\n", levels);
return 0;
}
@@ -518,10 +556,19 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
return single_open(file, lcd_proc_show, PDE(inode)->data);
}
-static int set_lcd(struct toshiba_acpi_dev *dev, int value)
+static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
{
u32 hci_result;
+ if (dev->tr_backlight_supported) {
+ bool enable = !value;
+ int ret = set_tr_backlight_status(dev, enable);
+ if (ret)
+ return ret;
+ if (value)
+ value--;
+ }
+
value = value << HCI_LCD_BRIGHTNESS_SHIFT;
hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
return hci_result == HCI_SUCCESS ? 0 : -EIO;
@@ -530,7 +577,7 @@ static int set_lcd(struct toshiba_acpi_dev *dev, int value)
static int set_lcd_status(struct backlight_device *bd)
{
struct toshiba_acpi_dev *dev = bl_get_data(bd);
- return set_lcd(dev, bd->props.brightness);
+ return set_lcd_brightness(dev, bd->props.brightness);
}
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
@@ -541,6 +588,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
size_t len;
int value;
int ret;
+ int levels = dev->backlight_dev->props.max_brightness + 1;
len = min(count, sizeof(cmd) - 1);
if (copy_from_user(cmd, buf, len))
@@ -548,8 +596,8 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
cmd[len] = '\0';
if (sscanf(cmd, " brightness : %i", &value) == 1 &&
- value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
- ret = set_lcd(dev, value);
+ value >= 0 && value < levels) {
+ ret = set_lcd_brightness(dev, value);
if (ret == 0)
ret = count;
} else {
@@ -860,8 +908,9 @@ static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
}
static const struct backlight_ops toshiba_backlight_data = {
- .get_brightness = get_lcd,
- .update_status = set_lcd_status,
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = get_lcd_brightness,
+ .update_status = set_lcd_status,
};
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
@@ -1020,6 +1069,56 @@ static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
return error;
}
+static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+{
+ struct backlight_properties props;
+ int brightness;
+ int ret;
+ bool enabled;
+
+ /*
+ * Some machines don't support the backlight methods at all, and
+ * others support it read-only. Either of these is pretty useless,
+ * so only register the backlight device if the backlight method
+ * supports both reads and writes.
+ */
+ brightness = __get_lcd_brightness(dev);
+ if (brightness < 0)
+ return 0;
+ ret = set_lcd_brightness(dev, brightness);
+ if (ret) {
+ pr_debug("Backlight method is read-only, disabling backlight support\n");
+ return 0;
+ }
+
+ /* Determine whether or not BIOS supports transflective backlight */
+ ret = get_tr_backlight_status(dev, &enabled);
+ dev->tr_backlight_supported = !ret;
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
+
+ /* add an extra level so that 0 switches to transflective mode */
+ if (dev->tr_backlight_supported)
+ props.max_brightness++;
+
+ dev->backlight_dev = backlight_device_register("toshiba",
+ &dev->acpi_dev->dev,
+ dev,
+ &toshiba_backlight_data,
+ &props);
+ if (IS_ERR(dev->backlight_dev)) {
+ ret = PTR_ERR(dev->backlight_dev);
+ pr_err("Could not register toshiba backlight device\n");
+ dev->backlight_dev = NULL;
+ return ret;
+ }
+
+ dev->backlight_dev->props.brightness = brightness;
+ return 0;
+}
+
static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
@@ -1078,7 +1177,6 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
u32 dummy;
bool bt_present;
int ret = 0;
- struct backlight_properties props;
if (toshiba_acpi)
return -EBUSY;
@@ -1104,21 +1202,9 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
mutex_init(&dev->mutex);
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
- dev->backlight_dev = backlight_device_register("toshiba",
- &acpi_dev->dev,
- dev,
- &toshiba_backlight_data,
- &props);
- if (IS_ERR(dev->backlight_dev)) {
- ret = PTR_ERR(dev->backlight_dev);
-
- pr_err("Could not register toshiba backlight device\n");
- dev->backlight_dev = NULL;
+ ret = toshiba_acpi_setup_backlight(dev);
+ if (ret)
goto error;
- }
- dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev);
/* Register rfkill switch for Bluetooth */
if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index 41781ed8301c..b57ad8641480 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -15,15 +15,26 @@
#include <asm/olpc.h>
+static bool card_blocked;
+
static int rfkill_set_block(void *data, bool blocked)
{
unsigned char cmd;
+ int r;
+
+ if (blocked == card_blocked)
+ return 0;
+
if (blocked)
cmd = EC_WLAN_ENTER_RESET;
else
cmd = EC_WLAN_LEAVE_RESET;
- return olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+ r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+ if (r == 0)
+ card_blocked = blocked;
+
+ return r;
}
static const struct rfkill_ops rfkill_ops = {
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 99dc29f2f2f2..e3a3b4956f08 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,5 +1,5 @@
menuconfig POWER_SUPPLY
- tristate "Power supply class support"
+ bool "Power supply class support"
help
Say Y here to enable power supply class support. This allows
power supply (batteries, AC, USB) monitoring by userspace
@@ -77,7 +77,7 @@ config BATTERY_DS2780
Say Y here to enable support for batteries with ds2780 chip.
config BATTERY_DS2781
- tristate "2781 battery driver"
+ tristate "DS2781 battery driver"
depends on HAS_IOMEM
select W1
select W1_SLAVE_DS2781
@@ -181,14 +181,15 @@ config BATTERY_MAX17040
to operate with a single lithium cell
config BATTERY_MAX17042
- tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+ tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
depends on I2C
help
MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
in handheld and portable equipment. The MAX17042 is configured
to operate with a single lithium cell. MAX8997 and MAX8966 are
multi-function devices that include fuel gauages that are compatible
- with MAX17042.
+ with MAX17042. This driver also supports max17047/50 chips which are
+ improved versions of max17042.
config BATTERY_Z2
tristate "Z2 battery driver"
@@ -291,6 +292,7 @@ config CHARGER_MAX8998
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
+ select REGMAP_I2C
help
Say Y to include support for Summit Microelectronics SMB347
Battery Charger.
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index d8bb99394ac0..bba3ccac72fe 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -964,10 +964,15 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
{
int irq, i, ret = 0;
u8 val;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_btemp *di;
+
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
- struct ab8500_btemp *di =
- kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -977,7 +982,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
/* get btemp specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->btemp;
if (!di->pdata) {
dev_err(di->dev, "no btemp platform data supplied\n");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index e2b4accbec88..d2303d0b7c75 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -2534,10 +2534,15 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev)
static int __devinit ab8500_charger_probe(struct platform_device *pdev)
{
int irq, i, charger_status, ret = 0;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_charger *di;
- struct ab8500_charger *di =
- kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -2550,9 +2555,7 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
spin_lock_init(&di->usb_state.usb_lock);
/* get charger specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->charger;
-
if (!di->pdata) {
dev_err(di->dev, "no charger platform data supplied\n");
ret = -EINVAL;
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index c22f2f05657e..bf022255994c 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2446,10 +2446,15 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
{
int i, irq;
int ret = 0;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_fg *di;
+
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
- struct ab8500_fg *di =
- kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -2461,7 +2466,6 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
/* get fg specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->fg;
if (!di->pdata) {
dev_err(di->dev, "no fg platform data supplied\n");
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 222ccd872ac5..f5d6d379f2fb 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -451,7 +451,7 @@ static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
}
/*
- * Return the battery Voltage in milivolts
+ * Return the battery Voltage in millivolts
* Or < 0 if something fails.
*/
static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 9eca9f1ff0ea..86935ec18954 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -23,6 +23,16 @@
#include <linux/power/charger-manager.h>
#include <linux/regulator/consumer.h>
+static const char * const default_event_names[] = {
+ [CM_EVENT_UNKNOWN] = "Unknown",
+ [CM_EVENT_BATT_FULL] = "Battery Full",
+ [CM_EVENT_BATT_IN] = "Battery Inserted",
+ [CM_EVENT_BATT_OUT] = "Battery Pulled Out",
+ [CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
+ [CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
+ [CM_EVENT_OTHERS] = "Other battery events"
+};
+
/*
* Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
* delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -57,6 +67,12 @@ static bool cm_suspended;
static bool cm_rtc_set;
static unsigned long cm_suspend_duration_ms;
+/* About normal (not suspended) monitoring */
+static unsigned long polling_jiffy = ULONG_MAX; /* ULONG_MAX: no polling */
+static unsigned long next_polling; /* Next appointed polling time */
+static struct workqueue_struct *cm_wq; /* init at driver add */
+static struct delayed_work cm_monitor_work; /* init at driver add */
+
/* Global charger-manager description */
static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
@@ -71,6 +87,11 @@ static bool is_batt_present(struct charger_manager *cm)
int i, ret;
switch (cm->desc->battery_present) {
+ case CM_BATTERY_PRESENT:
+ present = true;
+ break;
+ case CM_NO_BATTERY:
+ break;
case CM_FUEL_GAUGE:
ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
POWER_SUPPLY_PROP_PRESENT, &val);
@@ -279,6 +300,26 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
}
/**
+ * try_charger_restart - Restart charging.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Restart charging by turning off and on the charger.
+ */
+static int try_charger_restart(struct charger_manager *cm)
+{
+ int err;
+
+ if (cm->emergency_stop)
+ return -EAGAIN;
+
+ err = try_charger_enable(cm, false);
+ if (err)
+ return err;
+
+ return try_charger_enable(cm, true);
+}
+
+/**
* uevent_notify - Let users know something has changed.
* @cm: the Charger Manager representing the battery.
* @event: the event string.
@@ -334,6 +375,46 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
}
/**
+ * fullbatt_vchk - Check voltage drop some time after the "FULL" event.
+ * @work: the work_struct appointing the function
+ *
+ * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
+ * charger_desc, Charger Manager checks voltage drop after the battery
+ * "FULL" event. It checks whether the voltage has dropped more than
+ * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkdrop_ms.
+ */
+static void fullbatt_vchk(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct charger_manager *cm = container_of(dwork,
+ struct charger_manager, fullbatt_vchk_work);
+ struct charger_desc *desc = cm->desc;
+ int batt_uV, err, diff;
+
+ /* remove the appointment for fullbatt_vchk */
+ cm->fullbatt_vchk_jiffies_at = 0;
+
+ if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+ return;
+
+ err = get_batt_uV(cm, &batt_uV);
+ if (err) {
+ dev_err(cm->dev, "%s: get_batt_uV error(%d).\n", __func__, err);
+ return;
+ }
+
+ diff = cm->fullbatt_vchk_uV;
+ diff -= batt_uV;
+
+ dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
+
+ if (diff > desc->fullbatt_vchkdrop_uV) {
+ try_charger_restart(cm);
+ uevent_notify(cm, "Recharge");
+ }
+}
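
A one-function sketch, not part of the patch, of the recharge decision fullbatt_vchk() makes above: recharge once the measured voltage has fallen more than fullbatt_vchkdrop_uV below the value recorded at the FULL event.

static int needs_recharge(int fullbatt_uV, int batt_uV, unsigned int drop_uV)
{
	/* drop_uV == 0 means the voltage-drop check is disabled */
	return drop_uV && (fullbatt_uV - batt_uV) > (int)drop_uV;
}
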
+
+/**
* _cm_monitor - Monitor the temperature and return true for exceptions.
* @cm: the Charger Manager representing the battery.
*
@@ -392,6 +473,131 @@ static bool cm_monitor(void)
return stop;
}
+/**
+ * _setup_polling - Setup the next instance of polling.
+ * @work: work_struct of the function _setup_polling.
+ */
+static void _setup_polling(struct work_struct *work)
+{
+ unsigned long min = ULONG_MAX;
+ struct charger_manager *cm;
+ bool keep_polling = false;
+ unsigned long _next_polling;
+
+ mutex_lock(&cm_list_mtx);
+
+ list_for_each_entry(cm, &cm_list, entry) {
+ if (is_polling_required(cm) && cm->desc->polling_interval_ms) {
+ keep_polling = true;
+
+ if (min > cm->desc->polling_interval_ms)
+ min = cm->desc->polling_interval_ms;
+ }
+ }
+
+ polling_jiffy = msecs_to_jiffies(min);
+ if (polling_jiffy <= CM_JIFFIES_SMALL)
+ polling_jiffy = CM_JIFFIES_SMALL + 1;
+
+ if (!keep_polling)
+ polling_jiffy = ULONG_MAX;
+ if (polling_jiffy == ULONG_MAX)
+ goto out;
+
+ WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
+ ". try it later. %s\n", __func__);
+
+ _next_polling = jiffies + polling_jiffy;
+
+ if (!delayed_work_pending(&cm_monitor_work) ||
+ (delayed_work_pending(&cm_monitor_work) &&
+ time_after(next_polling, _next_polling))) {
+ cancel_delayed_work_sync(&cm_monitor_work);
+ next_polling = jiffies + polling_jiffy;
+ queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+ }
+
+out:
+ mutex_unlock(&cm_list_mtx);
+}
+static DECLARE_WORK(setup_polling, _setup_polling);
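
A sketch, with a hypothetical helper name, of how _setup_polling() above picks the global polling period: the shortest polling_interval_ms among chargers that still need polling, clamped above CM_JIFFIES_SMALL, or ULONG_MAX when no charger needs polling.

static unsigned long pick_polling_jiffies(const unsigned long *interval_ms,
					  int n)
{
	unsigned long min_ms = ULONG_MAX;
	unsigned long jiffy;
	int i;

	for (i = 0; i < n; i++)
		if (interval_ms[i] && interval_ms[i] < min_ms)
			min_ms = interval_ms[i];

	if (min_ms == ULONG_MAX)
		return ULONG_MAX;	/* no charger needs polling */

	jiffy = msecs_to_jiffies(min_ms);
	if (jiffy <= CM_JIFFIES_SMALL)
		jiffy = CM_JIFFIES_SMALL + 1;

	return jiffy;
}
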
+
+/**
+ * cm_monitor_poller - The Monitor / Poller.
+ * @work: work_struct of the function cm_monitor_poller
+ *
+ * During non-suspended state, cm_monitor_poller is used to poll and monitor
+ * the batteries.
+ */
+static void cm_monitor_poller(struct work_struct *work)
+{
+ cm_monitor();
+ schedule_work(&setup_polling);
+}
+
+/**
+ * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
+ * @cm: the Charger Manager representing the battery.
+ */
+static void fullbatt_handler(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+
+ if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+ goto out;
+
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (delayed_work_pending(&cm->fullbatt_vchk_work))
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
+ queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+ msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+ cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
+ desc->fullbatt_vchkdrop_ms);
+
+ if (cm->fullbatt_vchk_jiffies_at == 0)
+ cm->fullbatt_vchk_jiffies_at = 1;
+
+out:
+ dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged.\n");
+ uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
+}
+
+/**
+ * battout_handler - Event handler for CM_EVENT_BATT_OUT
+ * @cm: the Charger Manager representing the battery.
+ */
+static void battout_handler(struct charger_manager *cm)
+{
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (!is_batt_present(cm)) {
+ dev_emerg(cm->dev, "Battery Pulled Out!\n");
+ uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
+ } else {
+ uevent_notify(cm, "Battery Reinserted?");
+ }
+}
+
+/**
+ * misc_event_handler - Handler for other events
+ * @cm: the Charger Manager representing the battery.
+ * @type: type of charger event
+ */
+static void misc_event_handler(struct charger_manager *cm,
+ enum cm_event_types type)
+{
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (!delayed_work_pending(&cm_monitor_work) &&
+ is_polling_required(cm) && cm->desc->polling_interval_ms)
+ schedule_work(&setup_polling);
+ uevent_notify(cm, default_event_names[type]);
+}
+
static int charger_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -613,6 +819,21 @@ static bool cm_setup_timer(void)
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
+ unsigned int fbchk_ms = 0;
+
+ /* fullbatt_vchk is required. setup timer for that */
+ if (cm->fullbatt_vchk_jiffies_at) {
+ fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
+ - jiffies);
+ if (time_is_before_eq_jiffies(
+ cm->fullbatt_vchk_jiffies_at) ||
+ msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
+ fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+ fbchk_ms = 0;
+ }
+ }
+ CM_MIN_VALID(wakeup_ms, fbchk_ms);
+
/* Skip if polling is not required for this CM */
if (!is_polling_required(cm) && !cm->emergency_stop)
continue;
@@ -672,6 +893,23 @@ static bool cm_setup_timer(void)
return false;
}
+static void _cm_fbchk_in_suspend(struct charger_manager *cm)
+{
+ unsigned long jiffy_now = jiffies;
+
+ if (!cm->fullbatt_vchk_jiffies_at)
+ return;
+
+ if (g_desc && g_desc->assume_timer_stops_in_suspend)
+ jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
+
+ /* Execute now if it is due to run shortly anyway */
+ jiffy_now += CM_JIFFIES_SMALL;
+
+ if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
+ fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+}
+
/**
* cm_suspend_again - Determine whether suspend again or not
*
@@ -693,6 +931,8 @@ bool cm_suspend_again(void)
ret = true;
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
+ _cm_fbchk_in_suspend(cm);
+
if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
cm->status_save_batt != is_batt_present(cm)) {
ret = false;
@@ -796,6 +1036,21 @@ static int charger_manager_probe(struct platform_device *pdev)
memcpy(cm->desc, desc, sizeof(struct charger_desc));
cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
+ /*
+ * The following two do not need to be errors.
+ * Users may intentionally ignore those two features.
+ */
+ if (desc->fullbatt_uV == 0) {
+ dev_info(&pdev->dev, "Ignoring full-battery voltage threshold"
+ " as it is not supplied.");
+ }
+ if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
+ dev_info(&pdev->dev, "Disabling full-battery voltage drop "
+ "checking mechanism as it is not supplied.");
+ desc->fullbatt_vchkdrop_ms = 0;
+ desc->fullbatt_vchkdrop_uV = 0;
+ }
+
if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
ret = -EINVAL;
dev_err(&pdev->dev, "charger_regulators undefined.\n");
@@ -903,6 +1158,8 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy.num_properties++;
}
+ INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
+
ret = power_supply_register(NULL, &cm->charger_psy);
if (ret) {
dev_err(&pdev->dev, "Cannot register charger-manager with"
@@ -928,6 +1185,15 @@ static int charger_manager_probe(struct platform_device *pdev)
list_add(&cm->entry, &cm_list);
mutex_unlock(&cm_list_mtx);
+ /*
+ * Charger-manager is capable of waking up the system from sleep
+ * when an event happens through cm_notify_event()
+ */
+ device_init_wakeup(&pdev->dev, true);
+ device_set_wakeup_capable(&pdev->dev, false);
+
+ schedule_work(&setup_polling);
+
return 0;
err_chg_enable:
@@ -958,9 +1224,17 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
list_del(&cm->entry);
mutex_unlock(&cm_list_mtx);
+ if (work_pending(&setup_polling))
+ cancel_work_sync(&setup_polling);
+ if (delayed_work_pending(&cm_monitor_work))
+ cancel_delayed_work_sync(&cm_monitor_work);
+
regulator_bulk_free(desc->num_charger_regulators,
desc->charger_regulators);
power_supply_unregister(&cm->charger_psy);
+
+ try_charger_enable(cm, false);
+
kfree(cm->charger_psy.properties);
kfree(cm->charger_stat);
kfree(cm->desc);
@@ -975,6 +1249,18 @@ static const struct platform_device_id charger_manager_id[] = {
};
MODULE_DEVICE_TABLE(platform, charger_manager_id);
+static int cm_suspend_noirq(struct device *dev)
+{
+ int ret = 0;
+
+ if (device_may_wakeup(dev)) {
+ device_set_wakeup_capable(dev, false);
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
static int cm_suspend_prepare(struct device *dev)
{
struct charger_manager *cm = dev_get_drvdata(dev);
@@ -1000,6 +1286,8 @@ static int cm_suspend_prepare(struct device *dev)
cm_suspended = true;
}
+ if (delayed_work_pending(&cm->fullbatt_vchk_work))
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
cm->status_save_batt = is_batt_present(cm);
@@ -1027,11 +1315,40 @@ static void cm_suspend_complete(struct device *dev)
cm_rtc_set = false;
}
+ /* Re-enqueue delayed work (fullbatt_vchk_work) */
+ if (cm->fullbatt_vchk_jiffies_at) {
+ unsigned long delay = 0;
+ unsigned long now = jiffies + CM_JIFFIES_SMALL;
+
+ if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
+ delay = (unsigned long)((long)now
+ - (long)(cm->fullbatt_vchk_jiffies_at));
+ delay = jiffies_to_msecs(delay);
+ } else {
+ delay = 0;
+ }
+
+ /*
+ * Account for cm_suspend_duration_ms if
+ * assume_timer_stops_in_suspend is active
+ */
+ if (g_desc && g_desc->assume_timer_stops_in_suspend) {
+ if (delay > cm_suspend_duration_ms)
+ delay -= cm_suspend_duration_ms;
+ else
+ delay = 0;
+ }
+
+ queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+ msecs_to_jiffies(delay));
+ }
+ device_set_wakeup_capable(cm->dev, false);
uevent_notify(cm, NULL);
}
static const struct dev_pm_ops charger_manager_pm = {
.prepare = cm_suspend_prepare,
+ .suspend_noirq = cm_suspend_noirq,
.complete = cm_suspend_complete,
};
@@ -1048,16 +1365,91 @@ static struct platform_driver charger_manager_driver = {
static int __init charger_manager_init(void)
{
+ cm_wq = create_freezable_workqueue("charger_manager");
+ INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
+
return platform_driver_register(&charger_manager_driver);
}
late_initcall(charger_manager_init);
static void __exit charger_manager_cleanup(void)
{
+ destroy_workqueue(cm_wq);
+ cm_wq = NULL;
+
platform_driver_unregister(&charger_manager_driver);
}
module_exit(charger_manager_cleanup);
+/**
+ * find_power_supply - find the associated power_supply of charger
+ * @cm: the Charger Manager representing the battery
+ * @psy: pointer to instance of charger's power_supply
+ */
+static bool find_power_supply(struct charger_manager *cm,
+ struct power_supply *psy)
+{
+ int i;
+ bool found = false;
+
+ for (i = 0; cm->charger_stat[i]; i++) {
+ if (psy == cm->charger_stat[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * cm_notify_event - let a charger driver notify Charger Manager of a charger event
+ * @psy: pointer to instance of charger's power_supply
+ * @type: type of charger event
+ * @msg: optional message passed to the uevent_notify function
+ */
+void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
+ char *msg)
+{
+ struct charger_manager *cm;
+ bool found_power_supply = false;
+
+ if (psy == NULL)
+ return;
+
+ mutex_lock(&cm_list_mtx);
+ list_for_each_entry(cm, &cm_list, entry) {
+ found_power_supply = find_power_supply(cm, psy);
+ if (found_power_supply)
+ break;
+ }
+ mutex_unlock(&cm_list_mtx);
+
+ if (!found_power_supply)
+ return;
+
+ switch (type) {
+ case CM_EVENT_BATT_FULL:
+ fullbatt_handler(cm);
+ break;
+ case CM_EVENT_BATT_OUT:
+ battout_handler(cm);
+ break;
+ case CM_EVENT_BATT_IN:
+ case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
+ misc_event_handler(cm, type);
+ break;
+ case CM_EVENT_UNKNOWN:
+ case CM_EVENT_OTHERS:
+ uevent_notify(cm, msg ? msg : default_event_names[type]);
+ break;
+ default:
+ dev_err(cm->dev, "%s type not specified.\n", __func__);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(cm_notify_event);
+
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("Charger Manager");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index ca0d653d0a7a..975684a40f15 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -643,9 +643,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK1_END -
- DS2781_EEPROM_BLOCK1_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
return ds2781_read_block(dev_info, buf,
DS2781_EEPROM_BLOCK1_START + off, count);
@@ -661,9 +659,7 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
int ret;
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK1_END -
- DS2781_EEPROM_BLOCK1_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
ret = ds2781_write(dev_info, buf,
DS2781_EEPROM_BLOCK1_START + off, count);
@@ -682,7 +678,7 @@ static struct bin_attribute ds2781_param_eeprom_bin_attr = {
.name = "param_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
- .size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1,
+ .size = DS2781_PARAM_EEPROM_SIZE,
.read = ds2781_read_param_eeprom_bin,
.write = ds2781_write_param_eeprom_bin,
};
@@ -696,9 +692,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK0_END -
- DS2781_EEPROM_BLOCK0_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
return ds2781_read_block(dev_info, buf,
DS2781_EEPROM_BLOCK0_START + off, count);
@@ -715,9 +709,7 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
int ret;
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK0_END -
- DS2781_EEPROM_BLOCK0_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
ret = ds2781_write(dev_info, buf,
DS2781_EEPROM_BLOCK0_START + off, count);
@@ -736,7 +728,7 @@ static struct bin_attribute ds2781_user_eeprom_bin_attr = {
.name = "user_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
- .size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1,
+ .size = DS2781_USER_EEPROM_SIZE,
.read = ds2781_read_user_eeprom_bin,
.write = ds2781_write_user_eeprom_bin,
};
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 39eb50f35f09..e5ccd2979773 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -474,13 +474,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
fail2:
power_supply_unregister(&isp->psy);
fail1:
+ isp1704_charger_set_power(isp, 0);
usb_put_transceiver(isp->phy);
fail0:
kfree(isp);
dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
- isp1704_charger_set_power(isp, 0);
return ret;
}
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 04620c2cb388..140788b309f8 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
@@ -61,9 +62,13 @@
#define dP_ACC_100 0x1900
#define dP_ACC_200 0x3200
+#define MAX17042_IC_VERSION 0x0092
+#define MAX17047_IC_VERSION 0x00AC /* same for max17050 */
+
struct max17042_chip {
struct i2c_client *client;
struct power_supply battery;
+ enum max170xx_chip_type chip_type;
struct max17042_platform_data *pdata;
struct work_struct work;
int init_complete;
@@ -105,6 +110,7 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_TEMP,
@@ -150,7 +156,10 @@ static int max17042_get_property(struct power_supply *psy,
val->intval *= 20000; /* Units of LSB = 20mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ if (chip->chip_type == MAX17042)
+ ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ else
+ ret = max17042_read_reg(chip->client, MAX17047_V_empty);
if (ret < 0)
return ret;
@@ -171,6 +180,13 @@ static int max17042_get_property(struct power_supply *psy,
val->intval = ret * 625 / 8;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 625 / 8;
+ break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
if (ret < 0)
@@ -325,11 +341,10 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
static int max17042_init_model(struct max17042_chip *chip)
{
int ret;
- int table_size =
- sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+ int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
u16 *temp_data;
- temp_data = kzalloc(table_size, GFP_KERNEL);
+ temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
return -ENOMEM;
@@ -354,12 +369,11 @@ static int max17042_init_model(struct max17042_chip *chip)
static int max17042_verify_model_lock(struct max17042_chip *chip)
{
int i;
- int table_size =
- sizeof(chip->pdata->config_data->cell_char_tbl);
+ int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
u16 *temp_data;
int ret = 0;
- temp_data = kzalloc(table_size, GFP_KERNEL);
+ temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
return -ENOMEM;
@@ -382,6 +396,9 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
max17042_write_reg(chip->client, MAX17042_FilterCFG,
config->filter_cfg);
max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+ if (chip->chip_type == MAX17047)
+ max17042_write_reg(chip->client, MAX17047_FullSOCThr,
+ config->full_soc_thresh);
}
static void max17042_write_custom_regs(struct max17042_chip *chip)
@@ -392,12 +409,23 @@ static void max17042_write_custom_regs(struct max17042_chip *chip)
config->rcomp0);
max17042_write_verify_reg(chip->client, MAX17042_TempCo,
config->tcompc0);
- max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
- config->kempty0);
max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
config->ichgt_term);
+ if (chip->chip_type == MAX17042) {
+ max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+ config->kempty0);
+ } else {
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
+ config->qrtbl00);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
+ config->qrtbl10);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
+ config->qrtbl20);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
+ config->qrtbl30);
+ }
}
static void max17042_update_capacity_regs(struct max17042_chip *chip)
@@ -453,6 +481,8 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
config->design_cap);
max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
config->fullcapnom);
+ /* Update SOC register with new SOC */
+ max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
}
/*
@@ -489,20 +519,28 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
- max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
+ if (chip->chip_type == MAX17042)
+ max17042_override_por(client, MAX17042_SOC_empty,
+ config->socempty);
max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
max17042_override_por(client, MAX17042_dQacc, config->dqacc);
max17042_override_por(client, MAX17042_dPacc, config->dpacc);
- max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ if (chip->chip_type == MAX17042)
+ max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ else
+ max17042_override_por(client, MAX17047_V_empty, config->vempty);
max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
max17042_override_por(client, MAX17042_FCTC, config->fctc);
max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
- max17042_override_por(client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
+ if (chip->chip_type) {
+ max17042_override_por(client, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_override_por(client, MAX17042_K_empty0,
+ config->kempty0);
+ }
}
static int max17042_init_chip(struct max17042_chip *chip)
@@ -659,7 +697,19 @@ static int __devinit max17042_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- chip->battery.name = "max17042_battery";
+ ret = max17042_read_reg(chip->client, MAX17042_DevName);
+ if (ret == MAX17042_IC_VERSION) {
+ dev_dbg(&client->dev, "chip type max17042 detected\n");
+ chip->chip_type = MAX17042;
+ } else if (ret == MAX17047_IC_VERSION) {
+ dev_dbg(&client->dev, "chip type max17047/50 detected\n");
+ chip->chip_type = MAX17047;
+ } else {
+ dev_err(&client->dev, "device version mismatch: %x\n", ret);
+ return -EIO;
+ }
+
+ chip->battery.name = "max170xx_battery";
chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
chip->battery.get_property = max17042_get_property;
chip->battery.properties = max17042_battery_props;
@@ -683,6 +733,12 @@ static int __devinit max17042_probe(struct i2c_client *client,
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
}
+ ret = power_supply_register(&client->dev, &chip->battery);
+ if (ret) {
+ dev_err(&client->dev, "failed: power supply register\n");
+ return ret;
+ }
+
if (client->irq) {
ret = request_threaded_irq(client->irq, NULL,
max17042_thread_handler,
@@ -693,13 +749,14 @@ static int __devinit max17042_probe(struct i2c_client *client,
reg |= CONFIG_ALRT_BIT_ENBL;
max17042_write_reg(client, MAX17042_CONFIG, reg);
max17042_set_soc_threshold(chip, 1);
- } else
+ } else {
+ client->irq = 0;
dev_err(&client->dev, "%s(): cannot get IRQ\n",
__func__);
+ }
}
reg = max17042_read_reg(chip->client, MAX17042_STATUS);
-
if (reg & STATUS_POR_BIT) {
INIT_WORK(&chip->work, max17042_init_worker);
schedule_work(&chip->work);
@@ -707,23 +764,65 @@ static int __devinit max17042_probe(struct i2c_client *client,
chip->init_complete = 1;
}
- ret = power_supply_register(&client->dev, &chip->battery);
- if (ret)
- dev_err(&client->dev, "failed: power supply register\n");
- return ret;
+ return 0;
}
static int __devexit max17042_remove(struct i2c_client *client)
{
struct max17042_chip *chip = i2c_get_clientdata(client);
+ if (client->irq)
+ free_irq(client->irq, chip);
power_supply_unregister(&chip->battery);
return 0;
}
+#ifdef CONFIG_PM
+static int max17042_suspend(struct device *dev)
+{
+ struct max17042_chip *chip = dev_get_drvdata(dev);
+
+ /*
+ * Disable the IRQ and enable irq_wake
+ * capability on the interrupt line.
+ */
+ if (chip->client->irq) {
+ disable_irq(chip->client->irq);
+ enable_irq_wake(chip->client->irq);
+ }
+
+ return 0;
+}
+
+static int max17042_resume(struct device *dev)
+{
+ struct max17042_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->client->irq) {
+ disable_irq_wake(chip->client->irq);
+ enable_irq(chip->client->irq);
+ /* re-program the SOC thresholds to 1% change */
+ max17042_set_soc_threshold(chip, 1);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops max17042_pm_ops = {
+ .suspend = max17042_suspend,
+ .resume = max17042_resume,
+};
+
+#define MAX17042_PM_OPS (&max17042_pm_ops)
+#else
+#define MAX17042_PM_OPS NULL
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id max17042_dt_match[] = {
{ .compatible = "maxim,max17042" },
+ { .compatible = "maxim,max17047" },
+ { .compatible = "maxim,max17050" },
{ },
};
MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -731,6 +830,8 @@ MODULE_DEVICE_TABLE(of, max17042_dt_match);
static const struct i2c_device_id max17042_id[] = {
{ "max17042", 0 },
+ { "max17047", 1 },
+ { "max17050", 2 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max17042_id);
@@ -739,6 +840,7 @@ static struct i2c_driver max17042_i2c_driver = {
.driver = {
.name = "max17042",
.of_match_table = of_match_ptr(max17042_dt_match),
+ .pm = MAX17042_PM_OPS,
},
.probe = max17042_probe,
.remove = __devexit_p(max17042_remove),
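With the new "max17047" and "max17050" entries in max17042_id[] (and the
matching DT compatibles), the same driver now binds under any of the three
names. A minimal board-file sketch, assuming I2C bus 1 and the 0x36 slave
address (both are assumptions for illustration, not taken from this patch):

#include <linux/i2c.h>

static struct i2c_board_info example_fuel_gauge __initdata = {
	I2C_BOARD_INFO("max17047", 0x36),
};

static int __init example_board_i2c_init(void)
{
	/* Bus number and address are board-specific. */
	return i2c_register_board_info(1, &example_fuel_gauge, 1);
}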
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 4368e7d61316..4150747f9186 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -146,6 +146,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(voltage_min_design),
POWER_SUPPLY_ATTR(voltage_now),
POWER_SUPPLY_ATTR(voltage_avg),
+ POWER_SUPPLY_ATTR(voltage_ocv),
POWER_SUPPLY_ATTR(current_max),
POWER_SUPPLY_ATTR(current_now),
POWER_SUPPLY_ATTR(current_avg),
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 06b659d91790..a5b6849d4123 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -89,7 +89,7 @@ static const struct chip_data {
[REG_CURRENT] =
SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
[REG_CAPACITY] =
- SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+ SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
[REG_REMAINING_CAPACITY] =
SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
[REG_REMAINING_CAPACITY_CHARGE] =
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
index ce1694d1a365..f8eedd8a676f 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/smb347-charger.c
@@ -11,7 +11,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/debugfs.h>
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,7 +21,7 @@
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/power/smb347-charger.h>
-#include <linux/seq_file.h>
+#include <linux/regmap.h>
/*
* Configuration registers. These are mirrored to volatile RAM and can be
@@ -39,6 +39,7 @@
#define CFG_CURRENT_LIMIT_DC_SHIFT 4
#define CFG_CURRENT_LIMIT_USB_MASK 0x0f
#define CFG_FLOAT_VOLTAGE 0x03
+#define CFG_FLOAT_VOLTAGE_FLOAT_MASK 0x3f
#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0
#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6
#define CFG_STAT 0x05
@@ -113,29 +114,31 @@
#define STAT_C_CHARGER_ERROR BIT(6)
#define STAT_E 0x3f
+#define SMB347_MAX_REGISTER 0x3f
+
/**
* struct smb347_charger - smb347 charger instance
* @lock: protects concurrent access to online variables
- * @client: pointer to i2c client
+ * @dev: pointer to device
+ * @regmap: pointer to driver regmap
* @mains: power_supply instance for AC/DC power
* @usb: power_supply instance for USB power
* @battery: power_supply instance for battery
* @mains_online: is AC/DC input connected
* @usb_online: is USB input connected
* @charging_enabled: is charging enabled
- * @dentry: for debugfs
* @pdata: pointer to platform data
*/
struct smb347_charger {
struct mutex lock;
- struct i2c_client *client;
+ struct device *dev;
+ struct regmap *regmap;
struct power_supply mains;
struct power_supply usb;
struct power_supply battery;
bool mains_online;
bool usb_online;
bool charging_enabled;
- struct dentry *dentry;
const struct smb347_charger_platform_data *pdata;
};
@@ -193,14 +196,6 @@ static const unsigned int ccc_tbl[] = {
1200000,
};
-/* Convert register value to current using lookup table */
-static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
-{
- if (val >= size)
- return -EINVAL;
- return tbl[val];
-}
-
/* Convert current to register value using lookup table */
static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
{
@@ -212,43 +207,22 @@ static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
return i > 0 ? i - 1 : -EINVAL;
}
-static int smb347_read(struct smb347_charger *smb, u8 reg)
-{
- int ret;
-
- ret = i2c_smbus_read_byte_data(smb->client, reg);
- if (ret < 0)
- dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
- reg, ret);
- return ret;
-}
-
-static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
-{
- int ret;
-
- ret = i2c_smbus_write_byte_data(smb->client, reg, val);
- if (ret < 0)
- dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
- reg, ret);
- return ret;
-}
-
/**
- * smb347_update_status - updates the charging status
+ * smb347_update_ps_status - refreshes the power source status
* @smb: pointer to smb347 charger instance
*
- * Function checks status of the charging and updates internal state
- * accordingly. Returns %0 if there is no change in status, %1 if the
- * status has changed and negative errno in case of failure.
+ * Function checks whether any power source is connected to the charger and
+ * updates internal state accordingly. Returns %1 if the state has changed
+ * from the previous one, %0 if it has not, and a negative errno on error.
*/
-static int smb347_update_status(struct smb347_charger *smb)
+static int smb347_update_ps_status(struct smb347_charger *smb)
{
bool usb = false;
bool dc = false;
+ unsigned int val;
int ret;
- ret = smb347_read(smb, IRQSTAT_E);
+ ret = regmap_read(smb->regmap, IRQSTAT_E, &val);
if (ret < 0)
return ret;
@@ -257,9 +231,9 @@ static int smb347_update_status(struct smb347_charger *smb)
* platform data _and_ whether corresponding undervoltage is set.
*/
if (smb->pdata->use_mains)
- dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
+ dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
if (smb->pdata->use_usb)
- usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
+ usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
mutex_lock(&smb->lock);
ret = smb->mains_online != dc || smb->usb_online != usb;
@@ -271,15 +245,15 @@ static int smb347_update_status(struct smb347_charger *smb)
}
/*
- * smb347_is_online - returns whether input power source is connected
+ * smb347_is_ps_online - returns whether input power source is connected
* @smb: pointer to smb347 charger instance
*
* Returns %true if input power source is connected. Note that this is
* dependent on what platform has configured for usable power sources. For
- * example if USB is disabled, this will return %false even if the USB
- * cable is connected.
+ * example if USB is disabled, this will return %false even if the USB cable
+ * is connected.
*/
-static bool smb347_is_online(struct smb347_charger *smb)
+static bool smb347_is_ps_online(struct smb347_charger *smb)
{
bool ret;
@@ -299,16 +273,17 @@ static bool smb347_is_online(struct smb347_charger *smb)
*/
static int smb347_charging_status(struct smb347_charger *smb)
{
+ unsigned int val;
int ret;
- if (!smb347_is_online(smb))
+ if (!smb347_is_ps_online(smb))
return 0;
- ret = smb347_read(smb, STAT_C);
+ ret = regmap_read(smb->regmap, STAT_C, &val);
if (ret < 0)
return 0;
- return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
+ return (val & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
}
static int smb347_charging_set(struct smb347_charger *smb, bool enable)
@@ -316,27 +291,17 @@ static int smb347_charging_set(struct smb347_charger *smb, bool enable)
int ret = 0;
if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
- dev_dbg(&smb->client->dev,
- "charging enable/disable in SW disabled\n");
+ dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
return 0;
}
mutex_lock(&smb->lock);
if (smb->charging_enabled != enable) {
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- goto out;
-
- smb->charging_enabled = enable;
-
- if (enable)
- ret |= CMD_A_CHG_ENABLED;
- else
- ret &= ~CMD_A_CHG_ENABLED;
-
- ret = smb347_write(smb, CMD_A, ret);
+ ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+ enable ? CMD_A_CHG_ENABLED : 0);
+ if (!ret)
+ smb->charging_enabled = enable;
}
-out:
mutex_unlock(&smb->lock);
return ret;
}
@@ -351,7 +316,7 @@ static inline int smb347_charging_disable(struct smb347_charger *smb)
return smb347_charging_set(smb, false);
}
-static int smb347_update_online(struct smb347_charger *smb)
+static int smb347_start_stop_charging(struct smb347_charger *smb)
{
int ret;
@@ -360,16 +325,14 @@ static int smb347_update_online(struct smb347_charger *smb)
* disable or enable the charging. We do it manually because it
* depends on how the platform has configured the valid inputs.
*/
- if (smb347_is_online(smb)) {
+ if (smb347_is_ps_online(smb)) {
ret = smb347_charging_enable(smb);
if (ret < 0)
- dev_err(&smb->client->dev,
- "failed to enable charging\n");
+ dev_err(smb->dev, "failed to enable charging\n");
} else {
ret = smb347_charging_disable(smb);
if (ret < 0)
- dev_err(&smb->client->dev,
- "failed to disable charging\n");
+ dev_err(smb->dev, "failed to disable charging\n");
}
return ret;
@@ -377,112 +340,120 @@ static int smb347_update_online(struct smb347_charger *smb)
static int smb347_set_charge_current(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_CHARGE_CURRENT);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->max_charge_current) {
- val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
+ ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
smb->pdata->max_charge_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
- ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_FCC_MASK,
+ ret << CFG_CHARGE_CURRENT_FCC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->pre_charge_current) {
- val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
+ ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
smb->pdata->pre_charge_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
- ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_PCC_MASK,
+ ret << CFG_CHARGE_CURRENT_PCC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->termination_current) {
- val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
+ ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
smb->pdata->termination_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_TC_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
+ return 0;
}
static int smb347_set_current_limits(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_CURRENT_LIMIT);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->mains_current_limit) {
- val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+ ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
smb->pdata->mains_current_limit);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
- ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+ CFG_CURRENT_LIMIT_DC_MASK,
+ ret << CFG_CURRENT_LIMIT_DC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->usb_hc_current_limit) {
- val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+ ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
smb->pdata->usb_hc_current_limit);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+ CFG_CURRENT_LIMIT_USB_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
+ return 0;
}
static int smb347_set_voltage_limits(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->pre_to_fast_voltage) {
- val = smb->pdata->pre_to_fast_voltage;
+ ret = smb->pdata->pre_to_fast_voltage;
/* uV */
- val = clamp_val(val, 2400000, 3000000) - 2400000;
- val /= 200000;
+ ret = clamp_val(ret, 2400000, 3000000) - 2400000;
+ ret /= 200000;
- ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
- ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+ CFG_FLOAT_VOLTAGE_THRESHOLD_MASK,
+ ret << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->max_charge_voltage) {
- val = smb->pdata->max_charge_voltage;
+ ret = smb->pdata->max_charge_voltage;
/* uV */
- val = clamp_val(val, 3500000, 4500000) - 3500000;
- val /= 20000;
+ ret = clamp_val(ret, 3500000, 4500000) - 3500000;
+ ret /= 20000;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+ CFG_FLOAT_VOLTAGE_FLOAT_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
+ return 0;
}
static int smb347_set_temp_limits(struct smb347_charger *smb)
{
bool enable_therm_monitor = false;
- int ret, val;
+ int ret = 0;
+ int val;
if (smb->pdata->chip_temp_threshold) {
val = smb->pdata->chip_temp_threshold;
@@ -491,22 +462,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 100, 130) - 100;
val /= 10;
- ret = smb347_read(smb, CFG_OTG);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
- ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
-
- ret = smb347_write(smb, CFG_OTG, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_OTG,
+ CFG_OTG_TEMP_THRESHOLD_MASK,
+ val << CFG_OTG_TEMP_THRESHOLD_SHIFT);
if (ret < 0)
return ret;
}
- ret = smb347_read(smb, CFG_TEMP_LIMIT);
- if (ret < 0)
- return ret;
-
if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
val = smb->pdata->soft_cold_temp_limit;
@@ -515,8 +477,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
/* this goes from higher to lower so invert the value */
val = ~val & 0x3;
- ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
- ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_SOFT_COLD_MASK,
+ val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -527,8 +492,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 40, 55) - 40;
val /= 5;
- ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
- ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_SOFT_HOT_MASK,
+ val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -541,8 +509,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
/* this goes from higher to lower so invert the value */
val = ~val & 0x3;
- ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
- ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_HARD_COLD_MASK,
+ val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -553,16 +524,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 50, 65) - 50;
val /= 5;
- ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
- ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_HARD_HOT_MASK,
+ val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
- ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
- if (ret < 0)
- return ret;
-
/*
* If any of the temperature limits are set, we also enable the
* thermistor monitoring.
@@ -574,25 +544,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
* depending on the configuration.
*/
if (enable_therm_monitor) {
- ret = smb347_read(smb, CFG_THERM);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_THERM_MONITOR_DISABLED;
-
- ret = smb347_write(smb, CFG_THERM, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_MONITOR_DISABLED, 0);
if (ret < 0)
return ret;
}
if (smb->pdata->suspend_on_hard_temp_limit) {
- ret = smb347_read(smb, CFG_SYSOK);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
-
- ret = smb347_write(smb, CFG_SYSOK, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+ CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
if (ret < 0)
return ret;
}
@@ -601,17 +561,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
val = smb->pdata->soft_temp_limit_compensation & 0x3;
- ret = smb347_read(smb, CFG_THERM);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
+ val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
- ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
- ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;
-
- ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
- ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
-
- ret = smb347_write(smb, CFG_THERM, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_SOFT_COLD_COMPENSATION_MASK,
+ val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
}
@@ -622,14 +580,9 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
if (val < 0)
return val;
- ret = smb347_read(smb, CFG_OTG);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
- ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
-
- ret = smb347_write(smb, CFG_OTG, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_OTG,
+ CFG_OTG_CC_COMPENSATION_MASK,
+ (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
}
@@ -648,22 +601,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
*/
static int smb347_set_writable(struct smb347_charger *smb, bool writable)
{
- int ret;
-
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- return ret;
-
- if (writable)
- ret |= CMD_A_ALLOW_WRITE;
- else
- ret &= ~CMD_A_ALLOW_WRITE;
-
- return smb347_write(smb, CMD_A, ret);
+ return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+ writable ? CMD_A_ALLOW_WRITE : 0);
}
static int smb347_hw_init(struct smb347_charger *smb)
{
+ unsigned int val;
int ret;
ret = smb347_set_writable(smb, true);
@@ -692,34 +636,19 @@ static int smb347_hw_init(struct smb347_charger *smb)
/* If USB charging is disabled we put the USB in suspend mode */
if (!smb->pdata->use_usb) {
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- goto fail;
-
- ret |= CMD_A_SUSPEND_ENABLED;
-
- ret = smb347_write(smb, CMD_A, ret);
+ ret = regmap_update_bits(smb->regmap, CMD_A,
+ CMD_A_SUSPEND_ENABLED,
+ CMD_A_SUSPEND_ENABLED);
if (ret < 0)
goto fail;
}
- ret = smb347_read(smb, CFG_OTHER);
- if (ret < 0)
- goto fail;
-
/*
* If configured by platform data, we enable hardware Auto-OTG
* support for driving VBUS. Otherwise we disable it.
*/
- ret &= ~CFG_OTHER_RID_MASK;
- if (smb->pdata->use_usb_otg)
- ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
-
- ret = smb347_write(smb, CFG_OTHER, ret);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
+ ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
+ smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
if (ret < 0)
goto fail;
@@ -728,32 +657,33 @@ static int smb347_hw_init(struct smb347_charger *smb)
* command register unless pin control is specified in the platform
* data.
*/
- ret &= ~CFG_PIN_EN_CTRL_MASK;
-
switch (smb->pdata->enable_control) {
- case SMB347_CHG_ENABLE_SW:
- /* Do nothing, 0 means i2c control */
- break;
case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
- ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
+ val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
break;
case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
- ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+ val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+ break;
+ default:
+ val = 0;
break;
}
- /* Disable Automatic Power Source Detection (APSD) interrupt. */
- ret &= ~CFG_PIN_EN_APSD_IRQ;
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL_MASK,
+ val);
+ if (ret < 0)
+ goto fail;
- ret = smb347_write(smb, CFG_PIN, ret);
+ /* Disable Automatic Power Source Detection (APSD) interrupt. */
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_APSD_IRQ, 0);
if (ret < 0)
goto fail;
- ret = smb347_update_status(smb);
+ ret = smb347_update_ps_status(smb);
if (ret < 0)
goto fail;
- ret = smb347_update_online(smb);
+ ret = smb347_start_stop_charging(smb);
fail:
smb347_set_writable(smb, false);
@@ -763,24 +693,25 @@ fail:
static irqreturn_t smb347_interrupt(int irq, void *data)
{
struct smb347_charger *smb = data;
- int stat_c, irqstat_e, irqstat_c;
- irqreturn_t ret = IRQ_NONE;
+ unsigned int stat_c, irqstat_e, irqstat_c;
+ bool handled = false;
+ int ret;
- stat_c = smb347_read(smb, STAT_C);
- if (stat_c < 0) {
- dev_warn(&smb->client->dev, "reading STAT_C failed\n");
+ ret = regmap_read(smb->regmap, STAT_C, &stat_c);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading STAT_C failed\n");
return IRQ_NONE;
}
- irqstat_c = smb347_read(smb, IRQSTAT_C);
- if (irqstat_c < 0) {
- dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
+ ret = regmap_read(smb->regmap, IRQSTAT_C, &irqstat_c);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading IRQSTAT_C failed\n");
return IRQ_NONE;
}
- irqstat_e = smb347_read(smb, IRQSTAT_E);
- if (irqstat_e < 0) {
- dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
+ ret = regmap_read(smb->regmap, IRQSTAT_E, &irqstat_e);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading IRQSTAT_E failed\n");
return IRQ_NONE;
}
@@ -789,13 +720,11 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
* disable charging.
*/
if (stat_c & STAT_C_CHARGER_ERROR) {
- dev_err(&smb->client->dev,
- "error in charger, disabling charging\n");
+ dev_err(smb->dev, "error in charger, disabling charging\n");
smb347_charging_disable(smb);
power_supply_changed(&smb->battery);
-
- ret = IRQ_HANDLED;
+ handled = true;
}
/*
@@ -806,7 +735,7 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
power_supply_changed(&smb->battery);
- ret = IRQ_HANDLED;
+ handled = true;
}
/*
@@ -814,15 +743,17 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
* was connected or disconnected.
*/
if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
- if (smb347_update_status(smb) > 0) {
- smb347_update_online(smb);
- power_supply_changed(&smb->mains);
- power_supply_changed(&smb->usb);
+ if (smb347_update_ps_status(smb) > 0) {
+ smb347_start_stop_charging(smb);
+ if (smb->pdata->use_mains)
+ power_supply_changed(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_changed(&smb->usb);
}
- ret = IRQ_HANDLED;
+ handled = true;
}
- return ret;
+ return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int smb347_irq_set(struct smb347_charger *smb, bool enable)
@@ -839,41 +770,18 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
* - termination current reached
* - charger error
*/
- if (enable) {
- ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
- if (ret < 0)
- goto fail;
-
- ret = smb347_write(smb, CFG_STATUS_IRQ,
- CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
- if (ret < 0)
- goto fail;
-
- ret |= CFG_PIN_EN_CHARGER_ERROR;
-
- ret = smb347_write(smb, CFG_PIN, ret);
- } else {
- ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
- if (ret < 0)
- goto fail;
-
- ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
- if (ret < 0)
- goto fail;
-
- ret &= ~CFG_PIN_EN_CHARGER_ERROR;
+ ret = regmap_update_bits(smb->regmap, CFG_FAULT_IRQ, 0xff,
+ enable ? CFG_FAULT_IRQ_DCIN_UV : 0);
+ if (ret < 0)
+ goto fail;
- ret = smb347_write(smb, CFG_PIN, ret);
- }
+ ret = regmap_update_bits(smb->regmap, CFG_STATUS_IRQ, 0xff,
+ enable ? CFG_STATUS_IRQ_TERMINATION_OR_TAPER : 0);
+ if (ret < 0)
+ goto fail;
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
+ enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
fail:
smb347_set_writable(smb, false);
return ret;
@@ -889,18 +797,18 @@ static inline int smb347_irq_disable(struct smb347_charger *smb)
return smb347_irq_set(smb, false);
}
-static int smb347_irq_init(struct smb347_charger *smb)
+static int smb347_irq_init(struct smb347_charger *smb,
+ struct i2c_client *client)
{
const struct smb347_charger_platform_data *pdata = smb->pdata;
int ret, irq = gpio_to_irq(pdata->irq_gpio);
- ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
+ ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
if (ret < 0)
goto fail;
ret = request_threaded_irq(irq, NULL, smb347_interrupt,
- IRQF_TRIGGER_FALLING, smb->client->name,
- smb);
+ IRQF_TRIGGER_FALLING, client->name, smb);
if (ret < 0)
goto fail_gpio;
@@ -912,23 +820,14 @@ static int smb347_irq_init(struct smb347_charger *smb)
* Configure the STAT output to be suitable for interrupts: disable
* all other output (except interrupts) and make it active low.
*/
- ret = smb347_read(smb, CFG_STAT);
- if (ret < 0)
- goto fail_readonly;
-
- ret &= ~CFG_STAT_ACTIVE_HIGH;
- ret |= CFG_STAT_DISABLED;
-
- ret = smb347_write(smb, CFG_STAT, ret);
- if (ret < 0)
- goto fail_readonly;
-
- ret = smb347_irq_enable(smb);
+ ret = regmap_update_bits(smb->regmap, CFG_STAT,
+ CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
+ CFG_STAT_DISABLED);
if (ret < 0)
goto fail_readonly;
smb347_set_writable(smb, false);
- smb->client->irq = irq;
+ client->irq = irq;
return 0;
fail_readonly:
@@ -938,7 +837,7 @@ fail_irq:
fail_gpio:
gpio_free(pdata->irq_gpio);
fail:
- smb->client->irq = 0;
+ client->irq = 0;
return ret;
}
@@ -987,13 +886,13 @@ static int smb347_battery_get_property(struct power_supply *psy,
const struct smb347_charger_platform_data *pdata = smb->pdata;
int ret;
- ret = smb347_update_status(smb);
+ ret = smb347_update_ps_status(smb);
if (ret < 0)
return ret;
switch (prop) {
case POWER_SUPPLY_PROP_STATUS:
- if (!smb347_is_online(smb)) {
+ if (!smb347_is_ps_online(smb)) {
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
}
@@ -1004,7 +903,7 @@ static int smb347_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- if (!smb347_is_online(smb))
+ if (!smb347_is_ps_online(smb))
return -ENODATA;
/*
@@ -1036,44 +935,6 @@ static int smb347_battery_get_property(struct power_supply *psy,
val->intval = pdata->battery_info.voltage_max_design;
break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- if (!smb347_is_online(smb))
- return -ENODATA;
- ret = smb347_read(smb, STAT_A);
- if (ret < 0)
- return ret;
-
- ret &= STAT_A_FLOAT_VOLTAGE_MASK;
- if (ret > 0x3d)
- ret = 0x3d;
-
- val->intval = 3500000 + ret * 20000;
- break;
-
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- if (!smb347_is_online(smb))
- return -ENODATA;
-
- ret = smb347_read(smb, STAT_B);
- if (ret < 0)
- return ret;
-
- /*
- * The current value is composition of FCC and PCC values
- * and we can detect which table to use from bit 5.
- */
- if (ret & 0x20) {
- val->intval = hw_to_current(fcc_tbl,
- ARRAY_SIZE(fcc_tbl),
- ret & 7);
- } else {
- ret >>= 3;
- val->intval = hw_to_current(pcc_tbl,
- ARRAY_SIZE(pcc_tbl),
- ret & 7);
- }
- break;
-
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
val->intval = pdata->battery_info.charge_full_design;
break;
@@ -1095,64 +956,58 @@ static enum power_supply_property smb347_battery_properties[] = {
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_MODEL_NAME,
};
-static int smb347_debugfs_show(struct seq_file *s, void *data)
+static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
{
- struct smb347_charger *smb = s->private;
- int ret;
- u8 reg;
-
- seq_printf(s, "Control registers:\n");
- seq_printf(s, "==================\n");
- for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
- }
- seq_printf(s, "\n");
-
- seq_printf(s, "Command registers:\n");
- seq_printf(s, "==================\n");
- ret = smb347_read(smb, CMD_A);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
- ret = smb347_read(smb, CMD_B);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
- ret = smb347_read(smb, CMD_C);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
- seq_printf(s, "\n");
-
- seq_printf(s, "Interrupt status registers:\n");
- seq_printf(s, "===========================\n");
- for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
- }
- seq_printf(s, "\n");
-
- seq_printf(s, "Status registers:\n");
- seq_printf(s, "=================\n");
- for (reg = STAT_A; reg <= STAT_E; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+ switch (reg) {
+ case IRQSTAT_A:
+ case IRQSTAT_C:
+ case IRQSTAT_E:
+ case IRQSTAT_F:
+ case STAT_A:
+ case STAT_B:
+ case STAT_C:
+ case STAT_E:
+ return true;
}
- return 0;
+ return false;
}
-static int smb347_debugfs_open(struct inode *inode, struct file *file)
+static bool smb347_readable_reg(struct device *dev, unsigned int reg)
{
- return single_open(file, smb347_debugfs_show, inode->i_private);
+ switch (reg) {
+ case CFG_CHARGE_CURRENT:
+ case CFG_CURRENT_LIMIT:
+ case CFG_FLOAT_VOLTAGE:
+ case CFG_STAT:
+ case CFG_PIN:
+ case CFG_THERM:
+ case CFG_SYSOK:
+ case CFG_OTHER:
+ case CFG_OTG:
+ case CFG_TEMP_LIMIT:
+ case CFG_FAULT_IRQ:
+ case CFG_STATUS_IRQ:
+ case CFG_ADDRESS:
+ case CMD_A:
+ case CMD_B:
+ case CMD_C:
+ return true;
+ }
+
+ return smb347_volatile_reg(dev, reg);
}
-static const struct file_operations smb347_debugfs_fops = {
- .open = smb347_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct regmap_config smb347_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SMB347_MAX_REGISTER,
+ .volatile_reg = smb347_volatile_reg,
+ .readable_reg = smb347_readable_reg,
};
static int smb347_probe(struct i2c_client *client,
@@ -1178,28 +1033,45 @@ static int smb347_probe(struct i2c_client *client,
i2c_set_clientdata(client, smb);
mutex_init(&smb->lock);
- smb->client = client;
+ smb->dev = &client->dev;
smb->pdata = pdata;
+ smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
+ if (IS_ERR(smb->regmap))
+ return PTR_ERR(smb->regmap);
+
ret = smb347_hw_init(smb);
if (ret < 0)
return ret;
- smb->mains.name = "smb347-mains";
- smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
- smb->mains.get_property = smb347_mains_get_property;
- smb->mains.properties = smb347_mains_properties;
- smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
- smb->mains.supplied_to = battery;
- smb->mains.num_supplicants = ARRAY_SIZE(battery);
-
- smb->usb.name = "smb347-usb";
- smb->usb.type = POWER_SUPPLY_TYPE_USB;
- smb->usb.get_property = smb347_usb_get_property;
- smb->usb.properties = smb347_usb_properties;
- smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
- smb->usb.supplied_to = battery;
- smb->usb.num_supplicants = ARRAY_SIZE(battery);
+ if (smb->pdata->use_mains) {
+ smb->mains.name = "smb347-mains";
+ smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
+ smb->mains.get_property = smb347_mains_get_property;
+ smb->mains.properties = smb347_mains_properties;
+ smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
+ smb->mains.supplied_to = battery;
+ smb->mains.num_supplicants = ARRAY_SIZE(battery);
+ ret = power_supply_register(dev, &smb->mains);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (smb->pdata->use_usb) {
+ smb->usb.name = "smb347-usb";
+ smb->usb.type = POWER_SUPPLY_TYPE_USB;
+ smb->usb.get_property = smb347_usb_get_property;
+ smb->usb.properties = smb347_usb_properties;
+ smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
+ smb->usb.supplied_to = battery;
+ smb->usb.num_supplicants = ARRAY_SIZE(battery);
+ ret = power_supply_register(dev, &smb->usb);
+ if (ret < 0) {
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
+ return ret;
+ }
+ }
smb->battery.name = "smb347-battery";
smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1207,20 +1079,13 @@ static int smb347_probe(struct i2c_client *client,
smb->battery.properties = smb347_battery_properties;
smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
- ret = power_supply_register(dev, &smb->mains);
- if (ret < 0)
- return ret;
-
- ret = power_supply_register(dev, &smb->usb);
- if (ret < 0) {
- power_supply_unregister(&smb->mains);
- return ret;
- }
ret = power_supply_register(dev, &smb->battery);
if (ret < 0) {
- power_supply_unregister(&smb->usb);
- power_supply_unregister(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_unregister(&smb->usb);
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
return ret;
}
@@ -1229,15 +1094,15 @@ static int smb347_probe(struct i2c_client *client,
* interrupt support here.
*/
if (pdata->irq_gpio >= 0) {
- ret = smb347_irq_init(smb);
+ ret = smb347_irq_init(smb, client);
if (ret < 0) {
dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
dev_warn(dev, "disabling IRQ support\n");
+ } else {
+ smb347_irq_enable(smb);
}
}
- smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
- &smb347_debugfs_fops);
return 0;
}
@@ -1245,9 +1110,6 @@ static int smb347_remove(struct i2c_client *client)
{
struct smb347_charger *smb = i2c_get_clientdata(client);
- if (!IS_ERR_OR_NULL(smb->dentry))
- debugfs_remove(smb->dentry);
-
if (client->irq) {
smb347_irq_disable(smb);
free_irq(client->irq, smb);
@@ -1255,8 +1117,10 @@ static int smb347_remove(struct i2c_client *client)
}
power_supply_unregister(&smb->battery);
- power_supply_unregister(&smb->usb);
- power_supply_unregister(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_unregister(&smb->usb);
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
return 0;
}
@@ -1275,17 +1139,7 @@ static struct i2c_driver smb347_driver = {
.id_table = smb347_id,
};
-static int __init smb347_init(void)
-{
- return i2c_add_driver(&smb347_driver);
-}
-module_init(smb347_init);
-
-static void __exit smb347_exit(void)
-{
- i2c_del_driver(&smb347_driver);
-}
-module_exit(smb347_exit);
+module_i2c_driver(smb347_driver);
MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 987332b71d8d..fc1ad9551182 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -565,7 +565,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
goto err_usb;
}
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
IRQF_TRIGGER_RISING, "System power low",
power);
@@ -575,7 +575,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
goto err_battery;
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
IRQF_TRIGGER_RISING, "Power source",
power);
@@ -586,7 +586,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ wm831x_bat_irqs[i]));
ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
IRQF_TRIGGER_RISING,
wm831x_bat_irqs[i],
@@ -606,10 +608,10 @@ err_bat_irq:
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
free_irq(irq, power);
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
free_irq(irq, power);
err_syslo:
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, power);
err_battery:
if (power->have_battery)
@@ -626,17 +628,20 @@ err_kmalloc:
static __devexit int wm831x_power_remove(struct platform_device *pdev)
{
struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
+ struct wm831x *wm831x = wm831x_power->wm831x;
int irq, i;
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ wm831x_bat_irqs[i]));
free_irq(irq, wm831x_power);
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
free_irq(irq, wm831x_power);
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, wm831x_power);
if (wm831x_power->have_battery)
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index a409fa050a1a..93d0a8b7718a 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -338,7 +338,7 @@ int ps3av_do_pkt(u32 cid, u16 send_len, size_t usr_buf_size,
mutex_unlock(&ps3av->mutex);
return 0;
- err:
+err:
mutex_unlock(&ps3av->mutex);
printk(KERN_ERR "%s: failed cid:%x res:%d\n", __func__, cid, res);
return res;
@@ -477,7 +477,6 @@ int ps3av_set_audio_mode(u32 ch, u32 fs, u32 word_bits, u32 format, u32 source)
return 0;
}
-
EXPORT_SYMBOL_GPL(ps3av_set_audio_mode);
static int ps3av_set_videomode(void)
@@ -501,7 +500,7 @@ static void ps3av_set_videomode_packet(u32 id)
video_mode = &video_mode_table[id & PS3AV_MODE_MASK];
- avb_param.num_of_video_pkt = PS3AV_AVB_NUM_VIDEO; /* num of head */
+ avb_param.num_of_video_pkt = PS3AV_AVB_NUM_VIDEO; /* num of head */
avb_param.num_of_audio_pkt = 0;
avb_param.num_of_av_video_pkt = ps3av->av_hw_conf.num_of_hdmi +
ps3av->av_hw_conf.num_of_avmulti;
@@ -521,7 +520,7 @@ static void ps3av_set_videomode_packet(u32 id)
#ifndef PS3AV_HDMI_YUV
if (ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_0 ||
ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_1)
- av_video_cs = RGB8; /* use RGB for HDMI */
+ av_video_cs = RGB8; /* use RGB for HDMI */
#endif
len += ps3av_cmd_set_av_video_cs(&avb_param.buf[len],
ps3av->av_port[i],
@@ -590,8 +589,8 @@ static void ps3avd(struct work_struct *work)
#define SHIFT_VESA 8
static const struct {
- unsigned mask : 19;
- unsigned id : 4;
+ unsigned mask:19;
+ unsigned id:4;
} ps3av_preferred_modes[] = {
{ PS3AV_RESBIT_WUXGA << SHIFT_VESA, PS3AV_MODE_WUXGA },
{ PS3AV_RESBIT_1920x1080P << SHIFT_60, PS3AV_MODE_1080P60 },
@@ -667,7 +666,8 @@ static enum ps3av_mode_num ps3av_hdmi_get_id(struct ps3av_info_monitor *info)
return id;
}
-static void ps3av_monitor_info_dump(const struct ps3av_pkt_av_get_monitor_info *monitor_info)
+static void ps3av_monitor_info_dump(
+ const struct ps3av_pkt_av_get_monitor_info *monitor_info)
{
const struct ps3av_info_monitor *info = &monitor_info->info;
const struct ps3av_info_audio *audio = info->audio;
@@ -717,8 +717,8 @@ static void ps3av_monitor_info_dump(const struct ps3av_pkt_av_get_monitor_info *
/* audio block */
for (i = 0; i < info->num_of_audio_block; i++) {
- pr_debug("audio[%d] type: %02x max_ch: %02x fs: %02x sbit: "
- "%02x\n",
+ pr_debug(
+ "audio[%d] type: %02x max_ch: %02x fs: %02x sbit: %02x\n",
i, audio->type, audio->max_num_of_ch, audio->fs,
audio->sbit);
audio++;
@@ -870,21 +870,18 @@ int ps3av_set_video_mode(int id)
return 0;
}
-
EXPORT_SYMBOL_GPL(ps3av_set_video_mode);
int ps3av_get_auto_mode(void)
{
return ps3av_auto_videomode(&ps3av->av_hw_conf);
}
-
EXPORT_SYMBOL_GPL(ps3av_get_auto_mode);
int ps3av_get_mode(void)
{
return ps3av ? ps3av->ps3av_mode : 0;
}
-
EXPORT_SYMBOL_GPL(ps3av_get_mode);
/* get resolution by video_mode */
@@ -902,7 +899,6 @@ int ps3av_video_mode2res(u32 id, u32 *xres, u32 *yres)
*yres = video_mode_table[id].y;
return 0;
}
-
EXPORT_SYMBOL_GPL(ps3av_video_mode2res);
/* mute */
@@ -911,7 +907,6 @@ int ps3av_video_mute(int mute)
return ps3av_set_av_video_mute(mute ? PS3AV_CMD_MUTE_ON
: PS3AV_CMD_MUTE_OFF);
}
-
EXPORT_SYMBOL_GPL(ps3av_video_mute);
/* mute analog output only */
@@ -935,7 +930,6 @@ int ps3av_audio_mute(int mute)
return ps3av_set_audio_mute(mute ? PS3AV_CMD_MUTE_ON
: PS3AV_CMD_MUTE_OFF);
}
-
EXPORT_SYMBOL_GPL(ps3av_audio_mute);
static int __devinit ps3av_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5648dad71fb3..ffdf712f9a67 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -70,7 +70,7 @@ config DP83640_PHY
using the SO_TIMESTAMPING API.
In order for this to work, your MAC driver must also
- implement the skb_tx_timetamp() function.
+ implement the skb_tx_timestamp() function.
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index bc8719238793..6194d35ebb97 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -22,6 +22,20 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
ports for Input/Output direction to allow other traffic
than Maintenance transfers.
+config RAPIDIO_DMA_ENGINE
+ bool "DMA Engine support for RapidIO"
+ depends on RAPIDIO
+ select DMADEVICES
+ select DMA_ENGINE
+ help
+ Say Y here if you want to use the DMA Engine framework for RapidIO
+ data transfers to/from target RIO devices. RapidIO uses NREAD and
+ NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+ memory and memory on a remote target device. You need a DMA controller
+ capable of performing data transfers to/from RapidIO.
+
+ If you are unsure about this, say Y here.
+
config RAPIDIO_DEBUG
bool "RapidIO subsystem debug messages"
depends on RAPIDIO
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 3b7b4e2dff7c..7b62860f34f8 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o
+ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
+obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o
+endif
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 30d2072f480b..722246cf20ab 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
u16 destid, u8 hopcount, u32 offset, int len,
u32 *data, int do_wr)
{
+ void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
struct tsi721_dma_desc *bd_ptr;
u32 rd_count, swr_ptr, ch_stat;
int i, err = 0;
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
return -EINVAL;
- bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
+ bd_ptr = priv->mdma.bd_base;
- rd_count = ioread32(
- priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
+ rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
/* Initialize DMA descriptor */
bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
mb();
/* Start DMA operation */
- iowrite32(rd_count + 2,
- priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
- ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
+ ioread32(regs + TSI721_DMAC_DWRCNT);
i = 0;
/* Wait until DMA transfer is finished */
- while ((ch_stat = ioread32(priv->regs +
- TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
+ while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+ & TSI721_DMAC_STS_RUN) {
udelay(1);
if (++i >= 5000000) {
dev_dbg(&priv->pdev->dev,
"%s : DMA[%d] read timeout ch_status=%x\n",
- __func__, TSI721_DMACH_MAINT, ch_stat);
+ __func__, priv->mdma.ch_id, ch_stat);
if (!do_wr)
*data = 0xffffffff;
err = -EIO;
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
__func__, ch_stat);
dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
- iowrite32(TSI721_DMAC_INT_ALL,
- priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
- iowrite32(TSI721_DMAC_CTL_INIT,
- priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
+ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
udelay(10);
- iowrite32(0, priv->regs +
- TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ iowrite32(0, regs + TSI721_DMAC_DWRCNT);
udelay(1);
if (!do_wr)
*data = 0xffffffff;
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
* NOTE: Skipping check and clear FIFO entries because we are waiting
* for transfer to be completed.
*/
- swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
- iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
+ swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+ iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
err_out:
return err;
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
tsi721_pw_handler(mport);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ if (dev_int & TSI721_DEV_INT_BDMA_CH) {
+ int ch;
+
+ if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
+ dev_dbg(&priv->pdev->dev,
+ "IRQ from DMA channel 0x%08x\n", dev_ch_int);
+
+ for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
+ if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
+ continue;
+ tsi721_bdma_handler(&priv->bdma[ch]);
+ }
+ }
+ }
+#endif
return IRQ_HANDLED;
}
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
iowrite32(TSI721_SR_CHINT_IDBQRCV,
priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
- iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
- priv->regs + TSI721_DEV_CHAN_INTE);
/* Enable SRIO MAC interrupts */
iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
priv->regs + TSI721_RIO_EM_DEV_INT_EN);
+ /* Enable interrupts from channels in use */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
+ (TSI721_INT_BDMA_CHAN_M &
+ ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
+#else
+ intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
+#endif
+ iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
+
if (priv->flags & TSI721_USING_MSIX)
intr = TSI721_DEV_INT_SRIO;
else
intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
- TSI721_DEV_INT_SMSG_CH;
+ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
iowrite32(intr, priv->regs + TSI721_DEV_INTE);
ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
TSI721_MSIX_OMSG_INT(i);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ /*
+ * Initialize MSI-X entries for Block DMA Engine:
+ * this driver supports up to TSI721_DMA_CHNUM DMA channels
+ * (one is reserved for SRIO maintenance transactions)
+ */
+ for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+ entries[TSI721_VECT_DMA0_DONE + i].entry =
+ TSI721_MSIX_DMACH_DONE(i);
+ entries[TSI721_VECT_DMA0_INT + i].entry =
+ TSI721_MSIX_DMACH_INT(i);
+ }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
if (err) {
if (err > 0)
dev_info(&priv->pdev->dev,
"Only %d MSI-X vectors available, "
"not using MSI-X\n", err);
+ else
+ dev_err(&priv->pdev->dev,
+ "Failed to enable MSI-X (err=%d)\n", err);
return err;
}
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
i, pci_name(priv->pdev));
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+ priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+ entries[TSI721_VECT_DMA0_DONE + i].vector;
+ snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+ i, pci_name(priv->pdev));
+
+ priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+ entries[TSI721_VECT_DMA0_INT + i].vector;
+ snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+ i, pci_name(priv->pdev));
+ }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
return 0;
}
#endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
priv->idb_base = NULL;
}
-static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
+/**
+ * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
+ * @priv: pointer to tsi721 private data
+ *
+ * Initializes the BDMA channel allocated for RapidIO maintenance read/write
+ * request generation.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_bdma_maint_init(struct tsi721_device *priv)
{
struct tsi721_dma_desc *bd_ptr;
u64 *sts_ptr;
dma_addr_t bd_phys, sts_phys;
int sts_size;
- int bd_num = priv->bdma[chnum].bd_num;
+ int bd_num = 2;
+ void __iomem *regs;
- dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
+ dev_dbg(&priv->pdev->dev,
+ "Init Block DMA Engine for Maintenance requests, CH%d\n",
+ TSI721_DMACH_MAINT);
/*
* Initialize DMA channel for maintenance requests
*/
+ priv->mdma.ch_id = TSI721_DMACH_MAINT;
+ regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
+
/* Allocate space for DMA descriptors */
bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
if (!bd_ptr)
return -ENOMEM;
- priv->bdma[chnum].bd_phys = bd_phys;
- priv->bdma[chnum].bd_base = bd_ptr;
+ priv->mdma.bd_num = bd_num;
+ priv->mdma.bd_phys = bd_phys;
+ priv->mdma.bd_base = bd_ptr;
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
dma_free_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
bd_ptr, bd_phys);
- priv->bdma[chnum].bd_base = NULL;
+ priv->mdma.bd_base = NULL;
return -ENOMEM;
}
- priv->bdma[chnum].sts_phys = sts_phys;
- priv->bdma[chnum].sts_base = sts_ptr;
- priv->bdma[chnum].sts_size = sts_size;
+ priv->mdma.sts_phys = sts_phys;
+ priv->mdma.sts_base = sts_ptr;
+ priv->mdma.sts_size = sts_size;
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
/* Setup DMA descriptor pointers */
- iowrite32(((u64)bd_phys >> 32),
- priv->regs + TSI721_DMAC_DPTRH(chnum));
+ iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
- priv->regs + TSI721_DMAC_DPTRL(chnum));
+ regs + TSI721_DMAC_DPTRL);
/* Setup descriptor status FIFO */
- iowrite32(((u64)sts_phys >> 32),
- priv->regs + TSI721_DMAC_DSBH(chnum));
+ iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
- priv->regs + TSI721_DMAC_DSBL(chnum));
+ regs + TSI721_DMAC_DSBL);
iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
- priv->regs + TSI721_DMAC_DSSZ(chnum));
+ regs + TSI721_DMAC_DSSZ);
/* Clear interrupt bits */
- iowrite32(TSI721_DMAC_INT_ALL,
- priv->regs + TSI721_DMAC_INT(chnum));
+ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
- ioread32(priv->regs + TSI721_DMAC_INT(chnum));
+ ioread32(regs + TSI721_DMAC_INT);
/* Toggle DMA channel initialization */
- iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
- ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
+ ioread32(regs + TSI721_DMAC_CTL);
udelay(10);
return 0;
}
-static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
+static int tsi721_bdma_maint_free(struct tsi721_device *priv)
{
u32 ch_stat;
+ struct tsi721_bdma_maint *mdma = &priv->mdma;
+ void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
- if (priv->bdma[chnum].bd_base == NULL)
+ if (mdma->bd_base == NULL)
return 0;
/* Check if DMA channel still running */
- ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
+ ch_stat = ioread32(regs + TSI721_DMAC_STS);
if (ch_stat & TSI721_DMAC_STS_RUN)
return -EFAULT;
/* Put DMA channel into init state */
- iowrite32(TSI721_DMAC_CTL_INIT,
- priv->regs + TSI721_DMAC_CTL(chnum));
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
/* Free space allocated for DMA descriptors */
dma_free_coherent(&priv->pdev->dev,
- priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
- priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
- priv->bdma[chnum].bd_base = NULL;
+ mdma->bd_num * sizeof(struct tsi721_dma_desc),
+ mdma->bd_base, mdma->bd_phys);
+ mdma->bd_base = NULL;
/* Free space allocated for status FIFO */
dma_free_coherent(&priv->pdev->dev,
- priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
- priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
- priv->bdma[chnum].sts_base = NULL;
- return 0;
-}
-
-static int tsi721_bdma_init(struct tsi721_device *priv)
-{
- /* Initialize BDMA channel allocated for RapidIO maintenance read/write
- * request generation
- */
- priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
- if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
- dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
- " channel %d, aborting\n", TSI721_DMACH_MAINT);
- return -ENOMEM;
- }
-
+ mdma->sts_size * sizeof(struct tsi721_dma_sts),
+ mdma->sts_base, mdma->sts_phys);
+ mdma->sts_base = NULL;
return 0;
}
-static void tsi721_bdma_free(struct tsi721_device *priv)
-{
- tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
-}
-
/* Enable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
/* Disable all BDMA Channel interrupts */
for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
- iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
+ iowrite32(0,
+ priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
/* Disable all general BDMA interrupts */
iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
mport->phy_type = RIO_PHY_SERIAL;
mport->priv = (void *)priv;
mport->phys_efptr = 0x100;
+ priv->mport = mport;
INIT_LIST_HEAD(&mport->dbells);
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
if (!err) {
tsi721_interrupts_init(priv);
ops->pwenable = tsi721_pw_enable;
- } else
+ } else {
dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
"vector %02X err=0x%x\n", pdev->irq, err);
+ goto err_exit;
+ }
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ tsi721_register_dma(priv);
+#endif
/* Enable SRIO link */
iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
TSI721_DEVCTL_SRBOOT_CMPL,
priv->regs + TSI721_DEVCTL);
rio_register_mport(mport);
- priv->mport = mport;
if (mport->host_deviceid >= 0)
iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
return 0;
+
+err_exit:
+ kfree(mport);
+ kfree(ops);
+ return err;
}
static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
tsi721_init_pc2sr_mapping(priv);
tsi721_init_sr2pc_mapping(priv);
- if (tsi721_bdma_init(priv)) {
+ if (tsi721_bdma_maint_init(priv)) {
dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
err = -ENOMEM;
goto err_unmap_bars;
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
err_free_consistent:
tsi721_doorbell_free(priv);
err_free_bdma:
- tsi721_bdma_free(priv);
+ tsi721_bdma_maint_free(priv);
err_unmap_bars:
if (priv->regs)
iounmap(priv->regs);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 1c226b31af13..59de9d7be346 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -167,6 +167,8 @@
#define TSI721_DEV_INTE 0x29840
#define TSI721_DEV_INT 0x29844
#define TSI721_DEV_INTSET 0x29848
+#define TSI721_DEV_INT_BDMA_CH 0x00002000
+#define TSI721_DEV_INT_BDMA_NCH 0x00001000
#define TSI721_DEV_INT_SMSG_CH 0x00000800
#define TSI721_DEV_INT_SMSG_NCH 0x00000400
#define TSI721_DEV_INT_SR2PC_CH 0x00000200
@@ -181,6 +183,8 @@
#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x)))
#define TSI721_INT_OMSG_CHAN_M 0x0000ff00
#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x)))
+#define TSI721_INT_BDMA_CHAN_M 0x000000ff
+#define TSI721_INT_BDMA_CHAN(x) (1 << (x))
/*
* PC2SR block registers
@@ -235,14 +239,16 @@
* x = 0..7
*/
-#define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000)
+#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000)
+#define TSI721_DMAC_DWRCNT 0x000
+#define TSI721_DMAC_DRDCNT 0x004
+
+#define TSI721_DMAC_CTL 0x008
#define TSI721_DMAC_CTL_SUSP 0x00000002
#define TSI721_DMAC_CTL_INIT 0x00000001
-#define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000)
+#define TSI721_DMAC_INT 0x00c
#define TSI721_DMAC_INT_STFULL 0x00000010
#define TSI721_DMAC_INT_DONE 0x00000008
#define TSI721_DMAC_INT_SUSP 0x00000004
@@ -250,34 +256,33 @@
#define TSI721_DMAC_INT_IOFDONE 0x00000001
#define TSI721_DMAC_INT_ALL 0x0000001f
-#define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000)
+#define TSI721_DMAC_INTSET 0x010
-#define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000)
+#define TSI721_DMAC_STS 0x014
#define TSI721_DMAC_STS_ABORT 0x00400000
#define TSI721_DMAC_STS_RUN 0x00200000
#define TSI721_DMAC_STS_CS 0x001f0000
-#define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000)
+#define TSI721_DMAC_INTE 0x018
-#define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRL 0x024
#define TSI721_DMAC_DPTRL_MASK 0xffffffe0
-#define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRH 0x028
-#define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000)
+#define TSI721_DMAC_DSBL 0x02c
#define TSI721_DMAC_DSBL_MASK 0xffffffc0
-#define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000)
+#define TSI721_DMAC_DSBH 0x030
-#define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000)
+#define TSI721_DMAC_DSSZ 0x034
#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f
#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4)
-
-#define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000)
+#define TSI721_DMAC_DSRP 0x038
#define TSI721_DMAC_DSRP_MASK 0x0007ffff
-#define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000)
+#define TSI721_DMAC_DSWP 0x03c
#define TSI721_DMAC_DSWP_MASK 0x0007ffff
#define TSI721_BDMA_INTE 0x5f000
@@ -612,6 +617,8 @@ enum dma_rtype {
#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
+#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
+
#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0)
enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag {
/* Structures */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct tsi721_tx_desc {
+ struct dma_async_tx_descriptor txd;
+ struct tsi721_dma_desc *hw_desc;
+ u16 destid;
+ /* low 64 bits of the 66-bit RIO address */
+ u64 rio_addr;
+ /* upper 2 bits of the 66-bit RIO address */
+ u8 rio_addr_u;
+ bool interrupt;
+ struct list_head desc_node;
+ struct list_head tx_list;
+};
+
struct tsi721_bdma_chan {
+ int id;
+ void __iomem *regs;
+ int bd_num; /* number of buffer descriptors */
+ void *bd_base; /* start of DMA descriptors */
+ dma_addr_t bd_phys;
+ void *sts_base; /* start of DMA BD status FIFO */
+ dma_addr_t sts_phys;
+ int sts_size;
+ u32 sts_rdptr;
+ u32 wr_count;
+ u32 wr_count_next;
+
+ struct dma_chan dchan;
+ struct tsi721_tx_desc *tx_desc;
+ spinlock_t lock;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ dma_cookie_t completed_cookie;
+ struct tasklet_struct tasklet;
+};
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+struct tsi721_bdma_maint {
+ int ch_id; /* BDMA channel number */
int bd_num; /* number of buffer descriptors */
void *bd_base; /* start of DMA descriptors */
dma_addr_t bd_phys;
@@ -721,6 +769,24 @@ enum tsi721_msix_vect {
TSI721_VECT_IMB1_INT,
TSI721_VECT_IMB2_INT,
TSI721_VECT_IMB3_INT,
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ TSI721_VECT_DMA0_DONE,
+ TSI721_VECT_DMA1_DONE,
+ TSI721_VECT_DMA2_DONE,
+ TSI721_VECT_DMA3_DONE,
+ TSI721_VECT_DMA4_DONE,
+ TSI721_VECT_DMA5_DONE,
+ TSI721_VECT_DMA6_DONE,
+ TSI721_VECT_DMA7_DONE,
+ TSI721_VECT_DMA0_INT,
+ TSI721_VECT_DMA1_INT,
+ TSI721_VECT_DMA2_INT,
+ TSI721_VECT_DMA3_INT,
+ TSI721_VECT_DMA4_INT,
+ TSI721_VECT_DMA5_INT,
+ TSI721_VECT_DMA6_INT,
+ TSI721_VECT_DMA7_INT,
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
TSI721_VECT_MAX
};
@@ -754,7 +820,11 @@ struct tsi721_device {
u32 pw_discard_count;
/* BDMA Engine */
+ struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+#endif
/* Inbound Messaging */
int imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@ struct tsi721_device {
struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
};
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
+extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
+#endif
+
#endif
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 000000000000..92e06a5c62ec
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,823 @@
+/*
+ * DMA Engine support for Tsi721 PCI Express-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+ return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static inline
+struct tsi721_tx_desc *tsi721_dma_first_active(
+ struct tsi721_bdma_chan *bdma_chan)
+{
+ return list_first_entry(&bdma_chan->active_list,
+ struct tsi721_tx_desc, desc_node);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_dma_desc *bd_ptr;
+ struct device *dev = bdma_chan->dchan.device->dev;
+ u64 *sts_ptr;
+ dma_addr_t bd_phys;
+ dma_addr_t sts_phys;
+ int sts_size;
+ int bd_num = bdma_chan->bd_num;
+
+ dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+
+ /* Allocate space for DMA descriptors */
+ bd_ptr = dma_zalloc_coherent(dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
+ if (!bd_ptr)
+ return -ENOMEM;
+
+ bdma_chan->bd_phys = bd_phys;
+ bdma_chan->bd_base = bd_ptr;
+
+ dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
+ bd_ptr, (unsigned long long)bd_phys);
+
+ /* Allocate space for descriptor status FIFO */
+ sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+ bd_num : TSI721_DMA_MINSTSSZ;
+ sts_size = roundup_pow_of_two(sts_size);
+ sts_ptr = dma_zalloc_coherent(dev,
+ sts_size * sizeof(struct tsi721_dma_sts),
+ &sts_phys, GFP_KERNEL);
+ if (!sts_ptr) {
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ bd_ptr, bd_phys);
+ bdma_chan->bd_base = NULL;
+ return -ENOMEM;
+ }
+
+ bdma_chan->sts_phys = sts_phys;
+ bdma_chan->sts_base = sts_ptr;
+ bdma_chan->sts_size = sts_size;
+
+ dev_dbg(dev,
+ "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
+ sts_ptr, (unsigned long long)sts_phys, sts_size);
+
+ /* Initialize DMA descriptors ring */
+ bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+ bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+ TSI721_DMAC_DPTRL_MASK);
+ bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+ /* Setup DMA descriptor pointers */
+ iowrite32(((u64)bd_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DPTRH);
+ iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+ /* Setup descriptor status FIFO */
+ iowrite32(((u64)sts_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DSBH);
+ iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DSBL);
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+ bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+ /* Clear interrupt bits */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+ /* Toggle DMA channel initialization */
+ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+ ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
+ bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+ bdma_chan->sts_rdptr = 0;
+ udelay(10);
+
+ return 0;
+}
+
+static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 ch_stat;
+
+ if (bdma_chan->bd_base == NULL)
+ return 0;
+
+ /* Check if DMA channel still running */
+ ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ if (ch_stat & TSI721_DMAC_STS_RUN)
+ return -EFAULT;
+
+ /* Put DMA channel into init state */
+ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+ bdma_chan->bd_base, bdma_chan->bd_phys);
+ bdma_chan->bd_base = NULL;
+
+ /* Free space allocated for status FIFO */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+ bdma_chan->sts_base, bdma_chan->sts_phys);
+ bdma_chan->sts_base = NULL;
+ return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+ if (enable) {
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ /* Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INTE);
+ } else {
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ }
+
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 sts;
+
+ sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+
+ tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+ struct tsi721_bdma_chan *bdma_chan = ptr;
+
+ tsi721_bdma_handler(bdma_chan);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (!tsi721_dma_is_idle(bdma_chan)) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to start non-idle channel\n");
+ return;
+ }
+
+ if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to start DMA with no BDs ready\n");
+ return;
+ }
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "tx_chan: %p, chan: %d, regs: %p\n",
+ bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+
+ iowrite32(bdma_chan->wr_count_next,
+ bdma_chan->regs + TSI721_DMAC_DWRCNT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+ bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
+{
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "Put desc: %p into free list\n", desc);
+
+ if (desc) {
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+ list_add(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->wr_count_next = bdma_chan->wr_count;
+ spin_unlock_bh(&bdma_chan->lock);
+ }
+}
+
+static
+struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_tx_desc *tx_desc, *_tx_desc;
+ struct tsi721_tx_desc *ret = NULL;
+ int i;
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_for_each_entry_safe(tx_desc, _tx_desc,
+ &bdma_chan->free_list, desc_node) {
+ if (async_tx_test_ack(&tx_desc->txd)) {
+ list_del(&tx_desc->desc_node);
+ ret = tx_desc;
+ break;
+ }
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "desc %p not ACKed\n", tx_desc);
+ }
+
+ /*
+  * Advance the ring pointers and bind a hardware descriptor only when a
+  * free, ACKed logical descriptor was actually found above; otherwise the
+  * stale list iterator must not be dereferenced.
+  */
+ if (ret) {
+ i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+ if (i == bdma_chan->bd_num - 1) {
+ i = 0;
+ bdma_chan->wr_count_next++; /* skip link descriptor */
+ }
+
+ bdma_chan->wr_count_next++;
+ ret->txd.phys = bdma_chan->bd_phys +
+ i * sizeof(struct tsi721_dma_desc);
+ ret->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+ }
+
+ spin_unlock_bh(&bdma_chan->lock);
+
+ return ret;
+}
+
+static int
+tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc, struct scatterlist *sg,
+ enum dma_rtype rtype, u32 sys_size)
+{
+ struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+ u64 rio_addr;
+
+ if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "SG element is too large\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
+ (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
+ sg_dma_len(sg));
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "bd_ptr = %p did=%d raddr=0x%llx\n",
+ bd_ptr, desc->destid, desc->rio_addr);
+
+ /* Initialize DMA descriptor */
+ bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+ (rtype << 19) | desc->destid);
+ if (desc->interrupt)
+ bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+ bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+ (sys_size << 26) | sg_dma_len(sg));
+ rio_addr = (desc->rio_addr >> 2) |
+ ((u64)(desc->rio_addr_u & 0x3) << 62);
+ bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+ bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+ bd_ptr->t1.bufptr_lo = cpu_to_le32(
+ (u64)sg_dma_address(sg) & 0xffffffff);
+ bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
+ bd_ptr->t1.s_dist = 0;
+ bd_ptr->t1.s_size = 0;
+
+ return 0;
+}
+
+static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+ list_move(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->completed_cookie = txd->cookie;
+
+ if (callback)
+ callback(param);
+}
+
+static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_tx_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ BUG_ON(!tsi721_dma_is_idle(bdma_chan));
+
+ if (!list_empty(&bdma_chan->queue))
+ tsi721_start_dma(bdma_chan);
+
+ list_splice_init(&bdma_chan->active_list, &list);
+ list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ tsi721_dma_chain_complete(bdma_chan, desc);
+}
+
+static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 srd_ptr;
+ u64 *sts_ptr;
+ int i, j;
+
+ /* Check and clear descriptor status FIFO entries */
+ srd_ptr = bdma_chan->sts_rdptr;
+ sts_ptr = bdma_chan->sts_base;
+ j = srd_ptr * 8;
+ while (sts_ptr[j]) {
+ for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
+ sts_ptr[j] = 0;
+
+ ++srd_ptr;
+ srd_ptr %= bdma_chan->sts_size;
+ j = srd_ptr * 8;
+ }
+
+ iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
+ bdma_chan->sts_rdptr = srd_ptr;
+}
+
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (list_empty(&bdma_chan->active_list) ||
+ list_is_singular(&bdma_chan->active_list)) {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: Active_list empty\n", __func__);
+ tsi721_dma_complete_all(bdma_chan);
+ } else {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: Active_list NOT empty\n", __func__);
+ tsi721_dma_chain_complete(bdma_chan,
+ tsi721_dma_first_active(bdma_chan));
+ tsi721_start_dma(bdma_chan);
+ }
+}
+
+static void tsi721_dma_tasklet(unsigned long data)
+{
+ struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
+ u32 dmac_int, dmac_sts;
+
+ dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
+ __func__, bdma_chan->id, dmac_int);
+ /* Clear channel interrupts */
+ iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
+
+ if (dmac_int & TSI721_DMAC_INT_ERR) {
+ dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
+ __func__, bdma_chan->id, dmac_sts);
+ }
+
+ if (dmac_int & TSI721_DMAC_INT_STFULL) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: DMAC%d descriptor status FIFO is full\n",
+ __func__, bdma_chan->id);
+ }
+
+ if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+ tsi721_clr_stat(bdma_chan);
+ spin_lock(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan);
+ spin_unlock(&bdma_chan->lock);
+ }
+
+ /* Re-Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
+}
+
+static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&bdma_chan->lock);
+
+ cookie = txd->chan->cookie;
+ if (++cookie < 0)
+ cookie = 1;
+ txd->chan->cookie = cookie;
+ txd->cookie = cookie;
+
+ if (list_empty(&bdma_chan->active_list)) {
+ list_add_tail(&desc->desc_node, &bdma_chan->active_list);
+ tsi721_start_dma(bdma_chan);
+ } else {
+ list_add_tail(&desc->desc_node, &bdma_chan->queue);
+ }
+
+ spin_unlock_bh(&bdma_chan->lock);
+ return cookie;
+}
+
+static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+ struct tsi721_tx_desc *desc = NULL;
+ LIST_HEAD(tmp_list);
+ int i;
+ int rc;
+
+ if (bdma_chan->bd_base)
+ return bdma_chan->bd_num - 1;
+
+ /* Initialize BDMA channel */
+ if (tsi721_bdma_ch_init(bdma_chan)) {
+ dev_err(dchan->device->dev, "Unable to initialize data DMA"
+ " channel %d, aborting\n", bdma_chan->id);
+ return -ENOMEM;
+ }
+
+ /* Allocate matching number of logical descriptors */
+ desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+ GFP_KERNEL);
+ if (!desc) {
+ dev_err(dchan->device->dev,
+ "Failed to allocate logical descriptors\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ bdma_chan->tx_desc = desc;
+
+ for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+ dma_async_tx_descriptor_init(&desc[i].txd, dchan);
+ desc[i].txd.tx_submit = tsi721_tx_submit;
+ desc[i].txd.flags = DMA_CTRL_ACK;
+ INIT_LIST_HEAD(&desc[i].tx_list);
+ list_add_tail(&desc[i].desc_node, &tmp_list);
+ }
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice(&tmp_list, &bdma_chan->free_list);
+ bdma_chan->completed_cookie = dchan->cookie = 1;
+ spin_unlock_bh(&bdma_chan->lock);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ /* Request interrupt service if we are in MSI-X mode */
+ rc = request_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ tsi721_bdma_msix, 0,
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].irq_name,
+ (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dchan->device->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "BDMA%d-DONE\n", bdma_chan->id);
+ goto err_out;
+ }
+
+ rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector,
+ tsi721_bdma_msix, 0,
+ priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].irq_name,
+ (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dchan->device->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "BDMA%d-INT\n", bdma_chan->id);
+ free_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ (void *)bdma_chan);
+ rc = -EIO;
+ goto err_out;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tasklet_enable(&bdma_chan->tasklet);
+ tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+ return bdma_chan->bd_num - 1;
+
+err_out:
+ kfree(desc);
+ tsi721_bdma_ch_free(bdma_chan);
+ return rc;
+}
+
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (bdma_chan->bd_base == NULL)
+ return;
+
+ BUG_ON(!list_empty(&bdma_chan->active_list));
+ BUG_ON(!list_empty(&bdma_chan->queue));
+
+ tasklet_disable(&bdma_chan->tasklet);
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&bdma_chan->free_list, &list);
+ spin_unlock_bh(&bdma_chan->lock);
+
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tsi721_bdma_ch_free(bdma_chan);
+ kfree(bdma_chan->tx_desc);
+}
+
+static
+enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_completed;
+ int ret;
+
+ spin_lock_bh(&bdma_chan->lock);
+ last_completed = bdma_chan->completed_cookie;
+ last_used = dchan->cookie;
+ spin_unlock_bh(&bdma_chan->lock);
+
+ ret = dma_async_is_complete(cookie, last_completed, last_used);
+
+ dma_set_tx_state(txstate, last_completed, last_used, 0);
+
+ dev_dbg(dchan->device->dev,
+ "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
+ __func__, ret, last_completed, last_used);
+
+ return ret;
+}
+
+static void tsi721_issue_pending(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (tsi721_dma_is_idle(bdma_chan)) {
+ spin_lock_bh(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan);
+ spin_unlock_bh(&bdma_chan->lock);
+ } else
+ dev_dbg(dchan->device->dev,
+ "%s: DMA channel still busy\n", __func__);
+}
+
+static
+struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ void *tinfo)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ struct tsi721_tx_desc *desc = NULL;
+ struct tsi721_tx_desc *first = NULL;
+ struct scatterlist *sg;
+ struct rio_dma_ext *rext = tinfo;
+ u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
+ unsigned int i;
+ u32 sys_size = dma_to_mport(dchan->device)->sys_size;
+ enum dma_rtype rtype;
+
+ if (!sgl || !sg_len) {
+ dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM)
+ rtype = NREAD;
+ else if (dir == DMA_MEM_TO_DEV) {
+ switch (rext->wr_type) {
+ case RDW_ALL_NWRITE:
+ rtype = ALL_NWRITE;
+ break;
+ case RDW_ALL_NWRITE_R:
+ rtype = ALL_NWRITE_R;
+ break;
+ case RDW_LAST_NWRITE_R:
+ default:
+ rtype = LAST_NWRITE_R;
+ break;
+ }
+ } else {
+ dev_err(dchan->device->dev,
+ "%s: Unsupported DMA direction option\n", __func__);
+ return NULL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ int err;
+
+ dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+ desc = tsi721_desc_get(bdma_chan);
+ if (!desc) {
+ dev_err(dchan->device->dev,
+ "Not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
+ if (sg_is_last(sg))
+ desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+ else
+ desc->interrupt = false;
+
+ desc->destid = rext->destid;
+ desc->rio_addr = rio_addr;
+ desc->rio_addr_u = 0;
+
+ err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+ if (err) {
+ dev_err(dchan->device->dev,
+ "Failed to build desc: %d\n", err);
+ goto err_desc_get;
+ }
+
+ rio_addr += sg_dma_len(sg);
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->desc_node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ desc->txd.flags = flags;
+
+ return &first->txd;
+
+err_desc_get:
+ tsi721_desc_put(bdma_chan, first);
+ return NULL;
+}
+
+static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ struct tsi721_tx_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_bh(&bdma_chan->lock);
+
+ /* make sure to stop the transfer */
+ iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+
+ list_splice_init(&bdma_chan->active_list, &list);
+ list_splice_init(&bdma_chan->queue, &list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ tsi721_dma_chain_complete(bdma_chan, desc);
+
+ spin_unlock_bh(&bdma_chan->lock);
+
+ return 0;
+}
+
+int __devinit tsi721_register_dma(struct tsi721_device *priv)
+{
+ int i;
+ int nr_channels = TSI721_DMA_MAXCH;
+ int err;
+ struct rio_mport *mport = priv->mport;
+
+ mport->dma.dev = &priv->pdev->dev;
+ mport->dma.chancnt = nr_channels;
+
+ INIT_LIST_HEAD(&mport->dma.channels);
+
+ for (i = 0; i < nr_channels; i++) {
+ struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
+
+ if (i == TSI721_DMACH_MAINT)
+ continue;
+
+ bdma_chan->bd_num = 64;
+ bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
+
+ bdma_chan->dchan.device = &mport->dma;
+ bdma_chan->dchan.cookie = 1;
+ bdma_chan->dchan.chan_id = i;
+ bdma_chan->id = i;
+
+ spin_lock_init(&bdma_chan->lock);
+
+ INIT_LIST_HEAD(&bdma_chan->active_list);
+ INIT_LIST_HEAD(&bdma_chan->queue);
+ INIT_LIST_HEAD(&bdma_chan->free_list);
+
+ tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+ (unsigned long)bdma_chan);
+ tasklet_disable(&bdma_chan->tasklet);
+ list_add_tail(&bdma_chan->dchan.device_node,
+ &mport->dma.channels);
+ }
+
+ dma_cap_zero(mport->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+
+ mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
+ mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
+ mport->dma.device_tx_status = tsi721_tx_status;
+ mport->dma.device_issue_pending = tsi721_issue_pending;
+ mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
+ mport->dma.device_control = tsi721_device_control;
+
+ err = dma_async_device_register(&mport->dma);
+ if (err)
+ dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
+
+ return err;
+}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 86c9a091a2ff..c40665a4fa33 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1121,6 +1121,87 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
return 0;
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+static bool rio_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rio_dev *rdev = arg;
+
+ /* Check that DMA device belongs to the right MPORT */
+ return (rdev->net->hport ==
+ container_of(chan->device, struct rio_mport, dma));
+}
+
+/**
+ * rio_request_dma - request RapidIO capable DMA channel that supports
+ * specified target RapidIO device.
+ * @rdev: RIO device control structure
+ *
+ * Returns pointer to allocated DMA channel or NULL if failed.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *dchan;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+
+ return dchan;
+}
+EXPORT_SYMBOL_GPL(rio_request_dma);
+
+/**
+ * rio_release_dma - release specified DMA channel
+ * @dchan: DMA channel to release
+ */
+void rio_release_dma(struct dma_chan *dchan)
+{
+ dma_release_channel(dchan);
+}
+EXPORT_SYMBOL_GPL(rio_release_dma);
+
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ * for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes RapidIO capable DMA channel for the specified data transfer.
+ * Uses DMA channel private extension to pass information related to remote
+ * target RIO device.
+ * Returns pointer to DMA transaction descriptor or NULL if failed.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+ struct dma_chan *dchan, struct rio_dma_data *data,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct rio_dma_ext rio_ext;
+
+ if (dchan->device->device_prep_slave_sg == NULL) {
+ pr_err("%s: prep_rio_sg == NULL\n", __func__);
+ return NULL;
+ }
+
+ rio_ext.destid = rdev->destid;
+ rio_ext.rio_addr_u = data->rio_addr_u;
+ rio_ext.rio_addr = data->rio_addr;
+ rio_ext.wr_type = data->wr_type;
+
+ txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+ direction, flags, &rio_ext);
+
+ return txd;
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
static void rio_fixup_device(struct rio_dev *dev)
{
}
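
A minimal usage sketch (not part of the patch) of the mport DMA helpers exported above. The scatterlist setup, target address, write type and completion handling below are illustrative assumptions; only rio_request_dma(), rio_dma_prep_slave_sg(), rio_release_dma() and the standard dmaengine submit/issue calls are taken from the code above:

static int example_rio_dma_write(struct rio_dev *rdev, struct scatterlist *sgl,
				 unsigned int sg_len, u64 rio_addr)
{
	struct dma_async_tx_descriptor *txd;
	struct rio_dma_data data;
	struct dma_chan *dchan;
	dma_cookie_t cookie;
	int nents;

	/* Get a DMA channel served by the mport this device sits behind */
	dchan = rio_request_dma(rdev);
	if (!dchan)
		return -ENODEV;

	nents = dma_map_sg(dchan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
	if (!nents) {
		rio_release_dma(dchan);
		return -EIO;
	}

	data.sg = sgl;
	data.sg_len = nents;
	data.rio_addr = rio_addr;	/* low 64 bits of the target address */
	data.rio_addr_u = 0;
	data.wr_type = RDW_ALL_NWRITE_R; /* writes with response, illustrative */

	txd = rio_dma_prep_slave_sg(rdev, dchan, &data, DMA_MEM_TO_DEV,
				    DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_unmap;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_unmap;

	dma_async_issue_pending(dchan);
	/* ... wait for completion, then dma_unmap_sg() and rio_release_dma() */
	return 0;

err_unmap:
	dma_unmap_sg(dchan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
	rio_release_dma(dchan);
	return -EIO;
}

Because rio_chan_filter() only accepts channels whose dma_device is embedded in the mport that hosts the target device, the returned channel is guaranteed to be able to reach @rdev.
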
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index e1b8c54ace5a..a739f5ca936a 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -794,17 +794,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
}
static struct of_regulator_match ab8500_regulator_matches[] = {
- { .name = "LDO-AUX1", .driver_data = (void *) AB8500_LDO_AUX1, },
- { .name = "LDO-AUX2", .driver_data = (void *) AB8500_LDO_AUX2, },
- { .name = "LDO-AUX3", .driver_data = (void *) AB8500_LDO_AUX3, },
- { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, },
- { .name = "LDO-TVOUT", .driver_data = (void *) AB8500_LDO_TVOUT, },
- { .name = "LDO-USB", .driver_data = (void *) AB8500_LDO_USB, },
- { .name = "LDO-AUDIO", .driver_data = (void *) AB8500_LDO_AUDIO, },
- { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
- { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
- { .name = "LDO-DMIC", .driver_data = (void *) AB8500_LDO_DMIC, },
- { .name = "LDO-ANA", .driver_data = (void *) AB8500_LDO_ANA, },
+ { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
+ { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
+ { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, },
+ { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
+ { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, },
+ { .name = "ab8500_ldo_usb", .driver_data = (void *) AB8500_LDO_USB, },
+ { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, },
+ { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+ { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+ { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, },
+ { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
};
static __devinit int
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 49b2112b0486..e82e7eaac0f1 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -47,7 +47,7 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
int max_uV, unsigned *selector)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- u32 val, sel;
+ u32 val, sel, mask;
int uv;
uv = min_uV;
@@ -71,11 +71,10 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
val = anatop_reg->min_bit_val + sel;
*selector = sel;
dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
- anatop_set_bits(anatop_reg->mfd,
- anatop_reg->control_reg,
- anatop_reg->vol_bit_shift,
- anatop_reg->vol_bit_width,
- val);
+ mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
+ anatop_reg->vol_bit_shift;
+ val <<= anatop_reg->vol_bit_shift;
+ anatop_write_reg(anatop_reg->mfd, anatop_reg->control_reg, val, mask);
return 0;
}
@@ -88,10 +87,9 @@ static int anatop_get_voltage_sel(struct regulator_dev *reg)
if (!anatop_reg->control_reg)
return -ENOTSUPP;
- val = anatop_get_bits(anatop_reg->mfd,
- anatop_reg->control_reg,
- anatop_reg->vol_bit_shift,
- anatop_reg->vol_bit_width);
+ val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
+ val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >>
+ anatop_reg->vol_bit_shift;
return val - anatop_reg->min_bit_val;
}
@@ -226,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = {
.of_match_table = of_anatop_regulator_match_tbl,
},
.probe = anatop_regulator_probe,
- .remove = anatop_regulator_remove,
+ .remove = __devexit_p(anatop_regulator_remove),
};
static int __init anatop_regulator_init(void)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7584a74eec8a..8b4b3829d9e7 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
return -EINVAL;
}
+ if (min_uV < rdev->desc->min_uV)
+ min_uV = rdev->desc->min_uV;
+
ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
if (ret < 0)
return ret;
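
A worked example (values illustrative, not from the patch) of what the added clamp changes: for a regulator with desc->min_uV = 850000 and uV_step = 12500, a consumer request for 800000..900000 uV used to compute DIV_ROUND_UP(800000 - 850000, 12500), i.e. a negative selector, and fail; with min_uV clamped to desc->min_uV the mapping yields selector 0 (850000 uV), which lies inside the requested range.
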
@@ -2516,9 +2519,12 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
{
struct regulator_dev *rdev = regulator->rdev;
struct regulator *consumer;
- int ret, output_uV, input_uV, total_uA_load = 0;
+ int ret, output_uV, input_uV = 0, total_uA_load = 0;
unsigned int mode;
+ if (rdev->supply)
+ input_uV = regulator_get_voltage(rdev->supply);
+
mutex_lock(&rdev->mutex);
/*
@@ -2551,10 +2557,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
goto out;
}
- /* get input voltage */
- input_uV = 0;
- if (rdev->supply)
- input_uV = regulator_get_voltage(rdev->supply);
+ /* No supply? Use constraint voltage */
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 968f97f3cb3d..9dbb491b6efa 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev,
}
static struct of_regulator_match db8500_regulator_matches[] = {
- { .name = "db8500-vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
- { .name = "db8500-varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
- { .name = "db8500-vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
- { .name = "db8500-vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
- { .name = "db8500-vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
- { .name = "db8500-vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
- { .name = "db8500-vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
- { .name = "db8500-vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
- { .name = "db8500-sva-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
- { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
- { .name = "db8500-sva-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
- { .name = "db8500-sia-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
- { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
- { .name = "db8500-sia-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
- { .name = "db8500-sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
- { .name = "db8500-b2r2-mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
- { .name = "db8500-esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
- { .name = "db8500-esram12-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
- { .name = "db8500-esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
- { .name = "db8500-esram34-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
+ { .name = "db8500_vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
+ { .name = "db8500_varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
+ { .name = "db8500_vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
+ { .name = "db8500_vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
+ { .name = "db8500_vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
+ { .name = "db8500_vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
+ { .name = "db8500_vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
+ { .name = "db8500_vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
+ { .name = "db8500_sva_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
+ { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
+ { .name = "db8500_sva_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
+ { .name = "db8500_sia_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
+ { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
+ { .name = "db8500_sia_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
+ { .name = "db8500_sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
+ { .name = "db8500_b2r2_mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
+ { .name = "db8500_esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
+ { .name = "db8500_esram12_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
+ { .name = "db8500_esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
+ { .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
};
static __devinit int
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 9997d7aaca84..242851a4c1a6 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
}
static int gpio_regulator_set_value(struct regulator_dev *dev,
- int min, int max)
+ int min, int max, unsigned *selector)
{
struct gpio_regulator_data *data = rdev_get_drvdata(dev);
- int ptr, target, state, best_val = INT_MAX;
+ int ptr, target = 0, state, best_val = INT_MAX;
for (ptr = 0; ptr < data->nr_states; ptr++)
if (data->states[ptr].value < best_val &&
data->states[ptr].value >= min &&
- data->states[ptr].value <= max)
+ data->states[ptr].value <= max) {
target = data->states[ptr].gpios;
+ best_val = data->states[ptr].value;
+ if (selector)
+ *selector = ptr;
+ }
if (best_val == INT_MAX)
return -EINVAL;
@@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV,
unsigned *selector)
{
- return gpio_regulator_set_value(dev, min_uV, max_uV);
+ return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
}
static int gpio_regulator_list_voltage(struct regulator_dev *dev,
@@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
int min_uA, int max_uA)
{
- return gpio_regulator_set_value(dev, min_uA, max_uA);
+ return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
}
static struct regulator_ops gpio_regulator_voltage_ops = {
@@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
- cfg.driver_data = &drvdata;
+ cfg.driver_data = drvdata;
drvdata->dev = regulator_register(&drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
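The gpio-regulator hunk above fixes the value search and, at the same time, starts reporting the chosen table index: best_val was never updated inside the loop, so the function returned -EINVAL even when an in-range state had been found, and the regulator core had no way to learn which entry was selected. A minimal, hypothetical sketch of the corrected lookup (example_state/example_pick are made-up names, not the driver's own structures):

/*
 * Hedged sketch, not the driver's code: pick the lowest in-range value from
 * a board-supplied table and report its index through *selector.
 */
#include <linux/errno.h>
#include <linux/kernel.h>	/* INT_MAX */

struct example_state {
	int uV;		/* value this GPIO combination produces */
	int gpios;	/* GPIO levels encoding that value */
};

static int example_pick(const struct example_state *states, int nr_states,
			int min_uV, int max_uV, unsigned *selector)
{
	int i, best_val = INT_MAX, target = -1;

	for (i = 0; i < nr_states; i++) {
		if (states[i].uV >= min_uV && states[i].uV <= max_uV &&
		    states[i].uV < best_val) {
			best_val = states[i].uV;	/* remember the match */
			target = i;
		}
	}

	if (target < 0)
		return -EINVAL;		/* nothing inside [min_uV, max_uV] */

	if (selector)
		*selector = target;	/* let the regulator core cache it */

	/* the caller would now drive the GPIOs from states[target].gpios */
	return 0;
}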
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1f4bb80457b3..9d540cd02dab 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
config.dev = &client->dev;
config.init_data = pdata->regulator;
config.driver_data = info;
+ config.regmap = info->regmap;
info->regulator = regulator_register(&dcdc_desc, &config);
if (IS_ERR(info->regulator)) {
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c4435f608df7..795f75a6ac33 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -673,7 +673,9 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].ops = &palmas_ops_smps10;
pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
pmic->desc[id].vsel_mask = SMPS10_VSEL;
- pmic->desc[id].enable_reg = PALMAS_SMPS10_STATUS;
+ pmic->desc[id].enable_reg =
+ PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+ PALMAS_SMPS10_STATUS);
pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
}
@@ -739,7 +741,8 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].type = REGULATOR_VOLTAGE;
pmic->desc[id].owner = THIS_MODULE;
- pmic->desc[id].enable_reg = palmas_regs_info[id].ctrl_addr;
+ pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+ palmas_regs_info[id].ctrl_addr);
pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
if (pdata && pdata->reg_data)
@@ -775,9 +778,6 @@ static __devinit int palmas_probe(struct platform_device *pdev)
err_unregister_regulator:
while (--id >= 0)
regulator_unregister(pmic->rdev[id]);
- kfree(pmic->rdev);
- kfree(pmic->desc);
- kfree(pmic);
return ret;
}
@@ -788,10 +788,6 @@ static int __devexit palmas_remove(struct platform_device *pdev)
for (id = 0; id < PALMAS_NUM_REGS; id++)
regulator_unregister(pmic->rdev[id]);
-
- kfree(pmic->rdev);
- kfree(pmic->desc);
- kfree(pmic);
return 0;
}
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 290d6fc01029..9caadb482178 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -451,7 +451,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
desc = reg_voltage_map[reg_id];
- if (old_sel < new_sel)
+ if ((old_sel < new_sel) && s5m8767->ramp_delay)
return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
s5m8767->ramp_delay * 1000);
return 0;
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index f841bd0db6aa..8f1be8586c72 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -71,7 +71,7 @@
/* LDO_CTRL bitfields */
#define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id) ((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0x0F << ((ldo_id)*4))
+#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0x07 << ((ldo_id)*4))
/* Number of step-down converters available */
#define TPS65023_NUM_DCDC 3
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index b88b3df82381..1b299aacf22f 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -482,7 +482,7 @@ static int get_voltage_sel(struct regulator_dev *rdev)
info = &supply_info[rdev_get_id(rdev)];
if (info->flags & FIXED_VOLTAGE)
- return info->fixed_voltage;
+ return 0;
ret = read_field(hw, &info->voltage);
if (ret < 0)
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 8dc3d9392bfa..6bf864b4bdf6 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -31,54 +31,54 @@
TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 | \
TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
-/* supported VIO voltages in milivolts */
+/* supported VIO voltages in millivolts */
static const u16 VIO_VSEL_table[] = {
1500, 1800, 2500, 3300,
};
/* VSEL tables for TPS65910 specific LDOs and dcdc's */
-/* supported VDD3 voltages in milivolts */
+/* supported VDD3 voltages in millivolts */
static const u16 VDD3_VSEL_table[] = {
5000,
};
-/* supported VDIG1 voltages in milivolts */
+/* supported VDIG1 voltages in millivolts */
static const u16 VDIG1_VSEL_table[] = {
1200, 1500, 1800, 2700,
};
-/* supported VDIG2 voltages in milivolts */
+/* supported VDIG2 voltages in millivolts */
static const u16 VDIG2_VSEL_table[] = {
1000, 1100, 1200, 1800,
};
-/* supported VPLL voltages in milivolts */
+/* supported VPLL voltages in millivolts */
static const u16 VPLL_VSEL_table[] = {
1000, 1100, 1800, 2500,
};
-/* supported VDAC voltages in milivolts */
+/* supported VDAC voltages in millivolts */
static const u16 VDAC_VSEL_table[] = {
1800, 2600, 2800, 2850,
};
-/* supported VAUX1 voltages in milivolts */
+/* supported VAUX1 voltages in millivolts */
static const u16 VAUX1_VSEL_table[] = {
1800, 2500, 2800, 2850,
};
-/* supported VAUX2 voltages in milivolts */
+/* supported VAUX2 voltages in millivolts */
static const u16 VAUX2_VSEL_table[] = {
1800, 2800, 2900, 3300,
};
-/* supported VAUX33 voltages in milivolts */
+/* supported VAUX33 voltages in millivolts */
static const u16 VAUX33_VSEL_table[] = {
1800, 2000, 2800, 3300,
};
-/* supported VMMC voltages in milivolts */
+/* supported VMMC voltages in millivolts */
static const u16 VMMC_VSEL_table[] = {
1800, 2800, 3000, 3300,
};
@@ -331,21 +331,16 @@ struct tps65910_reg {
static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
{
- u8 val;
+ unsigned int val;
int err;
- err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
+ err = tps65910_reg_read(pmic->mfd, reg, &val);
if (err)
return err;
return val;
}
-static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
-{
- return pmic->mfd->write(pmic->mfd, reg, 1, &val);
-}
-
static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
u8 set_mask, u8 clear_mask)
{
@@ -362,7 +357,7 @@ static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
data &= ~clear_mask;
data |= set_mask;
- err = tps65910_write(pmic, reg, data);
+ err = tps65910_reg_write(pmic->mfd, reg, data);
if (err)
dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
@@ -371,7 +366,7 @@ out:
return err;
}
-static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
+static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg)
{
int data;
@@ -385,13 +380,13 @@ static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
return data;
}
-static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val)
{
int err;
mutex_lock(&pmic->mutex);
- err = tps65910_write(pmic, reg, val);
+ err = tps65910_reg_write(pmic->mfd, reg, val);
if (err < 0)
dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
@@ -490,9 +485,9 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
LDO_ST_MODE_BIT);
case REGULATOR_MODE_IDLE:
value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
- return tps65910_set_bits(mfd, reg, value);
+ return tps65910_reg_set_bits(mfd, reg, value);
case REGULATOR_MODE_STANDBY:
- return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+ return tps65910_reg_clear_bits(mfd, reg, LDO_ST_ON_BIT);
}
return -EINVAL;
@@ -507,7 +502,7 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev)
if (reg < 0)
return reg;
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
if (value < 0)
return value;
@@ -527,28 +522,28 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
switch (id) {
case TPS65910_REG_VDD1:
- opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
- mult = tps65910_reg_read(pmic, TPS65910_VDD1);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP);
+ mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1);
mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR);
sr = opvsel & VDD1_OP_CMD_MASK;
opvsel &= VDD1_OP_SEL_MASK;
srvsel &= VDD1_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65910_REG_VDD2:
- opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
- mult = tps65910_reg_read(pmic, TPS65910_VDD2);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP);
+ mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2);
mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR);
sr = opvsel & VDD2_OP_CMD_MASK;
opvsel &= VDD2_OP_SEL_MASK;
srvsel &= VDD2_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65911_REG_VDDCTRL:
- opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
- srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR);
sr = opvsel & VDDCTRL_OP_CMD_MASK;
opvsel &= VDDCTRL_OP_SEL_MASK;
srvsel &= VDDCTRL_SR_SEL_MASK;
@@ -588,7 +583,7 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
if (reg < 0)
return reg;
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
if (value < 0)
return value;
@@ -625,7 +620,7 @@ static int tps65911_get_voltage_sel(struct regulator_dev *dev)
reg = pmic->get_ctrl_reg(id);
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
switch (id) {
case TPS65911_REG_LDO1:
@@ -670,7 +665,7 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
tps65910_modify_bits(pmic, TPS65910_VDD1,
(dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -681,11 +676,11 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
tps65910_modify_bits(pmic, TPS65910_VDD2,
(dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
vsel = selector + 3;
- tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel);
}
return 0;
@@ -936,10 +931,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* External EN1 control */
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -949,10 +944,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* External EN2 control */
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -964,10 +959,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
if ((tps65910_chip_id(mfd) == TPS65910) &&
(id >= TPS65910_REG_VDIG1)) {
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -979,10 +974,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* Return if no external control is selected */
if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
/* Clear all sleep controls */
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
if (!ret)
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
if (ret < 0)
dev_err(mfd->dev,
@@ -1001,32 +996,33 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
(tps65910_chip_id(mfd) == TPS65911))) {
int op_reg_add = pmic->get_ctrl_reg(id) + 1;
int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
- int opvsel = tps65910_reg_read(pmic, op_reg_add);
- int srvsel = tps65910_reg_read(pmic, sr_reg_add);
+ int opvsel = tps65910_reg_read_locked(pmic, op_reg_add);
+ int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add);
if (opvsel & VDD1_OP_CMD_MASK) {
u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
- ret = tps65910_reg_write(pmic, op_reg_add, reg_val);
+ ret = tps65910_reg_write_locked(pmic, op_reg_add,
+ reg_val);
if (ret < 0) {
dev_err(mfd->dev,
"Error in configuring op register\n");
return ret;
}
}
- ret = tps65910_reg_write(pmic, sr_reg_add, 0);
+ ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0);
if (ret < 0) {
dev_err(mfd->dev, "Error in settting sr register\n");
return ret;
}
}
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
if (!ret) {
if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
}
if (ret < 0)
@@ -1177,7 +1173,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
/* Give control of all register to control port */
- tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+ tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
DEVCTRL_SR_CTL_I2C_SEL_MASK);
switch(tps65910_chip_id(tps65910)) {
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a885911bb5fc..099da11e989f 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -535,7 +535,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -544,7 +544,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
goto err_regulator;
}
- irq = platform_get_irq_byname(pdev, "HC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -558,7 +558,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
return 0;
err_uv:
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
err_regulator:
regulator_unregister(dcdc->regulator);
err:
@@ -570,11 +571,14 @@ err:
static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
+ struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "HC"), dcdc);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC")),
+ dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
@@ -726,7 +730,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -751,7 +755,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
return 0;
@@ -859,7 +864,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name,
dcdc);
@@ -885,7 +890,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
return 0;
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index b50ab778b098..0d207c297714 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -202,7 +202,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq(pdev, 0);
+ irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
IRQF_TRIGGER_RISING, isink->name, isink);
if (ret != 0) {
@@ -227,7 +227,7 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq(pdev, 0), isink);
+ free_irq(wm831x_irq(isink->wm831x, platform_get_irq(pdev, 0)), isink);
regulator_unregister(isink->regulator);
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index aa1f8b3fbe16..a9a28d8ac185 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -321,7 +321,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
IRQF_TRIGGER_RISING, ldo->name,
ldo);
@@ -347,7 +347,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(wm831x_irq(ldo->wm831x,
+ platform_get_irq_byname(pdev, "UV")), ldo);
regulator_unregister(ldo->regulator);
return 0;
@@ -582,7 +583,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
IRQF_TRIGGER_RISING, ldo->name, ldo);
if (ret != 0) {
@@ -605,7 +606,8 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
- free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(wm831x_irq(ldo->wm831x, platform_get_irq_byname(pdev, "UV")),
+ ldo);
regulator_unregister(ldo->regulator);
return 0;
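The wm831x regulator changes above consistently wrap platform_get_irq_byname()/platform_get_irq() in wm831x_irq(). This follows the MFD core's move to an irq_domain: the platform resource now carries a chip-local interrupt number that must be translated into a Linux virq before request_threaded_irq() or free_irq() can use it. A hedged sketch of the general shape of such a translation helper (the real one lives in the wm831x MFD code):

#include <linux/irqdomain.h>

/* Translate a chip-local interrupt number into a Linux virq. */
static inline int example_chip_irq_to_virq(struct irq_domain *domain,
					   unsigned int hwirq)
{
	return irq_create_mapping(domain, hwirq);
}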
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 24d880e78ec6..f8d818abf98c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -4,9 +4,11 @@ menu "Remoteproc drivers (EXPERIMENTAL)"
config REMOTEPROC
tristate
depends on EXPERIMENTAL
+ select FW_CONFIG
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
+ depends on EXPERIMENTAL
depends on ARCH_OMAP4
depends on OMAP_IOMMU
select REMOTEPROC
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 69425c4e86f3..de138e30d3e6 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
return ret;
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index e756a0df3664..66324ee4678f 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -78,7 +78,7 @@ typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
* the recovery of the remote processor.
*/
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
- unsigned long iova, int flags)
+ unsigned long iova, int flags, void *token)
{
dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
@@ -117,7 +117,7 @@ static int rproc_enable_iommu(struct rproc *rproc)
return -ENOMEM;
}
- iommu_set_fault_handler(domain, rproc_iommu_fault);
+ iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
ret = iommu_attach_device(domain, dev);
if (ret) {
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
}
if (offset + filesz > len) {
- dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
offset + filesz, len);
ret = -EINVAL;
break;
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
if (unmapped != entry->len) {
/* nothing much to do besides complaining */
- dev_err(dev, "failed to unmap %u/%u\n", entry->len,
+ dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
unmapped);
}
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
ehdr = (struct elf32_hdr *)fw->data;
- dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
+ dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
* if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/* look for the resource table */
table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
- if (!table)
+ if (!table) {
+ ret = -EINVAL;
goto clean_up;
+ }
/* handle fw resources which are required to boot rproc */
ret = rproc_handle_boot_rsc(rproc, table, tablesz);
@@ -1105,8 +1107,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
goto out;
out:
- if (fw)
- release_firmware(fw);
+ release_firmware(fw);
/* allow rproc_unregister() contexts, if any, to proceed */
complete_all(&rproc->firmware_loading_complete);
}
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 75506ec2840e..f56c8ba3a861 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -188,6 +188,26 @@ static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
rpdev->id.name);
}
+/**
+ * __ept_release() - deallocate an rpmsg endpoint
+ * @kref: the ept's reference count
+ *
+ * This function deallocates an ept, and is invoked when its @kref refcount
+ * drops to zero.
+ *
+ * Never invoke this function directly!
+ */
+static void __ept_release(struct kref *kref)
+{
+ struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+ refcount);
+ /*
+ * At this point no one holds a reference to ept anymore,
+ * so we can directly free it
+ */
+ kfree(ept);
+}
+
/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
@@ -206,6 +226,9 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
return NULL;
}
+ kref_init(&ept->refcount);
+ mutex_init(&ept->cb_lock);
+
ept->rpdev = rpdev;
ept->cb = cb;
ept->priv = priv;
@@ -238,7 +261,7 @@ rem_idr:
idr_remove(&vrp->endpoints, request);
free_ept:
mutex_unlock(&vrp->endpoints_lock);
- kfree(ept);
+ kref_put(&ept->refcount, __ept_release);
return NULL;
}
@@ -302,11 +325,17 @@ EXPORT_SYMBOL(rpmsg_create_ept);
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
+ /* make sure new inbound messages can't find this ept anymore */
mutex_lock(&vrp->endpoints_lock);
idr_remove(&vrp->endpoints, ept->addr);
mutex_unlock(&vrp->endpoints_lock);
- kfree(ept);
+ /* make sure in-flight inbound messages won't invoke cb anymore */
+ mutex_lock(&ept->cb_lock);
+ ept->cb = NULL;
+ mutex_unlock(&ept->cb_lock);
+
+ kref_put(&ept->refcount, __ept_release);
}
/**
@@ -790,12 +819,28 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
/* use the dst addr to fetch the callback of the appropriate user */
mutex_lock(&vrp->endpoints_lock);
+
ept = idr_find(&vrp->endpoints, msg->dst);
+
+ /* let's make sure no one deallocates ept while we use it */
+ if (ept)
+ kref_get(&ept->refcount);
+
mutex_unlock(&vrp->endpoints_lock);
- if (ept && ept->cb)
- ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
- else
+ if (ept) {
+ /* make sure ept->cb doesn't go away while we use it */
+ mutex_lock(&ept->cb_lock);
+
+ if (ept->cb)
+ ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
+ msg->src);
+
+ mutex_unlock(&ept->cb_lock);
+
+ /* farewell, ept, we don't need you anymore */
+ kref_put(&ept->refcount, __ept_release);
+ } else
dev_warn(dev, "msg received with no recepient\n");
/* publish the real size of the buffer */
@@ -1040,7 +1085,7 @@ static int __init rpmsg_init(void)
return ret;
}
-module_init(rpmsg_init);
+subsys_initcall(rpmsg_init);
static void __exit rpmsg_fini(void)
{
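The virtio_rpmsg_bus hunks above close a use-after-free race between message delivery and endpoint destruction: the receive path now pins the endpoint with a kref and invokes the callback under cb_lock, while destroy clears the callback under the same lock before dropping its reference. A minimal sketch of that pattern on a hypothetical object (not the rpmsg structures themselves):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_ept {
	struct kref refcount;
	struct mutex cb_lock;
	void (*cb)(void *data, int len);
};

static void example_ept_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_ept, refcount));
}

/* Receive path: pin the object, then run cb only if it is still set. */
static void example_deliver(struct example_ept *ept, void *data, int len)
{
	kref_get(&ept->refcount);

	mutex_lock(&ept->cb_lock);
	if (ept->cb)
		ept->cb(data, len);
	mutex_unlock(&ept->cb_lock);

	kref_put(&ept->refcount, example_ept_release);
}

/* Teardown: disable the callback first, then drop the creator's reference. */
static void example_destroy(struct example_ept *ept)
{
	mutex_lock(&ept->cb_lock);
	ept->cb = NULL;
	mutex_unlock(&ept->cb_lock);

	kref_put(&ept->refcount, example_ept_release);
}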
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4161bfe462cd..08cbdb900a18 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,27 +620,6 @@ config RTC_DRV_MSM6242
This driver can also be built as a module. If so, the module
will be called rtc-msm6242.
-config RTC_DRV_IMXDI
- tristate "Freescale IMX DryIce Real Time Clock"
- depends on ARCH_MX25
- depends on RTC_CLASS
- help
- Support for Freescale IMX DryIce RTC
-
- This driver can also be built as a module, if so, the module
- will be called "rtc-imxdi".
-
-config RTC_MXC
- tristate "Freescale MXC Real Time Clock"
- depends on ARCH_MXC
- depends on RTC_CLASS
- help
- If you say yes here you get support for the Freescale MXC
- RTC module.
-
- This driver can also be built as a module, if so, the module
- will be called "rtc-mxc".
-
config RTC_DRV_BQ4802
tristate "TI BQ4802"
help
@@ -738,6 +717,16 @@ config RTC_DRV_DAVINCI
This driver can also be built as a module. If so, the module
will be called rtc-davinci.
+config RTC_DRV_IMXDI
+ tristate "Freescale IMX DryIce Real Time Clock"
+ depends on SOC_IMX25
+ depends on RTC_CLASS
+ help
+ Support for Freescale IMX DryIce RTC
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-imxdi".
+
config RTC_DRV_OMAP
tristate "TI OMAP1"
depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -1087,4 +1076,15 @@ config RTC_DRV_LOONGSON1
This driver can also be built as a module. If so, the module
will be called rtc-ls1x.
+config RTC_DRV_MXC
+ tristate "Freescale MXC Real Time Clock"
+ depends on ARCH_MXC
+ depends on RTC_CLASS
+ help
+ If you say yes here you get support for the Freescale MXC
+ RTC module.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-mxc".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 727ae7786e6c..2973921c30d8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -61,7 +61,7 @@ obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
-obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
+obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 4bcf9ca2818a..370889d0489b 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -17,6 +17,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/delay.h>
+#include <linux/of.h>
#define AB8500_RTC_SOFF_STAT_REG 0x00
#define AB8500_RTC_CC_CONF_REG 0x01
@@ -422,7 +423,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
}
err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
- IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
+ IRQF_NO_SUSPEND | IRQF_ONESHOT, "ab8500-rtc", rtc);
if (err < 0) {
rtc_device_unregister(rtc);
return err;
@@ -430,7 +431,6 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
-
err = ab8500_sysfs_rtc_register(&pdev->dev);
if (err) {
dev_err(&pdev->dev, "sysfs RTC failed to register\n");
@@ -454,10 +454,16 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_rtc_match[] = {
+ { .compatible = "stericsson,ab8500-rtc", },
+ {}
+};
+
static struct platform_driver ab8500_rtc_driver = {
.driver = {
.name = "ab8500-rtc",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_rtc_match,
},
.probe = ab8500_rtc_probe,
.remove = __devexit_p(ab8500_rtc_remove),
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7d5f56edb8ef..4267789ca995 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev)
static u32 rtc_handler(void *context)
{
+ struct device *dev = context;
+
+ pm_wakeup_event(dev, 0);
acpi_clear_event(ACPI_EVENT_RTC);
acpi_disable_event(ACPI_EVENT_RTC, 0);
return ACPI_INTERRUPT_HANDLED;
}
-static inline void rtc_wake_setup(void)
+static inline void rtc_wake_setup(struct device *dev)
{
- acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+ acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
/*
* After the RTC handler is installed, the Fixed_RTC event should
* be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev)
if (acpi_disabled)
return;
- rtc_wake_setup();
+ rtc_wake_setup(dev);
acpi_rtc_info.wake_on = rtc_wake_on;
acpi_rtc_info.wake_off = rtc_wake_off;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c293d0cdb104..836710ce750e 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -17,8 +17,7 @@
#include <linux/string.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
-
-
+#include <linux/rtc/ds1307.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -92,7 +91,8 @@ enum ds_type {
# define DS1337_BIT_A2I 0x02
# define DS1337_BIT_A1I 0x01
#define DS1339_REG_ALARM1_SECS 0x07
-#define DS1339_REG_TRICKLE 0x10
+
+#define DS13XX_TRICKLE_CHARGER_MAGIC 0xa0
#define RX8025_REG_CTRL1 0x0e
# define RX8025_BIT_2412 0x20
@@ -124,6 +124,7 @@ struct chip_desc {
unsigned alarm:1;
u16 nvram_offset;
u16 nvram_size;
+ u16 trickle_charger_reg;
};
static const struct chip_desc chips[last_ds_type] = {
@@ -140,6 +141,13 @@ static const struct chip_desc chips[last_ds_type] = {
},
[ds_1339] = {
.alarm = 1,
+ .trickle_charger_reg = 0x10,
+ },
+ [ds_1340] = {
+ .trickle_charger_reg = 0x08,
+ },
+ [ds_1388] = {
+ .trickle_charger_reg = 0x0a,
},
[ds_3231] = {
.alarm = 1,
@@ -619,6 +627,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int want_irq = false;
unsigned char *buf;
+ struct ds1307_platform_data *pdata = client->dev.platform_data;
static const int bbsqi_bitpos[] = {
[ds_1337] = 0,
[ds_1339] = DS1339_BIT_BBSQI,
@@ -637,7 +646,10 @@ static int __devinit ds1307_probe(struct i2c_client *client,
ds1307->client = client;
ds1307->type = id->driver_data;
- ds1307->offset = 0;
+
+ if (pdata && pdata->trickle_charger_setup && chip->trickle_charger_reg)
+ i2c_smbus_write_byte_data(client, chip->trickle_charger_reg,
+ DS13XX_TRICKLE_CHARGER_MAGIC | pdata->trickle_charger_setup);
buf = ds1307->regs;
if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 14a42a1edc66..9602278ff988 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -127,7 +127,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
.attrs = ep93xx_rtc_attrs,
};
-static int __init ep93xx_rtc_probe(struct platform_device *pdev)
+static int __devinit ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
struct resource *res;
@@ -174,7 +174,7 @@ exit:
return err;
}
-static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
+static int __devexit ep93xx_rtc_remove(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
@@ -186,31 +186,19 @@ static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:ep93xx-rtc");
-
static struct platform_driver ep93xx_rtc_driver = {
.driver = {
.name = "ep93xx-rtc",
.owner = THIS_MODULE,
},
- .remove = __exit_p(ep93xx_rtc_remove),
+ .probe = ep93xx_rtc_probe,
+ .remove = __devexit_p(ep93xx_rtc_remove),
};
-static int __init ep93xx_rtc_init(void)
-{
- return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
-}
-
-static void __exit ep93xx_rtc_exit(void)
-{
- platform_driver_unregister(&ep93xx_rtc_driver);
-}
+module_platform_driver(ep93xx_rtc_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("EP93XX RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(ep93xx_rtc_init);
-module_exit(ep93xx_rtc_exit);
+MODULE_ALIAS("platform:ep93xx-rtc");
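Because the ep93xx RTC now sets a .probe method, platform_driver_probe() is no longer needed and module_platform_driver() can generate the init/exit boilerplate. Roughly, the macro expands to the code it replaced (sketch; the authoritative definition is in <linux/platform_device.h>):

/* Roughly what module_platform_driver(ep93xx_rtc_driver) generates: */
static int __init ep93xx_rtc_driver_init(void)
{
	return platform_driver_register(&ep93xx_rtc_driver);
}
module_init(ep93xx_rtc_driver_init);

static void __exit ep93xx_rtc_driver_exit(void)
{
	platform_driver_unregister(&ep93xx_rtc_driver);
}
module_exit(ep93xx_rtc_driver_exit);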
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index d93a9608b1f0..891cd6c61d0a 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -405,7 +405,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
imxdi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(imxdi->clk))
return PTR_ERR(imxdi->clk);
- clk_enable(imxdi->clk);
+ clk_prepare_enable(imxdi->clk);
/*
* Initialize dryice hardware
@@ -470,7 +470,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
return 0;
err:
- clk_disable(imxdi->clk);
+ clk_disable_unprepare(imxdi->clk);
clk_put(imxdi->clk);
return rc;
@@ -487,7 +487,7 @@ static int __devexit dryice_rtc_remove(struct platform_device *pdev)
rtc_device_unregister(imxdi->rtc);
- clk_disable(imxdi->clk);
+ clk_disable_unprepare(imxdi->clk);
clk_put(imxdi->clk);
return 0;
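The imxdi hunks replace clk_enable()/clk_disable() with clk_prepare_enable()/clk_disable_unprepare(), as required by the common clock framework: a clock must be prepared (the part that may sleep) before it can be enabled (the atomic part). The helpers simply combine the two steps; an equivalent open-coded sketch:

#include <linux/clk.h>

/* Equivalent to clk_prepare_enable(clk) / clk_disable_unprepare(clk). */
static int example_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);	/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(clk);	/* atomic */
	if (ret)
		clk_unprepare(clk);

	return ret;
}

static void example_clk_off(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}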
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 63c72189c64b..d5218553741f 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -19,6 +19,7 @@
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of.h>
/*
* Clock and Power control register offsets
@@ -386,13 +387,22 @@ static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
#define LPC32XX_RTC_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id lpc32xx_rtc_match[] = {
+ { .compatible = "nxp,lpc3220-rtc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
+#endif
+
static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
.remove = __devexit_p(lpc32xx_rtc_remove),
.driver = {
.name = RTC_NAME,
.owner = THIS_MODULE,
- .pm = LPC32XX_RTC_PM_OPS
+ .pm = LPC32XX_RTC_PM_OPS,
+ .of_match_table = of_match_ptr(lpc32xx_rtc_match),
},
};
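The lpc32xx hunk above is the usual recipe for adding a device-tree binding to an existing platform driver: an of_device_id table guarded by CONFIG_OF, exported with MODULE_DEVICE_TABLE() for module autoloading, and wired up through of_match_ptr() so non-DT builds see a NULL table. A condensed sketch with a placeholder compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id example_rtc_match[] = {
	{ .compatible = "vendor,example-rtc" },	/* placeholder string */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_rtc_match);
#endif

static struct platform_driver example_rtc_driver = {
	.driver = {
		.name		= "example-rtc",
		.owner		= THIS_MODULE,
		/* evaluates to NULL when CONFIG_OF is not set */
		.of_match_table	= of_match_ptr(example_rtc_match),
	},
};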
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 10f1c29436ec..efab3d48cb15 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -48,6 +48,7 @@ static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
+ int tmp;
u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */
u8 * const data = &buf[1]; /* ptr to first data byte */
@@ -62,6 +63,30 @@ static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+ if (tmp < 0)
+ return tmp;
+
+ if (tmp & M41T93_FLAG_OF) {
+ dev_warn(&spi->dev, "OF bit is set, resetting.\n");
+ m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
+
+ tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+ if (tmp < 0) {
+ return tmp;
+ } else if (tmp & M41T93_FLAG_OF) {
+ /* OF cannot be immediately reset: oscillator has to be
+ * restarted. */
+ u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
+
+ dev_warn(&spi->dev,
+ "OF bit is still set, kickstarting clock.\n");
+ m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+ reset_osc &= ~M41T93_FLAG_ST;
+ m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+ }
+ }
+
data[M41T93_REG_SSEC] = 0;
data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec);
data[M41T93_REG_MIN] = bin2bcd(tm->tm_min);
@@ -89,10 +114,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
1. halt bit (HT) is set: the clock is running but update of readout
registers has been disabled due to power failure. This is normal
case after poweron. Time is valid after resetting HT bit.
- 2. oscillator fail bit (OF) is set. Oscillator has be stopped and
- time is invalid:
- a) OF can be immeditely reset.
- b) OF cannot be immediately reset: oscillator has to be restarted.
+ 2. oscillator fail bit (OF) is set: time is invalid.
*/
tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
if (tmp < 0)
@@ -110,21 +132,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
if (tmp & M41T93_FLAG_OF) {
ret = -EINVAL;
- dev_warn(&spi->dev, "OF bit is set, resetting.\n");
- m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
-
- tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
- if (tmp < 0)
- return tmp;
- else if (tmp & M41T93_FLAG_OF) {
- u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
-
- dev_warn(&spi->dev,
- "OF bit is still set, kickstarting clock.\n");
- m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
- reset_osc &= ~M41T93_FLAG_ST;
- m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
- }
+ dev_warn(&spi->dev, "OF bit is set, write time to restart.\n");
}
if (tmp & M41T93_FLAG_BL)
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 5e1d64ee5228..e3e50d69baf8 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -202,10 +202,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
+ unsigned long flags;
u32 status;
u32 events = 0;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
/* clear interrupt sources */
writew(status, ioaddr + RTC_RTCISR);
@@ -224,7 +225,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
events |= (RTC_PF | RTC_IRQF);
rtc_update_irq(pdata->rtc, 1, events);
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
return IRQ_HANDLED;
}
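The mxc RTC interrupt handler above switches from spin_lock_irq() to spin_lock_irqsave() because a handler cannot assume interrupts were enabled on entry; unconditionally re-enabling them on unlock could corrupt the interrupted context. Sketch of the safe form:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* saves IRQ state */
	/* ... read and clear hardware status here ... */
	spin_unlock_irqrestore(&example_lock, flags);	/* restores it */

	return IRQ_HANDLED;
}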
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index bc0677de1996..97a3284bb7c6 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -64,6 +64,7 @@ struct pcf8563 {
* 1970...2069.
*/
int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
+ int voltage_low; /* indicates if a low voltage was detected */

};
/*
@@ -86,9 +87,11 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
return -EIO;
}
- if (buf[PCF8563_REG_SC] & PCF8563_SC_LV)
+ if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
+ pcf8563->voltage_low = 1;
dev_info(&client->dev,
"low voltage detected, date/time is not reliable.\n");
+ }
dev_dbg(&client->dev,
"%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
@@ -173,6 +176,44 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return 0;
}
+#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
+ struct rtc_time tm;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ if (pcf8563->voltage_low)
+ dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+
+ if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
+ sizeof(int)))
+ return -EFAULT;
+ return 0;
+ case RTC_VL_CLR:
+ /*
+ * Clear the VL bit in the seconds register in case
+ * the time has not been set already (which would
+ * have cleared it). This does not really matter
+ * because of the cached voltage_low value but do it
+ * anyway for consistency.
+ */
+ if (pcf8563_get_datetime(to_i2c_client(dev), &tm))
+ pcf8563_set_datetime(to_i2c_client(dev), &tm);
+
+ /* Clear the cached value. */
+ pcf8563->voltage_low = 0;
+
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#else
+#define pcf8563_rtc_ioctl NULL
+#endif
+
static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return pcf8563_get_datetime(to_i2c_client(dev), tm);
@@ -184,6 +225,7 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
}
static const struct rtc_class_ops pcf8563_rtc_ops = {
+ .ioctl = pcf8563_rtc_ioctl,
.read_time = pcf8563_rtc_read_time,
.set_time = pcf8563_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f027c063fb20..cc0533994f6e 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -220,17 +220,9 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id)
unsigned long events = 0;
rtcmis = readl(ldata->base + RTC_MIS);
- if (rtcmis) {
- writel(rtcmis, ldata->base + RTC_ICR);
-
- if (rtcmis & RTC_BIT_AI)
- events |= (RTC_AF | RTC_IRQF);
-
- /* Timer interrupt is only available in ST variants */
- if ((rtcmis & RTC_BIT_PI) &&
- (ldata->hw_designer == AMBA_VENDOR_ST))
- events |= (RTC_PF | RTC_IRQF);
-
+ if (rtcmis & RTC_BIT_AI) {
+ writel(RTC_BIT_AI, ldata->base + RTC_ICR);
+ events |= (RTC_AF | RTC_IRQF);
rtc_update_irq(ldata->rtc, 1, events);
return IRQ_HANDLED;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 3f3a29752369..7e6af0b22f17 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -670,6 +670,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C2410] = { TYPE_S3C2410 },
[TYPE_S3C2416] = { TYPE_S3C2416 },
@@ -677,7 +678,6 @@ static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C64XX] = { TYPE_S3C64XX },
};
-#ifdef CONFIG_OF
static const struct of_device_id s3c_rtc_dt_match[] = {
{
.compatible = "samsung,s3c2410-rtc",
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index e38da0dc4187..e2785479113c 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -457,12 +458,12 @@ static int __devexit spear_rtc_remove(struct platform_device *pdev)
clk_disable(config->clk);
clk_put(config->clk);
iounmap(config->ioaddr);
- kfree(config);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
rtc_device_unregister(config->rtc);
+ kfree(config);
return 0;
}
@@ -519,6 +520,14 @@ static void spear_rtc_shutdown(struct platform_device *pdev)
clk_disable(config->clk);
}
+#ifdef CONFIG_OF
+static const struct of_device_id spear_rtc_id_table[] = {
+ { .compatible = "st,spear600-rtc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
+#endif
+
static struct platform_driver spear_rtc_driver = {
.probe = spear_rtc_probe,
.remove = __devexit_p(spear_rtc_remove),
@@ -527,6 +536,7 @@ static struct platform_driver spear_rtc_driver = {
.shutdown = spear_rtc_shutdown,
.driver = {
.name = "rtc-spear",
+ .of_match_table = of_match_ptr(spear_rtc_id_table),
},
};
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 75259fe38602..c006025cecc8 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -309,7 +309,8 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info),
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -317,29 +318,18 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
if (!res) {
dev_err(&pdev->dev,
"Unable to allocate resources for device.\n");
- ret = -EBUSY;
- goto err_free_info;
+ return -EBUSY;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev,
- "Unable to request mem region for device.\n");
- ret = -EBUSY;
- goto err_free_info;
+ info->rtc_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!info->rtc_base) {
+ dev_err(&pdev->dev, "Unable to request mem region and grab IOs for device.\n");
+ return -EBUSY;
}
info->tegra_rtc_irq = platform_get_irq(pdev, 0);
- if (info->tegra_rtc_irq <= 0) {
- ret = -EBUSY;
- goto err_release_mem_region;
- }
-
- info->rtc_base = ioremap_nocache(res->start, resource_size(res));
- if (!info->rtc_base) {
- dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
- ret = -EBUSY;
- goto err_release_mem_region;
- }
+ if (info->tegra_rtc_irq <= 0)
+ return -EBUSY;
/* set context info. */
info->pdev = pdev;
@@ -362,11 +352,12 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Unable to register device (err=%d).\n",
ret);
- goto err_iounmap;
+ return ret;
}
- ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
- IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
+ ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
+ tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
+ "rtc alarm", &pdev->dev);
if (ret) {
dev_err(&pdev->dev,
"Unable to request interrupt for device (err=%d).\n",
@@ -380,12 +371,6 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
err_dev_unreg:
rtc_device_unregister(info->rtc_dev);
-err_iounmap:
- iounmap(info->rtc_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_info:
- kfree(info);
return ret;
}
@@ -393,17 +378,8 @@ err_free_info:
static int __devexit tegra_rtc_remove(struct platform_device *pdev)
{
struct tegra_rtc_info *info = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
- free_irq(info->tegra_rtc_irq, &pdev->dev);
rtc_device_unregister(info->rtc_dev);
- iounmap(info->rtc_base);
- release_mem_region(res->start, resource_size(res));
- kfree(info);
platform_set_drvdata(pdev, NULL);
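The tegra RTC probe above moves to managed (devm_*) resources, which is why the error-unwinding labels and most of remove() disappear: the allocation, MMIO mapping, and IRQ are released automatically when the device is unbound. A condensed probe sketch under that model (devm_request_and_ioremap() was the helper of this kernel generation; example_info and example_irq are placeholders, not the driver's names):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_info {
	void __iomem *base;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct example_info *info;
	struct resource *res;
	int irq, ret;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EBUSY;

	info->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!info->base)
		return -EBUSY;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -EBUSY;

	ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
			       dev_name(&pdev->dev), info);
	if (ret)
		return ret;

	/* everything acquired above is released automatically on unbind */
	platform_set_drvdata(pdev, info);
	return 0;
}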
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 258abeabf624..c5d06fe83bba 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -510,7 +510,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev_name(&rtc->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
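The IRQF_ONESHOT additions for the ab8500 and twl RTC interrupts matter because both requests pass a NULL primary handler to request_threaded_irq(): the genirq core requires IRQF_ONESHOT in that case so the line stays masked until the handler thread has finished. Sketch of the required form:

#include <linux/interrupt.h>

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* runs in process context; the device can be quiesced here */
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev_id)
{
	/*
	 * NULL primary handler => IRQF_ONESHOT is mandatory so the line
	 * remains masked until example_thread_fn() has completed.
	 */
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "example-rtc", dev_id);
}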
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 3b6e6a67e765..59c6245e0421 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -396,7 +396,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_rtc *wm831x_rtc;
- int alm_irq = platform_get_irq_byname(pdev, "ALM");
+ int alm_irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "ALM"));
int ret = 0;
wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 33a6743ddc55..c05da00583f0 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -10,8 +10,6 @@
#ifndef DASD_INT_H
#define DASD_INT_H
-#ifdef __KERNEL__
-
/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -791,6 +789,4 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
#define dasd_eer_enabled(d) (0)
#endif /* CONFIG_DASD_ERR */
-#endif /* __KERNEL__ */
-
#endif /* DASD_H */
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 36506366158d..766cb7b19b40 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
@@ -38,7 +39,8 @@ struct read_info_sccb {
u64 facilities; /* 48-55 */
u8 _reserved2[84 - 56]; /* 56-83 */
u8 fac84; /* 84 */
- u8 _reserved3[91 - 85]; /* 85-90 */
+ u8 fac85; /* 85 */
+ u8 _reserved3[91 - 86]; /* 86-90 */
u8 flags; /* 91 */
u8 _reserved4[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
@@ -51,6 +53,7 @@ static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
+static u8 sclp_fac85;
static unsigned long long rzm;
static unsigned long long rnmax;
@@ -112,6 +115,7 @@ void __init sclp_facilities_detect(void)
sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
+ sclp_fac85 = sccb->fac85;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
@@ -127,6 +131,12 @@ unsigned long long sclp_get_rzm(void)
return rzm;
}
+u8 sclp_get_fac85(void)
+{
+ return sclp_fac85;
+}
+EXPORT_SYMBOL_GPL(sclp_get_fac85);
+
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. Therefore the sccb should have valid contents.
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 69e6c50d4cfb..50f7115990ff 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -211,7 +211,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
sccb.evbuf.event_qual = EQ_STORE_DATA;
sccb.evbuf.data_id = DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
sccb.evbuf.asa_size = ASA_SIZE_64;
#else
sccb.evbuf.asa_size = ASA_SIZE_32;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index bea04e5d3b51..e9559782d3ec 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -808,19 +808,6 @@ config SCSI_FUTURE_DOMAIN
To compile this driver as a module, choose M here: the
module will be called fdomain.
-config SCSI_FD_MCS
- tristate "Future Domain MCS-600/700 SCSI support"
- depends on MCA_LEGACY && SCSI
- ---help---
- This is support for Future Domain MCS 600/700 MCA SCSI adapters.
- Some PS/2 computers are equipped with IBM Fast SCSI Adapter/A which
- is identical to the MCS 700 and hence also supported by this driver.
- This driver also supports the Reply SB16/SCSI card (the SCSI part).
- It supports multiple adapters in the same system.
-
- To compile this driver as a module, choose M here: the
- module will be called fd_mcs.
-
config SCSI_GDTH
tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
@@ -890,76 +877,6 @@ config SCSI_GENERIC_NCR53C400
not detect your card. See the file
<file:Documentation/scsi/g_NCR5380.txt> for details.
-config SCSI_IBMMCA
- tristate "IBMMCA SCSI support"
- depends on MCA && SCSI
- ---help---
- This is support for the IBM SCSI adapter found in many of the PS/2
- series computers. These machines have an MCA bus, so you need to
- answer Y to "MCA support" as well and read
- <file:Documentation/mca.txt>.
-
- If the adapter isn't found during boot (a common problem for models
- 56, 57, 76, and 77) you'll need to use the 'ibmmcascsi=<pun>' kernel
- option, where <pun> is the id of the SCSI subsystem (usually 7, but
- if that doesn't work check your reference diskette). Owners of
- model 95 with a LED-matrix-display can in addition activate some
- activity info like under OS/2, but more informative, by setting
- 'ibmmcascsi=display' as an additional kernel parameter. Try "man
- bootparam" or see the documentation of your boot loader about how to
- pass options to the kernel.
-
- To compile this driver as a module, choose M here: the
- module will be called ibmmca.
-
-config IBMMCA_SCSI_ORDER_STANDARD
- bool "Standard SCSI-order"
- depends on SCSI_IBMMCA
- ---help---
- In the PC-world and in most modern SCSI-BIOS-setups, SCSI-hard disks
- are assigned to the drive letters, starting with the lowest SCSI-id
- (physical number -- pun) to be drive C:, as seen from DOS and
- similar operating systems. When looking into papers describing the
- ANSI-SCSI-standard, this assignment of drives appears to be wrong.
- The SCSI-standard follows a hardware-hierarchy which says that id 7
- has the highest priority and id 0 the lowest. Therefore, the host
- adapters are still today everywhere placed as SCSI-id 7 by default.
- In the SCSI-standard, the drive letters express the priority of the
- disk. C: should be the hard disk, or a partition on it, with the
- highest priority. This must therefore be the disk with the highest
- SCSI-id (e.g. 6) and not the one with the lowest! IBM-BIOS kept the
- original definition of the SCSI-standard as also industrial- and
- process-control-machines, like VME-CPUs running under realtime-OSes
- (e.g. LynxOS, OS9) do.
-
- If you like to run Linux on your MCA-machine with the same
- assignment of hard disks as seen from e.g. DOS or OS/2 on your
- machine, which is in addition conformant to the SCSI-standard, you
- must say Y here. This is also necessary for MCA-Linux users who want
- to keep downward compatibility to older releases of the
- IBM-MCA-SCSI-driver (older than driver-release 2.00 and older than
- June 1997).
-
- If you like to have the lowest SCSI-id assigned as drive C:, as
- modern SCSI-BIOSes do, which does not conform to the standard, but
- is widespread and common in the PC-world of today, you must say N
- here. If unsure, say Y.
-
-config IBMMCA_SCSI_DEV_RESET
- bool "Reset SCSI-devices at boottime"
- depends on SCSI_IBMMCA
- ---help---
- By default, SCSI-devices are reset when the machine is powered on.
- However, some devices exist, like special-control-devices,
- SCSI-CNC-machines, SCSI-printer or scanners of older type, that do
- not reset when switched on. If you say Y here, each device connected
- to your SCSI-bus will be issued a reset-command after it has been
- probed, while the kernel is booting. This may cause problems with
- more modern devices, like hard disks, which do not appreciate these
- reset commands, and can cause your system to hang. So say Y only if
- you know that one of your older devices needs it; N is the safe
- answer.
-
config SCSI_IPS
tristate "IBM ServeRAID support"
depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 8deedeaf5608..1a3368b08615 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -75,7 +75,6 @@ obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
obj-$(CONFIG_SCSI_PM8001) += pm8001/
obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
-obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
obj-$(CONFIG_SCSI_IN2000) += in2000.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
@@ -100,7 +99,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
-obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o
obj-$(CONFIG_SCSI_EATA) += eata.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_DC390T) += tmscsim.o
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index ede91f378000..f79c8f9e33a4 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -22,7 +22,7 @@
* Added module command-line options
* 19-Jul-99
* Modified by Adam Fritzler
- * Added proper detection of the AHA-1640 (MCA version of AHA-1540)
+ * Added proper detection of the AHA-1640 (MCA, now deleted)
*/
#include <linux/module.h>
@@ -37,8 +37,6 @@
#include <linux/spinlock.h>
#include <linux/isapnp.h>
#include <linux/blkdev.h>
-#include <linux/mca.h>
-#include <linux/mca-legacy.h>
#include <linux/slab.h>
#include <asm/dma.h>
@@ -71,7 +69,7 @@
#define MAXBOARDS 4 /* Increase this and the sizes of the
arrays below, if you need more.. */
-/* Boards 3,4 slots are reserved for ISAPnP/MCA scans */
+/* Boards 3,4 slots are reserved for ISAPnP scans */
static unsigned int bases[MAXBOARDS] __initdata = {0x330, 0x334, 0, 0};
@@ -1009,66 +1007,6 @@ static int __init aha1542_detect(struct scsi_host_template * tpnt)
#endif
/*
- * Find MicroChannel cards (AHA1640)
- */
-#ifdef CONFIG_MCA_LEGACY
- if(MCA_bus) {
- int slot = 0;
- int pos = 0;
-
- for (indx = 0; (slot != MCA_NOTFOUND) && (indx < ARRAY_SIZE(bases)); indx++) {
-
- if (bases[indx])
- continue;
-
- /* Detect only AHA-1640 cards -- MCA ID 0F1F */
- slot = mca_find_unused_adapter(0x0f1f, slot);
- if (slot == MCA_NOTFOUND)
- break;
-
- /* Found one */
- pos = mca_read_stored_pos(slot, 3);
-
- /* Decode address */
- if (pos & 0x80) {
- if (pos & 0x02) {
- if (pos & 0x01)
- bases[indx] = 0x334;
- else
- bases[indx] = 0x234;
- } else {
- if (pos & 0x01)
- bases[indx] = 0x134;
- }
- } else {
- if (pos & 0x02) {
- if (pos & 0x01)
- bases[indx] = 0x330;
- else
- bases[indx] = 0x230;
- } else {
- if (pos & 0x01)
- bases[indx] = 0x130;
- }
- }
-
- /* No need to decode IRQ and Arb level -- those are
- * read off the card later.
- */
- printk(KERN_INFO "Found an AHA-1640 in MCA slot %d, I/O 0x%04x\n", slot, bases[indx]);
-
- mca_set_adapter_name(slot, "Adapter AHA-1640");
- mca_set_adapter_procfn(slot, NULL, NULL);
- mca_mark_as_used(slot);
-
- /* Go on */
- slot++;
- }
-
- }
-#endif
-
- /*
* Hunt for ISA Plug'n'Pray Adaptecs (AHA1535)
*/
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 390168f62a13..5fdca93892ad 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -1228,8 +1228,7 @@ static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
int asd_release_firmware(void)
{
- if (sequencer_fw)
- release_firmware(sequencer_fw);
+ release_firmware(sequencer_fw);
return 0;
}
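
The hunk above drops the explicit NULL test because release_firmware(), like kfree(), is widely treated as NULL-safe, so the caller can release unconditionally. A minimal userspace sketch of the same convention (nothing here comes from the driver):

#include <stdlib.h>

int main(void)
{
	char *buf = NULL;

	/* free(NULL) is defined as a no-op, so no guard is needed */
	free(buf);
	return 0;
}
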
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 532d212b6b2c..393e7ce8e95a 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
- memcpy(&resp->ending_fis[0], r+16, 24);
+ memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
ts->buf_valid_size = sizeof(*resp);
}
}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 01bb04cd9e75..2a096795b9aa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -571,13 +571,12 @@ free_cmd:
static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
int iscsi_cmd, int size)
{
- cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
- &cmd->dma);
+ cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
return -ENOMEM;
}
- memset(cmd->va, 0, sizeof(size));
+ memset(cmd->va, 0, size);
cmd->size = size;
be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
return 0;
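
The change above replaces sizeof(size) with size: sizeof(size) is the width of the int variable (typically 4 bytes), not the number of bytes the caller asked for, so both the DMA allocation and the memset were far too small. A standalone illustration of the pitfall, with hypothetical names only:

#include <stdio.h>

int main(void)
{
	int size = 512;

	/* sizeof(size) measures the variable itself, not the value it holds */
	printf("requested %d bytes, sizeof(size) = %zu\n", size, sizeof(size));
	return 0;
}
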
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 8b6c6bf7837e..b83927440171 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
vshost = vport->drv_port.im_port->shost;
fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+ fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+ memset(fc_host_supported_fc4s(vshost), 0,
+ sizeof(fc_host_supported_fc4s(vshost)));
+
+ /* For FCP type 0x08 */
+ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+ fc_host_supported_fc4s(vshost)[2] = 1;
+
+ /* For fibre channel services type 0x20 */
+ fc_host_supported_fc4s(vshost)[7] = 1;
+
+ fc_host_supported_speeds(vshost) =
+ bfad_im_supported_speeds(&bfad->bfa);
+ fc_host_maxframe_size(vshost) =
+ bfa_fcport_get_maxfrsize(&bfad->bfa);
+
fc_vport->dd_data = vport;
vport->drv_port.im_port->fc_vport = fc_vport;
} else if (rc == BFA_STATUS_INVALID_WWN)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 3153923f5b60..1ac09afe35ee 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -987,7 +987,7 @@ done:
return 0;
}
-static u32
+u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
struct bfa_ioc_attr_s *ioc_attr;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 0814367ef101..f6c1023e502a 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -37,6 +37,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
struct bfad_im_port_s *im_port, struct device *dev);
void bfad_im_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
#define MAX_FCP_TARGET 1024
#define MAX_FCP_LUN 16384
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index a4953ef9e53a..0578fa0dc14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.10"
+#define BNX2FC_VERSION "1.0.11"
#define PFX "bnx2fc: "
@@ -228,13 +228,16 @@ struct bnx2fc_interface {
struct packet_type fip_packet_type;
struct workqueue_struct *timer_work_queue;
struct kref kref;
- struct fcoe_ctlr ctlr;
u8 vlan_enabled;
int vlan_id;
bool enabled;
};
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
+#define bnx2fc_from_ctlr(x) \
+ ((struct bnx2fc_interface *)((x) + 1))
+
+#define bnx2fc_to_ctlr(x) \
+ ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
struct bnx2fc_lport {
struct list_head list;
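
The new bnx2fc_from_ctlr()/bnx2fc_to_ctlr() macros rely on the fcoe_ctlr being allocated immediately in front of the driver-private bnx2fc_interface in a single block (see the fcoe_ctlr_device_add() call later in this patch), so a "+ 1"/"- 1" pointer step converts between the two views. A minimal userspace sketch of that layout, using made-up structure names rather than the real ones:

#include <stdlib.h>

struct ctlr  { int state; };	/* stands in for struct fcoe_ctlr */
struct iface { int vlan_id; };	/* stands in for the driver private area */

static struct iface *priv_of(struct ctlr *c)  { return (struct iface *)(c + 1); }
static struct ctlr  *ctlr_of(struct iface *i) { return (struct ctlr *)i - 1; }

int main(void)
{
	/* one allocation laid out as [ctlr][iface], mirroring the patch */
	struct ctlr *c = calloc(1, sizeof(struct ctlr) + sizeof(struct iface));
	struct iface *i;

	if (!c)
		return 1;
	i = priv_of(c);
	return ctlr_of(i) == c ? 0 : 1;	/* round-trips to the same controller */
}
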
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ce0ce3e32f33..bdbbb13b8534 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
struct fc_exch *exch = fc_seq_exch(seq);
struct fc_lport *lport = exch->lp;
u8 *mac;
- struct fc_frame_header *fh;
u8 op;
if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
mac = fr_cb(fp)->granted_mac;
if (is_zero_ether_addr(mac)) {
- fh = fc_frame_header_get(fp);
- if (fh->fh_type != FC_TYPE_ELS) {
- printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
- "fh_type != FC_TYPE_ELS\n");
- fc_frame_free(fp);
- return;
- }
op = fc_frame_payload_op(fp);
if (lport->vport) {
if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
return;
}
}
- if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
- fc_frame_free(fp);
- return;
- }
+ fcoe_ctlr_recv_flogi(fip, lport, fp);
}
- fip->update_mac(lport, mac);
+ if (!is_zero_ether_addr(mac))
+ fip->update_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
}
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
{
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_interface *interface = port->priv;
- struct fcoe_ctlr *fip = &interface->ctlr;
+ struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c1c6a92a0b98..f52f668fd247 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jan 22, 2011"
+#define DRV_MODULE_RELDATE "Apr 24, 2012"
static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
static struct libfc_function_template bnx2fc_libfc_fcn_templ;
static struct scsi_host_template bnx2fc_shost_template;
static struct fc_function_template bnx2fc_transport_function;
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
static struct fc_function_template bnx2fc_vport_xport_function;
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
__fcoe_get_lesb(lport, fc_lesb, netdev);
}
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = bnx2fc_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
+
+static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev =
+ fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ fcf_dev->vlan_id = fcoe->vlan_id;
+}
+
static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
{
struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct sk_buff *skb;
struct fc_frame_header *fh;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct bnx2fc_hba *hba;
struct fcoe_port *port;
struct fcoe_hdr *hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
port = (struct fcoe_port *)lport_priv(lport);
interface = port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
hba = interface->hba;
fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
- if (!interface->ctlr.sel_fcf) {
+ if (!ctlr->sel_fcf) {
BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
kfree_skb(skb);
return -EINVAL;
}
- if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
+ if (fcoe_ctlr_els_send(ctlr, lport, skb))
return 0;
}
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
- if (interface->ctlr.map_dest)
+ if (ctlr->map_dest)
fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
else
/* insert GW address */
- memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
+ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
- if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
- memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
+ if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct fc_lport *lport;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fc_frame_header *fh;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
if (unlikely(lport == NULL)) {
printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
{
struct bnx2fc_hba *hba;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
u64 wwnn, wwpn;
port = lport_priv(lport);
interface = port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
hba = interface->hba;
/* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
- wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+ wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
1, 0);
BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
fc_set_wwnn(lport, wwnn);
if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
- wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+ wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
2, 0);
BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
struct fc_lport *lport;
struct fc_lport *vport;
struct bnx2fc_interface *interface, *tmp;
+ struct fcoe_ctlr *ctlr;
int wait_for_upload = 0;
u32 link_possible = 1;
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
if (interface->hba != hba)
continue;
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
interface->netdev->name, event);
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
* on a stale vlan
*/
if (interface->enabled)
- fcoe_ctlr_link_up(&interface->ctlr);
- } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
+ fcoe_ctlr_link_up(ctlr);
+ } else if (fcoe_ctlr_link_down(ctlr)) {
mutex_lock(&lport->lp_mutex);
list_for_each_entry(vport, &lport->vports, list)
fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
struct net_device *orig_dev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
interface = container_of(ptype, struct bnx2fc_interface,
fip_packet_type);
- fcoe_ctlr_recv(&interface->ctlr, skb);
+ ctlr = bnx2fc_to_ctlr(interface);
+ fcoe_ctlr_recv(ctlr, skb);
return 0;
}
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
{
struct net_device *netdev = interface->netdev;
struct net_device *physdev = interface->hba->phys_dev;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct netdev_hw_addr *ha;
int sel_san_mac = 0;
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
(is_valid_ether_addr(ha->addr))) {
- memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+ memcpy(ctlr->ctl_src_addr, ha->addr,
ETH_ALEN);
sel_san_mac = 1;
BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
static void bnx2fc_interface_release(struct kref *kref)
{
+ struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct net_device *netdev;
interface = container_of(kref, struct bnx2fc_interface, kref);
BNX2FC_MISC_DBG("Interface is being released\n");
+ ctlr = bnx2fc_to_ctlr(interface);
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
netdev = interface->netdev;
/* tear-down FIP controller */
if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
- fcoe_ctlr_destroy(&interface->ctlr);
+ fcoe_ctlr_destroy(ctlr);
- kfree(interface);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
@@ -1329,33 +1381,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
struct net_device *netdev,
enum fip_state fip_mode)
{
+ struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ int size;
int rc = 0;
- interface = kzalloc(sizeof(*interface), GFP_KERNEL);
- if (!interface) {
+ size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
+ ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
+ size);
+ if (!ctlr_dev) {
printk(KERN_ERR PFX "Unable to allocate interface structure\n");
return NULL;
}
+ ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ interface = fcoe_ctlr_priv(ctlr);
dev_hold(netdev);
kref_init(&interface->kref);
interface->hba = hba;
interface->netdev = netdev;
/* Initialize FIP */
- fcoe_ctlr_init(&interface->ctlr, fip_mode);
- interface->ctlr.send = bnx2fc_fip_send;
- interface->ctlr.update_mac = bnx2fc_update_src_mac;
- interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ fcoe_ctlr_init(ctlr, fip_mode);
+ ctlr->send = bnx2fc_fip_send;
+ ctlr->update_mac = bnx2fc_update_src_mac;
+ ctlr->get_src_addr = bnx2fc_get_src_mac;
set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
rc = bnx2fc_interface_setup(interface);
if (!rc)
return interface;
- fcoe_ctlr_destroy(&interface->ctlr);
+ fcoe_ctlr_destroy(ctlr);
dev_put(netdev);
- kfree(interface);
+ fcoe_ctlr_device_delete(ctlr_dev);
return NULL;
}
@@ -1373,6 +1432,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
struct Scsi_Host *shost;
@@ -1383,7 +1443,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
if (!blport) {
- BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
+ BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
return NULL;
}
@@ -1479,7 +1539,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
{
- struct fc_lport *lport = interface->ctlr.lp;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport = ctlr->lp;
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_hba *hba = interface->hba;
@@ -1519,7 +1580,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
{
- struct fc_lport *lport = interface->ctlr.lp;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport = ctlr->lp;
struct fcoe_port *port = lport_priv(lport);
bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1605,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
{
struct bnx2fc_interface *interface = NULL;
struct workqueue_struct *timer_work_queue;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
goto netdev_err;
@@ -1646,6 +1710,7 @@ static void bnx2fc_ulp_start(void *handle)
{
struct bnx2fc_hba *hba = handle;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fc_lport *lport;
mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1722,8 @@ static void bnx2fc_ulp_start(void *handle)
list_for_each_entry(interface, &if_list, list) {
if (interface->hba == hba) {
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
/* Kick off Fabric discovery*/
printk(KERN_ERR PFX "ulp_init: start discovery\n");
lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1743,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
static void bnx2fc_stop(struct bnx2fc_interface *interface)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport;
struct fc_lport *vport;
if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
return;
- lport = interface->ctlr.lp;
+ lport = ctlr->lp;
bnx2fc_port_shutdown(lport);
mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1759,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
FC_PORTTYPE_UNKNOWN;
mutex_unlock(&lport->lp_mutex);
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- fcoe_ctlr_link_down(&interface->ctlr);
+ fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(lport);
}
@@ -1804,6 +1871,7 @@ exit:
static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport;
int wait_cnt = 0;
@@ -1814,18 +1882,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
return;
}
- lport = interface->ctlr.lp;
+ lport = ctlr->lp;
BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
if (!bnx2fc_link_ok(lport) && interface->enabled) {
BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
- fcoe_ctlr_link_up(&interface->ctlr);
+ fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
/* wait for the FCF to be selected before issuing FLOGI */
- while (!interface->ctlr.sel_fcf) {
+ while (!ctlr->sel_fcf) {
msleep(250);
/* give up after 3 secs */
if (++wait_cnt > 12)
@@ -1889,19 +1957,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
static int bnx2fc_disable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
} else {
interface->enabled = false;
- fcoe_ctlr_link_down(&interface->ctlr);
- fcoe_clean_pending_queue(interface->ctlr.lp);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +1983,19 @@ static int bnx2fc_disable(struct net_device *netdev)
static int bnx2fc_enable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
- } else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
- fcoe_ctlr_link_up(&interface->ctlr);
+ } else if (!bnx2fc_link_ok(ctlr->lp)) {
+ fcoe_ctlr_link_up(ctlr);
interface->enabled = true;
}
@@ -1944,6 +2016,7 @@ static int bnx2fc_enable(struct net_device *netdev)
*/
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
{
+ struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
struct net_device *phys_dev;
@@ -2010,6 +2083,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto ifput_err;
}
+ ctlr = bnx2fc_to_ctlr(interface);
interface->vlan_id = vlan_id;
interface->vlan_enabled = 1;
@@ -2035,10 +2109,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
lport->boot_time = jiffies;
/* Make this master N_port */
- interface->ctlr.lp = lport;
+ ctlr->lp = lport;
if (!bnx2fc_link_ok(lport)) {
- fcoe_ctlr_link_up(&interface->ctlr);
+ fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
@@ -2439,6 +2513,19 @@ static void __exit bnx2fc_mod_exit(void)
module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
+ .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+
+ .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+ .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
+};
+
static struct fc_function_template bnx2fc_transport_function = {
.show_host_node_name = 1,
.show_host_port_name = 1,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index afd570962b8c..2ca6bfe4ce5e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
{
struct fc_lport *lport = port->lport;
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct kwqe *kwqe_arr[4];
struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
- ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
/* fcf mac */
- ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
{
struct kwqe *kwqe_arr[2];
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable enbl_req;
struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
- enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
- enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
port_id = fc_host_port_id(lport->host);
if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable disable_req;
struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
- disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
- disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
port_id = tgt->sid;
disable_req.s_id[0] = (port_id & 0x000000FF);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e897ce975bb8..4f7453b9e41e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -810,8 +810,22 @@ retry_tmf:
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
- if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+ if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+ if (io_req->on_tmf_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ }
+ io_req->wait_for_comp = 1;
+ bnx2fc_initiate_cleanup(io_req);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_FW_TIMEOUT);
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (!rc)
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
spin_unlock_bh(&tgt->tgt_lock);
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
+int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ int logo_issued;
+ int rc = SUCCESS;
+ int wait_cnt = 0;
+
+ BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+ logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ io_req->wait_for_comp = 1;
+ bnx2fc_initiate_cleanup(io_req);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ io_req->wait_for_comp = 0;
+ /*
+ * release the reference taken in eh_abort to allow the
+ * target to re-login after flushing IOs
+ */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ if (!logo_issued) {
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ do {
+ msleep(BNX2FC_RELOGIN_WAIT_TIME);
+ if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
+ rc = FAILED;
+ break;
+ }
+ } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
+ }
+ spin_lock_bh(&tgt->tgt_lock);
+ return rc;
+}
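
bnx2fc_expl_logo() above waits for the target session to recover by sleeping and re-testing a flag, giving up after a fixed retry budget so SCSI-ml can escalate recovery. A stripped-down sketch of that bounded-wait pattern (constants and names are illustrative only, not taken from the driver):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RELOGIN_WAIT_TIME_MS	200
#define RELOGIN_WAIT_CNT	10

/* Sleep and re-poll until *ready is set; return false once the budget runs out */
static bool wait_session_ready(volatile bool *ready)
{
	int wait_cnt = 0;

	do {
		usleep(RELOGIN_WAIT_TIME_MS * 1000);
		if (wait_cnt++ > RELOGIN_WAIT_CNT)
			return false;
	} while (!*ready);

	return true;
}

int main(void)
{
	volatile bool ready = true;	/* pretend the session came back at once */

	printf("%s\n", wait_session_ready(&ready) ? "recovered" : "escalate");
	return 0;
}
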
/**
* bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
* SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct bnx2fc_cmd *io_req;
struct fc_lport *lport;
- struct fc_rport_priv *rdata;
struct bnx2fc_rport *tgt;
- int logo_issued;
- int wait_cnt = 0;
int rc = FAILED;
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
list_add_tail(&io_req->link, &tgt->io_retire_queue);
init_completion(&io_req->tm_done);
- io_req->wait_for_comp = 1;
- if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
- /* Cancel the current timer running on this io_req */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* drop timer hold */
- set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
- rc = bnx2fc_initiate_abts(io_req);
- } else {
+ if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"already in abts processing\n", io_req->xid);
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
- bnx2fc_initiate_cleanup(io_req);
+ rc = bnx2fc_expl_logo(lport, io_req);
+ goto out;
+ }
+ /* Cancel the current timer running on this io_req */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+ io_req->wait_for_comp = 1;
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == FAILED) {
+ bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
-
wait_for_completion(&io_req->tm_done);
-
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
- rdata = io_req->tgt->rdata;
- logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
- &tgt->flags);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
-
- if (!logo_issued) {
- BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
- tgt->flags);
- mutex_lock(&lport->disc.disc_mutex);
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&lport->disc.disc_mutex);
- do {
- msleep(BNX2FC_RELOGIN_WAIT_TIME);
- /*
- * If session not recovered, let SCSI-ml
- * escalate error recovery.
- */
- if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
- return FAILED;
- } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
- &tgt->flags));
- }
- return SUCCESS;
- }
- if (rc == FAILED) {
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- return rc;
+ goto done;
}
spin_unlock_bh(&tgt->tgt_lock);
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
- rc = FAILED;
+ rc = bnx2fc_expl_logo(lport, io_req);
+ goto out;
} else {
/*
* We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
bnx2fc_scsi_done(io_req, DID_ABORT);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
-
+done:
/* release the reference taken in eh_abort */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+out:
spin_unlock_bh(&tgt->tgt_lock);
return rc;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c1800b531270..082a25c3117e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
BUG_ON(rc);
}
+ list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+ }
+
list_for_each_safe(list, tmp, &tgt->els_queue) {
i++;
io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
- if (cancel_delayed_work(&io_req->timeout_work))
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "in retire_q\n");
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+ }
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0c53c28dc3d3..7e77cf620291 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -350,6 +350,7 @@ struct bnx2i_hba {
struct pci_dev *pcidev;
struct net_device *netdev;
void __iomem *regview;
+ resource_size_t reg_base;
u32 age;
unsigned long cnic_dev_type;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index ece47e502282..86a12b48e477 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2724,7 +2724,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
goto arm_cq;
}
- reg_base = ep->hba->netdev->base_addr;
if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
(ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2739,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
/* 5709 device in normal node and 5706/5708 devices */
reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
- ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+ ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
MB_KERNEL_CTX_SIZE);
if (!ep->qp.ctx_base)
return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f8d516b53161..621538b8b544 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
bnx2i_identify_device(hba);
bnx2i_setup_host_queue_size(hba, shost);
+ hba->reg_base = pci_resource_start(hba->pcidev, 0);
if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
- hba->regview = ioremap_nocache(hba->netdev->base_addr,
- BNX2_MQ_CONFIG2);
+ hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
if (!hba->regview)
goto ioreg_map_err;
} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
- hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+ hba->regview = pci_iomap(hba->pcidev, 0, 4096);
if (!hba->regview)
goto ioreg_map_err;
}
@@ -884,7 +884,7 @@ cid_que_err:
bnx2i_free_mp_bdt(hba);
mp_bdt_mem_err:
if (hba->regview) {
- iounmap(hba->regview);
+ pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
ioreg_map_err:
@@ -910,7 +910,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
pci_dev_put(hba->pcidev);
if (hba->regview) {
- iounmap(hba->regview);
+ pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
bnx2i_free_mp_bdt(hba);
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 394ed9e79fd4..34552bf1c023 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1000,7 +1000,7 @@ static int esp_check_spur_intr(struct esp *esp)
static void esp_schedule_reset(struct esp *esp)
{
- esp_log_reset("ESP: esp_schedule_reset() from %p\n",
+ esp_log_reset("ESP: esp_schedule_reset() from %pf\n",
__builtin_return_address(0));
esp->flags |= ESP_FLAG_RESETTING;
esp_event(esp, ESP_EVENT_RESET);
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index f6d37d0271f7..aed0f5db3668 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FCOE) += fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
-libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 76e3d0b5bfa6..fe30b1b65e1d 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -41,6 +41,7 @@
#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
+static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
+ .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+ .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+ .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
+};
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
static int fcoe_interface_setup(struct fcoe_interface *fcoe,
struct net_device *netdev)
{
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct netdev_hw_addr *ha;
struct net_device *real_dev;
u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
enum fip_state fip_mode)
{
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
+ int size;
int err;
if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
goto out;
}
- fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
- if (!fcoe) {
- FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+ size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
+ ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
+ size);
+ if (!ctlr_dev) {
+ FCOE_DBG("Failed to add fcoe_ctlr_device\n");
fcoe = ERR_PTR(-ENOMEM);
goto out_putmod;
}
+ ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ fcoe = fcoe_ctlr_priv(ctlr);
+
dev_hold(netdev);
/*
* Initialize FIP.
*/
- fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
- fcoe->ctlr.send = fcoe_fip_send;
- fcoe->ctlr.update_mac = fcoe_update_src_mac;
- fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
+ fcoe_ctlr_init(ctlr, fip_mode);
+ ctlr->send = fcoe_fip_send;
+ ctlr->update_mac = fcoe_update_src_mac;
+ ctlr->get_src_addr = fcoe_get_src_mac;
err = fcoe_interface_setup(fcoe, netdev);
if (err) {
- fcoe_ctlr_destroy(&fcoe->ctlr);
- kfree(fcoe);
+ fcoe_ctlr_destroy(ctlr);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
fcoe = ERR_PTR(err);
goto out_putmod;
@@ -419,7 +443,7 @@ out:
static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
u8 flogi_maddr[ETH_ALEN];
const struct net_device_ops *ops;
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
rtnl_lock();
if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
/* Release the self-reference taken during fcoe_interface_create() */
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
- scsi_host_put(fcoe->ctlr.lp->host);
- kfree(fcoe);
+ scsi_host_put(fip->lp->host);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
}
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
struct net_device *orig_dev)
{
struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
- fcoe_ctlr_recv(&fcoe->ctlr, skb);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_recv(ctlr, skb);
return 0;
}
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
u32 mfs;
u64 wwnn, wwpn;
struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
/* Setup lport private data to point to fcoe softc */
port = lport_priv(lport);
fcoe = port->priv;
+ ctlr = fcoe_to_ctlr(fcoe);
/*
* Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
- wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+ wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
fc_set_wwnn(lport, wwnn);
if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
- wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+ wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
2, 0);
fc_set_wwpn(lport, wwpn);
}
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct device *parent, int npiv)
{
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct net_device *netdev = fcoe->netdev;
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
/* Initialize the library */
- rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
+ rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
if (rc) {
FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
"interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
{
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
unsigned int cpu;
fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
if (unlikely(!lport)) {
FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
eh = eth_hdr(skb);
- if (is_fip_mode(&fcoe->ctlr) &&
- compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
+ if (is_fip_mode(ctlr) &&
+ compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
eh->h_source);
goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
unsigned int elen; /* eth header, may include vlan */
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
u8 sof, eof;
struct fcoe_hdr *hp;
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
- fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
+ fcoe_ctlr_els_send(ctlr, lport, skb))
return 0;
sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
- memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
- if (fcoe->ctlr.map_dest)
+ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+ if (ctlr->map_dest)
memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
- if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
- memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
+ if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fc_frame *fp)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
- if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+ ctlr = fcoe_to_ctlr(fcoe);
+ if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct dcb_app_type *entry = ptr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct net_device *netdev;
struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
if (!fcoe)
return NOTIFY_OK;
+ ctlr = fcoe_to_ctlr(fcoe);
+
if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
prio = ffs(entry->app.priority) - 1;
else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
if (entry->app.protocol == ETH_P_FIP ||
entry->app.protocol == ETH_P_FCOE)
- fcoe->ctlr.priority = prio;
+ ctlr->priority = prio;
if (entry->app.protocol == ETH_P_FCOE) {
- port = lport_priv(fcoe->ctlr.lp);
+ port = lport_priv(ctlr->lp);
port->priority = prio;
}
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
{
struct fc_lport *lport = NULL;
struct net_device *netdev = ptr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fcoe_port *port;
struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
list_for_each_entry(fcoe, &fcoe_hostlist, list) {
if (fcoe->netdev == netdev) {
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
break;
}
}
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
- port = lport_priv(fcoe->ctlr.lp);
+ port = lport_priv(ctlr->lp);
queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
fcoe_link_speed_update(lport);
if (link_possible && !fcoe_link_ok(lport))
- fcoe_ctlr_link_up(&fcoe->ctlr);
- else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
+ fcoe_ctlr_link_up(ctlr);
+ else if (fcoe_ctlr_link_down(ctlr)) {
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->LinkFailureCount++;
put_cpu();
@@ -2003,6 +2043,7 @@ out:
*/
static int fcoe_disable(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int rc = 0;
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
rtnl_unlock();
if (fcoe) {
- fcoe_ctlr_link_down(&fcoe->ctlr);
- fcoe_clean_pending_queue(fcoe->ctlr.lp);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
} else
rc = -ENODEV;
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
*/
static int fcoe_enable(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int rc = 0;
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (!fcoe)
+ if (!fcoe) {
rc = -ENODEV;
- else if (!fcoe_link_ok(fcoe->ctlr.lp))
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ goto out;
+ }
+
+ ctlr = fcoe_to_ctlr(fcoe);
+
+ if (!fcoe_link_ok(ctlr->lp))
+ fcoe_ctlr_link_up(ctlr);
+out:
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
*/
static int fcoe_destroy(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
rc = -ENODEV;
goto out_nodev;
}
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
port = lport_priv(lport);
list_del(&fcoe->list);
queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
- struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct fcoe_port *port = lport_priv(ctlr->lp);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
}
port->priority = ffs(up) ? ffs(up) - 1 : 0;
- fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
}
#endif
}
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
int rc = 0;
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
goto out_nodev;
}
- lport = fcoe_if_create(fcoe, &netdev->dev, 0);
+ ctlr = fcoe_to_ctlr(fcoe);
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
if (IS_ERR(lport)) {
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
}
/* Make this the "master" N_Port */
- fcoe->ctlr.lp = lport;
+ ctlr->lp = lport;
/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
fc_fabric_login(lport);
if (!fcoe_link_ok(lport)) {
rtnl_unlock();
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ fcoe_ctlr_link_up(ctlr);
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
- fcoe_ctlr_link_down(&fcoe->ctlr);
- fcoe_clean_pending_queue(fcoe->ctlr.lp);
- if (!fcoe_link_ok(fcoe->ctlr.lp))
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
+ if (!fcoe_link_ok(ctlr->lp))
+ fcoe_ctlr_link_up(ctlr);
return 0;
}
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
*/
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
fcoe = fcoe_hostlist_lookup_port(netdev);
- return (fcoe) ? fcoe->ctlr.lp : NULL;
+ ctlr = fcoe_to_ctlr(fcoe);
+ return (fcoe) ? ctlr->lp : NULL;
}
/**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
static void __exit fcoe_exit(void)
{
struct fcoe_interface *fcoe, *tmp;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
unsigned int cpu;
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
rtnl_lock();
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
list_del(&fcoe->list);
- port = lport_priv(fcoe->ctlr.lp);
+ ctlr = fcoe_to_ctlr(fcoe);
+ port = lport_priv(ctlr->lp);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
{
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
__fcoe_get_lesb(lport, fc_lesb, netdev);
}
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = fcoe_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev =
+ fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
+}
+
/**
* fcoe_set_port_id() - Callback from libfc when Port_ID is set.
* @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
{
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
- fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+ fcoe_ctlr_recv_flogi(ctlr, lport, fp);
}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 96ac938d39cc..a624add4f8ec 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -68,7 +68,6 @@ do { \
* @netdev: The associated net device
* @fcoe_packet_type: FCoE packet type
* @fip_packet_type: FIP packet type
- * @ctlr: The FCoE controller (for FIP)
* @oem: The offload exchange manager for all local port
* instances associated with this port
* @removed: Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
struct net_device *realdev;
struct packet_type fcoe_packet_type;
struct packet_type fip_packet_type;
- struct fcoe_ctlr ctlr;
struct fc_exch_mgr *oem;
u8 removed;
};
-#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
+#define fcoe_to_ctlr(x) \
+ (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
+
+#define fcoe_from_ctlr(x) \
+ ((struct fcoe_interface *)((x) + 1))
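
These conversion macros rely on a specific allocation layout: the struct fcoe_ctlr is assumed to sit immediately in front of the struct fcoe_interface in one contiguous buffer, so both conversions reduce to plain pointer arithmetic. A minimal sketch of that assumed layout follows (illustrative only, not part of the patch; the kzalloc/WARN_ON usage here is hypothetical):

	static int fcoe_layout_sketch(void)
	{
		struct fcoe_ctlr *ctlr;
		struct fcoe_interface *fcoe;
		void *buf;

		/* One contiguous allocation: fcoe_ctlr first, fcoe_interface after. */
		buf = kzalloc(sizeof(*ctlr) + sizeof(*fcoe), GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		ctlr = buf;
		fcoe = fcoe_from_ctlr(ctlr);	/* == (struct fcoe_interface *)(ctlr + 1) */

		/* fcoe_to_ctlr() walks back to the very same fcoe_ctlr. */
		WARN_ON(fcoe_to_ctlr(fcoe) != ctlr);

		kfree(buf);
		return 0;
	}
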
/**
* fcoe_netdev() - Return the net device associated with a local port
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5a4c7250aa77..d68d57241ee6 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
}
EXPORT_SYMBOL(fcoe_ctlr_init);
+static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
+{
+ struct fcoe_ctlr *fip = new->fip;
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ struct fcoe_fcf_device temp, *fcf_dev;
+ int rc = 0;
+
+ LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+ new->fabric_name, new->fcf_mac);
+
+ mutex_lock(&ctlr_dev->lock);
+
+ temp.fabric_name = new->fabric_name;
+ temp.switch_name = new->switch_name;
+ temp.fc_map = new->fc_map;
+ temp.vfid = new->vfid;
+ memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
+ temp.priority = new->pri;
+ temp.fka_period = new->fka_period;
+ temp.selected = 0; /* default to unselected */
+
+ fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
+ if (unlikely(!fcf_dev)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * The fcoe_sysfs layer can return a CONNECTED fcf that
+ * has a priv (fcf was never deleted) or a CONNECTED fcf
+ * that doesn't have a priv (fcf was deleted). However,
+ * libfcoe will always delete FCFs before trying to add
+ * them. This is ensured because both recv_adv and
+ * age_fcfs are protected by the fcoe_ctlr's mutex.
+ * This means that we should never get a FCF with a
+ * non-NULL priv pointer.
+ */
+ BUG_ON(fcf_dev->priv);
+
+ fcf_dev->priv = new;
+ new->fcf_dev = fcf_dev;
+
+ list_add(&new->list, &fip->fcfs);
+ fip->fcf_count++;
+
+out:
+ mutex_unlock(&ctlr_dev->lock);
+ return rc;
+}
+
+static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+{
+ struct fcoe_ctlr *fip = new->fip;
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ struct fcoe_fcf_device *fcf_dev;
+
+ list_del(&new->list);
+ fip->fcf_count--;
+
+ mutex_lock(&ctlr_dev->lock);
+
+ fcf_dev = fcoe_fcf_to_fcf_dev(new);
+ WARN_ON(!fcf_dev);
+ new->fcf_dev = NULL;
+ fcoe_fcf_device_delete(fcf_dev);
+ kfree(new);
+
+ mutex_unlock(&ctlr_dev->lock);
+}
+
/**
* fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
* @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
fip->sel_fcf = NULL;
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
- list_del(&fcf->list);
- kfree(fcf);
+ fcoe_sysfs_fcf_del(fcf);
}
- fip->fcf_count = 0;
+ WARN_ON(fip->fcf_count);
+
fip->sel_time = 0;
}
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
unsigned long deadline;
unsigned long sel_time = 0;
+ struct list_head del_list;
struct fcoe_dev_stats *stats;
+ INIT_LIST_HEAD(&del_list);
+
stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
if (time_after_eq(jiffies, deadline)) {
if (fip->sel_fcf == fcf)
fip->sel_fcf = NULL;
+ /*
+ * Move to delete list so we can call
+ * fcoe_sysfs_fcf_del (which can sleep)
+ * after the put_cpu().
+ */
list_del(&fcf->list);
- WARN_ON(!fip->fcf_count);
- fip->fcf_count--;
- kfree(fcf);
+ list_add(&fcf->list, &del_list);
stats->VLinkFailureCount++;
} else {
if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
}
}
put_cpu();
+
+ list_for_each_entry_safe(fcf, next, &del_list, list) {
+ /* Removes fcf from current list */
+ fcoe_sysfs_fcf_del(fcf);
+ }
+
if (sel_time && !fip->sel_fcf && !fip->sel_time) {
sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
fip->sel_time = sel_time;
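
The hunk above changes fcoe_ctlr_age_fcfs() so that expired FCFs are only unlinked while the per-CPU stats reference is held, and the sleeping teardown (fcoe_sysfs_fcf_del() takes a mutex) runs after put_cpu(). A generic sketch of that deferred-deletion pattern, not part of the patch, with struct item, live_list, expired() and sleeping_teardown() as hypothetical stand-ins:

	LIST_HEAD(del_list);
	struct item *it, *next;

	preempt_disable();			/* stand-in for get_cpu() */
	list_for_each_entry_safe(it, next, &live_list, list)
		if (expired(it))
			list_move(&it->list, &del_list);  /* defer the sleeping work */
	preempt_enable();			/* stand-in for put_cpu() */

	list_for_each_entry_safe(it, next, &del_list, list)
		sleeping_teardown(it);		/* may sleep, e.g. takes a mutex */
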
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fcoe_fcf *fcf;
struct fcoe_fcf new;
- struct fcoe_fcf *found;
unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
int first = 0;
int mtu_valid;
+ int found = 0;
+ int rc = 0;
if (fcoe_ctlr_parse_adv(fip, skb, &new))
return;
mutex_lock(&fip->ctlr_mutex);
first = list_empty(&fip->fcfs);
- found = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
if (fcf->switch_name == new.switch_name &&
fcf->fabric_name == new.fabric_name &&
fcf->fc_map == new.fc_map &&
compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
- found = fcf;
+ found = 1;
break;
}
}
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (!fcf)
goto out;
- fip->fcf_count++;
memcpy(fcf, &new, sizeof(new));
- list_add(&fcf->list, &fip->fcfs);
+ fcf->fip = fip;
+ rc = fcoe_sysfs_fcf_add(fcf);
+ if (rc) {
+ printk(KERN_ERR "Failed to allocate sysfs instance "
+ "for FCF, fab %16.16llx mac %pM\n",
+ new.fabric_name, new.fcf_mac);
+ kfree(fcf);
+ goto out;
+ }
} else {
/*
* Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
fcf->fka_period = new.fka_period;
memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
}
+
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
time_before(fip->sel_time, fip->timer.expires))
mod_timer(&fip->timer, fip->sel_time);
}
+
out:
mutex_unlock(&fip->ctlr_mutex);
}
@@ -2718,9 +2809,9 @@ unlock:
/**
* fcoe_libfc_config() - Sets up libfc related properties for local port
- * @lp: The local port to configure libfc for
- * @fip: The FCoE controller in use by the local port
- * @tt: The libfc function template
+ * @lport: The local port to configure libfc for
+ * @fip: The FCoE controller in use by the local port
+ * @tt: The libfc function template
* @init_fcp: If non-zero, the FCP portion of libfc should be initialized
*
* Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_fcf *fcf;
+
+ mutex_lock(&fip->ctlr_mutex);
+ mutex_lock(&ctlr_dev->lock);
+
+ fcf = fcoe_fcf_device_priv(fcf_dev);
+ if (fcf)
+ fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
+ else
+ fcf_dev->selected = 0;
+
+ mutex_unlock(&ctlr_dev->lock);
+ mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_fcf_get_selected);
+
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ mutex_lock(&ctlr->ctlr_mutex);
+ switch (ctlr->mode) {
+ case FIP_MODE_FABRIC:
+ ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
+ break;
+ case FIP_MODE_VN2VN:
+ ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+ break;
+ default:
+ ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+ break;
+ }
+ mutex_unlock(&ctlr->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
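
The getters added above (fcoe_ctlr_get_lesb(), fcoe_fcf_get_vlan_id(), fcoe_fcf_get_selected() and fcoe_ctlr_get_fip_mode()) are meant to be plugged into a fcoe_sysfs_function_template so the sysfs show routines can refresh values on demand. A hypothetical wiring sketch; the field names are inferred from the get_fcoe_ctlr_*/get_fcoe_fcf_* callbacks dereferenced in fcoe_sysfs.c, and the sketch itself is not part of this patch:

	static struct fcoe_sysfs_function_template fcoe_sysfs_templ_sketch = {
		.get_fcoe_ctlr_mode	  = fcoe_ctlr_get_fip_mode,
		.get_fcoe_ctlr_link_fail  = fcoe_ctlr_get_lesb,
		.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
		.get_fcoe_ctlr_miss_fka	  = fcoe_ctlr_get_lesb,
		.get_fcoe_ctlr_symb_err	  = fcoe_ctlr_get_lesb,
		.get_fcoe_ctlr_err_block  = fcoe_ctlr_get_lesb,
		.get_fcoe_ctlr_fcs_error  = fcoe_ctlr_get_lesb,
		.get_fcoe_fcf_selected	  = fcoe_fcf_get_selected,
		.get_fcoe_fcf_vlan_id	  = fcoe_fcf_get_vlan_id,
	};
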
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644
index 000000000000..2bc163198d33
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -0,0 +1,832 @@
+/*
+ * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#include <scsi/fcoe_sysfs.h>
+
+static atomic_t ctlr_num;
+static atomic_t fcf_num;
+
+/*
+ * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+ * should insulate the loss of a fcf.
+ */
+static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */
+
+module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
+ uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fcf_dev_loss_tmo,
+ "Maximum number of seconds that libfcoe should"
+ " insulate the loss of a fcf. Once this value is"
+ " exceeded, the fcf is removed.");
+
+/*
+ * These are used by the fcoe_*_show_function routines, they
+ * are intentionally placed in the .c file as they're not intended
+ * for use throughout the code.
+ */
+#define fcoe_ctlr_id(x) \
+ ((x)->id)
+#define fcoe_ctlr_work_q_name(x) \
+ ((x)->work_q_name)
+#define fcoe_ctlr_work_q(x) \
+ ((x)->work_q)
+#define fcoe_ctlr_devloss_work_q_name(x) \
+ ((x)->devloss_work_q_name)
+#define fcoe_ctlr_devloss_work_q(x) \
+ ((x)->devloss_work_q)
+#define fcoe_ctlr_mode(x) \
+ ((x)->mode)
+#define fcoe_ctlr_fcf_dev_loss_tmo(x) \
+ ((x)->fcf_dev_loss_tmo)
+#define fcoe_ctlr_link_fail(x) \
+ ((x)->lesb.lesb_link_fail)
+#define fcoe_ctlr_vlink_fail(x) \
+ ((x)->lesb.lesb_vlink_fail)
+#define fcoe_ctlr_miss_fka(x) \
+ ((x)->lesb.lesb_miss_fka)
+#define fcoe_ctlr_symb_err(x) \
+ ((x)->lesb.lesb_symb_err)
+#define fcoe_ctlr_err_block(x) \
+ ((x)->lesb.lesb_err_block)
+#define fcoe_ctlr_fcs_error(x) \
+ ((x)->lesb.lesb_fcs_error)
+#define fcoe_fcf_state(x) \
+ ((x)->state)
+#define fcoe_fcf_fabric_name(x) \
+ ((x)->fabric_name)
+#define fcoe_fcf_switch_name(x) \
+ ((x)->switch_name)
+#define fcoe_fcf_fc_map(x) \
+ ((x)->fc_map)
+#define fcoe_fcf_vfid(x) \
+ ((x)->vfid)
+#define fcoe_fcf_mac(x) \
+ ((x)->mac)
+#define fcoe_fcf_priority(x) \
+ ((x)->priority)
+#define fcoe_fcf_fka_period(x) \
+ ((x)->fka_period)
+#define fcoe_fcf_dev_loss_tmo(x) \
+ ((x)->dev_loss_tmo)
+#define fcoe_fcf_selected(x) \
+ ((x)->selected)
+#define fcoe_fcf_vlan_id(x) \
+ ((x)->vlan_id)
+
+/*
+ * dev_loss_tmo attribute
+ */
+static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+ int ret;
+
+ ret = kstrtoul(buf, 0, val);
+ if (ret || *val < 0)
+ return -EINVAL;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (*val > UINT_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
+ unsigned long val)
+{
+ if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
+ (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
+ (fcf->state == FCOE_FCF_STATE_DELETED))
+ return -EBUSY;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (val > UINT_MAX)
+ return -EINVAL;
+
+ fcoe_fcf_dev_loss_tmo(fcf) = val;
+ return 0;
+}
+
+#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+#define fcoe_ctlr_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
+ if (ctlr->f->get_fcoe_ctlr_##field) \
+ ctlr->f->get_fcoe_ctlr_##field(ctlr); \
+ return snprintf(buf, sz, format_string, \
+ cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
+ if (ctlr->f->get_fcoe_fcf_##field) \
+ ctlr->f->get_fcoe_fcf_##field(fcf); \
+ return snprintf(buf, sz, format_string, \
+ cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
+ return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
+ return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \
+ fcoe_ctlr_private_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_ctlr_rd_attr(field, format_string, sz) \
+ fcoe_ctlr_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_rd_attr(field, format_string, sz) \
+ fcoe_fcf_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr(field, format_string, sz) \
+ fcoe_fcf_private_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
+ fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
+ fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_enum_name_search(title, table_type, table) \
+static const char *get_fcoe_##title##_name(enum table_type table_key) \
+{ \
+ int i; \
+ char *name = NULL; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ if (table[i].value == table_key) { \
+ name = table[i].name; \
+ break; \
+ } \
+ } \
+ return name; \
+}
+
+static struct {
+ enum fcf_state value;
+ char *name;
+} fcf_state_names[] = {
+ { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
+ { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
+ { FCOE_FCF_STATE_CONNECTED, "Connected" },
+};
+fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
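
For readability, the invocation above expands to a lookup helper roughly equivalent to the following (illustrative expansion only, not additional code in the patch):

	static const char *get_fcoe_fcf_state_name(enum fcf_state table_key)
	{
		int i;
		char *name = NULL;

		for (i = 0; i < ARRAY_SIZE(fcf_state_names); i++) {
			if (fcf_state_names[i].value == table_key) {
				name = fcf_state_names[i].name;
				break;
			}
		}
		return name;
	}
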
+#define FCOE_FCF_STATE_MAX_NAMELEN 50
+
+static ssize_t show_fcf_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ const char *name;
+ name = get_fcoe_fcf_state_name(fcf->state);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
+
+static struct {
+ enum fip_conn_type value;
+ char *name;
+} fip_conn_type_names[] = {
+ { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
+ { FIP_CONN_TYPE_FABRIC, "Fabric" },
+ { FIP_CONN_TYPE_VN2VN, "VN2VN" },
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+#define FCOE_CTLR_MODE_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ if (ctlr->f->get_fcoe_ctlr_mode)
+ ctlr->f->get_fcoe_ctlr_mode(ctlr);
+
+ name = get_fcoe_ctlr_mode_name(ctlr->mode);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+ "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
+ show_ctlr_mode, NULL);
+
+static ssize_t
+store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ struct fcoe_fcf_device *fcf;
+ unsigned long val;
+ int rc;
+
+ rc = fcoe_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
+ mutex_lock(&ctlr->lock);
+ list_for_each_entry(fcf, &ctlr->fcfs, peers)
+ fcoe_fcf_set_dev_loss_tmo(fcf, val);
+ mutex_unlock(&ctlr->lock);
+ return count;
+}
+fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
+static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fcoe_ctlr_device_fcf_dev_loss_tmo,
+ store_private_fcoe_ctlr_fcf_dev_loss_tmo);
+
+/* Link Error Status Block (LESB) */
+fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
+fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
+fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
+fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
+
+fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
+fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
+fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
+fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
+fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
+fcoe_fcf_rd_attr(selected, "%u\n", 20);
+fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
+
+fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
+static ssize_t
+store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ unsigned long val;
+ int rc;
+
+ rc = fcoe_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
+ if (rc)
+ return rc;
+ return count;
+}
+static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fcoe_fcf_device_dev_loss_tmo,
+ store_fcoe_fcf_dev_loss_tmo);
+
+static struct attribute *fcoe_ctlr_lesb_attrs[] = {
+ &device_attr_fcoe_ctlr_link_fail.attr,
+ &device_attr_fcoe_ctlr_vlink_fail.attr,
+ &device_attr_fcoe_ctlr_miss_fka.attr,
+ &device_attr_fcoe_ctlr_symb_err.attr,
+ &device_attr_fcoe_ctlr_err_block.attr,
+ &device_attr_fcoe_ctlr_fcs_error.attr,
+ NULL,
+};
+
+static struct attribute_group fcoe_ctlr_lesb_attr_group = {
+ .name = "lesb",
+ .attrs = fcoe_ctlr_lesb_attrs,
+};
+
+static struct attribute *fcoe_ctlr_attrs[] = {
+ &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_mode.attr,
+ NULL,
+};
+
+static struct attribute_group fcoe_ctlr_attr_group = {
+ .attrs = fcoe_ctlr_attrs,
+};
+
+static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
+ &fcoe_ctlr_attr_group,
+ &fcoe_ctlr_lesb_attr_group,
+ NULL,
+};
+
+static struct attribute *fcoe_fcf_attrs[] = {
+ &device_attr_fcoe_fcf_fabric_name.attr,
+ &device_attr_fcoe_fcf_switch_name.attr,
+ &device_attr_fcoe_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_fcf_fc_map.attr,
+ &device_attr_fcoe_fcf_vfid.attr,
+ &device_attr_fcoe_fcf_mac.attr,
+ &device_attr_fcoe_fcf_priority.attr,
+ &device_attr_fcoe_fcf_fka_period.attr,
+ &device_attr_fcoe_fcf_state.attr,
+ &device_attr_fcoe_fcf_selected.attr,
+ &device_attr_fcoe_fcf_vlan_id.attr,
+ NULL
+};
+
+static struct attribute_group fcoe_fcf_attr_group = {
+ .attrs = fcoe_fcf_attrs,
+};
+
+static const struct attribute_group *fcoe_fcf_attr_groups[] = {
+ &fcoe_fcf_attr_group,
+ NULL,
+};
+
+struct bus_type fcoe_bus_type;
+
+static int fcoe_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ if (dev->bus == &fcoe_bus_type)
+ return 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_device_release() - Release the FIP ctlr memory
+ * @dev: Pointer to the FIP ctlr's embedded device
+ *
+ * Called when the last FIP ctlr reference is released.
+ */
+static void fcoe_ctlr_device_release(struct device *dev)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ kfree(ctlr);
+}
+
+/**
+ * fcoe_fcf_device_release() - Release the FIP fcf memory
+ * @dev: Pointer to the fcf's embedded device
+ *
+ * Called when the last FIP fcf reference is released.
+ */
+static void fcoe_fcf_device_release(struct device *dev)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ kfree(fcf);
+}
+
+struct device_type fcoe_ctlr_device_type = {
+ .name = "fcoe_ctlr",
+ .groups = fcoe_ctlr_attr_groups,
+ .release = fcoe_ctlr_device_release,
+};
+
+struct device_type fcoe_fcf_device_type = {
+ .name = "fcoe_fcf",
+ .groups = fcoe_fcf_attr_groups,
+ .release = fcoe_fcf_device_release,
+};
+
+struct bus_type fcoe_bus_type = {
+ .name = "fcoe",
+ .match = &fcoe_bus_match,
+};
+
+/**
+ * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
+{
+ if (!fcoe_ctlr_work_q(ctlr)) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to flush work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fcoe_ctlr_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr that owns the workqueue
+ * @work: Work to queue for execution
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
+ struct work_struct *work)
+{
+ if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to queue work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_work(fcoe_ctlr_work_q(ctlr), work);
+}
+
+/**
+ * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
+{
+ if (!fcoe_ctlr_devloss_work_q(ctlr)) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to flush work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to the FIP ctlr that owns the devloss workqueue
+ * @work: Work to queue for execution
+ * @delay: jiffies to delay the work queuing
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
+ struct delayed_work *work,
+ unsigned long delay)
+{
+ if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to queue work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
+}
+
+static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
+ struct fcoe_fcf_device *old)
+{
+ if (new->switch_name == old->switch_name &&
+ new->fabric_name == old->fabric_name &&
+ new->fc_map == old->fc_map &&
+ compare_ether_addr(new->mac, old->mac) == 0)
+ return 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
+ * @parent: The parent device to which the fcoe_ctlr instance
+ * should be attached
+ * @f: The LLD's FCoE sysfs function template pointer
+ * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
+ *
+ * This routine allocates a FIP ctlr object with some additional memory
+ * for the LLD. The FIP ctlr is initialized, added to sysfs and then
+ * attributes are added to it.
+ */
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+ struct fcoe_sysfs_function_template *f,
+ int priv_size)
+{
+ struct fcoe_ctlr_device *ctlr;
+ int error = 0;
+
+ ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
+ GFP_KERNEL);
+ if (!ctlr)
+ goto out;
+
+ ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+ ctlr->f = f;
+ INIT_LIST_HEAD(&ctlr->fcfs);
+ mutex_init(&ctlr->lock);
+ ctlr->dev.parent = parent;
+ ctlr->dev.bus = &fcoe_bus_type;
+ ctlr->dev.type = &fcoe_ctlr_device_type;
+
+ ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
+
+ snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
+ "ctlr_wq_%d", ctlr->id);
+ ctlr->work_q = create_singlethread_workqueue(
+ ctlr->work_q_name);
+ if (!ctlr->work_q)
+ goto out_del;
+
+ snprintf(ctlr->devloss_work_q_name,
+ sizeof(ctlr->devloss_work_q_name),
+ "ctlr_dl_wq_%d", ctlr->id);
+ ctlr->devloss_work_q = create_singlethread_workqueue(
+ ctlr->devloss_work_q_name);
+ if (!ctlr->devloss_work_q)
+ goto out_del_q;
+
+ dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+ error = device_register(&ctlr->dev);
+ if (error)
+ goto out_del_q2;
+
+ return ctlr;
+
+out_del_q2:
+ destroy_workqueue(ctlr->devloss_work_q);
+ ctlr->devloss_work_q = NULL;
+out_del_q:
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+out_del:
+ kfree(ctlr);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
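
As a usage illustration (hypothetical LLD code, not part of this patch), a driver would typically size priv_size for its own per-controller state, register the controller, and then fetch the private area back with fcoe_ctlr_device_priv():

	static struct fcoe_ctlr_device *example_ctlr_attach(struct device *parent,
					struct fcoe_sysfs_function_template *tmpl)
	{
		struct fcoe_ctlr_device *ctlr_dev;
		struct fcoe_ctlr *fip;

		/* Reserve room for a struct fcoe_ctlr behind the sysfs object. */
		ctlr_dev = fcoe_ctlr_device_add(parent, tmpl, sizeof(*fip));
		if (!ctlr_dev)
			return NULL;

		fip = fcoe_ctlr_device_priv(ctlr_dev);	/* LLD-owned private area */
		fcoe_ctlr_init(fip, FIP_MODE_FABRIC);

		return ctlr_dev;
	}
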
+
+/**
+ * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
+ * @ctlr: A pointer to the ctlr to be deleted
+ *
+ * Deletes a FIP ctlr and any fcfs attached
+ * to it. Deleting fcfs will cause their children
+ * to be deleted as well.
+ *
+ * The ctlr is detached from sysfs and its resources
+ * are freed (work q), but the memory is not freed
+ * until its last reference is released.
+ *
+ * This routine expects no locks to be held before
+ * calling.
+ *
+ * TODO: Currently there are no callbacks to clean up LLD data
+ * for a fcoe_fcf_device. LLDs must keep this in mind as they need
+ * to clean up each of their LLD data for all fcoe_fcf_device before
+ * calling fcoe_ctlr_device_delete.
+ */
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
+{
+ struct fcoe_fcf_device *fcf, *next;
+ /* Remove any attached fcfs */
+ mutex_lock(&ctlr->lock);
+ list_for_each_entry_safe(fcf, next,
+ &ctlr->fcfs, peers) {
+ list_del(&fcf->peers);
+ fcf->state = FCOE_FCF_STATE_DELETED;
+ fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+ }
+ mutex_unlock(&ctlr->lock);
+
+ fcoe_ctlr_device_flush_work(ctlr);
+
+ destroy_workqueue(ctlr->devloss_work_q);
+ ctlr->devloss_work_q = NULL;
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+
+ device_unregister(&ctlr->dev);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
+
+/**
+ * fcoe_fcf_device_final_delete() - Final delete routine
+ * @work: The FIP fcf's embedded work struct
+ *
+ * It is expected that the fcf has been removed from
+ * the FIP ctlr's list before calling this routine.
+ */
+static void fcoe_fcf_device_final_delete(struct work_struct *work)
+{
+ struct fcoe_fcf_device *fcf =
+ container_of(work, struct fcoe_fcf_device, delete_work);
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+ /*
+ * Cancel any outstanding timers. These should really exist
+ * only when rmmod'ing the LLDD and we're asking for
+ * immediate termination of the rports
+ */
+ if (!cancel_delayed_work(&fcf->dev_loss_work))
+ fcoe_ctlr_device_flush_devloss(ctlr);
+
+ device_unregister(&fcf->dev);
+}
+
+/**
+ * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
+ * @work: The FIP fcf's embedded work struct
+ *
+ * Removes the fcf from the FIP ctlr's list of fcfs and
+ * queues the final deletion.
+ */
+static void fip_timeout_deleted_fcf(struct work_struct *work)
+{
+ struct fcoe_fcf_device *fcf =
+ container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+ mutex_lock(&ctlr->lock);
+
+ /*
+ * If the fcf is deleted or reconnected before the timer
+ * fires the devloss queue will be flushed, but the state will
+ * either be CONNECTED or DELETED. If that is the case we
+ * cancel deleting the fcf.
+ */
+ if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
+ goto out;
+
+ dev_printk(KERN_ERR, &fcf->dev,
+ "FIP fcf connection time out: removing fcf\n");
+
+ list_del(&fcf->peers);
+ fcf->state = FCOE_FCF_STATE_DELETED;
+ fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+
+out:
+ mutex_unlock(&ctlr->lock);
+}
+
+/**
+ * fcoe_fcf_device_delete() - Delete a FIP fcf
+ * @fcf: Pointer to the fcf which is to be deleted
+ *
+ * Queues the FIP fcf on the devloss workqueue
+ *
+ * Expects the ctlr->lock mutex to be held for the fcf
+ * state change.
+ */
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
+{
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+ int timeout = fcf->dev_loss_tmo;
+
+ if (fcf->state != FCOE_FCF_STATE_CONNECTED)
+ return;
+
+ fcf->state = FCOE_FCF_STATE_DISCONNECTED;
+
+ /*
+ * FCF will only be re-connected by the LLD calling
+ * fcoe_fcf_device_add, and it should be setting up
+ * priv then.
+ */
+ fcf->priv = NULL;
+
+ fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
+ timeout * HZ);
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
+
+/**
+ * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
+ * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent
+ * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
+ *
+ * Expects to be called with the ctlr->lock held
+ */
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+ struct fcoe_fcf_device *new_fcf)
+{
+ struct fcoe_fcf_device *fcf;
+ int error = 0;
+
+ list_for_each_entry(fcf, &ctlr->fcfs, peers) {
+ if (fcoe_fcf_device_match(new_fcf, fcf)) {
+ if (fcf->state == FCOE_FCF_STATE_CONNECTED)
+ return fcf;
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+
+ if (!cancel_delayed_work(&fcf->dev_loss_work))
+ fcoe_ctlr_device_flush_devloss(ctlr);
+
+ return fcf;
+ }
+ }
+
+ fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
+ if (unlikely(!fcf))
+ goto out;
+
+ INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
+ INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
+
+ fcf->dev.parent = &ctlr->dev;
+ fcf->dev.bus = &fcoe_bus_type;
+ fcf->dev.type = &fcoe_fcf_device_type;
+ fcf->id = atomic_inc_return(&fcf_num) - 1;
+ fcf->state = FCOE_FCF_STATE_UNKNOWN;
+
+ fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+
+ dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
+
+ fcf->fabric_name = new_fcf->fabric_name;
+ fcf->switch_name = new_fcf->switch_name;
+ fcf->fc_map = new_fcf->fc_map;
+ fcf->vfid = new_fcf->vfid;
+ memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
+ fcf->priority = new_fcf->priority;
+ fcf->fka_period = new_fcf->fka_period;
+ fcf->selected = new_fcf->selected;
+
+ error = device_register(&fcf->dev);
+ if (error)
+ goto out_del;
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+ list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+ return fcf;
+
+out_del:
+ kfree(fcf);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
+
+int __init fcoe_sysfs_setup(void)
+{
+ int error;
+
+ atomic_set(&ctlr_num, 0);
+ atomic_set(&fcf_num, 0);
+
+ error = bus_register(&fcoe_bus_type);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+void __exit fcoe_sysfs_teardown(void)
+{
+ bus_unregister(&fcoe_bus_type);
+}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 710e149d41b6..b46f43dced78 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -815,9 +815,17 @@ out_nodev:
*/
static int __init libfcoe_init(void)
{
- fcoe_transport_init();
+ int rc = 0;
- return 0;
+ rc = fcoe_transport_init();
+ if (rc)
+ return rc;
+
+ rc = fcoe_sysfs_setup();
+ if (rc)
+ fcoe_transport_exit();
+
+ return rc;
}
module_init(libfcoe_init);
@@ -826,6 +834,7 @@ module_init(libfcoe_init);
*/
static void __exit libfcoe_exit(void)
{
+ fcoe_sysfs_teardown();
fcoe_transport_exit();
}
module_exit(libfcoe_exit);
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
deleted file mode 100644
index 53bfcaa86f09..000000000000
--- a/drivers/scsi/fd_mcs.c
+++ /dev/null
@@ -1,1354 +0,0 @@
-/* fd_mcs.c -- Future Domain MCS 600/700 (or IBM OEM) driver
- *
- * FutureDomain MCS-600/700 v0.2 03/11/1998 by ZP Gu (zpg@castle.net)
- *
- * This driver is cloned from fdomain.* to specifically support
- * the Future Domain MCS 600/700 MCA SCSI adapters. Some PS/2s
- * also equipped with IBM Fast SCSI Adapter/A which is an OEM
- * of MCS 700.
- *
- * This driver also supports Reply SB16/SCSI card (the SCSI part).
- *
- * What makes this driver different is that this driver is MCA only
- * and it supports multiple adapters in the same system, IRQ
- * sharing, some driver statistics, and maps highest SCSI id to sda.
- * All cards are auto-detected.
- *
- * Assumptions: TMC-1800/18C50/18C30, BIOS >= 3.4
- *
- * LILO command-line options:
- * fd_mcs=<FIFO_COUNT>[,<FIFO_SIZE>]
- *
- * ********************************************************
- * Please see Copyrights/Comments in fdomain.* for credits.
- * Following is from fdomain.c for acknowledgement:
- *
- * Created: Sun May 3 18:53:19 1992 by faith@cs.unc.edu
- * Revised: Wed Oct 2 11:10:55 1996 by r.faith@ieee.org
- * Author: Rickard E. Faith, faith@cs.unc.edu
- * Copyright 1992, 1993, 1994, 1995, 1996 Rickard E. Faith
- *
- * $Id: fdomain.c,v 5.45 1996/10/02 15:13:06 root Exp $
-
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
-
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
-
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
-
- **************************************************************************
-
- NOTES ON USER DEFINABLE OPTIONS:
-
- DEBUG: This turns on the printing of various debug information.
-
- ENABLE_PARITY: This turns on SCSI parity checking. With the current
- driver, all attached devices must support SCSI parity. If none of your
- devices support parity, then you can probably get the driver to work by
- turning this option off. I have no way of testing this, however, and it
- would appear that no one ever uses this option.
-
- FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
- 18C30 chip have a 2k cache). When this many 512 byte blocks are filled by
- the SCSI device, an interrupt will be raised. Therefore, this could be as
- low as 0, or as high as 16. Note, however, that values which are too high
- or too low seem to prevent any interrupts from occurring, and thereby lock
- up the machine. I have found that 2 is a good number, but throughput may
- be increased by changing this value to values which are close to 2.
- Please let me know if you try any different values.
- [*****Now a runtime option*****]
-
- RESELECTION: This is no longer an option, since I gave up trying to
- implement it in version 4.x of this driver. It did not improve
- performance at all and made the driver unstable (because I never found one
- of the two race conditions which were introduced by the multiple
- outstanding command code). The instability seems a very high price to pay
- just so that you don't have to wait for the tape to rewind. If you want
- this feature implemented, send me patches. I'll be happy to send a copy
- of my (broken) driver to anyone who would like to see a copy.
-
- **************************************************************************/
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/proc_fs.h>
-#include <linux/delay.h>
-#include <linux/mca.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <scsi/scsicam.h>
-#include <linux/mca-legacy.h>
-
-#include <asm/io.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-
-#define DRIVER_VERSION "v0.2 by ZP Gu<zpg@castle.net>"
-
-/* START OF USER DEFINABLE OPTIONS */
-
-#define DEBUG 0 /* Enable debugging output */
-#define ENABLE_PARITY 1 /* Enable SCSI Parity */
-
-/* END OF USER DEFINABLE OPTIONS */
-
-#if DEBUG
-#define EVERY_ACCESS 0 /* Write a line on every scsi access */
-#define ERRORS_ONLY 1 /* Only write a line if there is an error */
-#define DEBUG_MESSAGES 1 /* Debug MESSAGE IN phase */
-#define DEBUG_ABORT 1 /* Debug abort() routine */
-#define DEBUG_RESET 1 /* Debug reset() routine */
-#define DEBUG_RACE 1 /* Debug interrupt-driven race condition */
-#else
-#define EVERY_ACCESS 0 /* LEAVE THESE ALONE--CHANGE THE ONES ABOVE */
-#define ERRORS_ONLY 0
-#define DEBUG_MESSAGES 0
-#define DEBUG_ABORT 0
-#define DEBUG_RESET 0
-#define DEBUG_RACE 0
-#endif
-
-/* Errors are reported on the line, so we don't need to report them again */
-#if EVERY_ACCESS
-#undef ERRORS_ONLY
-#define ERRORS_ONLY 0
-#endif
-
-#if ENABLE_PARITY
-#define PARITY_MASK 0x08
-#else
-#define PARITY_MASK 0x00
-#endif
-
-enum chip_type {
- unknown = 0x00,
- tmc1800 = 0x01,
- tmc18c50 = 0x02,
- tmc18c30 = 0x03,
-};
-
-enum {
- in_arbitration = 0x02,
- in_selection = 0x04,
- in_other = 0x08,
- disconnect = 0x10,
- aborted = 0x20,
- sent_ident = 0x40,
-};
-
-enum in_port_type {
- Read_SCSI_Data = 0,
- SCSI_Status = 1,
- TMC_Status = 2,
- FIFO_Status = 3, /* tmc18c50/tmc18c30 only */
- Interrupt_Cond = 4, /* tmc18c50/tmc18c30 only */
- LSB_ID_Code = 5,
- MSB_ID_Code = 6,
- Read_Loopback = 7,
- SCSI_Data_NoACK = 8,
- Interrupt_Status = 9,
- Configuration1 = 10,
- Configuration2 = 11, /* tmc18c50/tmc18c30 only */
- Read_FIFO = 12,
- FIFO_Data_Count = 14
-};
-
-enum out_port_type {
- Write_SCSI_Data = 0,
- SCSI_Cntl = 1,
- Interrupt_Cntl = 2,
- SCSI_Mode_Cntl = 3,
- TMC_Cntl = 4,
- Memory_Cntl = 5, /* tmc18c50/tmc18c30 only */
- Write_Loopback = 7,
- IO_Control = 11, /* tmc18c30 only */
- Write_FIFO = 12
-};
-
-struct fd_hostdata {
- unsigned long _bios_base;
- int _bios_major;
- int _bios_minor;
- volatile int _in_command;
- Scsi_Cmnd *_current_SC;
- enum chip_type _chip;
- int _adapter_mask;
- int _fifo_count; /* Number of 512 byte blocks before INTR */
-
- char _adapter_name[64];
-#if DEBUG_RACE
- volatile int _in_interrupt_flag;
-#endif
-
- int _SCSI_Mode_Cntl_port;
- int _FIFO_Data_Count_port;
- int _Interrupt_Cntl_port;
- int _Interrupt_Status_port;
- int _Interrupt_Cond_port;
- int _Read_FIFO_port;
- int _Read_SCSI_Data_port;
- int _SCSI_Cntl_port;
- int _SCSI_Data_NoACK_port;
- int _SCSI_Status_port;
- int _TMC_Cntl_port;
- int _TMC_Status_port;
- int _Write_FIFO_port;
- int _Write_SCSI_Data_port;
-
- int _FIFO_Size; /* = 0x2000; 8k FIFO for
- pre-tmc18c30 chips */
- /* simple stats */
- int _Bytes_Read;
- int _Bytes_Written;
- int _INTR_Processed;
-};
-
-#define FD_MAX_HOSTS 3 /* enough? */
-
-#define HOSTDATA(shpnt) ((struct fd_hostdata *) shpnt->hostdata)
-#define bios_base (HOSTDATA(shpnt)->_bios_base)
-#define bios_major (HOSTDATA(shpnt)->_bios_major)
-#define bios_minor (HOSTDATA(shpnt)->_bios_minor)
-#define in_command (HOSTDATA(shpnt)->_in_command)
-#define current_SC (HOSTDATA(shpnt)->_current_SC)
-#define chip (HOSTDATA(shpnt)->_chip)
-#define adapter_mask (HOSTDATA(shpnt)->_adapter_mask)
-#define FIFO_COUNT (HOSTDATA(shpnt)->_fifo_count)
-#define adapter_name (HOSTDATA(shpnt)->_adapter_name)
-#if DEBUG_RACE
-#define in_interrupt_flag (HOSTDATA(shpnt)->_in_interrupt_flag)
-#endif
-#define SCSI_Mode_Cntl_port (HOSTDATA(shpnt)->_SCSI_Mode_Cntl_port)
-#define FIFO_Data_Count_port (HOSTDATA(shpnt)->_FIFO_Data_Count_port)
-#define Interrupt_Cntl_port (HOSTDATA(shpnt)->_Interrupt_Cntl_port)
-#define Interrupt_Status_port (HOSTDATA(shpnt)->_Interrupt_Status_port)
-#define Interrupt_Cond_port (HOSTDATA(shpnt)->_Interrupt_Cond_port)
-#define Read_FIFO_port (HOSTDATA(shpnt)->_Read_FIFO_port)
-#define Read_SCSI_Data_port (HOSTDATA(shpnt)->_Read_SCSI_Data_port)
-#define SCSI_Cntl_port (HOSTDATA(shpnt)->_SCSI_Cntl_port)
-#define SCSI_Data_NoACK_port (HOSTDATA(shpnt)->_SCSI_Data_NoACK_port)
-#define SCSI_Status_port (HOSTDATA(shpnt)->_SCSI_Status_port)
-#define TMC_Cntl_port (HOSTDATA(shpnt)->_TMC_Cntl_port)
-#define TMC_Status_port (HOSTDATA(shpnt)->_TMC_Status_port)
-#define Write_FIFO_port (HOSTDATA(shpnt)->_Write_FIFO_port)
-#define Write_SCSI_Data_port (HOSTDATA(shpnt)->_Write_SCSI_Data_port)
-#define FIFO_Size (HOSTDATA(shpnt)->_FIFO_Size)
-#define Bytes_Read (HOSTDATA(shpnt)->_Bytes_Read)
-#define Bytes_Written (HOSTDATA(shpnt)->_Bytes_Written)
-#define INTR_Processed (HOSTDATA(shpnt)->_INTR_Processed)
-
-struct fd_mcs_adapters_struct {
- char *name;
- int id;
- enum chip_type fd_chip;
- int fifo_size;
- int fifo_count;
-};
-
-#define REPLY_ID 0x5137
-
-static struct fd_mcs_adapters_struct fd_mcs_adapters[] = {
- {"Future Domain SCSI Adapter MCS-700(18C50)",
- 0x60e9,
- tmc18c50,
- 0x2000,
- 4},
- {"Future Domain SCSI Adapter MCS-600/700(TMC-1800)",
- 0x6127,
- tmc1800,
- 0x2000,
- 4},
- {"Reply Sound Blaster/SCSI Adapter",
- REPLY_ID,
- tmc18c30,
- 0x800,
- 2},
-};
-
-#define FD_BRDS ARRAY_SIZE(fd_mcs_adapters)
-
-static irqreturn_t fd_mcs_intr(int irq, void *dev_id);
-
-static unsigned long addresses[] = { 0xc8000, 0xca000, 0xce000, 0xde000 };
-static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
-static unsigned short interrupts[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
-
-/* host information */
-static int found = 0;
-static struct Scsi_Host *hosts[FD_MAX_HOSTS + 1] = { NULL };
-
-static int user_fifo_count = 0;
-static int user_fifo_size = 0;
-
-#ifndef MODULE
-static int __init fd_mcs_setup(char *str)
-{
- static int done_setup = 0;
- int ints[3];
-
- get_options(str, 3, ints);
- if (done_setup++ || ints[0] < 1 || ints[0] > 2 || ints[1] < 1 || ints[1] > 16) {
- printk("fd_mcs: usage: fd_mcs=FIFO_COUNT, FIFO_SIZE\n");
- return 0;
- }
-
- user_fifo_count = ints[0] >= 1 ? ints[1] : 0;
- user_fifo_size = ints[0] >= 2 ? ints[2] : 0;
- return 1;
-}
-
-__setup("fd_mcs=", fd_mcs_setup);
-#endif /* !MODULE */
-
-static void print_banner(struct Scsi_Host *shpnt)
-{
- printk("scsi%d <fd_mcs>: ", shpnt->host_no);
-
- if (bios_base) {
- printk("BIOS at 0x%lX", bios_base);
- } else {
- printk("No BIOS");
- }
-
- printk(", HostID %d, %s Chip, IRQ %d, IO 0x%lX\n", shpnt->this_id, chip == tmc18c50 ? "TMC-18C50" : (chip == tmc18c30 ? "TMC-18C30" : (chip == tmc1800 ? "TMC-1800" : "Unknown")), shpnt->irq, shpnt->io_port);
-}
-
-
-static void do_pause(unsigned amount)
-{ /* Pause for amount*10 milliseconds */
- do {
- mdelay(10);
- } while (--amount);
-}
-
-static void fd_mcs_make_bus_idle(struct Scsi_Host *shpnt)
-{
- outb(0, SCSI_Cntl_port);
- outb(0, SCSI_Mode_Cntl_port);
- if (chip == tmc18c50 || chip == tmc18c30)
- outb(0x21 | PARITY_MASK, TMC_Cntl_port); /* Clear forced intr. */
- else
- outb(0x01 | PARITY_MASK, TMC_Cntl_port);
-}
-
-static int fd_mcs_detect(struct scsi_host_template * tpnt)
-{
- int loop;
- struct Scsi_Host *shpnt;
-
- /* get id, port, bios, irq */
- int slot;
- u_char pos2, pos3, pos4;
- int id, port, irq;
- unsigned long bios;
-
- /* if not MCA machine, return */
- if (!MCA_bus)
- return 0;
-
- /* changeable? */
- id = 7;
-
- for (loop = 0; loop < FD_BRDS; loop++) {
- slot = 0;
- while (MCA_NOTFOUND != (slot = mca_find_adapter(fd_mcs_adapters[loop].id, slot))) {
-
- /* if we get this far, an adapter has been detected and is
- enabled */
-
- printk(KERN_INFO "scsi <fd_mcs>: %s at slot %d\n", fd_mcs_adapters[loop].name, slot + 1);
-
- pos2 = mca_read_stored_pos(slot, 2);
- pos3 = mca_read_stored_pos(slot, 3);
- pos4 = mca_read_stored_pos(slot, 4);
-
- /* ready for next probe */
- slot++;
-
- if (fd_mcs_adapters[loop].id == REPLY_ID) { /* reply card */
- static int reply_irq[] = { 10, 11, 14, 15 };
-
- bios = 0; /* no bios */
-
- if (pos2 & 0x2)
- port = ports[pos4 & 0x3];
- else
- continue;
-
- /* can't really disable it, same as irq=10 */
- irq = reply_irq[((pos4 >> 2) & 0x1) + 2 * ((pos4 >> 4) & 0x1)];
- } else {
- bios = addresses[pos2 >> 6];
- port = ports[(pos2 >> 4) & 0x03];
- irq = interrupts[(pos2 >> 1) & 0x07];
- }
-
- if (irq) {
- /* claim the slot */
- mca_set_adapter_name(slot - 1, fd_mcs_adapters[loop].name);
-
- /* check irq/region */
- if (request_irq(irq, fd_mcs_intr, IRQF_SHARED, "fd_mcs", hosts)) {
- printk(KERN_ERR "fd_mcs: interrupt is not available, skipping...\n");
- continue;
- }
-
- /* request I/O region */
- if (request_region(port, 0x10, "fd_mcs")) {
- printk(KERN_ERR "fd_mcs: I/O region is already in use, skipping...\n");
- continue;
- }
- /* register */
- if (!(shpnt = scsi_register(tpnt, sizeof(struct fd_hostdata)))) {
- printk(KERN_ERR "fd_mcs: scsi_register() failed\n");
- release_region(port, 0x10);
- free_irq(irq, hosts);
- continue;
- }
-
-
- /* save name */
- strcpy(adapter_name, fd_mcs_adapters[loop].name);
-
- /* chip/fifo */
- chip = fd_mcs_adapters[loop].fd_chip;
- /* use boot time value if available */
- FIFO_COUNT = user_fifo_count ? user_fifo_count : fd_mcs_adapters[loop].fifo_count;
- FIFO_Size = user_fifo_size ? user_fifo_size : fd_mcs_adapters[loop].fifo_size;
-
-/* FIXME: Do we need to keep this bit of code inside NOT_USED around at all? */
-#ifdef NOT_USED
- /* *************************************************** */
- /* Try to toggle 32-bit mode. This only
- works on an 18c30 chip. (User reports
- say this works, so we should switch to
- it in the near future.) */
- outb(0x80, port + IO_Control);
- if ((inb(port + Configuration2) & 0x80) == 0x80) {
- outb(0x00, port + IO_Control);
- if ((inb(port + Configuration2) & 0x80) == 0x00) {
- chip = tmc18c30;
- FIFO_Size = 0x800; /* 2k FIFO */
-
- printk("FIRST: chip=%s, fifo_size=0x%x\n", (chip == tmc18c30) ? "tmc18c30" : "tmc18c50", FIFO_Size);
- }
- }
-
- /* That should have worked, but appears to
- have problems. Let's assume it is an
- 18c30 if the RAM is disabled. */
-
- if (inb(port + Configuration2) & 0x02) {
- chip = tmc18c30;
- FIFO_Size = 0x800; /* 2k FIFO */
-
- printk("SECOND: chip=%s, fifo_size=0x%x\n", (chip == tmc18c30) ? "tmc18c30" : "tmc18c50", FIFO_Size);
- }
- /* *************************************************** */
-#endif
-
- /* IBM/ANSI scsi scan ordering */
- /* Stick this back in when the scsi.c changes are there */
- shpnt->reverse_ordering = 1;
-
-
- /* saving info */
- hosts[found++] = shpnt;
-
- shpnt->this_id = id;
- shpnt->irq = irq;
- shpnt->io_port = port;
- shpnt->n_io_port = 0x10;
-
- /* save */
- bios_base = bios;
- adapter_mask = (1 << id);
-
- /* save more */
- SCSI_Mode_Cntl_port = port + SCSI_Mode_Cntl;
- FIFO_Data_Count_port = port + FIFO_Data_Count;
- Interrupt_Cntl_port = port + Interrupt_Cntl;
- Interrupt_Status_port = port + Interrupt_Status;
- Interrupt_Cond_port = port + Interrupt_Cond;
- Read_FIFO_port = port + Read_FIFO;
- Read_SCSI_Data_port = port + Read_SCSI_Data;
- SCSI_Cntl_port = port + SCSI_Cntl;
- SCSI_Data_NoACK_port = port + SCSI_Data_NoACK;
- SCSI_Status_port = port + SCSI_Status;
- TMC_Cntl_port = port + TMC_Cntl;
- TMC_Status_port = port + TMC_Status;
- Write_FIFO_port = port + Write_FIFO;
- Write_SCSI_Data_port = port + Write_SCSI_Data;
-
- Bytes_Read = 0;
- Bytes_Written = 0;
- INTR_Processed = 0;
-
- /* say something */
- print_banner(shpnt);
-
- /* reset */
- outb(1, SCSI_Cntl_port);
- do_pause(2);
- outb(0, SCSI_Cntl_port);
- do_pause(115);
- outb(0, SCSI_Mode_Cntl_port);
- outb(PARITY_MASK, TMC_Cntl_port);
- /* done reset */
- }
- }
-
- if (found == FD_MAX_HOSTS) {
- printk("fd_mcs: detecting reached max=%d host adapters.\n", FD_MAX_HOSTS);
- break;
- }
- }
-
- return found;
-}
-
-static const char *fd_mcs_info(struct Scsi_Host *shpnt)
-{
- return adapter_name;
-}
-
-static int TOTAL_INTR = 0;
-
-/*
- * inout : decides on the direction of the dataflow and the meaning of the
- * variables
- * buffer: If inout==FALSE data is being written to it else read from it
- * *start: If inout==FALSE start of the valid data in the buffer
- * offset: If inout==FALSE offset from the beginning of the imaginary file
- * from which we start writing into the buffer
- * length: If inout==FALSE max number of bytes to be written into the buffer
- * else number of bytes in the buffer
- */
-static int fd_mcs_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
-{
- int len = 0;
-
- if (inout)
- return (-ENOSYS);
-
- *start = buffer + offset;
-
- len += sprintf(buffer + len, "Future Domain MCS-600/700 Driver %s\n", DRIVER_VERSION);
- len += sprintf(buffer + len, "HOST #%d: %s\n", shpnt->host_no, adapter_name);
- len += sprintf(buffer + len, "FIFO Size=0x%x, FIFO Count=%d\n", FIFO_Size, FIFO_COUNT);
- len += sprintf(buffer + len, "DriverCalls=%d, Interrupts=%d, BytesRead=%d, BytesWrite=%d\n\n", TOTAL_INTR, INTR_Processed, Bytes_Read, Bytes_Written);
-
- if ((len -= offset) <= 0)
- return 0;
- if (len > length)
- len = length;
- return len;
-}
-
-static int fd_mcs_select(struct Scsi_Host *shpnt, int target)
-{
- int status;
- unsigned long timeout;
-
- outb(0x82, SCSI_Cntl_port); /* Bus Enable + Select */
- outb(adapter_mask | (1 << target), SCSI_Data_NoACK_port);
-
- /* Stop arbitration and enable parity */
- outb(PARITY_MASK, TMC_Cntl_port);
-
- timeout = 350; /* 350mS -- because of timeouts
- (was 250mS) */
-
- do {
- status = inb(SCSI_Status_port); /* Read adapter status */
- if (status & 1) { /* Busy asserted */
- /* Enable SCSI Bus (on error, should make bus idle with 0) */
- outb(0x80, SCSI_Cntl_port);
- return 0;
- }
- udelay(1000); /* wait one msec */
- } while (--timeout);
-
- /* Make bus idle */
- fd_mcs_make_bus_idle(shpnt);
-#if EVERY_ACCESS
- if (!target)
- printk("Selection failed\n");
-#endif
-#if ERRORS_ONLY
- if (!target) {
- static int flag = 0;
-
- if (!flag) /* Skip first failure for all chips. */
- ++flag;
- else
- printk("fd_mcs: Selection failed\n");
- }
-#endif
- return 1;
-}
-
-static void my_done(struct Scsi_Host *shpnt, int error)
-{
- if (in_command) {
- in_command = 0;
- outb(0x00, Interrupt_Cntl_port);
- fd_mcs_make_bus_idle(shpnt);
- current_SC->result = error;
- current_SC->scsi_done(current_SC);
- } else {
- panic("fd_mcs: my_done() called outside of command\n");
- }
-#if DEBUG_RACE
- in_interrupt_flag = 0;
-#endif
-}
-
-/* only my_done needs to be protected */
-static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
-{
- unsigned long flags;
- int status;
- int done = 0;
- unsigned data_count, tmp_count;
-
- int i = 0;
- struct Scsi_Host *shpnt;
-
- TOTAL_INTR++;
-
- /* search for one adapter-response on shared interrupt */
- while ((shpnt = hosts[i++])) {
- if ((inb(TMC_Status_port)) & 1)
- break;
- }
-
- /* return if some other device on this IRQ caused the interrupt */
- if (!shpnt) {
- return IRQ_NONE;
- }
-
- INTR_Processed++;
-
- outb(0x00, Interrupt_Cntl_port);
-
- /* Abort calls my_done, so we do nothing here. */
- if (current_SC->SCp.phase & aborted) {
-#if DEBUG_ABORT
- printk("Interrupt after abort, ignoring\n");
-#endif
- /* return IRQ_HANDLED; */
- }
-#if DEBUG_RACE
- ++in_interrupt_flag;
-#endif
-
- if (current_SC->SCp.phase & in_arbitration) {
- status = inb(TMC_Status_port); /* Read adapter status */
- if (!(status & 0x02)) {
-#if EVERY_ACCESS
- printk(" AFAIL ");
-#endif
- spin_lock_irqsave(shpnt->host_lock, flags);
- my_done(shpnt, DID_BUS_BUSY << 16);
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
- }
- current_SC->SCp.phase = in_selection;
-
- outb(0x40 | FIFO_COUNT, Interrupt_Cntl_port);
-
- outb(0x82, SCSI_Cntl_port); /* Bus Enable + Select */
- outb(adapter_mask | (1 << scmd_id(current_SC)), SCSI_Data_NoACK_port);
-
- /* Stop arbitration and enable parity */
- outb(0x10 | PARITY_MASK, TMC_Cntl_port);
-#if DEBUG_RACE
- in_interrupt_flag = 0;
-#endif
- return IRQ_HANDLED;
- } else if (current_SC->SCp.phase & in_selection) {
- status = inb(SCSI_Status_port);
- if (!(status & 0x01)) {
- /* Try again, for slow devices */
- if (fd_mcs_select(shpnt, scmd_id(current_SC))) {
-#if EVERY_ACCESS
- printk(" SFAIL ");
-#endif
- spin_lock_irqsave(shpnt->host_lock, flags);
- my_done(shpnt, DID_NO_CONNECT << 16);
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
- } else {
-#if EVERY_ACCESS
- printk(" AltSel ");
-#endif
- /* Stop arbitration and enable parity */
- outb(0x10 | PARITY_MASK, TMC_Cntl_port);
- }
- }
- current_SC->SCp.phase = in_other;
- outb(0x90 | FIFO_COUNT, Interrupt_Cntl_port);
- outb(0x80, SCSI_Cntl_port);
-#if DEBUG_RACE
- in_interrupt_flag = 0;
-#endif
- return IRQ_HANDLED;
- }
-
- /* current_SC->SCp.phase == in_other: this is the body of the routine */
-
- status = inb(SCSI_Status_port);
-
- if (status & 0x10) { /* REQ */
-
- switch (status & 0x0e) {
-
- case 0x08: /* COMMAND OUT */
- outb(current_SC->cmnd[current_SC->SCp.sent_command++], Write_SCSI_Data_port);
-#if EVERY_ACCESS
- printk("CMD = %x,", current_SC->cmnd[current_SC->SCp.sent_command - 1]);
-#endif
- break;
- case 0x00: /* DATA OUT -- tmc18c50/tmc18c30 only */
- if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
- current_SC->SCp.have_data_in = -1;
- outb(0xd0 | PARITY_MASK, TMC_Cntl_port);
- }
- break;
- case 0x04: /* DATA IN -- tmc18c50/tmc18c30 only */
- if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
- current_SC->SCp.have_data_in = 1;
- outb(0x90 | PARITY_MASK, TMC_Cntl_port);
- }
- break;
- case 0x0c: /* STATUS IN */
- current_SC->SCp.Status = inb(Read_SCSI_Data_port);
-#if EVERY_ACCESS
- printk("Status = %x, ", current_SC->SCp.Status);
-#endif
-#if ERRORS_ONLY
- if (current_SC->SCp.Status && current_SC->SCp.Status != 2 && current_SC->SCp.Status != 8) {
- printk("ERROR fd_mcs: target = %d, command = %x, status = %x\n", current_SC->device->id, current_SC->cmnd[0], current_SC->SCp.Status);
- }
-#endif
- break;
- case 0x0a: /* MESSAGE OUT */
- outb(MESSAGE_REJECT, Write_SCSI_Data_port); /* Reject */
- break;
- case 0x0e: /* MESSAGE IN */
- current_SC->SCp.Message = inb(Read_SCSI_Data_port);
-#if EVERY_ACCESS
- printk("Message = %x, ", current_SC->SCp.Message);
-#endif
- if (!current_SC->SCp.Message)
- ++done;
-#if DEBUG_MESSAGES || EVERY_ACCESS
- if (current_SC->SCp.Message) {
- printk("fd_mcs: message = %x\n", current_SC->SCp.Message);
- }
-#endif
- break;
- }
- }
-
- if (chip == tmc1800 && !current_SC->SCp.have_data_in && (current_SC->SCp.sent_command >= current_SC->cmd_len)) {
- /* We have to get the FIFO direction
- correct, so I've made a table based
- on the SCSI Standard of which commands
- appear to require a DATA OUT phase.
- */
- /*
- p. 94: Command for all device types
- CHANGE DEFINITION 40 DATA OUT
- COMPARE 39 DATA OUT
- COPY 18 DATA OUT
- COPY AND VERIFY 3a DATA OUT
- INQUIRY 12
- LOG SELECT 4c DATA OUT
- LOG SENSE 4d
- MODE SELECT (6) 15 DATA OUT
- MODE SELECT (10) 55 DATA OUT
- MODE SENSE (6) 1a
- MODE SENSE (10) 5a
- READ BUFFER 3c
- RECEIVE DIAGNOSTIC RESULTS 1c
- REQUEST SENSE 03
- SEND DIAGNOSTIC 1d DATA OUT
- TEST UNIT READY 00
- WRITE BUFFER 3b DATA OUT
-
- p.178: Commands for direct-access devices (not listed on p. 94)
- FORMAT UNIT 04 DATA OUT
- LOCK-UNLOCK CACHE 36
- PRE-FETCH 34
- PREVENT-ALLOW MEDIUM REMOVAL 1e
- READ (6)/RECEIVE 08
- READ (10) 3c
- READ CAPACITY 25
- READ DEFECT DATA (10) 37
- READ LONG 3e
- REASSIGN BLOCKS 07 DATA OUT
- RELEASE 17
- RESERVE 16 DATA OUT
- REZERO UNIT/REWIND 01
- SEARCH DATA EQUAL (10) 31 DATA OUT
- SEARCH DATA HIGH (10) 30 DATA OUT
- SEARCH DATA LOW (10) 32 DATA OUT
- SEEK (6) 0b
- SEEK (10) 2b
- SET LIMITS (10) 33
- START STOP UNIT 1b
- SYNCHRONIZE CACHE 35
- VERIFY (10) 2f
- WRITE (6)/PRINT/SEND 0a DATA OUT
- WRITE (10)/SEND 2a DATA OUT
- WRITE AND VERIFY (10) 2e DATA OUT
- WRITE LONG 3f DATA OUT
- WRITE SAME 41 DATA OUT ?
-
- p. 261: Commands for sequential-access devices (not previously listed)
- ERASE 19
- LOAD UNLOAD 1b
- LOCATE 2b
- READ BLOCK LIMITS 05
- READ POSITION 34
- READ REVERSE 0f
- RECOVER BUFFERED DATA 14
- SPACE 11
- WRITE FILEMARKS 10 ?
-
- p. 298: Commands for printer devices (not previously listed)
- ****** NOT SUPPORTED BY THIS DRIVER, since 0b is SEEK (6) *****
- SLEW AND PRINT 0b DATA OUT -- same as seek
- STOP PRINT 1b
- SYNCHRONIZE BUFFER 10
-
- p. 315: Commands for processor devices (not previously listed)
-
- p. 321: Commands for write-once devices (not previously listed)
- MEDIUM SCAN 38
- READ (12) a8
- SEARCH DATA EQUAL (12) b1 DATA OUT
- SEARCH DATA HIGH (12) b0 DATA OUT
- SEARCH DATA LOW (12) b2 DATA OUT
- SET LIMITS (12) b3
- VERIFY (12) af
- WRITE (12) aa DATA OUT
- WRITE AND VERIFY (12) ae DATA OUT
-
- p. 332: Commands for CD-ROM devices (not previously listed)
- PAUSE/RESUME 4b
- PLAY AUDIO (10) 45
- PLAY AUDIO (12) a5
- PLAY AUDIO MSF 47
- PLAY TRACK RELATIVE (10) 49
- PLAY TRACK RELATIVE (12) a9
- READ HEADER 44
- READ SUB-CHANNEL 42
- READ TOC 43
-
- p. 370: Commands for scanner devices (not previously listed)
- GET DATA BUFFER STATUS 34
- GET WINDOW 25
- OBJECT POSITION 31
- SCAN 1b
- SET WINDOW 24 DATA OUT
-
- p. 391: Commands for optical memory devices (not listed)
- ERASE (10) 2c
- ERASE (12) ac
- MEDIUM SCAN 38 DATA OUT
- READ DEFECT DATA (12) b7
- READ GENERATION 29
- READ UPDATED BLOCK 2d
- UPDATE BLOCK 3d DATA OUT
-
- p. 419: Commands for medium changer devices (not listed)
- EXCHANGE MEDIUM 46
- INITIALIZE ELEMENT STATUS 07
- MOVE MEDIUM a5
- POSITION TO ELEMENT 2b
- READ ELEMENT STATUS b8
- REQUEST VOL. ELEMENT ADDRESS b5
- SEND VOLUME TAG b6 DATA OUT
-
- p. 454: Commands for communications devices (not listed previously)
- GET MESSAGE (6) 08
- GET MESSAGE (10) 28
- GET MESSAGE (12) a8
- */
-
- switch (current_SC->cmnd[0]) {
- case CHANGE_DEFINITION:
- case COMPARE:
- case COPY:
- case COPY_VERIFY:
- case LOG_SELECT:
- case MODE_SELECT:
- case MODE_SELECT_10:
- case SEND_DIAGNOSTIC:
- case WRITE_BUFFER:
-
- case FORMAT_UNIT:
- case REASSIGN_BLOCKS:
- case RESERVE:
- case SEARCH_EQUAL:
- case SEARCH_HIGH:
- case SEARCH_LOW:
- case WRITE_6:
- case WRITE_10:
- case WRITE_VERIFY:
- case 0x3f:
- case 0x41:
-
- case 0xb1:
- case 0xb0:
- case 0xb2:
- case 0xaa:
- case 0xae:
-
- case 0x24:
-
- case 0x38:
- case 0x3d:
-
- case 0xb6:
-
- case 0xea: /* alternate number for WRITE LONG */
-
- current_SC->SCp.have_data_in = -1;
- outb(0xd0 | PARITY_MASK, TMC_Cntl_port);
- break;
-
- case 0x00:
- default:
-
- current_SC->SCp.have_data_in = 1;
- outb(0x90 | PARITY_MASK, TMC_Cntl_port);
- break;
- }
- }
-
- if (current_SC->SCp.have_data_in == -1) { /* DATA OUT */
- while ((data_count = FIFO_Size - inw(FIFO_Data_Count_port)) > 512) {
-#if EVERY_ACCESS
- printk("DC=%d, ", data_count);
-#endif
- if (data_count > current_SC->SCp.this_residual)
- data_count = current_SC->SCp.this_residual;
- if (data_count > 0) {
-#if EVERY_ACCESS
- printk("%d OUT, ", data_count);
-#endif
- if (data_count == 1) {
- Bytes_Written++;
-
- outb(*current_SC->SCp.ptr++, Write_FIFO_port);
- --current_SC->SCp.this_residual;
- } else {
- data_count >>= 1;
- tmp_count = data_count << 1;
- outsw(Write_FIFO_port, current_SC->SCp.ptr, data_count);
- current_SC->SCp.ptr += tmp_count;
- Bytes_Written += tmp_count;
- current_SC->SCp.this_residual -= tmp_count;
- }
- }
- if (!current_SC->SCp.this_residual) {
- if (current_SC->SCp.buffers_residual) {
- --current_SC->SCp.buffers_residual;
- ++current_SC->SCp.buffer;
- current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
- current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
- } else
- break;
- }
- }
- } else if (current_SC->SCp.have_data_in == 1) { /* DATA IN */
- while ((data_count = inw(FIFO_Data_Count_port)) > 0) {
-#if EVERY_ACCESS
- printk("DC=%d, ", data_count);
-#endif
- if (data_count > current_SC->SCp.this_residual)
- data_count = current_SC->SCp.this_residual;
- if (data_count) {
-#if EVERY_ACCESS
- printk("%d IN, ", data_count);
-#endif
- if (data_count == 1) {
- Bytes_Read++;
- *current_SC->SCp.ptr++ = inb(Read_FIFO_port);
- --current_SC->SCp.this_residual;
- } else {
- data_count >>= 1; /* Number of words */
- tmp_count = data_count << 1;
- insw(Read_FIFO_port, current_SC->SCp.ptr, data_count);
- current_SC->SCp.ptr += tmp_count;
- Bytes_Read += tmp_count;
- current_SC->SCp.this_residual -= tmp_count;
- }
- }
- if (!current_SC->SCp.this_residual && current_SC->SCp.buffers_residual) {
- --current_SC->SCp.buffers_residual;
- ++current_SC->SCp.buffer;
- current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
- current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
- }
- }
- }
-
- if (done) {
-#if EVERY_ACCESS
- printk(" ** IN DONE %d ** ", current_SC->SCp.have_data_in);
-#endif
-
-#if EVERY_ACCESS
- printk("BEFORE MY_DONE. . .");
-#endif
- spin_lock_irqsave(shpnt->host_lock, flags);
- my_done(shpnt, (current_SC->SCp.Status & 0xff)
- | ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16));
- spin_unlock_irqrestore(shpnt->host_lock, flags);
-#if EVERY_ACCESS
- printk("RETURNING.\n");
-#endif
-
- } else {
- if (current_SC->SCp.phase & disconnect) {
- outb(0xd0 | FIFO_COUNT, Interrupt_Cntl_port);
- outb(0x00, SCSI_Cntl_port);
- } else {
- outb(0x90 | FIFO_COUNT, Interrupt_Cntl_port);
- }
- }
-#if DEBUG_RACE
- in_interrupt_flag = 0;
-#endif
- return IRQ_HANDLED;
-}
-
-static int fd_mcs_release(struct Scsi_Host *shpnt)
-{
- int i, this_host, irq_usage;
-
- release_region(shpnt->io_port, shpnt->n_io_port);
-
- this_host = -1;
- irq_usage = 0;
- for (i = 0; i < found; i++) {
- if (shpnt == hosts[i])
- this_host = i;
- if (shpnt->irq == hosts[i]->irq)
- irq_usage++;
- }
-
- /* only for the last one */
- if (1 == irq_usage)
- free_irq(shpnt->irq, hosts);
-
- found--;
-
- for (i = this_host; i < found; i++)
- hosts[i] = hosts[i + 1];
-
- hosts[found] = NULL;
-
- return 0;
-}
-
-static int fd_mcs_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
-{
- struct Scsi_Host *shpnt = SCpnt->device->host;
-
- if (in_command) {
- panic("fd_mcs: fd_mcs_queue() NOT REENTRANT!\n");
- }
-#if EVERY_ACCESS
- printk("queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
- SCpnt->target, *(unsigned char *) SCpnt->cmnd,
- scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
-#endif
-
- fd_mcs_make_bus_idle(shpnt);
-
- SCpnt->scsi_done = done; /* Save this for the done function */
- current_SC = SCpnt;
-
- /* Initialize static data */
-
- if (scsi_bufflen(current_SC)) {
- current_SC->SCp.buffer = scsi_sglist(current_SC);
- current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
- current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
- current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
- } else {
- current_SC->SCp.ptr = NULL;
- current_SC->SCp.this_residual = 0;
- current_SC->SCp.buffer = NULL;
- current_SC->SCp.buffers_residual = 0;
- }
-
-
- current_SC->SCp.Status = 0;
- current_SC->SCp.Message = 0;
- current_SC->SCp.have_data_in = 0;
- current_SC->SCp.sent_command = 0;
- current_SC->SCp.phase = in_arbitration;
-
- /* Start arbitration */
- outb(0x00, Interrupt_Cntl_port);
- outb(0x00, SCSI_Cntl_port); /* Disable data drivers */
- outb(adapter_mask, SCSI_Data_NoACK_port); /* Set our id bit */
- in_command = 1;
- outb(0x20, Interrupt_Cntl_port);
- outb(0x14 | PARITY_MASK, TMC_Cntl_port); /* Start arbitration */
-
- return 0;
-}
-
-static DEF_SCSI_QCMD(fd_mcs_queue)
-
-#if DEBUG_ABORT || DEBUG_RESET
-static void fd_mcs_print_info(Scsi_Cmnd * SCpnt)
-{
- unsigned int imr;
- unsigned int irr;
- unsigned int isr;
-	struct Scsi_Host *shpnt;
-
-	if (!SCpnt || !SCpnt->host) {
-		printk("fd_mcs: cannot provide detailed information\n");
-		return;
-	}
-	shpnt = SCpnt->host;
-
- printk("%s\n", fd_mcs_info(SCpnt->host));
- print_banner(SCpnt->host);
- switch (SCpnt->SCp.phase) {
- case in_arbitration:
- printk("arbitration ");
- break;
- case in_selection:
- printk("selection ");
- break;
- case in_other:
- printk("other ");
- break;
- default:
- printk("unknown ");
- break;
- }
-
- printk("(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
- SCpnt->SCp.phase, SCpnt->device->id, *(unsigned char *) SCpnt->cmnd,
- scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
- printk("sent_command = %d, have_data_in = %d, timeout = %d\n", SCpnt->SCp.sent_command, SCpnt->SCp.have_data_in, SCpnt->timeout);
-#if DEBUG_RACE
- printk("in_interrupt_flag = %d\n", in_interrupt_flag);
-#endif
-
- imr = (inb(0x0a1) << 8) + inb(0x21);
- outb(0x0a, 0xa0);
- irr = inb(0xa0) << 8;
- outb(0x0a, 0x20);
- irr += inb(0x20);
- outb(0x0b, 0xa0);
- isr = inb(0xa0) << 8;
- outb(0x0b, 0x20);
- isr += inb(0x20);
-
- /* Print out interesting information */
- printk("IMR = 0x%04x", imr);
- if (imr & (1 << shpnt->irq))
- printk(" (masked)");
- printk(", IRR = 0x%04x, ISR = 0x%04x\n", irr, isr);
-
- printk("SCSI Status = 0x%02x\n", inb(SCSI_Status_port));
- printk("TMC Status = 0x%02x", inb(TMC_Status_port));
- if (inb(TMC_Status_port) & 1)
- printk(" (interrupt)");
- printk("\n");
- printk("Interrupt Status = 0x%02x", inb(Interrupt_Status_port));
- if (inb(Interrupt_Status_port) & 0x08)
- printk(" (enabled)");
- printk("\n");
- if (chip == tmc18c50 || chip == tmc18c30) {
- printk("FIFO Status = 0x%02x\n", inb(shpnt->io_port + FIFO_Status));
- printk("Int. Condition = 0x%02x\n", inb(shpnt->io_port + Interrupt_Cond));
- }
- printk("Configuration 1 = 0x%02x\n", inb(shpnt->io_port + Configuration1));
- if (chip == tmc18c50 || chip == tmc18c30)
- printk("Configuration 2 = 0x%02x\n", inb(shpnt->io_port + Configuration2));
-}
-#endif
-
-static int fd_mcs_abort(Scsi_Cmnd * SCpnt)
-{
- struct Scsi_Host *shpnt = SCpnt->device->host;
-
- unsigned long flags;
-#if EVERY_ACCESS || ERRORS_ONLY || DEBUG_ABORT
- printk("fd_mcs: abort ");
-#endif
-
- spin_lock_irqsave(shpnt->host_lock, flags);
- if (!in_command) {
-#if EVERY_ACCESS || ERRORS_ONLY
- printk(" (not in command)\n");
-#endif
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return FAILED;
- } else
- printk("\n");
-
-#if DEBUG_ABORT
- fd_mcs_print_info(SCpnt);
-#endif
-
- fd_mcs_make_bus_idle(shpnt);
-
- current_SC->SCp.phase |= aborted;
-
- current_SC->result = DID_ABORT << 16;
-
- /* Aborts are not done well. . . */
- my_done(shpnt, DID_ABORT << 16);
-
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return SUCCESS;
-}
-
-static int fd_mcs_bus_reset(Scsi_Cmnd * SCpnt) {
- struct Scsi_Host *shpnt = SCpnt->device->host;
- unsigned long flags;
-
-#if DEBUG_RESET
- static int called_once = 0;
-#endif
-
-#if ERRORS_ONLY
- if (SCpnt)
- printk("fd_mcs: SCSI Bus Reset\n");
-#endif
-
-#if DEBUG_RESET
- if (called_once)
- fd_mcs_print_info(current_SC);
- called_once = 1;
-#endif
-
- spin_lock_irqsave(shpnt->host_lock, flags);
-
- outb(1, SCSI_Cntl_port);
- do_pause(2);
- outb(0, SCSI_Cntl_port);
- do_pause(115);
- outb(0, SCSI_Mode_Cntl_port);
- outb(PARITY_MASK, TMC_Cntl_port);
-
- spin_unlock_irqrestore(shpnt->host_lock, flags);
-
- /* Unless this is the very first call (i.e., SCPnt == NULL), everything
- is probably hosed at this point. We will, however, try to keep
- things going by informing the high-level code that we need help. */
- return SUCCESS;
-}
-
-#include <scsi/scsi_ioctl.h>
-
-static int fd_mcs_biosparam(struct scsi_device * disk, struct block_device *bdev,
- sector_t capacity, int *info_array)
-{
- unsigned char *p = scsi_bios_ptable(bdev);
- int size = capacity;
-
- /* BIOS >= 3.4 for MCA cards */
- /* This algorithm was provided by Future Domain (much thanks!). */
-
- if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */
- && p[4]) { /* Partition type */
- /* The partition table layout is as follows:
-
-		   Start: 0x1be
- Offset: 0 = partition status
- 1 = starting head
- 2 = starting sector and cylinder (word, encoded)
- 4 = partition type
- 5 = ending head
- 6 = ending sector and cylinder (word, encoded)
- 8 = starting absolute sector (double word)
- c = number of sectors (double word)
- Signature: 0x1fe = 0x55aa
-
- So, this algorithm assumes:
- 1) the first partition table is in use,
- 2) the data in the first entry is correct, and
- 3) partitions never divide cylinders
-
- Note that (1) may be FALSE for NetBSD (and other BSD flavors),
-		   as well as for Linux. Note also that Linux doesn't pay any
- attention to the fields that are used by this algorithm -- it
- only uses the absolute sector data. Recent versions of Linux's
- fdisk(1) will fill this data in correctly, and forthcoming
- versions will check for consistency.
-
- Checking for a non-zero partition type is not part of the
- Future Domain algorithm, but it seemed to be a reasonable thing
- to do, especially in the Linux and BSD worlds. */
-
- info_array[0] = p[5] + 1; /* heads */
- info_array[1] = p[6] & 0x3f; /* sectors */
- } else {
- /* Note that this new method guarantees that there will always be
- less than 1024 cylinders on a platter. This is good for drives
- up to approximately 7.85GB (where 1GB = 1024 * 1024 kB). */
- if ((unsigned int) size >= 0x7e0000U)
- {
- info_array[0] = 0xff; /* heads = 255 */
- info_array[1] = 0x3f; /* sectors = 63 */
- } else if ((unsigned int) size >= 0x200000U) {
- info_array[0] = 0x80; /* heads = 128 */
- info_array[1] = 0x3f; /* sectors = 63 */
- } else {
- info_array[0] = 0x40; /* heads = 64 */
- info_array[1] = 0x20; /* sectors = 32 */
- }
- }
- /* For both methods, compute the cylinders */
- info_array[2] = (unsigned int) size / (info_array[0] * info_array[1]);
- kfree(p);
- return 0;
-}
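-
-/* Worked example for the fallback geometry above (figures are illustrative,
-   not taken from the original driver): a disk of 0x300000 (3,145,728)
-   sectors takes the middle branch (>= 0x200000 and < 0x7e0000), so
-   heads = 128 and sectors = 63, giving cylinders = 3145728 / (128 * 63)
-   = 390, comfortably below the 1024-cylinder limit mentioned above. */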
-
-static struct scsi_host_template driver_template = {
- .proc_name = "fd_mcs",
- .proc_info = fd_mcs_proc_info,
- .detect = fd_mcs_detect,
- .release = fd_mcs_release,
- .info = fd_mcs_info,
- .queuecommand = fd_mcs_queue,
- .eh_abort_handler = fd_mcs_abort,
- .eh_bus_reset_handler = fd_mcs_bus_reset,
- .bios_param = fd_mcs_biosparam,
- .can_queue = 1,
- .this_id = 7,
- .sg_tablesize = 64,
- .cmd_per_lun = 1,
- .use_clustering = DISABLE_CLUSTERING,
-};
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
deleted file mode 100644
index cd09132d5d7d..000000000000
--- a/drivers/scsi/ibmmca.c
+++ /dev/null
@@ -1,2379 +0,0 @@
-/*
- Low Level Linux Driver for the IBM Microchannel SCSI Subsystem for
- Linux Kernel >= 2.4.0.
- Copyright (c) 1995 Strom Systems, Inc. under the terms of the GNU
- General Public License. Written by Martin Kolinek, December 1995.
- Further development by: Chris Beauregard, Klaus Kudielka, Michael Lang
- See the file Documentation/scsi/ibmmca.txt for a detailed description
- of this driver, the commandline arguments and the history of its
- development.
- See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest
- updates, info and ADF-files for adapters supported by this driver.
-
- Alan Cox <alan@lxorguk.ukuu.org.uk>
- Updated for Linux 2.5.45 to use the new error handler, cleaned up the
- lock macros and did a few unavoidable locking tweaks, plus one locking
- fix in the irq and completion path.
-
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/mca.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-
-/* Common forward declarations for all Linux-versions: */
-static int ibmmca_queuecommand (struct Scsi_Host *, struct scsi_cmnd *);
-static int ibmmca_abort (Scsi_Cmnd *);
-static int ibmmca_host_reset (Scsi_Cmnd *);
-static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
-static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout);
-
-
-
-/* current version of this driver-source: */
-#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac"
-
-/* driver configuration */
-#define IM_MAX_HOSTS 8 /* maximum number of host adapters */
-#define IM_RESET_DELAY 60 /* seconds allowed for a reset */
-
-/* driver debugging - #undef all for normal operation */
-/* if defined: count interrupts and ignore this special one: */
-#undef IM_DEBUG_TIMEOUT //50
-#define TIMEOUT_PUN 0
-#define TIMEOUT_LUN 0
-/* verbose interrupt: */
-#undef IM_DEBUG_INT
-/* verbose queuecommand: */
-#undef IM_DEBUG_CMD
-/* verbose queuecommand for specific SCSI-device type: */
-#undef IM_DEBUG_CMD_SPEC_DEV
-/* verbose device probing */
-#undef IM_DEBUG_PROBE
-
-/* device type that shall be displayed on syslog (only during debugging): */
-#define IM_DEBUG_CMD_DEVICE TYPE_TAPE
-
-/* relative addresses of hardware registers on a subsystem */
-#define IM_CMD_REG(h) ((h)->io_port) /*Command Interface, (4 bytes long) */
-#define IM_ATTN_REG(h) ((h)->io_port+4) /*Attention (1 byte) */
-#define IM_CTR_REG(h) ((h)->io_port+5) /*Basic Control (1 byte) */
-#define IM_INTR_REG(h) ((h)->io_port+6) /*Interrupt Status (1 byte, r/o) */
-#define IM_STAT_REG(h) ((h)->io_port+7) /*Basic Status (1 byte, read only) */
-
-/* basic I/O-port of first adapter */
-#define IM_IO_PORT 0x3540
-/* maximum number of hosts that can be found */
-#define IM_N_IO_PORT 8
-
-/*requests going into the upper nibble of the Attention register */
-/*note: the lower nibble specifies the device(0-14), or subsystem(15) */
-#define IM_IMM_CMD 0x10 /*immediate command */
-#define IM_SCB 0x30 /*Subsystem Control Block command */
-#define IM_LONG_SCB 0x40 /*long Subsystem Control Block command */
-#define IM_EOI 0xe0 /*end-of-interrupt request */
-
-/*values for bits 7,1,0 of Basic Control reg. (bits 6-2 reserved) */
-#define IM_HW_RESET 0x80 /*hardware reset */
-#define IM_ENABLE_DMA 0x02 /*enable subsystem's busmaster DMA */
-#define IM_ENABLE_INTR 0x01 /*enable interrupts to the system */
-
-/*to interpret the upper nibble of Interrupt Status register */
-/*note: the lower nibble specifies the device(0-14), or subsystem(15) */
-#define IM_SCB_CMD_COMPLETED 0x10
-#define IM_SCB_CMD_COMPLETED_WITH_RETRIES 0x50
-#define IM_LOOP_SCATTER_BUFFER_FULL 0x60
-#define IM_ADAPTER_HW_FAILURE 0x70
-#define IM_IMMEDIATE_CMD_COMPLETED 0xa0
-#define IM_CMD_COMPLETED_WITH_FAILURE 0xc0
-#define IM_CMD_ERROR 0xe0
-#define IM_SOFTWARE_SEQUENCING_ERROR 0xf0
-
-/*to interpret bits 3-0 of Basic Status register (bits 7-4 reserved) */
-#define IM_CMD_REG_FULL 0x08
-#define IM_CMD_REG_EMPTY 0x04
-#define IM_INTR_REQUEST 0x02
-#define IM_BUSY 0x01
-
-/*immediate commands (word written into low 2 bytes of command reg) */
-#define IM_RESET_IMM_CMD 0x0400
-#define IM_FEATURE_CTR_IMM_CMD 0x040c
-#define IM_DMA_PACING_IMM_CMD 0x040d
-#define IM_ASSIGN_IMM_CMD 0x040e
-#define IM_ABORT_IMM_CMD 0x040f
-#define IM_FORMAT_PREP_IMM_CMD 0x0417
-
-/*SCB (Subsystem Control Block) structure */
-struct im_scb {
- unsigned short command; /*command word (read, etc.) */
- unsigned short enable; /*enable word, modifies cmd */
- union {
- unsigned long log_blk_adr; /*block address on SCSI device */
- unsigned char scsi_cmd_length; /*6,10,12, for other scsi cmd */
- } u1;
- unsigned long sys_buf_adr; /*physical system memory adr */
- unsigned long sys_buf_length; /*size of sys mem buffer */
- unsigned long tsb_adr; /*Termination Status Block adr */
- unsigned long scb_chain_adr; /*optional SCB chain address */
- union {
- struct {
- unsigned short count; /*block count, on SCSI device */
- unsigned short length; /*block length, on SCSI device */
- } blk;
- unsigned char scsi_command[12]; /*other scsi command */
- } u2;
-};
-
-/*structure scatter-gather element (for list of system memory areas) */
-struct im_sge {
- void *address;
- unsigned long byte_length;
-};
-
-/*structure returned by a get_pos_info command: */
-struct im_pos_info {
- unsigned short pos_id; /* adapter id */
- unsigned char pos_3a; /* pos 3 (if pos 6 = 0) */
- unsigned char pos_2; /* pos 2 */
- unsigned char int_level; /* interrupt level IRQ 11 or 14 */
- unsigned char pos_4a; /* pos 4 (if pos 6 = 0) */
- unsigned short connector_size; /* MCA connector size: 16 or 32 Bit */
- unsigned char num_luns; /* number of supported luns per device */
- unsigned char num_puns; /* number of supported puns */
- unsigned char pacing_factor; /* pacing factor */
- unsigned char num_ldns; /* number of ldns available */
- unsigned char eoi_off; /* time EOI and interrupt inactive */
- unsigned char max_busy; /* time between reset and busy on */
- unsigned short cache_stat; /* ldn cachestat. Bit=1 = not cached */
- unsigned short retry_stat; /* retry status of ldns. Bit=1=disabled */
- unsigned char pos_4b; /* pos 4 (if pos 6 = 1) */
- unsigned char pos_3b; /* pos 3 (if pos 6 = 1) */
- unsigned char pos_6; /* pos 6 */
- unsigned char pos_5; /* pos 5 */
- unsigned short max_overlap; /* maximum overlapping requests */
- unsigned short num_bus; /* number of SCSI-busses */
-};
-
-/*values for SCB command word */
-#define IM_NO_SYNCHRONOUS 0x0040 /*flag for any command */
-#define IM_NO_DISCONNECT 0x0080 /*flag for any command */
-#define IM_READ_DATA_CMD 0x1c01
-#define IM_WRITE_DATA_CMD 0x1c02
-#define IM_READ_VERIFY_CMD 0x1c03
-#define IM_WRITE_VERIFY_CMD 0x1c04
-#define IM_REQUEST_SENSE_CMD 0x1c08
-#define IM_READ_CAPACITY_CMD 0x1c09
-#define IM_DEVICE_INQUIRY_CMD 0x1c0b
-#define IM_READ_LOGICAL_CMD 0x1c2a
-#define IM_OTHER_SCSI_CMD_CMD 0x241f
-
-/* unused, but supported, SCB commands */
-#define IM_GET_COMMAND_COMPLETE_STATUS_CMD 0x1c07 /* command status */
-#define IM_GET_POS_INFO_CMD 0x1c0a /* returns neat stuff */
-#define IM_READ_PREFETCH_CMD 0x1c31 /* caching controller only */
-#define IM_FOMAT_UNIT_CMD 0x1c16 /* format unit */
-#define IM_REASSIGN_BLOCK_CMD 0x1c18 /* in case of error */
-
-/*values to set bits in the enable word of SCB */
-#define IM_READ_CONTROL 0x8000
-#define IM_REPORT_TSB_ONLY_ON_ERROR 0x4000
-#define IM_RETRY_ENABLE 0x2000
-#define IM_POINTER_TO_LIST 0x1000
-#define IM_SUPRESS_EXCEPTION_SHORT 0x0400
-#define IM_BYPASS_BUFFER 0x0200
-#define IM_CHAIN_ON_NO_ERROR 0x0001
-
-/*TSB (Termination Status Block) structure */
-struct im_tsb {
- unsigned short end_status;
- unsigned short reserved1;
- unsigned long residual_byte_count;
- unsigned long sg_list_element_adr;
- unsigned short status_length;
- unsigned char dev_status;
- unsigned char cmd_status;
- unsigned char dev_error;
- unsigned char cmd_error;
- unsigned short reserved2;
- unsigned short reserved3;
- unsigned short low_of_last_scb_adr;
- unsigned short high_of_last_scb_adr;
-};
-
-/*subsystem uses interrupt request level 14 */
-#define IM_IRQ 14
-/*the SCSI-2 F/W adapter may use interrupt 11 instead */
-#define IM_IRQ_FW 11
-
-/* Model 95 has an additional alphanumeric display, which can be used
- to display SCSI-activities. 8595 models do not have any disk led, which
- makes this feature quite useful.
- The regular PS/2 disk led is turned on/off by bits 6,7 of system
- control port. */
-
-/* LED display-port (actually, last LED on display) */
-#define MOD95_LED_PORT 0x108
-/* system-control register of PS/2s with disk indicator */
-#define PS2_SYS_CTR 0x92
-/* activity displaying methods */
-#define LED_DISP 1
-#define LED_ADISP 2
-#define LED_ACTIVITY 4
-/* failed intr */
-#define CMD_FAIL 255
-
-/* The SCSI-ID(!) of the accessed SCSI-device is shown on PS/2-95 machines' LED
- displays. ldn is no longer displayed here, because the ldn mapping is now
- done dynamically and the ldn <-> pun,lun maps can be looked-up at boottime
- or during uptime in /proc/scsi/ibmmca/<host_no> in case of trouble,
- interest, debugging or just for having fun. The left number gives the
- host-adapter number and the right shows the accessed SCSI-ID. */
-
-/* display_mode is set by the ibmmcascsi= command line arg */
-static int display_mode = 0;
-/* set default adapter timeout */
-static unsigned int adapter_timeout = 45;
-/* for probing on feature-command: */
-static unsigned int global_command_error_excuse = 0;
-/* global setting by command line for adapter_speed */
-static int global_adapter_speed = 0; /* full speed by default */
-
-/* Panel / LED on, do it right for F/W addressing, too. adisplay will
- * just ignore ids>7, as the panel has only 7 digits available */
-#define PS2_DISK_LED_ON(ad,id) { if (display_mode & LED_DISP) { if (id>9) \
- outw((ad+48)|((id+55)<<8), MOD95_LED_PORT ); else \
- outw((ad+48)|((id+48)<<8), MOD95_LED_PORT ); } else \
- if (display_mode & LED_ADISP) { if (id<7) outb((char)(id+48),MOD95_LED_PORT+1+id); \
- outb((char)(ad+48), MOD95_LED_PORT); } \
- if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
- outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR); }
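-
-/* In LED_DISP mode, ad+48 and id+48 are the ASCII codes of the digits
-   '0'-'9' and id+55 yields 'A'-'F' for ids 10-15, so the panel shows the
-   host adapter number on the left and the SCSI-ID (a hex digit for ids
-   above 9) on the right; LED_ACTIVITY mode instead drives the regular
-   disk LED via bits 6 and 7 of the system control port. */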
-
-/* Panel / LED off */
-/* bug fixed, Dec 15, 1997, where | was replaced by & here */
-#define PS2_DISK_LED_OFF() { if (display_mode & LED_DISP) \
- outw(0x2020, MOD95_LED_PORT ); else if (display_mode & LED_ADISP) { \
- outl(0x20202020,MOD95_LED_PORT); outl(0x20202020,MOD95_LED_PORT+4); } \
- if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
- outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); }
-
-/* types of different supported hardware that goes to hostdata special */
-#define IBM_SCSI2_FW 0
-#define IBM_7568_WCACHE 1
-#define IBM_EXP_UNIT 2
-#define IBM_SCSI_WCACHE 3
-#define IBM_SCSI 4
-#define IBM_INTEGSCSI 5
-
-/* other special flags for hostdata structure */
-#define FORCED_DETECTION 100
-#define INTEGRATED_SCSI 101
-
-/* List of possible IBM-SCSI-adapters */
-static short ibmmca_id_table[] = {
- 0x8efc,
- 0x8efd,
- 0x8ef8,
- 0x8eff,
- 0x8efe,
- /* No entry for integrated SCSI, that's part of the register */
- 0
-};
-
-static const char *ibmmca_description[] = {
- "IBM SCSI-2 F/W Adapter", /* special = 0 */
- "IBM 7568 Industrial Computer SCSI Adapter w/Cache", /* special = 1 */
- "IBM Expansion Unit SCSI Controller", /* special = 2 */
- "IBM SCSI Adapter w/Cache", /* special = 3 */
- "IBM SCSI Adapter", /* special = 4 */
- "IBM Integrated SCSI Controller", /* special = 5 */
-};
-
-/* Max number of logical devices (can range from 0 to 14). 15 is the address
-of the adapter itself. */
-#define MAX_LOG_DEV 15
-
-/*local data for a logical device */
-struct logical_device {
- struct im_scb scb; /* SCSI-subsystem-control-block structure */
- struct im_tsb tsb; /* SCSI command complete status block structure */
- struct im_sge sge[16]; /* scatter gather list structure */
- unsigned char buf[256]; /* SCSI command return data buffer */
- Scsi_Cmnd *cmd; /* SCSI-command that is currently in progress */
- int device_type; /* type of the SCSI-device. See include/scsi/scsi.h
- for interpretation of the possible values */
- int block_length; /* blocksize of a particular logical SCSI-device */
- int cache_flag; /* 1 if this is uncached, 0 if cache is present for ldn */
- int retry_flag; /* 1 if adapter retry is disabled, 0 if enabled */
-};
-
-/* statistics of the driver during operations (for proc_info) */
-struct Driver_Statistics {
- /* SCSI statistics on the adapter */
- int ldn_access[MAX_LOG_DEV + 1]; /* total accesses on a ldn */
- int ldn_read_access[MAX_LOG_DEV + 1]; /* total read-access on a ldn */
- int ldn_write_access[MAX_LOG_DEV + 1]; /* total write-access on a ldn */
- int ldn_inquiry_access[MAX_LOG_DEV + 1]; /* total inquiries on a ldn */
- int ldn_modeselect_access[MAX_LOG_DEV + 1]; /* total mode selects on ldn */
- int scbs; /* short SCBs queued */
- int long_scbs; /* long SCBs queued */
- int total_accesses; /* total accesses on all ldns */
- int total_interrupts; /* total interrupts (should be
- same as total_accesses) */
- int total_errors; /* command completed with error */
- /* dynamical assignment statistics */
- int total_scsi_devices; /* number of physical pun,lun */
- int dyn_flag; /* flag showing dynamical mode */
- int dynamical_assignments; /* number of remappings of ldns */
- int ldn_assignments[MAX_LOG_DEV + 1]; /* number of remappings of each
- ldn */
-};
-
-/* data structure for each host adapter */
-struct ibmmca_hostdata {
- /* array of logical devices: */
- struct logical_device _ld[MAX_LOG_DEV + 1];
- /* array to convert (pun, lun) into logical device number: */
- unsigned char _get_ldn[16][8];
- /*array that contains the information about the physical SCSI-devices
- attached to this host adapter: */
- unsigned char _get_scsi[16][8];
- /* used only when checking logical devices: */
- int _local_checking_phase_flag;
- /* report received interrupt: */
- int _got_interrupt;
- /* report termination-status of SCSI-command: */
- int _stat_result;
- /* reset status (used only when doing reset): */
- int _reset_status;
- /* code of the last SCSI command (needed for panic info): */
- int _last_scsi_command[MAX_LOG_DEV + 1];
- /* identifier of the last SCSI-command type */
- int _last_scsi_type[MAX_LOG_DEV + 1];
- /* last blockcount */
- int _last_scsi_blockcount[MAX_LOG_DEV + 1];
-	/* last logical block address */
- unsigned long _last_scsi_logical_block[MAX_LOG_DEV + 1];
-	/* Counter that points to the next reassignable ldn for dynamical
-	   remapping. The default value is 7, which is the first reassignable
- number in the list at boottime: */
- int _next_ldn;
- /* Statistics-structure for this IBM-SCSI-host: */
- struct Driver_Statistics _IBM_DS;
-	/* This host adapter's pos registers pos2 through pos6 */
- unsigned int _pos[8];
-	/* assign a special variable that contains dedicated info about the
-	   adapter type */
- int _special;
- /* connector size on the MCA bus */
- int _connector_size;
- /* synchronous SCSI transfer rate bitpattern */
- int _adapter_speed;
-};
-
-/* macros to access host data structure */
-#define subsystem_pun(h) ((h)->this_id)
-#define subsystem_maxid(h) ((h)->max_id)
-#define ld(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_ld)
-#define get_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_ldn)
-#define get_scsi(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_scsi)
-#define local_checking_phase_flag(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_local_checking_phase_flag)
-#define got_interrupt(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_got_interrupt)
-#define stat_result(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_stat_result)
-#define reset_status(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_reset_status)
-#define last_scsi_command(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_command)
-#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
-#define last_scsi_blockcount(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_blockcount)
-#define last_scsi_logical_block(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_logical_block)
-#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
-#define next_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_next_ldn)
-#define IBM_DS(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_IBM_DS)
-#define special(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_special)
-#define subsystem_connector_size(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_connector_size)
-#define adapter_speed(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_adapter_speed)
-#define pos2(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[2])
-#define pos3(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[3])
-#define pos4(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[4])
-#define pos5(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[5])
-#define pos6(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[6])
-
-/* Define an arbitrary number as subsystem-marker-type. This number is, as
- described in the ANSI-SCSI-standard, not occupied by other device-types. */
-#define TYPE_IBM_SCSI_ADAPTER 0x2F
-
-/* Define 0xFF for no device type, because this type is not defined within
-   the ANSI-SCSI-standard; therefore, it can be used and should not cause any
- harm. */
-#define TYPE_NO_DEVICE 0xFF
-
-/* define medium-changer. If this is not defined previously, e.g. Linux
- 2.0.x, define this type here. */
-#ifndef TYPE_MEDIUM_CHANGER
-#define TYPE_MEDIUM_CHANGER 0x08
-#endif
-
-/* define possible operations for the immediate_assign command */
-#define SET_LDN 0
-#define REMOVE_LDN 1
-
-/* ldn which is used to probe the SCSI devices */
-#define PROBE_LDN 0
-
-/* reset status flag contents */
-#define IM_RESET_NOT_IN_PROGRESS 0
-#define IM_RESET_IN_PROGRESS 1
-#define IM_RESET_FINISHED_OK 2
-#define IM_RESET_FINISHED_FAIL 3
-#define IM_RESET_NOT_IN_PROGRESS_NO_INT 4
-#define IM_RESET_FINISHED_OK_NO_INT 5
-
-/* define undefined SCSI-command */
-#define NO_SCSI 0xffff
-
-/*-----------------------------------------------------------------------*/
-
-/* if this is nonzero, ibmmcascsi option has been passed to the kernel */
-static int io_port[IM_MAX_HOSTS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
-static int scsi_id[IM_MAX_HOSTS] = { 7, 7, 7, 7, 7, 7, 7, 7 };
-
-/* fill module parameters only when this define is present
-   (that is, kernel version 2.1.x) */
-#if defined(MODULE)
-static char *boot_options = NULL;
-module_param(boot_options, charp, 0);
-module_param_array(io_port, int, NULL, 0);
-module_param_array(scsi_id, int, NULL, 0);
-
-MODULE_LICENSE("GPL");
-#endif
-/*counter of concurrent disk read/writes, to turn on/off disk led */
-static int disk_rw_in_progress = 0;
-
-static unsigned int pos[8]; /* whole pos register-line for diagnosis */
-/* Taking into account the additions made by ZP Gu.
- * This now selects the preset value from the config file and
- * still accepts the 'normal' command-line option */
-#ifdef CONFIG_IBMMCA_SCSI_ORDER_STANDARD
-static char ibm_ansi_order = 1;
-#else
-static char ibm_ansi_order = 0;
-#endif
-
-static void issue_cmd(struct Scsi_Host *, unsigned long, unsigned char);
-static void internal_done(Scsi_Cmnd * cmd);
-static void check_devices(struct Scsi_Host *, int);
-static int immediate_assign(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, unsigned int);
-static int immediate_feature(struct Scsi_Host *, unsigned int, unsigned int);
-#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
-static int immediate_reset(struct Scsi_Host *, unsigned int);
-#endif
-static int device_inquiry(struct Scsi_Host *, int);
-static int read_capacity(struct Scsi_Host *, int);
-static int get_pos_info(struct Scsi_Host *);
-static char *ti_p(int);
-static char *ti_l(int);
-static char *ibmrate(unsigned int, int);
-static int probe_display(int);
-static int probe_bus_mode(struct Scsi_Host *);
-static int device_exists(struct Scsi_Host *, int, int *, int *);
-static int option_setup(char *);
-/* local functions needed for proc_info */
-static int ldn_access_load(struct Scsi_Host *, int);
-static int ldn_access_total_read_write(struct Scsi_Host *);
-
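-/* Note on the decoding used in the handler below: the upper nibble of the
-   interrupt status register carries the command result and the lower nibble
-   the logical device number, so e.g. an illustrative value of 0x15 would
-   mean IM_SCB_CMD_COMPLETED (0x10) for ldn 5. */
-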
-static irqreturn_t interrupt_handler(int irq, void *dev_id)
-{
- unsigned int intr_reg;
- unsigned int cmd_result;
- unsigned int ldn;
- unsigned long flags;
- Scsi_Cmnd *cmd;
- int lastSCSI;
- struct device *dev = dev_id;
- struct Scsi_Host *shpnt = dev_get_drvdata(dev);
-
- spin_lock_irqsave(shpnt->host_lock, flags);
-
- if(!(inb(IM_STAT_REG(shpnt)) & IM_INTR_REQUEST)) {
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_NONE;
- }
-
-	/* the reset function already did all the work; interrupts even got
-	   re-enabled on the subsystem, so just return */
- if ((reset_status(shpnt) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(shpnt) == IM_RESET_FINISHED_OK_NO_INT)) {
- reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
- }
-
- /*must wait for attention reg not busy, then send EOI to subsystem */
- while (1) {
- if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
- break;
- cpu_relax();
- }
-
- /*get command result and logical device */
- intr_reg = (unsigned char) (inb(IM_INTR_REG(shpnt)));
- cmd_result = intr_reg & 0xf0;
- ldn = intr_reg & 0x0f;
- /* get the last_scsi_command here */
- lastSCSI = last_scsi_command(shpnt)[ldn];
- outb(IM_EOI | ldn, IM_ATTN_REG(shpnt));
-
-	/*these should never happen (hw failure or a local programming bug) */
- if (!global_command_error_excuse) {
- switch (cmd_result) {
-			/* Prevent oopsing on error so the real reason gets shown */
- case IM_ADAPTER_HW_FAILURE:
- case IM_SOFTWARE_SEQUENCING_ERROR:
- case IM_CMD_ERROR:
- printk(KERN_ERR "IBM MCA SCSI: Fatal Subsystem ERROR!\n");
- printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(shpnt)[ldn].scb.enable);
- if (ld(shpnt)[ldn].cmd)
- printk("%ld/%ld,", (long) (scsi_bufflen(ld(shpnt)[ldn].cmd)), (long) (ld(shpnt)[ldn].scb.sys_buf_length));
- else
- printk("none,");
- if (ld(shpnt)[ldn].cmd)
- printk("Blocksize=%d", ld(shpnt)[ldn].scb.u2.blk.length);
- else
- printk("Blocksize=none");
- printk(", host=%p, ldn=0x%x\n", shpnt, ldn);
- if (ld(shpnt)[ldn].cmd) {
- printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(shpnt)[ldn], ld(shpnt)[ldn].scb.u2.blk.count);
- printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(shpnt)[ldn], ld(shpnt)[ldn].scb.u1.log_blk_adr);
- }
- printk(KERN_ERR "Reason given: %s\n", (cmd_result == IM_ADAPTER_HW_FAILURE) ? "HARDWARE FAILURE" : (cmd_result == IM_SOFTWARE_SEQUENCING_ERROR) ? "SOFTWARE SEQUENCING ERROR" : (cmd_result == IM_CMD_ERROR) ? "COMMAND ERROR" : "UNKNOWN");
- /* if errors appear, enter this section to give detailed info */
- printk(KERN_ERR "IBM MCA SCSI: Subsystem Error-Status follows:\n");
- printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(shpnt)[ldn]);
- printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(shpnt)));
- printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(shpnt)));
- printk(KERN_ERR " Interrupt Status Register...: %x\n", intr_reg);
- printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(shpnt)));
- if ((last_scsi_type(shpnt)[ldn] == IM_SCB) || (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB)) {
- printk(KERN_ERR " SCB-Command.................: %x\n", ld(shpnt)[ldn].scb.command);
- printk(KERN_ERR " SCB-Enable..................: %x\n", ld(shpnt)[ldn].scb.enable);
- printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(shpnt)[ldn].scb.u1.log_blk_adr);
- printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(shpnt)[ldn].scb.sys_buf_adr);
- printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(shpnt)[ldn].scb.sys_buf_length);
- printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(shpnt)[ldn].scb.tsb_adr);
- printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(shpnt)[ldn].scb.scb_chain_adr);
- printk(KERN_ERR " SCB-block count.............: %x\n", ld(shpnt)[ldn].scb.u2.blk.count);
- printk(KERN_ERR " SCB-block length............: %x\n", ld(shpnt)[ldn].scb.u2.blk.length);
- }
- printk(KERN_ERR " Send this report to the maintainer.\n");
- panic("IBM MCA SCSI: Fatal error message from the subsystem (0x%X,0x%X)!\n", lastSCSI, cmd_result);
- break;
- }
- } else {
-		/* The command error handling is made silent, but we tell the
-		 * calling function that the adapter reported an error. */
- switch (cmd_result) {
- case IM_ADAPTER_HW_FAILURE:
- case IM_SOFTWARE_SEQUENCING_ERROR:
- case IM_CMD_ERROR:
- global_command_error_excuse = CMD_FAIL;
- break;
- default:
- global_command_error_excuse = 0;
- break;
- }
- }
- /* if no panic appeared, increase the interrupt-counter */
- IBM_DS(shpnt).total_interrupts++;
- /*only for local checking phase */
- if (local_checking_phase_flag(shpnt)) {
- stat_result(shpnt) = cmd_result;
- got_interrupt(shpnt) = 1;
- reset_status(shpnt) = IM_RESET_FINISHED_OK;
- last_scsi_command(shpnt)[ldn] = NO_SCSI;
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
- }
- /* handling of commands coming from upper level of scsi driver */
- if (last_scsi_type(shpnt)[ldn] == IM_IMM_CMD) {
-		/* verify the ldn, and possibly handle the rare reset immediate command */
- if ((reset_status(shpnt) == IM_RESET_IN_PROGRESS) && (last_scsi_command(shpnt)[ldn] == IM_RESET_IMM_CMD)) {
- if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
- disk_rw_in_progress = 0;
- PS2_DISK_LED_OFF();
- reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
- } else {
- /*reset disk led counter, turn off disk led */
- disk_rw_in_progress = 0;
- PS2_DISK_LED_OFF();
- reset_status(shpnt) = IM_RESET_FINISHED_OK;
- }
- stat_result(shpnt) = cmd_result;
- last_scsi_command(shpnt)[ldn] = NO_SCSI;
- last_scsi_type(shpnt)[ldn] = 0;
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
- } else if (last_scsi_command(shpnt)[ldn] == IM_ABORT_IMM_CMD) {
- /* react on SCSI abort command */
-#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Interrupt from SCSI-abort.\n");
-#endif
- disk_rw_in_progress = 0;
- PS2_DISK_LED_OFF();
- cmd = ld(shpnt)[ldn].cmd;
- ld(shpnt)[ldn].cmd = NULL;
- if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE)
- cmd->result = DID_NO_CONNECT << 16;
- else
- cmd->result = DID_ABORT << 16;
- stat_result(shpnt) = cmd_result;
- last_scsi_command(shpnt)[ldn] = NO_SCSI;
- last_scsi_type(shpnt)[ldn] = 0;
- if (cmd->scsi_done)
- (cmd->scsi_done) (cmd); /* should be the internal_done */
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
- } else {
- disk_rw_in_progress = 0;
- PS2_DISK_LED_OFF();
- reset_status(shpnt) = IM_RESET_FINISHED_OK;
- stat_result(shpnt) = cmd_result;
- last_scsi_command(shpnt)[ldn] = NO_SCSI;
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
- }
- }
- last_scsi_command(shpnt)[ldn] = NO_SCSI;
- last_scsi_type(shpnt)[ldn] = 0;
- cmd = ld(shpnt)[ldn].cmd;
- ld(shpnt)[ldn].cmd = NULL;
-#ifdef IM_DEBUG_TIMEOUT
- if (cmd) {
- if ((cmd->target == TIMEOUT_PUN) && (cmd->device->lun == TIMEOUT_LUN)) {
-			spin_unlock_irqrestore(shpnt->host_lock, flags);
- printk("IBM MCA SCSI: Ignoring interrupt from pun=%x, lun=%x.\n", cmd->target, cmd->device->lun);
- return IRQ_HANDLED;
- }
- }
-#endif
- /*if no command structure, just return, else clear cmd */
- if (!cmd)
- {
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
- }
-
-#ifdef IM_DEBUG_INT
- printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(shpnt)[ldn].tsb.dev_status, ld(shpnt)[ldn].tsb.cmd_status, ld(shpnt)[ldn].tsb.dev_error, ld(shpnt)[ldn].tsb.cmd_error);
-#endif
- /*if this is end of media read/write, may turn off PS/2 disk led */
- if ((ld(shpnt)[ldn].device_type != TYPE_NO_LUN) && (ld(shpnt)[ldn].device_type != TYPE_NO_DEVICE)) {
- /* only access this, if there was a valid device addressed */
- if (--disk_rw_in_progress == 0)
- PS2_DISK_LED_OFF();
- }
-
-	/* IBM describes the status-mask to be 0x1e, but this does not conform
-	 * to the SCSI definition; I suppose the reason is that IBM
-	 * adapters do not support CMD_TERMINATED, TASK_SET_FULL and
-	 * ACA_ACTIVE as returned status-byte information. (ML) */
- if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
- cmd->result = (unsigned char) (ld(shpnt)[ldn].tsb.dev_status & 0x1e);
- IBM_DS(shpnt).total_errors++;
- } else
- cmd->result = 0;
- /* write device status into cmd->result, and call done function */
- if (lastSCSI == NO_SCSI) { /* unexpected interrupt :-( */
- cmd->result |= DID_BAD_INTR << 16;
- printk("IBM MCA SCSI: WARNING - Interrupt from non-pending SCSI-command!\n");
- } else /* things went right :-) */
- cmd->result |= DID_OK << 16;
- if (cmd->scsi_done)
- (cmd->scsi_done) (cmd);
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return IRQ_HANDLED;
-}
-
-static void issue_cmd(struct Scsi_Host *shpnt, unsigned long cmd_reg,
- unsigned char attn_reg)
-{
- unsigned long flags;
- /* must wait for attention reg not busy */
- while (1) {
- spin_lock_irqsave(shpnt->host_lock, flags);
- if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
- break;
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- }
- /* write registers and enable system interrupts */
- outl(cmd_reg, IM_CMD_REG(shpnt));
- outb(attn_reg, IM_ATTN_REG(shpnt));
- spin_unlock_irqrestore(shpnt->host_lock, flags);
-}
-
-static void internal_done(Scsi_Cmnd * cmd)
-{
- cmd->SCp.Status++;
- return;
-}
-
-/* SCSI-SCB-command for device_inquiry */
-static int device_inquiry(struct Scsi_Host *shpnt, int ldn)
-{
- int retr;
- struct im_scb *scb;
- struct im_tsb *tsb;
- unsigned char *buf;
-
- scb = &(ld(shpnt)[ldn].scb);
- tsb = &(ld(shpnt)[ldn].tsb);
- buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
- ld(shpnt)[ldn].tsb.dev_status = 0; /* prepare statusblock */
- for (retr = 0; retr < 3; retr++) {
- /* fill scb with inquiry command */
- scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT;
- scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(shpnt)[ldn] = IM_DEVICE_INQUIRY_CMD;
- last_scsi_type(shpnt)[ldn] = IM_SCB;
- scb->sys_buf_adr = isa_virt_to_bus(buf);
- scb->sys_buf_length = 255; /* maximum bufferlength gives max info */
- scb->tsb_adr = isa_virt_to_bus(tsb);
- /* issue scb to passed ldn, and busy wait for interrupt */
- got_interrupt(shpnt) = 0;
- issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
- while (!got_interrupt(shpnt))
- barrier();
-
-		/*if command successful, return */
- if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
- return 1;
- }
- /*if all three retries failed, return "no device at this ldn" */
- if (retr >= 3)
- return 0;
- else
- return 1;
-}
-
-static int read_capacity(struct Scsi_Host *shpnt, int ldn)
-{
- int retr;
- struct im_scb *scb;
- struct im_tsb *tsb;
- unsigned char *buf;
-
- scb = &(ld(shpnt)[ldn].scb);
- tsb = &(ld(shpnt)[ldn].tsb);
- buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
- ld(shpnt)[ldn].tsb.dev_status = 0;
- for (retr = 0; retr < 3; retr++) {
- /*fill scb with read capacity command */
- scb->command = IM_READ_CAPACITY_CMD;
- scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(shpnt)[ldn] = IM_READ_CAPACITY_CMD;
- last_scsi_type(shpnt)[ldn] = IM_SCB;
- scb->sys_buf_adr = isa_virt_to_bus(buf);
- scb->sys_buf_length = 8;
- scb->tsb_adr = isa_virt_to_bus(tsb);
- /*issue scb to passed ldn, and busy wait for interrupt */
- got_interrupt(shpnt) = 0;
- issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
- while (!got_interrupt(shpnt))
- barrier();
-
- /*if got capacity, get block length and return one device found */
- if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
- return 1;
- }
- /*if all three retries failed, return "no device at this ldn" */
- if (retr >= 3)
- return 0;
- else
- return 1;
-}
-
-static int get_pos_info(struct Scsi_Host *shpnt)
-{
- int retr;
- struct im_scb *scb;
- struct im_tsb *tsb;
- unsigned char *buf;
-
- scb = &(ld(shpnt)[MAX_LOG_DEV].scb);
- tsb = &(ld(shpnt)[MAX_LOG_DEV].tsb);
- buf = (unsigned char *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
- ld(shpnt)[MAX_LOG_DEV].tsb.dev_status = 0;
- for (retr = 0; retr < 3; retr++) {
- /*fill scb with get_pos_info command */
- scb->command = IM_GET_POS_INFO_CMD;
- scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
- last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_SCB;
- scb->sys_buf_adr = isa_virt_to_bus(buf);
- if (special(shpnt) == IBM_SCSI2_FW)
- scb->sys_buf_length = 256; /* get all info from F/W adapter */
- else
- scb->sys_buf_length = 18; /* get exactly 18 bytes for other SCSI */
- scb->tsb_adr = isa_virt_to_bus(tsb);
- /*issue scb to ldn=15, and busy wait for interrupt */
- got_interrupt(shpnt) = 0;
- issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
-
- /* FIXME: timeout */
- while (!got_interrupt(shpnt))
- barrier();
-
- /*if got POS-stuff, get block length and return one device found */
- if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
- return 1;
- }
- /* if all three retries failed, return "no device at this ldn" */
- if (retr >= 3)
- return 0;
- else
- return 1;
-}
-
-/* SCSI-immediate-command for assign. This function maps/unmaps specific
-   ldn-numbers on SCSI (PUN,LUN). It is needed for presetting of the
-   subsystem and for dynamical remapping of ldns. */
-static int immediate_assign(struct Scsi_Host *shpnt, unsigned int pun,
- unsigned int lun, unsigned int ldn,
- unsigned int operation)
-{
- int retr;
- unsigned long imm_cmd;
-
- for (retr = 0; retr < 3; retr++) {
- /* select mutation level of the SCSI-adapter */
- switch (special(shpnt)) {
- case IBM_SCSI2_FW:
- imm_cmd = (unsigned long) (IM_ASSIGN_IMM_CMD);
- imm_cmd |= (unsigned long) ((lun & 7) << 24);
- imm_cmd |= (unsigned long) ((operation & 1) << 23);
- imm_cmd |= (unsigned long) ((pun & 7) << 20) | ((pun & 8) << 24);
- imm_cmd |= (unsigned long) ((ldn & 15) << 16);
- break;
- default:
- imm_cmd = inl(IM_CMD_REG(shpnt));
- imm_cmd &= (unsigned long) (0xF8000000); /* keep reserved bits */
- imm_cmd |= (unsigned long) (IM_ASSIGN_IMM_CMD);
- imm_cmd |= (unsigned long) ((lun & 7) << 24);
- imm_cmd |= (unsigned long) ((operation & 1) << 23);
- imm_cmd |= (unsigned long) ((pun & 7) << 20);
- imm_cmd |= (unsigned long) ((ldn & 15) << 16);
- break;
- }
- last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
- last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
- got_interrupt(shpnt) = 0;
- issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
- while (!got_interrupt(shpnt))
- barrier();
-
-		/*if command successful, return */
- if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
- return 1;
- }
- if (retr >= 3)
- return 0;
- else
- return 1;
-}
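-
-/* Illustrative encoding for the non-F/W branch above (example values, not
-   from a real trace): bits 26-24 hold the lun, bit 23 the operation
-   (0 = SET_LDN, 1 = REMOVE_LDN), bits 22-20 the pun, bits 19-16 the ldn and
-   bits 15-0 IM_ASSIGN_IMM_CMD (0x040e), with bits 31-27 preserved from the
-   command register. Assigning pun 3, lun 0 to ldn 5 with SET_LDN therefore
-   yields 0x0035040e in the lower 28 bits. */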
-
-static int immediate_feature(struct Scsi_Host *shpnt, unsigned int speed, unsigned int timeout)
-{
- int retr;
- unsigned long imm_cmd;
-
- for (retr = 0; retr < 3; retr++) {
- /* select mutation level of the SCSI-adapter */
- imm_cmd = IM_FEATURE_CTR_IMM_CMD;
- imm_cmd |= (unsigned long) ((speed & 0x7) << 29);
- imm_cmd |= (unsigned long) ((timeout & 0x1fff) << 16);
- last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
- last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
- got_interrupt(shpnt) = 0;
- /* we need to run into command errors in order to probe for the
- * right speed! */
- global_command_error_excuse = 1;
- issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
-
- /* FIXME: timeout */
- while (!got_interrupt(shpnt))
- barrier();
- if (global_command_error_excuse == CMD_FAIL) {
- global_command_error_excuse = 0;
- return 2;
- } else
- global_command_error_excuse = 0;
-		/*if command successful, return */
- if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
- return 1;
- }
- if (retr >= 3)
- return 0;
- else
- return 1;
-}
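-
-/* Illustrative encoding for the feature command built above (example values,
-   not from a real trace): bits 31-29 carry the synchronous speed code,
-   bits 28-16 the adapter timeout value and bits 15-0 IM_FEATURE_CTR_IMM_CMD
-   (0x040c). Speed code 0 together with the default adapter_timeout of 45
-   encodes as 0x002d040c. */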
-
-#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
-static int immediate_reset(struct Scsi_Host *shpnt, unsigned int ldn)
-{
- int retries;
- int ticks;
- unsigned long imm_command;
-
- for (retries = 0; retries < 3; retries++) {
- imm_command = inl(IM_CMD_REG(shpnt));
- imm_command &= (unsigned long) (0xFFFF0000); /* keep reserved bits */
- imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
- last_scsi_command(shpnt)[ldn] = IM_RESET_IMM_CMD;
- last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
- got_interrupt(shpnt) = 0;
- reset_status(shpnt) = IM_RESET_IN_PROGRESS;
- issue_cmd(shpnt, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
- ticks = IM_RESET_DELAY * HZ;
- while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks) {
- udelay((1 + 999 / HZ) * 1000);
- barrier();
- }
- /* if reset did not complete, just complain */
- if (!ticks) {
- printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
- reset_status(shpnt) = IM_RESET_FINISHED_OK;
- /* did not work, finish */
- return 1;
- }
-		/*if command successful, return */
- if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
- return 1;
- }
- if (retries >= 3)
- return 0;
- else
- return 1;
-}
-#endif
-
-/* type-interpreter for physical device numbers */
-static char *ti_p(int dev)
-{
- switch (dev) {
- case TYPE_IBM_SCSI_ADAPTER:
- return ("A");
- case TYPE_DISK:
- return ("D");
- case TYPE_TAPE:
- return ("T");
- case TYPE_PROCESSOR:
- return ("P");
- case TYPE_WORM:
- return ("W");
- case TYPE_ROM:
- return ("R");
- case TYPE_SCANNER:
- return ("S");
- case TYPE_MOD:
- return ("M");
- case TYPE_MEDIUM_CHANGER:
- return ("C");
- case TYPE_NO_LUN:
- return ("+"); /* show NO_LUN */
- }
- return ("-"); /* TYPE_NO_DEVICE and others */
-}
-
-/* interpreter for logical device numbers (ldn) */
-static char *ti_l(int val)
-{
- const char hex[16] = "0123456789abcdef";
- static char answer[2];
-
- answer[1] = (char) (0x0);
- if (val <= MAX_LOG_DEV)
- answer[0] = hex[val];
- else
- answer[0] = '-';
- return (char *) &answer;
-}
-
-/* transfers bitpattern of the feature command to values in MHz */
-static char *ibmrate(unsigned int speed, int i)
-{
- switch (speed) {
- case 0:
- return i ? "5.00" : "10.00";
- case 1:
- return i ? "4.00" : "8.00";
- case 2:
- return i ? "3.33" : "6.66";
- case 3:
- return i ? "2.86" : "5.00";
- case 4:
- return i ? "2.50" : "4.00";
- case 5:
- return i ? "2.22" : "3.10";
- case 6:
- return i ? "2.00" : "2.50";
- case 7:
- return i ? "1.82" : "2.00";
- }
- return "---";
-}
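-
-/* Usage note for ibmrate() above: the second argument selects the column,
-   i == 0 (the SCSI-2 F/W adapter, special 0) returns the faster figure
-   (e.g. "10.00" for speed code 0), while any other value returns the figure
-   for the remaining adapter types (e.g. "5.00"). */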
-
-static int probe_display(int what)
-{
- static int rotator = 0;
- const char rotor[] = "|/-\\";
-
- if (!(display_mode & LED_DISP))
- return 0;
- if (!what) {
- outl(0x20202020, MOD95_LED_PORT);
- outl(0x20202020, MOD95_LED_PORT + 4);
- } else {
- outb('S', MOD95_LED_PORT + 7);
- outb('C', MOD95_LED_PORT + 6);
- outb('S', MOD95_LED_PORT + 5);
- outb('I', MOD95_LED_PORT + 4);
- outb('i', MOD95_LED_PORT + 3);
- outb('n', MOD95_LED_PORT + 2);
- outb('i', MOD95_LED_PORT + 1);
- outb((char) (rotor[rotator]), MOD95_LED_PORT);
- rotator++;
- if (rotator > 3)
- rotator = 0;
- }
- return 0;
-}
-
-static int probe_bus_mode(struct Scsi_Host *shpnt)
-{
- struct im_pos_info *info;
- int num_bus = 0;
- int ldn;
-
- info = (struct im_pos_info *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
- if (get_pos_info(shpnt)) {
- if (info->connector_size & 0xf000)
- subsystem_connector_size(shpnt) = 16;
- else
- subsystem_connector_size(shpnt) = 32;
- num_bus |= (info->pos_4b & 8) >> 3;
- for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- if ((special(shpnt) == IBM_SCSI_WCACHE) || (special(shpnt) == IBM_7568_WCACHE)) {
- if (!((info->cache_stat >> ldn) & 1))
- ld(shpnt)[ldn].cache_flag = 0;
- }
- if (!((info->retry_stat >> ldn) & 1))
- ld(shpnt)[ldn].retry_flag = 0;
- }
-#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: SCSI-Cache bits: ");
- for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- printk("%d", ld(shpnt)[ldn].cache_flag);
- }
- printk("\nIBM MCA SCSI: SCSI-Retry bits: ");
- for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- printk("%d", ld(shpnt)[ldn].retry_flag);
- }
- printk("\n");
-#endif
- }
- return num_bus;
-}
-
-/* probing scsi devices */
-static void check_devices(struct Scsi_Host *shpnt, int adaptertype)
-{
- int id, lun, ldn, ticks;
- int count_devices; /* local counter for connected device */
- int max_pun;
- int num_bus;
- int speedrun; /* local adapter_speed check variable */
-
- /* assign default values to certain variables */
- ticks = 0;
- count_devices = 0;
- IBM_DS(shpnt).dyn_flag = 0; /* normally no need for dynamical ldn management */
- IBM_DS(shpnt).total_errors = 0; /* set errorcounter to 0 */
- next_ldn(shpnt) = 7; /* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
-
- /* initialize the very important driver-informational arrays/structs */
- memset(ld(shpnt), 0, sizeof(ld(shpnt)));
- for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
-		last_scsi_command(shpnt)[ldn] = NO_SCSI;	/* clear last SCSI-command storage */
- last_scsi_type(shpnt)[ldn] = 0;
- ld(shpnt)[ldn].cache_flag = 1;
- ld(shpnt)[ldn].retry_flag = 1;
- }
- memset(get_ldn(shpnt), TYPE_NO_DEVICE, sizeof(get_ldn(shpnt))); /* this is essential ! */
- memset(get_scsi(shpnt), TYPE_NO_DEVICE, sizeof(get_scsi(shpnt))); /* this is essential ! */
- for (lun = 0; lun < 8; lun++) {
- /* mark the adapter at its pun on all luns */
- get_scsi(shpnt)[subsystem_pun(shpnt)][lun] = TYPE_IBM_SCSI_ADAPTER;
- get_ldn(shpnt)[subsystem_pun(shpnt)][lun] = MAX_LOG_DEV; /* make sure, the subsystem
- ldn is active for all
- luns. */
- }
- probe_display(0); /* Supercool display usage during SCSI-probing. */
- /* This makes sense, when booting without any */
- /* monitor connected on model XX95. */
-
- /* STEP 1: */
- adapter_speed(shpnt) = global_adapter_speed;
- speedrun = adapter_speed(shpnt);
- while (immediate_feature(shpnt, speedrun, adapter_timeout) == 2) {
- probe_display(1);
- if (speedrun == 7)
- panic("IBM MCA SCSI: Cannot set Synchronous-Transfer-Rate!\n");
- speedrun++;
- if (speedrun > 7)
- speedrun = 7;
- }
- adapter_speed(shpnt) = speedrun;
- /* Get detailed information about the current adapter, necessary for
- * device operations: */
- num_bus = probe_bus_mode(shpnt);
-
- /* num_bus contains only valid data for the F/W adapter! */
- if (adaptertype == IBM_SCSI2_FW) { /* F/W SCSI adapter: */
- /* F/W adapter PUN-space extension evaluation: */
- if (num_bus) {
- printk(KERN_INFO "IBM MCA SCSI: Separate bus mode (wide-addressing enabled)\n");
- subsystem_maxid(shpnt) = 16;
- } else {
- printk(KERN_INFO "IBM MCA SCSI: Combined bus mode (wide-addressing disabled)\n");
- subsystem_maxid(shpnt) = 8;
- }
- printk(KERN_INFO "IBM MCA SCSI: Sync.-Rate (F/W: 20, Int.: 10, Ext.: %s) MBytes/s\n", ibmrate(speedrun, adaptertype));
- } else /* all other IBM SCSI adapters: */
- printk(KERN_INFO "IBM MCA SCSI: Synchronous-SCSI-Transfer-Rate: %s MBytes/s\n", ibmrate(speedrun, adaptertype));
-
- /* assign correct PUN device space */
- max_pun = subsystem_maxid(shpnt);
-
-#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Current SCSI-host index: %d\n", shpnt);
- printk("IBM MCA SCSI: Removing default logical SCSI-device mapping.");
-#else
- printk(KERN_INFO "IBM MCA SCSI: Dev. Order: %s, Mapping (takes <2min): ", (ibm_ansi_order) ? "ANSI" : "New");
-#endif
- for (ldn = 0; ldn < MAX_LOG_DEV; ldn++) {
- probe_display(1);
-#ifdef IM_DEBUG_PROBE
- printk(".");
-#endif
- immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN); /* remove ldn (wherever) */
- }
- lun = 0; /* default lun is 0 */
-#ifndef IM_DEBUG_PROBE
- printk("cleared,");
-#endif
- /* STEP 2: */
-#ifdef IM_DEBUG_PROBE
- printk("\nIBM MCA SCSI: Scanning SCSI-devices.");
-#endif
- for (id = 0; id < max_pun; id++)
-#ifdef CONFIG_SCSI_MULTI_LUN
- for (lun = 0; lun < 8; lun++)
-#endif
- {
- probe_display(1);
-#ifdef IM_DEBUG_PROBE
- printk(".");
-#endif
- if (id != subsystem_pun(shpnt)) {
- /* if pun is not the adapter: */
- /* set ldn=0 to pun,lun */
- immediate_assign(shpnt, id, lun, PROBE_LDN, SET_LDN);
- if (device_inquiry(shpnt, PROBE_LDN)) { /* probe device */
- get_scsi(shpnt)[id][lun] = (unsigned char) (ld(shpnt)[PROBE_LDN].buf[0]);
- /* entry, even for NO_LUN */
- if (ld(shpnt)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
-					count_devices++;	/* an existing device is found */
- }
- /* remove ldn */
- immediate_assign(shpnt, id, lun, PROBE_LDN, REMOVE_LDN);
- }
- }
-#ifndef IM_DEBUG_PROBE
- printk("scanned,");
-#endif
- /* STEP 3: */
-#ifdef IM_DEBUG_PROBE
- printk("\nIBM MCA SCSI: Mapping SCSI-devices.");
-#endif
- ldn = 0;
- lun = 0;
-#ifdef CONFIG_SCSI_MULTI_LUN
- for (lun = 0; lun < 8 && ldn < MAX_LOG_DEV; lun++)
-#endif
- for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
- probe_display(1);
-#ifdef IM_DEBUG_PROBE
- printk(".");
-#endif
- if (id != subsystem_pun(shpnt)) {
- if (get_scsi(shpnt)[id][lun] != TYPE_NO_LUN && get_scsi(shpnt)[id][lun] != TYPE_NO_DEVICE) {
-					/* Only map accepted types. Always enter for
-					   lun == 0 so that the ldn mapping has no gaps for ldn < 7. */
- immediate_assign(shpnt, id, lun, ldn, SET_LDN);
- get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
- if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
-#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
- printk("resetting device at ldn=%x ... ", ldn);
- immediate_reset(shpnt, ldn);
-#endif
- ldn++;
- } else {
- /* device vanished, probably because we don't know how to
- * handle it or because it has problems */
- if (lun > 0) {
- /* remove mapping */
- get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
- immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);
- } else
- ldn++;
- }
- } else if (lun == 0) {
- /* map lun == 0, even if no device exists */
- immediate_assign(shpnt, id, lun, ldn, SET_LDN);
- get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
- ldn++;
- }
- }
- }
- /* STEP 4: */
-
- /* map remaining ldns to non-existing devices */
- for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++)
- for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
- if (get_scsi(shpnt)[id][lun] == TYPE_NO_LUN || get_scsi(shpnt)[id][lun] == TYPE_NO_DEVICE) {
- probe_display(1);
-				/* Map remaining ldns only to NON-existing pun,lun
-				   combinations to make sure an inquiry will fail.
-				   For MULTI_LUN, this is needed to avoid autonomous
-				   SCSI remapping by the adapter. */
- immediate_assign(shpnt, id, lun, ldn, SET_LDN);
- get_ldn(shpnt)[id][lun] = ldn;
- ldn++;
- }
- }
-#ifndef IM_DEBUG_PROBE
- printk("mapped.");
-#endif
- printk("\n");
-#ifdef IM_DEBUG_PROBE
- if (ibm_ansi_order)
- printk("IBM MCA SCSI: Device order: IBM/ANSI (pun=7 is first).\n");
- else
- printk("IBM MCA SCSI: Device order: New Industry Standard (pun=0 is first).\n");
-#endif
-
-#ifdef IM_DEBUG_PROBE
- /* Show the physical and logical mapping during boot. */
- printk("IBM MCA SCSI: Determined SCSI-device-mapping:\n");
- printk(" Physical SCSI-Device Map Logical SCSI-Device Map\n");
- printk("ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n");
- for (id = 0; id < max_pun; id++) {
- printk("%2d ", id);
- for (lun = 0; lun < 8; lun++)
- printk("%2s ", ti_p(get_scsi(shpnt)[id][lun]));
- printk(" %2d ", id);
- for (lun = 0; lun < 8; lun++)
- printk("%2s ", ti_l(get_ldn(shpnt)[id][lun]));
- printk("\n");
- }
-#endif
-
- /* assign total number of found SCSI-devices to the statistics struct */
- IBM_DS(shpnt).total_scsi_devices = count_devices;
-
-	/* record, for the output in the /proc filesystem, whether the
-	   configuration of SCSI devices makes dynamic reassignment necessary */
- if (count_devices >= MAX_LOG_DEV)
- IBM_DS(shpnt).dyn_flag = 1; /* dynamical assignment is necessary */
- else
- IBM_DS(shpnt).dyn_flag = 0; /* dynamical assignment is not necessary */
-
-	/* If no SCSI devices were assigned, print a warning. */
- if (ldn == 0)
- printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n");
-
- /* reset the counters for statistics on the current adapter */
- IBM_DS(shpnt).scbs = 0;
- IBM_DS(shpnt).long_scbs = 0;
- IBM_DS(shpnt).total_accesses = 0;
- IBM_DS(shpnt).total_interrupts = 0;
- IBM_DS(shpnt).dynamical_assignments = 0;
- memset(IBM_DS(shpnt).ldn_access, 0x0, sizeof(IBM_DS(shpnt).ldn_access));
- memset(IBM_DS(shpnt).ldn_read_access, 0x0, sizeof(IBM_DS(shpnt).ldn_read_access));
- memset(IBM_DS(shpnt).ldn_write_access, 0x0, sizeof(IBM_DS(shpnt).ldn_write_access));
- memset(IBM_DS(shpnt).ldn_inquiry_access, 0x0, sizeof(IBM_DS(shpnt).ldn_inquiry_access));
- memset(IBM_DS(shpnt).ldn_modeselect_access, 0x0, sizeof(IBM_DS(shpnt).ldn_modeselect_access));
- memset(IBM_DS(shpnt).ldn_assignments, 0x0, sizeof(IBM_DS(shpnt).ldn_assignments));
- probe_display(0);
- return;
-}
-
-static int device_exists(struct Scsi_Host *shpnt, int ldn, int *block_length, int *device_type)
-{
- unsigned char *buf;
- /* if no valid device found, return immediately with 0 */
- if (!(device_inquiry(shpnt, ldn)))
- return 0;
- buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
- if (*buf == TYPE_ROM) {
- *device_type = TYPE_ROM;
- *block_length = 2048; /* (standard blocksize for yellow-/red-book) */
- return 1;
- }
- if (*buf == TYPE_WORM) {
- *device_type = TYPE_WORM;
- *block_length = 2048;
- return 1;
- }
- if (*buf == TYPE_DISK) {
- *device_type = TYPE_DISK;
- if (read_capacity(shpnt, ldn)) {
- *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
- return 1;
- } else
- return 0;
- }
- if (*buf == TYPE_MOD) {
- *device_type = TYPE_MOD;
- if (read_capacity(shpnt, ldn)) {
- *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
- return 1;
- } else
- return 0;
- }
- if (*buf == TYPE_TAPE) {
- *device_type = TYPE_TAPE;
- *block_length = 0; /* not in use (setting by mt and mtst in op.) */
- return 1;
- }
- if (*buf == TYPE_PROCESSOR) {
- *device_type = TYPE_PROCESSOR;
- *block_length = 0; /* they set their stuff on drivers */
- return 1;
- }
- if (*buf == TYPE_SCANNER) {
- *device_type = TYPE_SCANNER;
- *block_length = 0; /* they set their stuff on drivers */
- return 1;
- }
- if (*buf == TYPE_MEDIUM_CHANGER) {
- *device_type = TYPE_MEDIUM_CHANGER;
- *block_length = 0; /* One never knows, what to expect on a medium
- changer device. */
- return 1;
- }
- return 0;
-}
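
Sketch only, not part of the patch: both the TYPE_DISK and TYPE_MOD branches
above assemble the block length big-endian from bytes 4-7 of the READ CAPACITY
response. Written out in isolation, with 'buf' standing in for the 8-byte
response buffer:

	/* decode the big-endian block length from a READ CAPACITY response,
	 * as device_exists() does above */
	static unsigned int read_capacity_block_length(const unsigned char *buf)
	{
		return ((unsigned int) buf[4] << 24) | (buf[5] << 16) |
		       (buf[6] << 8) | buf[7];
	}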
-
-static void internal_ibmmca_scsi_setup(char *str, int *ints)
-{
- int i, j, io_base, id_base;
- char *token;
-
- io_base = 0;
- id_base = 0;
- if (str) {
- j = 0;
- while ((token = strsep(&str, ",")) != NULL) {
- if (!strcmp(token, "activity"))
- display_mode |= LED_ACTIVITY;
- if (!strcmp(token, "display"))
- display_mode |= LED_DISP;
- if (!strcmp(token, "adisplay"))
- display_mode |= LED_ADISP;
- if (!strcmp(token, "normal"))
- ibm_ansi_order = 0;
- if (!strcmp(token, "ansi"))
- ibm_ansi_order = 1;
- if (!strcmp(token, "fast"))
- global_adapter_speed = 0;
- if (!strcmp(token, "medium"))
- global_adapter_speed = 4;
- if (!strcmp(token, "slow"))
- global_adapter_speed = 7;
- if ((*token == '-') || (isdigit(*token))) {
- if (!(j % 2) && (io_base < IM_MAX_HOSTS))
- io_port[io_base++] = simple_strtoul(token, NULL, 0);
- if ((j % 2) && (id_base < IM_MAX_HOSTS))
- scsi_id[id_base++] = simple_strtoul(token, NULL, 0);
- j++;
- }
- }
- } else if (ints) {
- for (i = 0; i < IM_MAX_HOSTS && 2 * i + 2 < ints[0]; i++) {
- io_port[i] = ints[2 * i + 2];
- scsi_id[i] = ints[2 * i + 2];
- }
- }
- return;
-}
-
-#if 0
- FIXME NEED TO MOVE TO SYSFS
-
-static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
-{
- struct Scsi_Host *shpnt;
- int len, speciale, connectore, k;
- unsigned int pos[8];
- unsigned long flags;
- struct Scsi_Host *dev = dev_id;
-
- spin_lock_irqsave(dev->host_lock, flags);
-
- shpnt = dev; /* assign host-structure to local pointer */
- len = 0; /* set filled text-buffer index to 0 */
- /* get the _special contents of the hostdata structure */
- speciale = ((struct ibmmca_hostdata *) shpnt->hostdata)->_special;
- connectore = ((struct ibmmca_hostdata *) shpnt->hostdata)->_connector_size;
- for (k = 2; k < 4; k++)
- pos[k] = ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k];
- if (speciale == FORCED_DETECTION) { /* forced detection */
- len += sprintf(buf + len,
- "Adapter category: forced detected\n" "***************************************\n" "*** Forced detected SCSI Adapter ***\n" "*** No chip-information available ***\n" "***************************************\n");
- } else if (speciale == INTEGRATED_SCSI) {
- /* if the integrated subsystem has been found automatically: */
- len += sprintf(buf + len,
- "Adapter category: integrated\n" "Chip revision level: %d\n" "Chip status: %s\n" "8 kByte NVRAM status: %s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 1) ? "enabled" : "disabled", (pos[2] & 2) ? "locked" : "accessible");
- } else if ((speciale >= 0) && (speciale < ARRAY_SIZE(subsys_list))) {
- /* if the subsystem is a slot adapter */
- len += sprintf(buf + len, "Adapter category: slot-card\n" "ROM Segment Address: ");
- if ((pos[2] & 0xf0) == 0xf0)
- len += sprintf(buf + len, "off\n");
- else
- len += sprintf(buf + len, "0x%x\n", ((pos[2] & 0xf0) << 13) + 0xc0000);
- len += sprintf(buf + len, "Chip status: %s\n", (pos[2] & 1) ? "enabled" : "disabled");
- len += sprintf(buf + len, "Adapter I/O Offset: 0x%x\n", ((pos[2] & 0x0e) << 2));
- } else {
- len += sprintf(buf + len, "Adapter category: unknown\n");
- }
- /* common subsystem information to write to the slotn file */
- len += sprintf(buf + len, "Subsystem PUN: %d\n", shpnt->this_id);
- len += sprintf(buf + len, "I/O base address range: 0x%x-0x%x\n", (unsigned int) (shpnt->io_port), (unsigned int) (shpnt->io_port + 7));
- len += sprintf(buf + len, "MCA-slot size: %d bits", connectore);
-	/* Now make sure the buffer length is divisible by 4 to avoid
-	 * paging problems of the buffer. */
- while (len % sizeof(int) != (sizeof(int) - 1))
- len += sprintf(buf + len, " ");
- len += sprintf(buf + len, "\n");
-
- spin_unlock_irqrestore(shpnt->host_lock, flags);
-
- return len;
-}
-#endif
-
-static struct scsi_host_template ibmmca_driver_template = {
- .proc_name = "ibmmca",
- .proc_info = ibmmca_proc_info,
- .name = "IBM SCSI-Subsystem",
- .queuecommand = ibmmca_queuecommand,
- .eh_abort_handler = ibmmca_abort,
- .eh_host_reset_handler = ibmmca_host_reset,
- .bios_param = ibmmca_biosparam,
- .can_queue = 16,
- .this_id = 7,
- .sg_tablesize = 16,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING,
-};
-
-static int ibmmca_probe(struct device *dev)
-{
- struct Scsi_Host *shpnt;
- int port, id, i, j, k, irq, enabled, ret = -EINVAL;
- struct mca_device *mca_dev = to_mca_device(dev);
- const char *description = ibmmca_description[mca_dev->index];
-
-	/* First of all, print the version number of the driver. This is
-	 * important to allow better user bug reports in case of problems
-	 * already showing up during MCA_bus probing. */
- printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION);
- /* The POS2-register of all PS/2 model SCSI-subsystems has the following
- * interpretation of bits:
- * Bit 7 - 4 : Chip Revision ID (Release)
- * Bit 3 - 2 : Reserved
- * Bit 1 : 8k NVRAM Disabled
- * Bit 0 : Chip Enable (EN-Signal)
- * The POS3-register is interpreted as follows:
- * Bit 7 - 5 : SCSI ID
- * Bit 4 : Reserved = 0
- * Bit 3 - 0 : Reserved = 0
- * (taken from "IBM, PS/2 Hardware Interface Technical Reference, Common
- * Interfaces (1991)").
-	 * In short, this means that IBM PS/2 machines support only a single
-	 * subsystem by default. The slot-adapters must have another
- * configuration on pos2. Here, one has to assume the following
- * things for POS2-register:
- * Bit 7 - 4 : Chip Revision ID (Release)
- * Bit 3 - 1 : port offset factor
- * Bit 0 : Chip Enable (EN-Signal)
-	 * As I found a patch here that forced the I/O registers to 0x3540
-	 * (there was a 0x05 in POS2 on a model 56), I assume that port 0x3540
-	 * must be fixed for integrated SCSI-controllers.
- * Ok, this discovery leads to the following implementation: (M.Lang) */
-
- /* first look for the IBM SCSI integrated subsystem on the motherboard */
- for (j = 0; j < 8; j++) /* read the pos-information */
- pos[j] = mca_device_read_pos(mca_dev, j);
- id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */
- enabled = (pos[2] &0x01);
- if (!enabled) {
- printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
- printk(KERN_WARNING " SCSI-operations may not work.\n");
- }
-
-	/* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but
-	 * checking only pos2 and pos3 is not completely sufficient if the
-	 * settings of all surrounding pos registers are ignored. */
-	/* Therefore, the following if statement is used to make sure we see a
-	 * real integrated onboard SCSI-interface and not internal system
-	 * information, which gets mapped to some pos registers on models 95xx. */
- if (mca_dev->slot == MCA_INTEGSCSI &&
- ((!pos[0] && !pos[1] && pos[2] > 0 &&
- pos[3] > 0 && !pos[4] && !pos[5] &&
- !pos[6] && !pos[7]) ||
- (pos[0] == 0xff && pos[1] == 0xff &&
- pos[2] < 0xff && pos[3] < 0xff &&
- pos[4] == 0xff && pos[5] == 0xff &&
- pos[6] == 0xff && pos[7] == 0xff))) {
- irq = IM_IRQ;
- port = IM_IO_PORT;
- } else {
- irq = IM_IRQ;
- port = IM_IO_PORT + ((pos[2] &0x0e) << 2);
- if ((mca_dev->index == IBM_SCSI2_FW) && (pos[6] != 0)) {
- printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
- printk(KERN_ERR " Impossible to determine adapter PUN!\n");
- printk(KERN_ERR " Guessing adapter PUN = 7.\n");
- id = 7;
- } else {
- id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
- if (mca_dev->index == IBM_SCSI2_FW) {
- id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
- * for F/W adapters */
- }
- }
- if ((mca_dev->index == IBM_SCSI2_FW) &&
- (pos[4] & 0x01) && (pos[6] == 0)) {
- /* IRQ11 is used by SCSI-2 F/W Adapter/A */
- printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
- irq = IM_IRQ_FW;
- }
- }
-
-
-
-	/* give detailed information on the subsystem. This additionally helps
-	 * me during debugging and when analyzing bug reports. */
- printk(KERN_INFO "IBM MCA SCSI: %s found, io=0x%x, scsi id=%d,\n",
- description, port, id);
- if (mca_dev->slot == MCA_INTEGSCSI)
- printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
- else {
- if ((pos[2] & 0xf0) == 0xf0)
- printk(KERN_DEBUG " ROM Addr.=off,");
- else
- printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
-
- printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
- }
-
- /* check I/O region */
- if (!request_region(port, IM_N_IO_PORT, description)) {
- printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT);
- goto out_fail;
- }
-
- /* register host */
- shpnt = scsi_host_alloc(&ibmmca_driver_template,
- sizeof(struct ibmmca_hostdata));
- if (!shpnt) {
- printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n");
- goto out_release;
- }
-
- dev_set_drvdata(dev, shpnt);
- if(request_irq(irq, interrupt_handler, IRQF_SHARED, description, dev)) {
- printk(KERN_ERR "IBM MCA SCSI: failed to request interrupt %d\n", irq);
- goto out_free_host;
- }
-
- /* request I/O region */
- special(shpnt) = mca_dev->index; /* important assignment or else crash! */
- subsystem_connector_size(shpnt) = 0; /* preset slot-size */
- shpnt->irq = irq; /* assign necessary stuff for the adapter */
- shpnt->io_port = port;
- shpnt->n_io_port = IM_N_IO_PORT;
- shpnt->this_id = id;
- shpnt->max_id = 8; /* 8 PUNs are default */
- /* now, the SCSI-subsystem is connected to Linux */
-
-#ifdef IM_DEBUG_PROBE
- ctrl = (unsigned int) (inb(IM_CTR_REG(found))); /* get control-register status */
- printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found)));
-	printk("IBM MCA SCSI: This adapter's POS-registers: ");
- for (i = 0; i < 8; i++)
- printk("%x ", pos[i]);
- printk("\n");
-#endif
- reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
-
- for (i = 0; i < 16; i++) /* reset the tables */
- for (j = 0; j < 8; j++)
- get_ldn(shpnt)[i][j] = MAX_LOG_DEV;
-
- /* check which logical devices exist */
- /* after this line, local interrupting is possible: */
- local_checking_phase_flag(shpnt) = 1;
- check_devices(shpnt, mca_dev->index); /* call by value, using the global variable hosts */
- local_checking_phase_flag(shpnt) = 0;
-
- /* an ibm mca subsystem has been detected */
-
- for (k = 2; k < 7; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
- mca_device_set_name(mca_dev, description);
- /* FIXME: NEED TO REPLUMB TO SYSFS
- mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- */
- mca_device_set_claim(mca_dev, 1);
- if (scsi_add_host(shpnt, dev)) {
- dev_printk(KERN_ERR, dev, "IBM MCA SCSI: scsi_add_host failed\n");
- goto out_free_host;
- }
- scsi_scan_host(shpnt);
-
- return 0;
- out_free_host:
- scsi_host_put(shpnt);
- out_release:
- release_region(port, IM_N_IO_PORT);
- out_fail:
- return ret;
-}
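
Sketch only, not part of the patch: the POS decode performed by ibmmca_probe()
above reduces to a few masks and shifts. Helpers written out in isolation,
with 'base' standing in for IM_IO_PORT:

	/* POS3 bits 7-5 carry the subsystem PUN (SCSI ID) */
	static int pos_to_pun(unsigned char pos3)
	{
		return (pos3 & 0xe0) >> 5;
	}

	/* POS2 bits 3-1 carry the port offset factor for slot adapters */
	static int pos_to_port(unsigned char pos2, int base)
	{
		return base + ((pos2 & 0x0e) << 2);
	}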
-
-static int __devexit ibmmca_remove(struct device *dev)
-{
- struct Scsi_Host *shpnt = dev_get_drvdata(dev);
- scsi_remove_host(shpnt);
- release_region(shpnt->io_port, shpnt->n_io_port);
- free_irq(shpnt->irq, dev);
- scsi_host_put(shpnt);
- return 0;
-}
-
-/* The following routine is the SCSI command queue for the midlevel driver */
-static int ibmmca_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
-{
- unsigned int ldn;
- unsigned int scsi_cmd;
- struct im_scb *scb;
- struct Scsi_Host *shpnt;
- int current_ldn;
- int id, lun;
- int target;
- int max_pun;
- int i;
- struct scatterlist *sg;
-
- shpnt = cmd->device->host;
-
- max_pun = subsystem_maxid(shpnt);
- if (ibm_ansi_order) {
- target = max_pun - 1 - cmd->device->id;
- if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
- target--;
- else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
- target++;
- } else
- target = cmd->device->id;
-
- /* if (target,lun) is NO LUN or not existing at all, return error */
- if ((get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
- cmd->result = DID_NO_CONNECT << 16;
- if (done)
- done(cmd);
- return 0;
- }
-
- /*if (target,lun) unassigned, do further checks... */
- ldn = get_ldn(shpnt)[target][cmd->device->lun];
- if (ldn >= MAX_LOG_DEV) { /* on invalid ldn do special stuff */
- if (ldn > MAX_LOG_DEV) { /* dynamical remapping if ldn unassigned */
- current_ldn = next_ldn(shpnt); /* stop-value for one circle */
-			while (ld(shpnt)[next_ldn(shpnt)].cmd) {	/* search for an occupied, but not in */
- /* command-processing ldn. */
- next_ldn(shpnt)++;
- if (next_ldn(shpnt) >= MAX_LOG_DEV)
- next_ldn(shpnt) = 7;
- if (current_ldn == next_ldn(shpnt)) { /* One circle done ? */
- /* no non-processing ldn found */
- scmd_printk(KERN_WARNING, cmd,
- "IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n"
- " On ldn 7-14 SCSI-commands everywhere in progress.\n"
- " Reporting DID_NO_CONNECT for device.\n");
- cmd->result = DID_NO_CONNECT << 16; /* return no connect */
- if (done)
- done(cmd);
- return 0;
- }
- }
-
- /* unmap non-processing ldn */
- for (id = 0; id < max_pun; id++)
- for (lun = 0; lun < 8; lun++) {
- if (get_ldn(shpnt)[id][lun] == next_ldn(shpnt)) {
- get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
- get_scsi(shpnt)[id][lun] = TYPE_NO_DEVICE;
- /* unmap entry */
- }
- }
- /* set reduced interrupt_handler-mode for checking */
- local_checking_phase_flag(shpnt) = 1;
- /* map found ldn to pun,lun */
- get_ldn(shpnt)[target][cmd->device->lun] = next_ldn(shpnt);
- /* change ldn to the right value, that is now next_ldn */
- ldn = next_ldn(shpnt);
- /* unassign all ldns (pun,lun,ldn does not matter for remove) */
- immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
- /* set only LDN for remapped device */
- immediate_assign(shpnt, target, cmd->device->lun, ldn, SET_LDN);
- /* get device information for ld[ldn] */
- if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
-				ld(shpnt)[ldn].cmd = NULL;	/* To prevent a panic, set NULL, because
-								   devices that were not assigned
-								   should have nothing in progress. */
- get_scsi(shpnt)[target][cmd->device->lun] = ld(shpnt)[ldn].device_type;
- /* increase assignment counters for statistics in /proc */
- IBM_DS(shpnt).dynamical_assignments++;
- IBM_DS(shpnt).ldn_assignments[ldn]++;
- } else
-				/* panic here, because a device found at boot time has
-				   vanished */
- panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun);
- /* unassign again all ldns (pun,lun,ldn does not matter for remove) */
- immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
- /* remap all ldns, as written in the pun/lun table */
- lun = 0;
-#ifdef CONFIG_SCSI_MULTI_LUN
- for (lun = 0; lun < 8; lun++)
-#endif
- for (id = 0; id < max_pun; id++) {
- if (get_ldn(shpnt)[id][lun] <= MAX_LOG_DEV)
- immediate_assign(shpnt, id, lun, get_ldn(shpnt)[id][lun], SET_LDN);
- }
- /* set back to normal interrupt_handling */
- local_checking_phase_flag(shpnt) = 0;
-#ifdef IM_DEBUG_PROBE
- /* Information on syslog terminal */
- printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun);
-#endif
- /* increase next_ldn for next dynamical assignment */
- next_ldn(shpnt)++;
- if (next_ldn(shpnt) >= MAX_LOG_DEV)
- next_ldn(shpnt) = 7;
- } else { /* wall against Linux accesses to the subsystem adapter */
- cmd->result = DID_BAD_TARGET << 16;
- if (done)
- done(cmd);
- return 0;
- }
- }
-
- /*verify there is no command already in progress for this log dev */
- if (ld(shpnt)[ldn].cmd)
- panic("IBM MCA SCSI: cmd already in progress for this ldn.\n");
-
- /*save done in cmd, and save cmd for the interrupt handler */
- cmd->scsi_done = done;
- ld(shpnt)[ldn].cmd = cmd;
-
- /*fill scb information independent of the scsi command */
- scb = &(ld(shpnt)[ldn].scb);
- ld(shpnt)[ldn].tsb.dev_status = 0;
- scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE;
- scb->tsb_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].tsb));
- scsi_cmd = cmd->cmnd[0];
-
- if (scsi_sg_count(cmd)) {
- BUG_ON(scsi_sg_count(cmd) > 16);
-
- scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
- ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg_page(sg)) + sg->offset);
- ld(shpnt)[ldn].sge[i].byte_length = sg->length;
- }
- scb->enable |= IM_POINTER_TO_LIST;
- scb->sys_buf_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].sge[0]));
- scb->sys_buf_length = scsi_sg_count(cmd) * sizeof(struct im_sge);
- } else {
- scb->sys_buf_adr = isa_virt_to_bus(scsi_sglist(cmd));
-		/* the recent Linux SCSI midlevel allocates 1024 bytes for the
-		 * inquiry command. Far too much for old PS/2 hardware. */
- switch (scsi_cmd) {
-			/* avoid command errors by limiting buffer lengths to the
-			 * ANSI standard. Beware of forcing it to 255;
-			 * this could SEGV the kernel!!! */
- case INQUIRY:
- case REQUEST_SENSE:
- case MODE_SENSE:
- case MODE_SELECT:
- if (scsi_bufflen(cmd) > 255)
- scb->sys_buf_length = 255;
- else
- scb->sys_buf_length = scsi_bufflen(cmd);
- break;
- case TEST_UNIT_READY:
- scb->sys_buf_length = 0;
- break;
- default:
- scb->sys_buf_length = scsi_bufflen(cmd);
- break;
- }
- }
- /*fill scb information dependent on scsi command */
-
-#ifdef IM_DEBUG_CMD
- printk("issue scsi cmd=%02x to ldn=%d\n", scsi_cmd, ldn);
-#endif
-
- /* for specific device-type debugging: */
-#ifdef IM_DEBUG_CMD_SPEC_DEV
- if (ld(shpnt)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
- printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(shpnt)[ldn].device_type, scsi_cmd, ldn);
-#endif
-
- /* for possible panics store current command */
- last_scsi_command(shpnt)[ldn] = scsi_cmd;
- last_scsi_type(shpnt)[ldn] = IM_SCB;
- /* update statistical info */
- IBM_DS(shpnt).total_accesses++;
- IBM_DS(shpnt).ldn_access[ldn]++;
-
- switch (scsi_cmd) {
- case READ_6:
- case WRITE_6:
- case READ_10:
- case WRITE_10:
- case READ_12:
- case WRITE_12:
- /* Distinguish between disk and other devices. Only disks (that are the
- most frequently accessed devices) should be supported by the
- IBM-SCSI-Subsystem commands. */
- switch (ld(shpnt)[ldn].device_type) {
- case TYPE_DISK: /* for harddisks enter here ... */
- case TYPE_MOD: /* ... try it also for MO-drives (send flames as */
- /* you like, if this won't work.) */
- if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) {
- /* read command preparations */
- scb->enable |= IM_READ_CONTROL;
- IBM_DS(shpnt).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */
- scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT;
- } else { /* write command preparations */
- IBM_DS(shpnt).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */
- scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT;
- }
- if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) {
- scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[3]) << 0) | (((unsigned) cmd->cmnd[2]) << 8) | ((((unsigned) cmd->cmnd[1]) & 0x1f) << 16);
- scb->u2.blk.count = (unsigned) cmd->cmnd[4];
- } else {
- scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24);
- scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8);
- }
- last_scsi_logical_block(shpnt)[ldn] = scb->u1.log_blk_adr;
- last_scsi_blockcount(shpnt)[ldn] = scb->u2.blk.count;
- scb->u2.blk.length = ld(shpnt)[ldn].block_length;
- break;
- /* for other devices, enter here. Other types are not known by
- Linux! TYPE_NO_LUN is forbidden as valid device. */
- case TYPE_ROM:
- case TYPE_TAPE:
- case TYPE_PROCESSOR:
- case TYPE_WORM:
- case TYPE_SCANNER:
- case TYPE_MEDIUM_CHANGER:
-			/* For sequential devices, IBM recommends using
-			   IM_OTHER_SCSI_CMD_CMD instead of subsystem READ/WRITE.
-			   This includes CD-ROM devices, too, due to their partial
-			   sequential-read capabilities. */
- scb->command = IM_OTHER_SCSI_CMD_CMD;
- if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12)
- /* enable READ */
- scb->enable |= IM_READ_CONTROL;
- scb->enable |= IM_BYPASS_BUFFER;
- scb->u1.scsi_cmd_length = cmd->cmd_len;
- memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
-			/* Read/write on these non-disk devices is also worth displaying,
-			   so flash up the LED/display. */
- break;
- }
- break;
- case INQUIRY:
- IBM_DS(shpnt).ldn_inquiry_access[ldn]++;
- scb->command = IM_DEVICE_INQUIRY_CMD;
- scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
- scb->u1.log_blk_adr = 0;
- break;
- case TEST_UNIT_READY:
- scb->command = IM_OTHER_SCSI_CMD_CMD;
- scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
- scb->u1.log_blk_adr = 0;
- scb->u1.scsi_cmd_length = 6;
- memcpy(scb->u2.scsi_command, cmd->cmnd, 6);
- last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
- break;
- case READ_CAPACITY:
- /* the length of system memory buffer must be exactly 8 bytes */
- scb->command = IM_READ_CAPACITY_CMD;
- scb->enable |= IM_READ_CONTROL | IM_BYPASS_BUFFER;
- if (scb->sys_buf_length > 8)
- scb->sys_buf_length = 8;
- break;
- /* Commands that need read-only-mode (system <- device): */
- case REQUEST_SENSE:
- scb->command = IM_REQUEST_SENSE_CMD;
- scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
- break;
- /* Commands that need write-only-mode (system -> device): */
- case MODE_SELECT:
- case MODE_SELECT_10:
- IBM_DS(shpnt).ldn_modeselect_access[ldn]++;
- scb->command = IM_OTHER_SCSI_CMD_CMD;
- scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; /*Select needs WRITE-enabled */
- scb->u1.scsi_cmd_length = cmd->cmd_len;
- memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
- break;
- /* For other commands, read-only is useful. Most other commands are
- running without an input-data-block. */
- default:
- scb->command = IM_OTHER_SCSI_CMD_CMD;
- scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
- scb->u1.scsi_cmd_length = cmd->cmd_len;
- memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
- break;
- }
- /*issue scb command, and return */
- if (++disk_rw_in_progress == 1)
- PS2_DISK_LED_ON(shpnt->host_no, target);
-
- if (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB) {
- issue_cmd(shpnt, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
- IBM_DS(shpnt).long_scbs++;
- } else {
- issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
- IBM_DS(shpnt).scbs++;
- }
- return 0;
-}
-
-static DEF_SCSI_QCMD(ibmmca_queuecommand)
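
Sketch only, not part of the patch: the dynamic-remapping branch of
ibmmca_queuecommand_lck() above searches round-robin for an ldn with no
command in flight, wrapping from MAX_LOG_DEV back to 7 (ldns 0-6 are
hardwired) and giving up after one full circle. In isolation, with 'busy'
standing in for the ld(shpnt)[ldn].cmd slots:

	static int find_free_dynamic_ldn(void *busy[], int next_ldn, int max_log_dev)
	{
		int start = next_ldn;			/* stop value for one full circle */

		while (busy[next_ldn]) {		/* skip ldns with a command in flight */
			if (++next_ldn >= max_log_dev)
				next_ldn = 7;
			if (next_ldn == start)
				return -1;		/* nothing free on ldn 7..14 */
		}
		return next_ldn;
	}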
-
-static int __ibmmca_abort(Scsi_Cmnd * cmd)
-{
-	/* Abort does not work, as the adapter never generates an interrupt for
-	 * whatever situation is simulated, even when commands are really
-	 * pending on the adapter's hardware! */
-
- struct Scsi_Host *shpnt;
- unsigned int ldn;
- void (*saved_done) (Scsi_Cmnd *);
- int target;
- int max_pun;
- unsigned long imm_command;
-
-#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Abort subroutine called...\n");
-#endif
-
- shpnt = cmd->device->host;
-
- max_pun = subsystem_maxid(shpnt);
- if (ibm_ansi_order) {
- target = max_pun - 1 - cmd->device->id;
- if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
- target--;
- else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
- target++;
- } else
- target = cmd->device->id;
-
- /* get logical device number, and disable system interrupts */
- printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun);
- ldn = get_ldn(shpnt)[target][cmd->device->lun];
-
- /*if cmd for this ldn has already finished, no need to abort */
- if (!ld(shpnt)[ldn].cmd) {
- return SUCCESS;
- }
-
- /* Clear ld.cmd, save done function, install internal done,
- * send abort immediate command (this enables sys. interrupts),
- * and wait until the interrupt arrives.
- */
- saved_done = cmd->scsi_done;
- cmd->scsi_done = internal_done;
- cmd->SCp.Status = 0;
- last_scsi_command(shpnt)[ldn] = IM_ABORT_IMM_CMD;
- last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
- imm_command = inl(IM_CMD_REG(shpnt));
- imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
- imm_command |= (unsigned long) (IM_ABORT_IMM_CMD);
- /* must wait for attention reg not busy */
- /* FIXME - timeout, politeness */
- while (1) {
- if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
- break;
- }
- /* write registers and enable system interrupts */
- outl(imm_command, IM_CMD_REG(shpnt));
- outb(IM_IMM_CMD | ldn, IM_ATTN_REG(shpnt));
-#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Abort queued to adapter...\n");
-#endif
- spin_unlock_irq(shpnt->host_lock);
- while (!cmd->SCp.Status)
- yield();
- spin_lock_irq(shpnt->host_lock);
- cmd->scsi_done = saved_done;
-#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Abort returned with adapter response...\n");
-#endif
-
- /*if abort went well, call saved done, then return success or error */
- if (cmd->result == (DID_ABORT << 16))
- {
- cmd->result |= DID_ABORT << 16;
- if (cmd->scsi_done)
- (cmd->scsi_done) (cmd);
- ld(shpnt)[ldn].cmd = NULL;
-#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Abort finished with success.\n");
-#endif
- return SUCCESS;
- } else {
- cmd->result |= DID_NO_CONNECT << 16;
- if (cmd->scsi_done)
- (cmd->scsi_done) (cmd);
- ld(shpnt)[ldn].cmd = NULL;
-#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Abort failed.\n");
-#endif
- return FAILED;
- }
-}
-
-static int ibmmca_abort(Scsi_Cmnd * cmd)
-{
- struct Scsi_Host *shpnt = cmd->device->host;
- int rc;
-
- spin_lock_irq(shpnt->host_lock);
- rc = __ibmmca_abort(cmd);
- spin_unlock_irq(shpnt->host_lock);
-
- return rc;
-}
-
-static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
-{
- struct Scsi_Host *shpnt;
- Scsi_Cmnd *cmd_aid;
- int ticks, i;
- unsigned long imm_command;
-
- BUG_ON(cmd == NULL);
-
- ticks = IM_RESET_DELAY * HZ;
- shpnt = cmd->device->host;
-
- if (local_checking_phase_flag(shpnt)) {
- printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n");
- return FAILED;
- }
-
- /* issue reset immediate command to subsystem, and wait for interrupt */
- printk("IBM MCA SCSI: resetting all devices.\n");
- reset_status(shpnt) = IM_RESET_IN_PROGRESS;
- last_scsi_command(shpnt)[0xf] = IM_RESET_IMM_CMD;
- last_scsi_type(shpnt)[0xf] = IM_IMM_CMD;
- imm_command = inl(IM_CMD_REG(shpnt));
- imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
- imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
- /* must wait for attention reg not busy */
- while (1) {
- if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
- break;
- spin_unlock_irq(shpnt->host_lock);
- yield();
- spin_lock_irq(shpnt->host_lock);
- }
- /*write registers and enable system interrupts */
- outl(imm_command, IM_CMD_REG(shpnt));
- outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(shpnt));
-	/* wait for the interrupt to finish or for the intr_stat register to be
-	 * set, as the interrupt will not be executed while we are in here! */
-
- /* FIXME: This is really really icky we so want a sleeping version of this ! */
- while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(shpnt)) & 0x8f) != 0x8f)) {
- udelay((1 + 999 / HZ) * 1000);
- barrier();
- }
- /* if reset did not complete, just return an error */
- if (!ticks) {
- printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
- reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
- return FAILED;
- }
-
- if ((inb(IM_INTR_REG(shpnt)) & 0x8f) == 0x8f) {
- /* analysis done by this routine and not by the intr-routine */
- if (inb(IM_INTR_REG(shpnt)) == 0xaf)
- reset_status(shpnt) = IM_RESET_FINISHED_OK_NO_INT;
- else if (inb(IM_INTR_REG(shpnt)) == 0xcf)
- reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
- else /* failed, 4get it */
- reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
- outb(IM_EOI | 0xf, IM_ATTN_REG(shpnt));
- }
-
- /* if reset failed, just return an error */
- if (reset_status(shpnt) == IM_RESET_FINISHED_FAIL) {
- printk(KERN_ERR "IBM MCA SCSI: reset failed.\n");
- return FAILED;
- }
-
- /* so reset finished ok - call outstanding done's, and return success */
- printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n");
- for (i = 0; i < MAX_LOG_DEV; i++) {
- cmd_aid = ld(shpnt)[i].cmd;
- if (cmd_aid && cmd_aid->scsi_done) {
- ld(shpnt)[i].cmd = NULL;
- cmd_aid->result = DID_RESET << 16;
- }
- }
- return SUCCESS;
-}
-
-static int ibmmca_host_reset(Scsi_Cmnd * cmd)
-{
- struct Scsi_Host *shpnt = cmd->device->host;
- int rc;
-
- spin_lock_irq(shpnt->host_lock);
- rc = __ibmmca_host_reset(cmd);
- spin_unlock_irq(shpnt->host_lock);
-
- return rc;
-}
-
-static int ibmmca_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *info)
-{
- int size = capacity;
- info[0] = 64;
- info[1] = 32;
- info[2] = size / (info[0] * info[1]);
- if (info[2] >= 1024) {
- info[0] = 128;
- info[1] = 63;
- info[2] = size / (info[0] * info[1]);
- if (info[2] >= 1024) {
- info[0] = 255;
- info[1] = 63;
- info[2] = size / (info[0] * info[1]);
- if (info[2] >= 1024)
- info[2] = 1023;
- }
- }
- return 0;
-}
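
For illustration, not part of the patch: ibmmca_biosparam() above falls back
from 64/32 to 128/63 and finally 255/63 geometry until the cylinder count
drops below 1024. A 2 GiB disk reporting capacity = 4194304 sectors gives
4194304 / (64 * 32) = 2048 cylinders, which is >= 1024, so the next step of
128 * 63 = 8064 sectors per cylinder is tried, yielding 4194304 / 8064 = 520
cylinders; the reported geometry is therefore 128 heads, 63 sectors, 520
cylinders.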
-
-/* calculate percentage of total accesses on a ldn */
-static int ldn_access_load(struct Scsi_Host *shpnt, int ldn)
-{
- if (IBM_DS(shpnt).total_accesses == 0)
- return (0);
- if (IBM_DS(shpnt).ldn_access[ldn] == 0)
- return (0);
- return (IBM_DS(shpnt).ldn_access[ldn] * 100) / IBM_DS(shpnt).total_accesses;
-}
-
-/* calculate total amount of r/w-accesses */
-static int ldn_access_total_read_write(struct Scsi_Host *shpnt)
-{
- int a;
- int i;
-
- a = 0;
- for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(shpnt).ldn_read_access[i] + IBM_DS(shpnt).ldn_write_access[i];
- return (a);
-}
-
-static int ldn_access_total_inquiry(struct Scsi_Host *shpnt)
-{
- int a;
- int i;
-
- a = 0;
- for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(shpnt).ldn_inquiry_access[i];
- return (a);
-}
-
-static int ldn_access_total_modeselect(struct Scsi_Host *shpnt)
-{
- int a;
- int i;
-
- a = 0;
- for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(shpnt).ldn_modeselect_access[i];
- return (a);
-}
-
-/* routine to display info in the proc-fs-structure (a deluxe feature) */
-static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
-{
- int len = 0;
- int i, id, lun;
- unsigned long flags;
- int max_pun;
-
-
- spin_lock_irqsave(shpnt->host_lock, flags); /* Check it */
-
- max_pun = subsystem_maxid(shpnt);
-
- len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION);
- len += sprintf(buffer + len, " SCSI Access-Statistics:\n");
- len += sprintf(buffer + len, " Device Scanning Order....: %s\n", (ibm_ansi_order) ? "IBM/ANSI" : "New Industry Standard");
-#ifdef CONFIG_SCSI_MULTI_LUN
- len += sprintf(buffer + len, " Multiple LUN probing.....: Yes\n");
-#else
- len += sprintf(buffer + len, " Multiple LUN probing.....: No\n");
-#endif
- len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no);
- len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(shpnt)));
- len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ);
- len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(shpnt).total_interrupts);
- len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(shpnt).total_accesses);
- len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(shpnt).scbs);
- len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(shpnt).long_scbs);
- len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(shpnt));
- len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(shpnt));
- len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(shpnt));
- len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(shpnt).total_accesses - ldn_access_total_read_write(shpnt)
- - ldn_access_total_modeselect(shpnt)
- - ldn_access_total_inquiry(shpnt));
- len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(shpnt).total_errors);
- len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n");
- len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n");
- len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n");
- for (i = 0; i <= MAX_LOG_DEV; i++)
- len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(shpnt, i), IBM_DS(shpnt).ldn_read_access[i], IBM_DS(shpnt).ldn_write_access[i], IBM_DS(shpnt).ldn_assignments[i]);
- len += sprintf(buffer + len, " -----------------------------------------------------------\n\n");
- len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n");
- len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(shpnt).total_scsi_devices);
- len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(shpnt).dyn_flag ? "Yes" : "No ");
- len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(shpnt));
- len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(shpnt).dynamical_assignments);
- len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n");
- len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n");
- len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n");
- for (id = 0; id < max_pun; id++) {
- len += sprintf(buffer + len, " %2d ", id);
- for (lun = 0; lun < 8; lun++)
- len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(shpnt)[id][lun]));
- len += sprintf(buffer + len, " %2d ", id);
- for (lun = 0; lun < 8; lun++)
- len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(shpnt)[id][lun]));
- len += sprintf(buffer + len, "\n");
- }
-
- len += sprintf(buffer + len, "(A = IBM-Subsystem, D = Harddisk, T = Tapedrive, P = Processor, W = WORM,\n");
- len += sprintf(buffer + len, " R = CD-ROM, S = Scanner, M = MO-Drive, C = Medium-Changer, + = unprovided LUN,\n");
- len += sprintf(buffer + len, " - = nothing found, nothing assigned or unprobed LUN)\n\n");
-
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
- spin_unlock_irqrestore(shpnt->host_lock, flags);
- return len;
-}
-
-static int option_setup(char *str)
-{
- int ints[IM_MAX_HOSTS];
- char *cur = str;
- int i = 1;
-
- while (cur && isdigit(*cur) && i < IM_MAX_HOSTS) {
- ints[i++] = simple_strtoul(cur, NULL, 0);
- if ((cur = strchr(cur, ',')) != NULL)
- cur++;
- }
- ints[0] = i - 1;
- internal_ibmmca_scsi_setup(cur, ints);
- return 1;
-}
-
-__setup("ibmmcascsi=", option_setup);
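
For illustration, not part of the patch: internal_ibmmca_scsi_setup() above
treats keyword tokens (activity, display, adisplay, normal, ansi, fast,
medium, slow) as option switches and takes numeric tokens alternately as I/O
port and SCSI ID. A hypothetical boot parameter

	ibmmcascsi=display,0x3540,7

would therefore set the LED_DISP bit in display_mode, io_port[0] to 0x3540,
and scsi_id[0] to 7.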
-
-static struct mca_driver ibmmca_driver = {
- .id_table = ibmmca_id_table,
- .driver = {
- .name = "ibmmca",
- .bus = &mca_bus_type,
- .probe = ibmmca_probe,
- .remove = __devexit_p(ibmmca_remove),
- },
-};
-
-static int __init ibmmca_init(void)
-{
-#ifdef MODULE
- /* If the driver is run as module, read from conf.modules or cmd-line */
- if (boot_options)
- option_setup(boot_options);
-#endif
-
- return mca_register_driver_integrated(&ibmmca_driver, MCA_INTEGSCSI);
-}
-
-static void __exit ibmmca_exit(void)
-{
- mca_unregister_driver(&ibmmca_driver);
-}
-
-module_init(ibmmca_init);
-module_exit(ibmmca_exit);
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 441d88ad99a7..d109cc3a17b6 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
- ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+ memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
} else {
- link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
if (unlikely(link->eh_info.err_mask))
qc->flags |= ATA_QCFLAG_FAILED;
}
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
qc->flags |= ATA_QCFLAG_FAILED;
}
- dev->sata_dev.tf.feature = 0x04; /* status err */
- dev->sata_dev.tf.command = ATA_ERR;
+ dev->sata_dev.fis[3] = 0x04; /* status err */
+ dev->sata_dev.fis[2] = ATA_ERR;
}
}
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct domain_device *dev = qc->ap->private_data;
- memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+ ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
return true;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6102ef2cb2d8..9d46fcbe7755 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
static inline u8
_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
{
- return ioc->cpu_msix_table[smp_processor_id()];
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
}
/**
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 6c6486f626ee..538230be5cca 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4473,17 +4473,14 @@ qla1280_exit(void)
pci_unregister_driver(&qla1280_pci_driver);
/* release any allocated firmware images */
for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
- if (qla1280_fw_tbl[i].fw) {
- release_firmware(qla1280_fw_tbl[i].fw);
- qla1280_fw_tbl[i].fw = NULL;
- }
+ release_firmware(qla1280_fw_tbl[i].fw);
+ qla1280_fw_tbl[i].fw = NULL;
}
}
module_init(qla1280_init);
module_exit(qla1280_exit);
-
MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 6208d562890d..317a7fdc3b82 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,3 +25,12 @@ config SCSI_QLA_FC
Firmware images can be retrieved from:
ftp://ftp.qlogic.com/outgoing/linux/firmware/
+
+config TCM_QLA2XXX
+ tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+ depends on SCSI_QLA_FC && TARGET_CORE
+ select LIBFC
+ select BTREE
+ default n
+ ---help---
+	  Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs.
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 5df782f4a097..dce7d788cdc9 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,5 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o
+ qla_nx.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5926f5a87ea8..5ab953029f8d 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/kthread.h>
#include <linux/vmalloc.h>
@@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_block_requests(vha->host);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
if (IS_QLA82XX(ha)) {
+ ha->flags.isp82xx_no_md_cap = 1;
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d\n",
- ha->qla_stats.total_isp_aborts);
+ vha->qla_stats.total_isp_aborts);
}
static ssize_t
@@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
dma_addr_t stats_dma;
struct fc_host_statistics *pfc_host_stat;
- pfc_host_stat = &ha->fc_host_stat;
+ pfc_host_stat = &vha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->dumped_frames = stats->dumped_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
}
- pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
- pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
+ pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+ pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_host_supported_speeds(vha->host) =
fc_host_supported_speeds(base_vha->host);
+ qlt_vport_create(vha, ha);
qla24xx_vport_disable(fc_vport, disable);
if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
- fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+ fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+ (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
if (IS_CNA_CAPABLE(ha))
speed = FC_PORTSPEED_10GBIT;
+ else if (IS_QLA2031(ha))
+ speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+ FC_PORTSPEED_4GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index bc3cc6d91117..c68883806c54 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa =
bsg_job->request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
@@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
int rval = 0;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_set_internal;
new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
if ((ha->current_topology == ISP_CFG_F ||
- (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
- ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
if (rval)
return rval;
+ /* Set the isp82xx_no_md_cap not to capture minidump */
+ ha->flags.isp82xx_no_md_cap = 1;
+
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 62324a1d5573..fdee5611f3e2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,27 +11,31 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x113e | 0x112c-0x112e |
+ * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x1140 | 0x111a-0x111b |
+ * | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2086 | 0x2020-0x2022 |
* | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
* | | | 0x302d-0x302e |
- * | DPC Thread | 0x401c | |
- * | Async Events | 0x505d | 0x502b-0x502f |
+ * | DPC Thread | 0x401c | 0x4002,0x4013 |
+ * | Async Events | 0x505f | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
- * | Timer Routines | 0x6011 | 0x600e-0x600f |
+ * | Timer Routines | 0x6011 | |
* | User Space Interactions | 0x709f | 0x7018,0x702e, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
- * | AER/EEH | 0x900f | |
+ * | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb054 | 0xb053 |
+ * | ISP82XX Specific | 0xb054 | 0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
+ * | Target Mode | 0xe06f | |
+ * | Target Mode Management | 0xf071 | |
+ * | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
@@ -379,6 +383,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
+qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
+ uint32_t **last_chain)
+{
+ struct qla2xxx_mqueue_chain *q;
+ struct qla2xxx_mqueue_header *qh;
+ uint32_t num_queues;
+ int que;
+ struct {
+ int length;
+ void *ring;
+ } aq, *aqp;
+
+ if (!ha->tgt.atio_q_length)
+ return ptr;
+
+ num_queues = 1;
+ aqp = &aq;
+ aqp->length = ha->tgt.atio_q_length;
+ aqp->ring = ha->tgt.atio_ring;
+
+ for (que = 0; que < num_queues; que++) {
+ /* aqp = ha->atio_q_map[que]; */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (aqp->length * sizeof(request_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(aqp->length * sizeof(request_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
+
+ ptr += aqp->length * sizeof(request_t);
+ }
+
+ return ptr;
+}
+
+static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
struct qla2xxx_mqueue_chain *q;
@@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
struct qla24xx_fw_dump *fw;
uint32_t ext_mem_cnt;
void *nxt;
+ void *nxt_chain;
+ uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_copy_eft(ha, nxt);
+ nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@ copy_queue:
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
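The ATIO queue is appended to the firmware dump with the same chained-record layout already used for the MQ request/response queues: a qla2xxx_mqueue_chain record tagged DUMP_CHAIN_QUEUE (with DUMP_CHAIN_LAST OR'd into the final record), a qla2xxx_mqueue_header tagged TYPE_ATIO_QUEUE, then the raw ring contents, exactly as qla2xxx_copy_atioqueues() writes them above. The sketch below only illustrates how a dump consumer might size one such record; the struct definitions are simplified stand-ins for the driver's real headers, and the 64-byte entry size matches the atio/response_t definitions introduced by this patch.

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-ins for struct qla2xxx_mqueue_chain / _header. */
struct dump_chain_rec {
	uint32_t type;		/* DUMP_CHAIN_QUEUE; last record also sets DUMP_CHAIN_LAST */
	uint32_t chain_size;	/* bytes covered by this record, headers included */
};

struct dump_queue_hdr {
	uint32_t queue;		/* TYPE_ATIO_QUEUE (0x3) for the ATIO ring */
	uint32_t number;	/* queue index, 0 for the single ATIO queue */
	uint32_t size;		/* ring payload size in bytes */
};

/* Bytes consumed in the dump by one chained queue record of 'entries' IOCBs. */
static size_t dump_queue_record_bytes(uint32_t entries)
{
	return sizeof(struct dump_chain_rec) + sizeof(struct dump_queue_hdr) +
	       (size_t)entries * 64;	/* each IOCB/ATIO entry is 64 bytes */
}

This mirrors the chain_size computation in qla2xxx_copy_atioqueues() and the mq_size adjustment made in qla2x00_alloc_fw_dump() further down.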
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2157bdf1569a..f278df8cce0f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
uint32_t queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
+#define TYPE_ATIO_QUEUE 0x3
uint32_t number;
uint32_t size;
};
@@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
* covered by upper categories
*/
+#define ql_dbg_verbose 0x00008000 /* More verbosity for each level.
+ * This is to be used with other levels where
+ * more verbosity is required; it might not
+ * be applicable to all levels.
+ */
+#define ql_dbg_tgt 0x00004000 /* Target mode */
+#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
+#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
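The new target-mode bits and ql_dbg_verbose are combined with the existing per-area masks at the call sites below (for example ql_dbg(ql_dbg_mbx + ql_dbg_verbose, ...)), and a message is only emitted when every requested bit is set in the driver's global logging mask, normally the ql2xextended_error_logging module parameter. The following is a minimal sketch of that all-bits-must-match test under that assumption; only the 0x0000x000 values come from this hunk, the ql_dbg_mbx value here is an illustrative placeholder.

#include <stdbool.h>
#include <stdint.h>

#define ql_dbg_verbose	0x00008000u	/* values from the hunk above */
#define ql_dbg_tgt	0x00004000u
#define ql_dbg_tgt_mgt	0x00002000u
#define ql_dbg_tgt_tmr	0x00001000u
#define ql_dbg_mbx	0x01000000u	/* illustrative placeholder value */

/* Global mask, normally the ql2xextended_error_logging module parameter. */
static uint32_t logging_mask = ql_dbg_mbx | ql_dbg_tgt | ql_dbg_tgt_mgt;

/* A message is printed only if every bit requested at the call site is enabled. */
static bool mask_match(uint32_t level)
{
	return (level & logging_mask) == level;
}

/* mask_match(ql_dbg_mbx) succeeds here, but
 * mask_match(ql_dbg_mbx + ql_dbg_verbose) stays false until the verbose
 * bit is also turned on in logging_mask. */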
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a2443031dbe7..39007f53aec0 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -186,6 +186,7 @@
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
+#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
struct req_que;
@@ -1234,11 +1235,27 @@ typedef struct {
* ISP queue - response queue entry definition.
*/
typedef struct {
- uint8_t data[60];
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint8_t data[52];
uint32_t signature;
#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
} response_t;
+/*
+ * ISP queue - ATIO queue entry definition.
+ */
+struct atio {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t data[58];
+ uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+};
+
typedef union {
uint16_t extended;
struct {
@@ -1719,11 +1736,13 @@ typedef struct fc_port {
struct fc_rport *rport, *drport;
u32 supported_classes;
- uint16_t vp_idx;
uint8_t fc4_type;
uint8_t scan_state;
} fc_port_t;
+#define QLA_FCPORT_SCAN_NONE 0
+#define QLA_FCPORT_SCAN_FOUND 1
+
/*
* Fibre channel port/lun states.
*/
@@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
#define FCF_LOGIN_NEEDED BIT_1
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
+#define FCF_CONF_COMP_SUPPORTED BIT_4
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2419,6 +2439,40 @@ struct qlfc_fw {
uint32_t len;
};
+struct qlt_hw_data {
+ /* Protected by hw lock */
+ uint32_t enable_class_2:1;
+ uint32_t enable_explicit_conf:1;
+ uint32_t ini_mode_force_reverse:1;
+ uint32_t node_name_set:1;
+
+ dma_addr_t atio_dma; /* Physical address. */
+ struct atio *atio_ring; /* Base virtual address */
+ struct atio *atio_ring_ptr; /* Current address. */
+ uint16_t atio_ring_index; /* Current index. */
+ uint16_t atio_q_length;
+
+ void *target_lport_ptr;
+ struct qla_tgt_func_tmpl *tgt_ops;
+ struct qla_tgt *qla_tgt;
+ struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+ uint16_t current_handle;
+
+ struct qla_tgt_vp_map *tgt_vp_map;
+ struct mutex tgt_mutex;
+ struct mutex tgt_host_action_mutex;
+
+ int saved_set;
+ uint16_t saved_exchange_count;
+ uint32_t saved_firmware_options_1;
+ uint32_t saved_firmware_options_2;
+ uint32_t saved_firmware_options_3;
+ uint8_t saved_firmware_options[2];
+ uint8_t saved_add_firmware_options[2];
+
+ uint8_t tgt_node_name[WWN_SIZE];
+};
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -2460,7 +2514,9 @@ struct qla_hw_data {
uint32_t thermal_supported:1;
uint32_t isp82xx_reset_hdlr_active:1;
uint32_t isp82xx_reset_owner:1;
- /* 28 bits */
+ uint32_t isp82xx_no_md_cap:1;
+ uint32_t host_shutting_down:1;
+ /* 30 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@ struct qla_hw_data {
/* ISP2322: red, green, amber. */
uint16_t zio_mode;
uint16_t zio_timer;
- struct fc_host_statistics fc_host_stat;
struct qla_msix_entry *msix_entries;
@@ -2817,7 +2872,6 @@ struct qla_hw_data {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
- struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@ struct qla_hw_data {
dma_addr_t md_tmplt_hdr_dma;
void *md_dump;
uint32_t md_dump_size;
+
+ struct qlt_hw_data tgt;
};
/*
@@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
+#define SCR_PENDING 21 /* SCR in target mode */
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
+ struct fc_host_statistics fc_host_stat;
+ struct qla_statistics qla_stats;
atomic_t vref_count;
} scsi_qla_host_t;
+#define SET_VP_IDX 1
+#define SET_AL_PA 2
+#define RESET_VP_IDX 3
+#define RESET_AL_PA 4
+struct qla_tgt_vp_map {
+ uint8_t idx;
+ scsi_qla_host_t *vha;
+};
+
/*
* Macros to help code, maintain, etc.
*/
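The new qlt_hw_data fields (atio_ring, atio_ring_ptr, atio_ring_index, atio_q_length) give the ATIO queue the same circular-ring bookkeeping as the request and response rings. Below is a hedged sketch of the wrap-around step such a consumer performs; struct atio is copied from the hunk above, but the helper itself is illustrative and is not the driver's qlt_24xx_process_atio_queue() implementation.

#include <stdint.h>

struct atio {			/* as defined in qla_def.h above */
	uint8_t entry_type;
	uint8_t entry_count;
	uint8_t data[58];
	uint32_t signature;
};

struct atio_ring {		/* stand-in for the ha->tgt.atio_* fields */
	struct atio *ring;	/* base virtual address */
	struct atio *ring_ptr;	/* current entry */
	uint16_t index;		/* current index */
	uint16_t length;	/* entries in the ring */
};

/* Advance to the next ATIO entry, wrapping at the end of the ring. */
static inline struct atio *atio_ring_next(struct atio_ring *r)
{
	if (++r->index == r->length) {
		r->index = 0;
		r->ring_ptr = r->ring;
	} else {
		r->ring_ptr++;
	}
	return r->ring_ptr;
}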
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9f065804bd12..9eacd2df111b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -175,6 +175,7 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/
+
extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
+extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -239,6 +242,9 @@ extern int
qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
extern int
+qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
+
+extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int
@@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+
/*
* Global Function Prototypes in qla_sup.c source file.
*/
@@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
extern void qla2x00_sp_timeout(unsigned long);
extern void qla2x00_bsg_job_done(void *, void *, int);
extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 3128f80441f5..05260d25fe46 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
- ct_req->req.rff_id.fc4_feature = BIT_1;
+ qlt_rff_id(vha, ct_req);
+
ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
/* Execute MS IOCB */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b9465643396b..ca5084743135 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,6 +17,9 @@
#include <asm/prom.h>
#endif
+#include <target/target_core_base.h>
+#include "qla_target.h"
+
/*
* QLogic ISP2x00 Hardware Support Function Prototypes.
*/
@@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
}
- rval = qla2x00_init_rings(vha);
+
+ if (qla_ini_mode_enabled(vha))
+ rval = qla2x00_init_rings(vha);
+
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mq_size += ha->max_rsp_queues *
(rsp->length * sizeof(response_t));
}
+ if (ha->tgt.atio_q_length)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto try_eft;
@@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+ /* Setup ATIO queue dma pointers for target mode */
+ icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+ icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
+ icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
+ icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+
if (ha->mqenable || IS_QLA83XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
+ qlt_24xx_config_rings(vha, reg);
+
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
}
@@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock(&ha->vport_slock);
+ ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+ ha->tgt.atio_ring_index = 0;
+ /* Initialize ATIO queue entries */
+ qlt_init_atio_q_entries(vha);
+
ha->isp_ops->config_rings(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
vha->d_id.b.area = area;
vha->d_id.b.al_pa = al_pa;
+ spin_lock(&ha->vport_slock);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock(&ha->vport_slock);
+
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
"Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x0064,
- "Inconisistent NVRAM "
+ "Inconsistent NVRAM "
"detected: checksum=0x%x id=%c version=0x%x.\n",
chksum, nv->id[0], nv->nvram_version);
ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (IS_QLA23XX(ha)) {
nv->firmware_options[0] |= BIT_2;
nv->firmware_options[0] &= ~BIT_3;
- nv->firmware_options[0] &= ~BIT_6;
+ nv->special_options[0] &= ~BIT_6;
nv->add_firmware_options[1] |= BIT_5 | BIT_4;
if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
+ scsi_qla_host_t *vha = fcport->vha;
unsigned long flags;
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport)
+ if (rport) {
fc_remote_port_delete(rport);
+ /*
+ * Release the target mode FC NEXUS in qla_target.c code
+ * if target mode is enabled.
+ */
+ qlt_fc_port_deleted(vha, fcport);
+ }
}
/**
@@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
/* Setup fcport template structure. */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->port_type = FCT_UNKNOWN;
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
+ fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
@@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
- new_fcport->vp_idx = vha->vp_idx;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (!found) {
/* New device, add to fcports list. */
- if (vha->vp_idx) {
- new_fcport->vha = vha;
- new_fcport->vp_idx = vha->vp_idx;
- }
list_add_tail(&new_fcport->list, &vha->vp_fcports);
/* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@ cleanup_allocation:
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
-#define LS_UNKNOWN 2
- static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
char *link_speed;
int rval;
uint16_t mb[4];
@@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]);
} else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (fcport->fp_speed < 5)
- link_speed = link_speeds[fcport->fp_speed];
- else if (fcport->fp_speed == 0x13)
- link_speed = link_speeds[5];
+ link_speed = qla2x00_get_link_speed_str(ha);
ql_dbg(ql_dbg_disc, vha, 0x2005,
"iIDMA adjusted to %s GB/s "
"on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port.\n");
return;
}
+ /*
+ * Create target mode FC NEXUS in qla_target.c if target mode is
+ * enabled.
+ */
+ qlt_fc_port_added(vha, fcport);
+
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport, *fcptemp;
+ fc_port_t *fcport;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
@@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- return rval;
+ break;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
-#define QLA_FCPORT_SCAN 1
-#define QLA_FCPORT_FOUND 2
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- fcport->scan_state = QLA_FCPORT_SCAN;
- }
-
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
- /*
- * Logout all previous fabric devices marked lost, except
- * FCP2 devices.
- */
+ /* Add new ports to existing port list */
+ list_splice_tail_init(&new_fcports, &vha->vp_fcports);
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- if (fcport->scan_state == QLA_FCPORT_SCAN &&
+ /* Logout lost/gone fabric devices (non-FCP2) */
+ if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- fcport->loop_id = FC_NO_LOOP_ID;
}
- }
- }
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
- /*
- * Scan through our port list and login entries that need to be
- * logged in.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->flags & FCF_LOGIN_NEEDED) == 0)
continue;
-
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
}
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
- }
-
- /* Exit if out of loop IDs. */
- if (rval != QLA_SUCCESS) {
- break;
- }
-
- /*
- * Login and add the new devices to our port list.
- */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- /* Find a new loop ID to use. */
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
+ fcport->scan_state = QLA_FCPORT_SCAN_NONE;
+
+ /* Login fabric devices that need a login */
+ if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
+ atomic_read(&vha->loop_down_timer) == 0) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ continue;
+ }
+ }
}
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-
- if (vha->vp_idx) {
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- }
- list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
- /* Free all new device structures not processed. */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- list_del(&fcport->list);
- kfree(fcport);
- }
-
if (rval) {
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
- fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
found++;
@@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
if (mb[10] & BIT_1)
fcport->supported_classes |= FC_COS_CLASS3;
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (mb[10] & BIT_7)
+ fcport->flags |=
+ FCF_CONF_COMP_SUPPORTED;
+ }
+
rval = QLA_SUCCESS;
break;
} else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- ha->qla_stats.total_isp_aborts++;
+ vha->qla_stats.total_isp_aborts++;
ql_log(ql_log_info, vha, 0x00af,
"Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
+ unsigned long flags;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
vha->flags.online = 1;
+
+ /*
+ * Process any ATIO queue entries that came in
+ * while we weren't online.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (qla_tgt_mode_enabled(vha))
+ qlt_24xx_process_atio_queue(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
@@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x006b,
- "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
"version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
ql_log(ql_log_warn, vha, 0x006c,
"Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
rval = 1;
}
+ if (!qla_ini_mode_enabled(vha)) {
+ /* Don't enable full login after initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Don't enable LIP full login for initiator */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ }
+
+ qlt_24xx_config_nvram_stage1(vha, nv);
+
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLA2462");
- /* Use alternate WWN? */
+ qlt_24xx_config_nvram_stage2(vha, icb);
+
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+ /* Use alternate WWN? */
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
}
@@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_info, vha, 0x0073,
- "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
"version=0x%x.\n", chksum, nv->id[0],
le16_to_cpu(nv->nvram_version));
ql_log(ql_log_info, vha, 0x0074,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index eac950924497..70dbf53d9e0f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/blkdev.h>
#include <linux/delay.h>
@@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
cflags = 0;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
- sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
- sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
return (cflags);
}
@@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ /* If still no head room then bail out */
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
/* Build command packet */
req->current_outstanding_cmd = handle;
@@ -470,7 +471,7 @@ queuing_error:
/**
* qla2x00_start_iocbs() - Execute the IOCB command
*/
-static void
+void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
return (ret);
}
+/*
+ * qla2x00_issue_marker
+ *
+ * Issue a marker IOCB.
+ * Caller CAN have the hardware lock held, as specified by the ha_locked parameter.
+ * Might release it, then reacquire it.
+ */
+int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
+{
+ if (ha_locked) {
+ if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ } else {
+ if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+
+ return QLA_SUCCESS;
+}
+
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_WRITE_DATA);
- ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_READ_DATA);
- ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
- sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
- sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
}
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
/* Build command packet. */
req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
status |= QDSS_GOT_Q_SPACE;
/* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
@@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
/* Implicit: mbx->mbx10 = 0. */
}
@@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
@@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->port_id[0] = fcport->d_id.b.al_pa;
tsk->port_id[1] = fcport->d_id.b.area;
tsk->port_id[2] = fcport->d_id.b.domain;
- tsk->vp_index = fcport->vp_idx;
+ tsk->vp_index = fcport->vha->vp_idx;
if (flags == TCF_LUN_RESET) {
int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
- els_iocb->vp_index = sp->fcport->vp_idx;
+ els_iocb->vp_index = sp->fcport->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
@@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->handle = sp->handle;
ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- ct_iocb->vp_index = sp->fcport->vp_idx;
+ ct_iocb->vp_index = sp->fcport->vha->vp_idx;
ct_iocb->comp_status = __constant_cpu_to_le16(0);
ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@ sufficient_dsds:
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
ctx = sp->u.scmd.ctx =
mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!ctx) {
@@ -2362,7 +2382,7 @@ sufficient_dsds:
if (!ctx->fcp_cmnd) {
ql_log(ql_log_fatal, vha, 0x3011,
"Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
- goto queuing_error_fcp_cmnd;
+ goto queuing_error;
}
/* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* Build IOCB segments */
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ce42288049b5..6f67a9d4998b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/delay.h>
#include <linux/slab.h>
@@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
"IDC failed to post ACK.\n");
}
+#define LS_UNKNOWN 2
+char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+{
+ static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
+ char *link_speed;
+ int fw_speed = ha->link_data_rate;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ link_speed = link_speeds[0];
+ else if (fw_speed == 0x13)
+ link_speed = link_speeds[6];
+ else {
+ link_speed = link_speeds[LS_UNKNOWN];
+ if (fw_speed < 6)
+ link_speed =
+ link_speeds[fw_speed];
+ }
+
+ return link_speed;
+}
+
/**
* qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
-#define LS_UNKNOWN 2
- static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
- char *link_speed;
uint16_t handle_cnt;
uint16_t cnt, mbx;
uint32_t handles[5];
@@ -454,8 +474,8 @@ skip_rio:
case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
ql_dbg(ql_dbg_async, vha, 0x5008,
"Asynchronous WAKEUP_THRES.\n");
- break;
+ break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@ skip_rio:
break;
case MBA_LOOP_UP: /* Loop Up Event */
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- link_speed = link_speeds[0];
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
ha->link_data_rate = PORT_SPEED_1GB;
- } else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (mb[1] < 6)
- link_speed = link_speeds[mb[1]];
- else if (mb[1] == 0x13)
- link_speed = link_speeds[6];
+ else
ha->link_data_rate = mb[1];
- }
ql_dbg(ql_dbg_async, vha, 0x500a,
- "LOOP UP detected (%s Gbps).\n", link_speed);
+ "LOOP UP detected (%s Gbps).\n",
+ qla2x00_get_link_speed_str(ha));
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -638,6 +652,8 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5010,
"Port unavailable %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x505e,
+ "Link is offline.\n");
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
+
+ qlt_async_event(mb[0], vha, mb);
break;
}
ql_dbg(ql_dbg_async, vha, 0x5012,
"Port database changed %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x505f,
+ "Link is operational (%s Gbps).\n",
+ qla2x00_get_link_speed_str(ha));
/*
* Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@ skip_rio:
qla2x00_mark_all_devices_lost(vha, 1);
+ if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
+ set_bit(SCR_PENDING, &vha->dpc_flags);
+
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+
+ qlt_async_event(mb[0], vha, mb);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -807,6 +833,8 @@ skip_rio:
mb[0], mb[1], mb[2], mb[3]);
}
+ qlt_async_event(mb[0], vha, mb);
+
if (!vha->vp_idx && ha->num_vhosts)
qla2x00_alert_all_vps(rsp, mb);
}
@@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
} else if (iop[0] & BIT_5)
fcport->port_type = FCT_INITIATOR;
+ if (iop[0] & BIT_7)
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+
if (logio->io_parameter[7] || logio->io_parameter[8])
fcport->supported_classes |= FC_COS_CLASS2;
if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (pkt->entry_status != 0) {
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
+
+ (void)qlt_24xx_process_response_error(vha, pkt);
+
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
continue;
@@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
case ELS_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
+ case ABTS_RECV_24XX:
+ /* ensure that the ATIO queue is empty */
+ qlt_24xx_process_atio_queue(vha);
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case NOTIFY_ACK_TYPE:
+ qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ break;
case MARKER_TYPE:
/* Do nothing in this case, this check is to prevent it
* from falling into default case
@@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
case 0x14:
qla24xx_process_response_queue(vha, rsp);
break;
+ case 0x1C: /* ATIO queue updated */
+ qlt_24xx_process_atio_queue(vha);
+ break;
+ case 0x1D: /* ATIO and response queues updated */
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
case 0x14:
qla24xx_process_response_queue(vha, rsp);
break;
+ case 0x1C: /* ATIO queue updated */
+ qlt_24xx_process_atio_queue(vha);
+ break;
+ case 0x1D: /* ATIO and response queues updated */
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@ void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct rsp_que *rsp;
+
+ /*
+ * We need to check that ha->rsp_q_map is valid in case we are called
+ * from a probe failure context.
+ */
+ if (!ha->rsp_q_map || !ha->rsp_q_map[0])
+ return;
+ rsp = ha->rsp_q_map[0];
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b4a23394a7bd..d5ce92c0a8fc 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/delay.h>
#include <linux/gfp.h>
@@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
- "MBX Command timeout for cmd %x.\n", command);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
- "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
- "mb[0] = 0x%x.\n", mb0);
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
/*
@@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101c,
- "Mailbox cmd timeout occured, cmd=0x%x, "
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
"mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
"abort.\n", command, mcp->mb[0],
ha->flags.eeh_busy);
@@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101e,
- "Mailbox cmd timeout occured, cmd=0x%x, "
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
"mb[0]=0x%x. Scheduling ISP abort ",
command, mcp->mb[0]);
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
+ "Entered %s.\n", __func__);
if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
ql_dbg(ql_dbg_mbx, vha, 0x1023,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+ "Done %s.\n", __func__);
}
return rval;
@@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
if (IS_FWI2_CAPABLE(ha)) {
- ql_dbg(ql_dbg_mbx, vha, 0x1027,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
"Done exchanges=%x.\n", mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+ "Done %s.\n", __func__);
}
}
@@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_attributes_h = mcp->mb[15];
ha->fw_attributes_ext[0] = mcp->mb[16];
ha->fw_attributes_ext[1] = mcp->mb[17];
- ql_dbg(ql_dbg_mbx, vha, 0x1139,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
"%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[15], mcp->mb[6]);
} else
- ql_dbg(ql_dbg_mbx, vha, 0x112f,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
"%s: FwAttributes [Upper] invalid, MB6:%04x\n",
__func__, mcp->mb[6]);
}
@@ -576,7 +579,8 @@ failed:
ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
+ "Done %s.\n", __func__);
}
return rval;
}
@@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
fwopts[2] = mcp->mb[2];
fwopts[3] = mcp->mb[3];
- ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
+ "Done %s.\n", __func__);
}
return rval;
@@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
"Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
+ "Done %s.\n", __func__);
}
return rval;
@@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
+ "Done %s.\n", __func__);
}
return rval;
@@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_VERIFY_CHECKSUM;
mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
"Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
(mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
+ "Done %s.\n", __func__);
}
return rval;
@@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_IOCB_COMMAND_A64;
mcp->mb[1] = 0;
@@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
- ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
+ "Done %s.\n", __func__);
}
return rval;
@@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp)
struct req_que *req = vha->req;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
+ "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
+ "Done %s.\n", __func__);
}
return rval;
@@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
l = l;
vha = fcport->vha;
- ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
+ "Entered %s.\n", __func__);
req = vha->hw->req_q_map[0];
rsp = req->rsp;
@@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
+ "Failed=%x.\n", rval);
}
/* Issue marker IOCB. */
@@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
ql_dbg(ql_dbg_mbx, vha, 0x1040,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
+ "Done %s.\n", __func__);
}
return rval;
@@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
vha = fcport->vha;
- ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
+ "Entered %s.\n", __func__);
req = vha->hw->req_q_map[0];
rsp = req->rsp;
@@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
ql_dbg(ql_dbg_mbx, vha, 0x1044,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
+ "Done %s.\n", __func__);
if (IS_CNA_CAPABLE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RETRY_COUNT;
mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
*tov = ratov;
}
- ql_dbg(ql_dbg_mbx, vha, 0x104b,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
"Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
}
@@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
+ "Entered %s.\n", __func__);
if (IS_QLA82XX(ha) && ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_node_name_list
+ * Issue the get node name list mailbox command, kmalloc()
+ * the resulting list and return it. The caller must kfree() it.
+ *
+ * Input:
+ * vha = adapter state pointer.
+ * out_data = resulting list
+ * out_len = length of the resulting list
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_port_24xx_data *list = NULL;
+ void *pmap;
+ mbx_cmd_t mc;
+ dma_addr_t pmap_dma;
+ ulong dma_size;
+ int rval, left;
+
+ left = 1;
+ while (left > 0) {
+ dma_size = left * sizeof(*list);
+ pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
+ &pmap_dma, GFP_KERNEL);
+ if (!pmap) {
+ ql_log(ql_log_warn, vha, 0x113f,
+ "%s(%ld): DMA Alloc failed of %ld\n",
+ __func__, vha->host_no, dma_size);
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto out;
+ }
+
+ mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
+ mc.mb[1] = BIT_1 | BIT_3;
+ mc.mb[2] = MSW(pmap_dma);
+ mc.mb[3] = LSW(pmap_dma);
+ mc.mb[6] = MSW(MSD(pmap_dma));
+ mc.mb[7] = LSW(MSD(pmap_dma));
+ mc.mb[8] = dma_size;
+ mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
+ mc.in_mb = MBX_0|MBX_1;
+ mc.tov = 30;
+ mc.flags = MBX_DMA_IN;
+
+ rval = qla2x00_mailbox_command(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
+ (mc.mb[1] == 0xA)) {
+ left += le16_to_cpu(mc.mb[2]) /
+ sizeof(struct qla_port_24xx_data);
+ goto restart;
+ }
+ goto out_free;
+ }
+
+ left = 0;
+
+ list = kzalloc(dma_size, GFP_KERNEL);
+ if (!list) {
+ ql_log(ql_log_warn, vha, 0x1140,
+ "%s(%ld): failed to allocate node names list "
+ "structure.\n", __func__, vha->host_no);
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto out_free;
+ }
+
+ memcpy(list, pmap, dma_size);
+restart:
+ dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
}
+ *out_data = list;
+ *out_len = dma_size;
+
+out:
+ return rval;
+
+out_free:
+ dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
return rval;
}
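As the comment block above states, qla2x00_get_node_name_list() kmalloc()s the returned buffer and the caller owns it. The following kernel-context sketch shows a hypothetical caller honoring that contract; the walk over struct qla_port_24xx_data entries is only indicated, since that structure's fields are not shown in this patch.

/* Hypothetical caller: fetch the node name list, use it, release it. */
static void qlt_example_scan_node_names(scsi_qla_host_t *vha)
{
	void *list = NULL;
	int len = 0;

	if (qla2x00_get_node_name_list(vha, &list, &len) != QLA_SUCCESS)
		return;

	/* ... walk 'len' bytes of struct qla_port_24xx_data entries ... */

	kfree(list);	/* the helper kmalloc()ed it; the caller must free it */
}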
@@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
dma_addr_t pd_dma;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
+ "Entered %s.\n", __func__);
pd24 = NULL;
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
fcport->port_type = FCT_INITIATOR;
else
fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd24->prli_svc_param_word_3[0] & BIT_7)
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
} else {
uint64_t zero = 0;
@@ -1378,7 +1502,8 @@ gpd_error_out:
"Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_PORT_NAME;
mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
name[7] = LSB(mcp->mb[7]);
}
- ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+ "Entered %s.\n", __func__);
if (IS_CNA_CAPABLE(vha->hw)) {
/* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
+ "Entered %s.\n", __func__);
- ql_dbg(ql_dbg_mbx, vha, 0x105e,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
"Retry cnt=%d ratov=%d total tov=%d.\n",
vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
@@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
+ "Entered %s.\n", __func__);
if (ha->flags.cpu_affinity_enabled)
req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
break;
}
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
+ "Done %s.\n", __func__);
iop[0] = le32_to_cpu(lg->io_parameter[0]);
@@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mb[10] |= BIT_0; /* Class 2. */
if (lg->io_parameter[9] || lg->io_parameter[10])
mb[10] |= BIT_1; /* Class 3. */
+ if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+ mb[10] |= BIT_7; /* Confirmed Completion
+ * Allowed
+ */
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
+ "Entered %s.\n", __func__);
if (IS_FWI2_CAPABLE(ha))
return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
+ "Done %s.\n", __func__);
}
return (rval);
@@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
+ "Entered %s.\n", __func__);
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
@@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
le32_to_cpu(lg->io_parameter[1]));
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
"Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
+ "Entered %s.\n", __func__);
if (id_list == NULL)
return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
} else {
*entries = mcp->mb[1];
- ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
ql_dbg(ql_dbg_mbx, vha, 0x107d,
"Failed mb[0]=%x.\n", mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x107e,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
"Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
"mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
dma_addr_t pmap_dma;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
+ "Entered %s.\n", __func__);
pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
@@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1081,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
"mb0/mb1=%x/%X FC/AL position map size (%x).\n",
mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
uint32_t *siter, *diter, dwords;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_STATUS;
mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
rval = QLA_FUNCTION_FAILED;
} else {
/* Copy over data -- firmware data is LE. */
- ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
+ "Done %s.\n", __func__);
dwords = offsetof(struct link_statistics, unused1) / 4;
siter = diter = &stats->link_fail_cnt;
while (dwords--)
@@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mbx_cmd_t *mcp = &mc;
uint32_t *siter, *diter, dwords;
- ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
"Failed mb[0]=%x.\n", mcp->mb[0]);
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
+ "Done %s.\n", __func__);
/* Copy over data -- firmware data is LE. */
dwords = sizeof(struct link_statistics) / 4;
siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
- ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
+ "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp)
abt->port_id[0] = fcport->d_id.b.al_pa;
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
- abt->vp_index = fcport->vp_idx;
+ abt->vp_index = fcport->vha->vp_idx;
abt->req_que_no = cpu_to_le16(req->id);
@@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp)
le16_to_cpu(abt->nport_handle));
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ha = vha->hw;
req = vha->req;
- ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
+ "Entered %s.\n", __func__);
if (ha->flags.cpu_affinity_enabled)
rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
- tsk->p.tsk.vp_index = fcport->vp_idx;
+ tsk->p.tsk.vp_index = fcport->vha->vp_idx;
if (type == TCF_LUN_RESET) {
int_to_scsilun(l, &tsk->p.tsk.lun);
host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
} else if (le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID) {
if (le32_to_cpu(sts->rsp_data_len) < 4) {
- ql_dbg(ql_dbg_mbx, vha, 0x1097,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
"Ignoring inconsistent data length -- not enough "
"response info (%d).\n",
le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ql_dbg(ql_dbg_mbx, vha, 0x1099,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SERDES_PARAMS;
mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_STOP_FIRMWARE;
mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (mcp->mb[0] == MBS_INVALID_COMMAND)
rval = QLA_INVALID_COMMAND;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
+ "Entered %s.\n", __func__);
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
!IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
+ "Done %s.\n", __func__);
if (mb)
memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
+ "Done %s.\n", __func__);
if (wr)
*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
+ "Entered %s.\n", __func__);
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
+ "Done %s.\n", __func__);
if (port_speed)
*port_speed = mcp->mb[3];
}
@@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
+ "Entered %s.\n", __func__);
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
}
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x10b4,
+ "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp;
unsigned long flags;
- ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
+ "Entered %s.\n", __func__);
if (rptid_entry->entry_status != 0)
return;
if (rptid_entry->format == 0) {
- ql_dbg(ql_dbg_mbx, vha, 0x10b7,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
"VPs acquired %d.\n",
MSB(le16_to_cpu(rptid_entry->vp_count)),
LSB(le16_to_cpu(rptid_entry->vp_count)));
- ql_dbg(ql_dbg_mbx, vha, 0x10b8,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
} else if (rptid_entry->format == 1) {
vp_idx = LSB(stat);
- ql_dbg(ql_dbg_mbx, vha, 0x10b9,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
"port id %02x%02x%02x.\n", vp_idx, MSB(stat),
rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
/* This can be called by the parent */
- ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
+ "Entered %s.\n", __func__);
vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
if (!vpmod) {
@@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
vpmod->vp_count = 1;
vpmod->vp_index1 = vha->vp_idx;
vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+
+ qlt_modify_vp_config(vha, vpmod);
+
memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
rval = QLA_FUNCTION_FAILED;
} else {
/* EMPTY */
- ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
+ "Done %s.\n", __func__);
fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
}
dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
int vp_index = vha->vp_idx;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- ql_dbg(ql_dbg_mbx, vha, 0x10c1,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
"Entered %s enabling index %d.\n", __func__, vp_index);
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
le16_to_cpu(vce->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
-
- /*
- * This command is implicitly executed by firmware during login for the
- * physical hosts
- */
- if (vp_idx == 0)
- return QLA_FUNCTION_FAILED;
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
+ "Entered %s.\n", __func__);
if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
ql_dbg(ql_dbg_mbx, vha, 0x1008,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
unsigned long flags;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
+ "Entered %s.\n", __func__);
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (mn == NULL) {
@@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
status[0] = le16_to_cpu(mn->p.rsp.comp_status);
status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
le16_to_cpu(mn->p.rsp.failure_code) : 0;
- ql_dbg(ql_dbg_mbx, vha, 0x10ce,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
"cs=%x fc=%x.\n", status[0], status[1]);
if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
retry = 1;
}
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d0,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
"Firmware updated to %x.\n",
le32_to_cpu(mn->p.rsp.fw_ver));
@@ -3316,9 +3501,11 @@ verify_done:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x10d1,
+ "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
ql_dbg(ql_dbg_mbx, vha, 0x10d4,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
ql_dbg(ql_dbg_mbx, vha, 0x10d7,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_IDC_ACK;
memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x10da,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
+ "Entered %s.\n", __func__);
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
+ "Done %s.\n", __func__);
*sector_size = mcp->mb[1];
}
@@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_RESTART_MPI_FW;
mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
ql_dbg(ql_dbg_mbx, vha, 0x10e9,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
ql_dbg(ql_dbg_mbx, vha, 0x10ec,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
+ "Entered %s.\n", __func__);
if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
+ "Done %s.\n", __func__);
*actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
+ "Entered %s.\n", __func__);
if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
ql_dbg(ql_dbg_mbx, vha, 0x10f5,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
+ "Done %s.\n", __func__);
*data = mcp->mb[3] << 16 | mcp->mb[2];
}
@@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
uint32_t iter_cnt = 0x1;
- ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
"mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
mcp->mb[3], mcp->mb[18], mcp->mb[19]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
+ "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
+ "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10fd,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
"Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
if (rval != QLA_SUCCESS)
ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
else
- ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
+ "Done %s.\n", __func__);
return rval;
}
@@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
ql_dbg(ql_dbg_mbx, vha, 0x1101,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
rval = QLA_SUCCESS;
- ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
+ "Entered %s.\n", __func__);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
@@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x1104,
"Failed=%x mb[0]=%x.\n", rval, mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1107,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+ "Done %s.\n", __func__);
if (mcp->mb[1] != 0x7)
ha->link_data_rate = mcp->mb[1];
}
@@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
+ "Entered %s.\n", __func__);
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
/* Copy all bits to preserve original value */
memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
- ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
+ "Done %s.\n", __func__);
}
return rval;
}
@@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_PORT_CONFIG;
/* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x110d,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else
- ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
+ "Done %s.\n", __func__);
return rval;
}
@@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+ "Entered %s.\n", __func__);
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
uint8_t byte;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
+ "Entered %s.\n", __func__);
/* Integer part */
rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
}
*frac = (byte >> 6) * 25;
- ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
+ "Done %s.\n", __func__);
fail:
return rval;
}
@@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1016,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
+ "Entered %s.\n", __func__);
if (!IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x100c,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
int rval = QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
(mcp->mb[1] << 16) | mcp->mb[0],
(mcp->mb[3] << 16) | mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
+ "Done %s.\n", __func__);
ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
if (!ha->md_template_size) {
ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
int rval = QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
+ "Entered %s.\n", __func__);
ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
((mcp->mb[1] << 16) | mcp->mb[0]),
((mcp->mb[3] << 16) | mcp->mb[2]));
} else
- ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
+ "Done %s.\n", __func__);
return rval;
}
@@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
+ "Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
ql_dbg(ql_dbg_mbx, vha, 0x1134,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
+ "Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
led_cfg[4] = mcp->mb[5];
led_cfg[5] = mcp->mb[6];
}
- ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
if (!IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1127,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
"Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
ql_dbg(ql_dbg_mbx, vha, 0x1128,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1129,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
"Done %s.\n", __func__);
}
@@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
if (!IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_WRITE_REMOTE_REG;
mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
ql_dbg(ql_dbg_mbx, vha, 0x1131,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1132,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
"Done %s.\n", __func__);
}
@@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
mbx_cmd_t *mcp = &mc;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- ql_dbg(ql_dbg_mbx, vha, 0x113b,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
"Implicit LOGO Unsupported.\n");
return QLA_FUNCTION_FAILED;
}
- ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
+ "Entering %s.\n", __func__);
/* Perform Implicit LOGO. */
mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
ql_dbg(ql_dbg_mbx, vha, 0x113d,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
else
- ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
+ "Done %s.\n", __func__);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa062a1b0ca4..3e8b32419e68 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,6 +6,7 @@
*/
#include "qla_def.h"
#include "qla_gbl.h"
+#include "qla_target.h"
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
@@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+
+ qlt_update_vp_map(vha, SET_VP_IDX);
+
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
}
list_del(&vha->list);
+ qlt_update_vp_map(vha, RESET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_vport, vha, 0xa001,
"Marking port dead, loop_id=0x%04x : %x.\n",
- fcport->loop_id, fcport->vp_idx);
+ fcport->loop_id, fcport->vha->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ /* Remove port id from vp target map */
+ qlt_update_vp_map(vha, RESET_AL_PA);
+
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
- ql_dbg(ql_dbg_dpc, vha, 0x4012,
- "Entering %s.\n", __func__);
- ql_dbg(ql_dbg_dpc, vha, 0x4013,
- "vp_flags: 0x%lx.\n", vha->vp_flags);
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
+ "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
@@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
- ql_dbg(ql_dbg_dpc, vha, 0x401c,
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
"Exiting %s.\n", __func__);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de722a933438..caf627ba7fa8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
}
/* Offset in flash = lower 16 bits
- * Number of enteries = upper 16 bits
+ * Number of entries = upper 16 bits
*/
offset = n & 0xffffU;
n = (n >> 16) & 0xffffU;
- /* number of addr/value pair should not exceed 1024 enteries */
+ /* number of addr/value pair should not exceed 1024 entries */
if (n >= 1024) {
ql_log(ql_log_fatal, vha, 0x0071,
"Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- ql_log(ql_log_info, NULL, 0xb054,
+ ql_log(ql_log_info, NULL, 0xb053,
"%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0x00a1,
- "Firmware loaded successully from flash.\n");
+ "Firmware loaded successfully from flash.\n");
return QLA_SUCCESS;
} else {
ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@ try_blob_fw:
blob = ha->hablob = qla2x00_request_firmware(vha);
if (!blob) {
ql_log(ql_log_fatal, vha, 0x00a3,
- "Firmware image not preset.\n");
+ "Firmware image not present.\n");
goto fw_load_failed;
}
@@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
if (!optrom) {
ql_log(ql_log_warn, vha, 0xb01b,
"Unable to allocate memory "
- "for optron burst write (%x KB).\n",
+ "for optrom burst write (%x KB).\n",
OPTROM_BURST_SIZE / 1024);
}
}
@@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
* changing the state to DEV_READY
*/
ql_log(ql_log_info, vha, 0xb023,
- "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
- ql_log(ql_log_info, vha, 0xb024,
- "DRV_ACTIVE:%d DRV_STATE:%d.\n",
+ "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
+ "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
drv_active, drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
if (ql2xmdenable) {
if (qla82xx_md_collect(vha))
ql_log(ql_log_warn, vha, 0xb02c,
- "Not able to collect minidump.\n");
+ "Minidump not collected.\n");
} else
ql_log(ql_log_warn, vha, 0xb04f,
"Minidump disabled.\n");
@@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
"Firmware version differs "
"Previous version: %d:%d:%d - "
"New version: %d:%d:%d\n",
+ fw_major_version, fw_minor_version,
+ fw_subminor_version,
ha->fw_major_version,
ha->fw_minor_version,
- ha->fw_subminor_version,
- fw_major_version, fw_minor_version,
- fw_subminor_version);
+ ha->fw_subminor_version);
/* Release MiniDump resources */
qla82xx_md_free(vha);
/* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@ exit:
return rval;
}
+static int qla82xx_check_temp(scsi_qla_host_t *vha)
+{
+ uint32_t temp, temp_state, temp_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql_log(ql_log_warn, vha, 0x600e,
+ "Device temperature %d degrees C exceeds "
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ return 1;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ ql_log(ql_log_warn, vha, 0x600f,
+ "Device temperature %d degrees C exceeds "
+ "operating range. Immediate action needed.\n",
+ temp_val);
+ }
+ return 0;
+}
+
void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
/* don't poll if reset is going on */
if (!ha->flags.isp82xx_reset_hdlr_active) {
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ if (qla82xx_check_temp(vha)) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla82xx_clear_pending_mbx(vha);
+ } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6001,
"Adapter reset needed.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6002,
"Quiescent needed.\n");
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
- qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
+ if (ha->flags.isp82xx_no_md_cap) {
+ ql_log(ql_log_warn, vha, 0xb054,
+ "Forced reset from application, "
+ "ignore minidump capture\n");
+ ha->flags.isp82xx_no_md_cap = 0;
+ goto md_failed;
+ }
+
if (qla82xx_validate_template_chksum(vha)) {
ql_log(ql_log_info, vha, 0xb039,
"Template checksum validation error\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 4ac50e274661..6eb210e3cc63 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@@ -561,7 +562,6 @@
#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
-#define PCIE_CHICKEN3 (0x120c8)
#define PCIE_SETUP_FUNCTION (0x12040)
#define PCIE_SETUP_FUNCTION2 (0x12048)
@@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7db803377c64..6d1d873a20e2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,12 +13,13 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
-
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
+#include "qla_target.h"
+
/*
* Driver version
*/
@@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
*/
int ql_errlev = ql_log_all;
+int ql2xenableclass2;
+module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xenableclass2,
+ "Specify if Class 2 operations are supported from the very "
+ "beginning. Default is 0 - class 2 not supported.");
+
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
.max_sectors = 0xFFFF,
.shost_attrs = qla2x00_host_attrs,
+
+ .supported_mode = MODE_INITIATOR,
};
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
/* -------------------------------------------------------------------------- */
-static int qla2x00_alloc_queues(struct qla_hw_data *ha)
+static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+ struct rsp_que *rsp)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
+ /*
+ * Make sure we record at least the request and response queue zero in
+ * case we need to free them if part of the probe fails.
+ */
+ ha->rsp_q_map[0] = rsp;
+ ha->req_q_map[0] = req;
set_bit(0, ha->rsp_qid_map);
set_bit(0, ha->req_qid_map);
return 1;
@@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
- ql_dbg(ql_dbg_io, vha, 0x3001,
+ ql_dbg(ql_dbg_aer, vha, 0x9010,
"PCI Channel IO permanent failure, exiting "
"cmd=%p.\n", cmd);
cmd->result = DID_NO_CONNECT << 16;
} else {
- ql_dbg(ql_dbg_io, vha, 0x3002,
+ ql_dbg(ql_dbg_aer, vha, 0x9011,
"EEH_Busy, Requeuing the cmd=%p.\n", cmd);
cmd->result = DID_REQUEUE << 16;
}
@@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
rval = fc_remote_port_chkready(rport);
if (rval) {
cmd->result = rval;
- ql_dbg(ql_dbg_io, vha, 0x3003,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
"fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
cmd, rval);
goto qc24_fail_command;
@@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
ret = FAILED;
ql_log(ql_log_info, vha, 0x8012,
- "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
+ "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
+ ha->tgt.enable_class_2 = ql2xenableclass2;
/* Clear our data area */
ha->bars = bars;
@@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
+que_init:
+ /* Alloc arrays of request and response ring ptrs */
+ if (!qla2x00_alloc_queues(ha, req, rsp)) {
+ ql_log(ql_log_fatal, base_vha, 0x003d,
+ "Failed to allocate memory for queue pointers..."
+ "aborting.\n");
+ goto probe_init_failed;
+ }
+
+ qlt_probe_one_stage1(base_vha, ha);
+
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
- /* Alloc arrays of request and response ring ptrs */
-que_init:
- if (!qla2x00_alloc_queues(ha)) {
- ql_log(ql_log_fatal, base_vha, 0x003d,
- "Failed to allocate memory for queue pointers.. aborting.\n");
- goto probe_init_failed;
- }
-
- ha->rsp_q_map[0] = rsp;
- ha->req_q_map[0] = req;
+ /* Assign back pointers */
rsp->req = req;
req->rsp = rsp;
- set_bit(0, ha->req_qid_map);
- set_bit(0, ha->rsp_qid_map);
+
/* FWI2-capable only. */
req->req_q_in = &ha->iobase->isp24.req_q_in;
req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@ que_init:
ql_dbg(ql_dbg_init, base_vha, 0x00ee,
"DPC thread started successfully.\n");
+ /*
+ * If we're not coming up in initiator mode, we might sit for
+ * a while without waking up the dpc thread, which leads to a
+ * stuck process warning. So just kick the dpc once here and
+ * let the kthread start (and go back to sleep in qla2x00_do_dpc).
+ */
+ qla2xxx_wake_dpc(base_vha);
+
skip_dpc:
list_add_tail(&base_vha->list, &ha->vp_list);
base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@ skip_dpc:
ql_dbg(ql_dbg_init, base_vha, 0x00f2,
"Init done and hba is online.\n");
- scsi_scan_host(host);
+ if (qla_ini_mode_enabled(base_vha))
+ scsi_scan_host(host);
+ else
+ ql_dbg(ql_dbg_init, base_vha, 0x0122,
+ "skipping scsi_scan_host() for non-initiator port\n");
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2577,11 +2609,17 @@ skip_dpc:
base_vha->host_no,
ha->isp_ops->fw_version_str(base_vha, fw_str));
+ qlt_add_target(ha, base_vha);
+
return 0;
probe_init_failed:
qla2x00_free_req_que(ha, req);
+ ha->req_q_map[0] = NULL;
+ clear_bit(0, ha->req_qid_map);
qla2x00_free_rsp_que(ha, rsp);
+ ha->rsp_q_map[0] = NULL;
+ clear_bit(0, ha->rsp_qid_map);
ha->max_req_queues = ha->max_rsp_queues = 0;
probe_failed:
@@ -2621,6 +2659,22 @@ probe_out:
}
static void
+qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct task_struct *t = ha->dpc_thread;
+
+ if (ha->dpc_thread == NULL)
+ return;
+ /*
+ * qla2xxx_wake_dpc checks for ->dpc_thread
+ * so we need to zero it out.
+ */
+ ha->dpc_thread = NULL;
+ kthread_stop(t);
+}
+
+static void
qla2x00_shutdown(struct pci_dev *pdev)
{
scsi_qla_host_t *vha;
@@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
struct qla_hw_data *ha;
unsigned long flags;
+ /*
+ * If the PCI device is disabled, that means the probe failed and any
+ * resources should have been cleaned up on probe exit.
+ */
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
+ ha->flags.host_shutting_down = 1;
+
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->dpc_thread = NULL;
kthread_stop(t);
}
+ qlt_remove_target(ha, base_vha);
qla2x00_free_sysfs_attr(base_vha);
@@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (vha->timer_active)
qla2x00_stop_timer(vha);
- /* Kill the kernel thread for this host */
- if (ha->dpc_thread) {
- struct task_struct *t = ha->dpc_thread;
-
- /*
- * qla2xxx_wake_dpc checks for ->dpc_thread
- * so we need to zero it out.
- */
- ha->dpc_thread = NULL;
- kthread_stop(t);
- }
+ qla2x00_stop_dpc_thread(vha);
qla25xx_delete_queues(vha);
@@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
spin_unlock_irqrestore(vha->host->host_lock, flags);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
- } else
+ } else {
fc_remote_port_delete(rport);
+ qlt_fc_port_deleted(vha, fcport);
+ }
}
/*
@@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
int do_login, int defer)
{
if (atomic_read(&fcport->state) == FCS_ONLINE &&
- vha->vp_idx == fcport->vp_idx) {
+ vha->vp_idx == fcport->vha->vp_idx) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport, defer);
}
@@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
+ if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
/*
@@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
if (defer)
qla2x00_schedule_rport_del(vha, fcport, defer);
- else if (vha->vp_idx == fcport->vp_idx)
+ else if (vha->vp_idx == fcport->vha->vp_idx)
qla2x00_schedule_rport_del(vha, fcport, defer);
}
}
@@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->init_cb)
goto fail;
+ if (qlt_mem_alloc(ha) < 0)
+ goto fail_free_init_cb;
+
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
- goto fail_free_init_cb;
+ goto fail_free_tgt_mem;
ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@ fail_free_gid_list:
ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
+fail_free_tgt_mem:
+ qlt_mem_free(ha);
fail_free_init_cb:
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
ha->init_cb_dma);
@@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->ctx_mempool)
mempool_destroy(ha->ctx_mempool);
+ qlt_mem_free(ha);
+
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
+
+ ha->tgt.atio_ring = NULL;
+ ha->tgt.atio_dma = 0;
+ ha->tgt.tgt_vp_map = NULL;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
ha->dpc_active = 1;
- ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
- "DPC handler waking up.\n");
- ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
- "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+ "DPC handler waking up, dpc_flags=0x%lx.\n",
+ base_vha->dpc_flags);
qla2x00_do_work(base_vha);
@@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
}
+ if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
+ int ret;
+ ret = qla2x00_send_change_request(base_vha, 0x3, 0);
+ if (ret != QLA_SUCCESS)
+ ql_log(ql_log_warn, base_vha, 0x121,
+ "Failed to enable receiving of RSCN "
+ "requests: 0x%x.\n", ret);
+ clear_bit(SCR_PENDING, &base_vha->dpc_flags);
+ }
+
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
@@ -4122,8 +4198,7 @@ qla2x00_release_firmware(void)
mutex_lock(&qla_fw_lock);
for (idx = 0; idx < FW_BLOBS; idx++)
- if (qla_fw_blobs[idx].fw)
- release_firmware(qla_fw_blobs[idx].fw);
+ release_firmware(qla_fw_blobs[idx].fw);
mutex_unlock(&qla_fw_lock);
}
@@ -4458,6 +4533,21 @@ qla2x00_module_init(void)
return -ENOMEM;
}
+ /* Initialize target kmem_cache and mem_pools */
+ ret = qlt_init();
+ if (ret < 0) {
+ kmem_cache_destroy(srb_cachep);
+ return ret;
+ } else if (ret > 0) {
+ /*
+		 * If initiator mode is explicitly disabled by qlt_init(),
+ * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
+ * performing scsi_scan_target() during LOOP UP event.
+ */
+ qla2xxx_transport_functions.disable_target_scan = 1;
+ qla2xxx_transport_vport_functions.disable_target_scan = 1;
+ }
+
/* Derive version string. */
strcpy(qla2x00_version_str, QLA2XXX_VERSION);
if (ql2xextended_error_logging)
@@ -4469,6 +4559,7 @@ qla2x00_module_init(void)
kmem_cache_destroy(srb_cachep);
ql_log(ql_log_fatal, NULL, 0x0002,
"fc_attach_transport failed...Failing load!.\n");
+ qlt_exit();
return -ENODEV;
}
@@ -4482,6 +4573,7 @@ qla2x00_module_init(void)
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
fc_release_transport(qla2xxx_transport_template);
ql_log(ql_log_fatal, NULL, 0x0004,
"fc_attach_transport vport failed...Failing load!.\n");
@@ -4493,6 +4585,7 @@ qla2x00_module_init(void)
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4512,6 +4605,7 @@ qla2x00_module_exit(void)
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
if (ctx_cachep)
kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 000000000000..77759c78cc21
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,4973 @@
+/*
+ * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
+ *
+ * based on qla2x00t.c code:
+ *
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2006 - 2010 ID7 Ltd.
+ *
+ * Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+
+static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
+module_param(qlini_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(qlini_mode,
+ "Determines when initiator mode will be enabled. Possible values: "
+ "\"exclusive\" - initiator mode will be enabled on load, "
+	"disabled when target mode is enabled, and re-enabled when "
+	"target mode is disabled; "
+ "\"disabled\" - initiator mode will never be enabled; "
+ "\"enabled\" (default) - initiator mode will always stay enabled.");
+
+static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+
+/*
+ * From scsi/fc/fc_fcp.h
+ */
+enum fcp_resp_rsp_codes {
+ FCP_TMF_CMPL = 0,
+ FCP_DATA_LEN_INVALID = 1,
+ FCP_CMND_FIELDS_INVALID = 2,
+ FCP_DATA_PARAM_MISMATCH = 3,
+ FCP_TMF_REJECTED = 4,
+ FCP_TMF_FAILED = 5,
+ FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * fc_pri_ta from scsi/fc/fc_fcp.h
+ */
+#define FCP_PTA_SIMPLE 0 /* simple task attribute */
+#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCP_PTA_ORDERED 2 /* ordered task attribute */
+#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
+#define FCP_PTA_MASK 7 /* mask for task attribute field */
+#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
+
+/*
+ * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
+ * must be called under the HW lock and may unlock/relock it internally.
+ * This is not an issue, because in the current implementation, at the time
+ * those functions are called:
+ *
+ * - Either the context is IRQ and only the IRQ handler can modify HW data,
+ * including rings related fields,
+ *
+ * - Or access to target mode variables from struct qla_tgt doesn't
+ * cross those functions' boundaries, except tgt_stop, which is
+ * additionally protected by irq_cmd_count.
+ */
+/* Predefs for callbacks handed to qla2xxx LLD */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
+ struct atio_from_isp *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ int fn, void *iocb, int flags);
+static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+ *cmd, struct atio_from_isp *atio, int ha_locked);
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
+ struct qla_tgt_srr_imm *imm, int ha_lock);
+/*
+ * Global Variables
+ */
+static struct kmem_cache *qla_tgt_cmd_cachep;
+static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static mempool_t *qla_tgt_mgmt_cmd_mempool;
+static struct workqueue_struct *qla_tgt_wq;
+static DEFINE_MUTEX(qla_tgt_mutex);
+static LIST_HEAD(qla_tgt_glist);
+
+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+ struct qla_tgt *tgt,
+ const uint8_t *port_name)
+{
+ struct qla_tgt_sess *sess;
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ if (!memcmp(sess->port_name, port_name, WWN_SIZE))
+ return sess;
+ }
+
+ return NULL;
+}
+
+/* Might release hw lock, then reacquire!! */
+static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
+{
+ /* Send marker if required */
+ if (unlikely(vha->marker_needed != 0)) {
+ int rc = qla2x00_issue_marker(vha, vha_locked);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03d,
+ "qla_target(%d): issue_marker() failed\n",
+ vha->vp_idx);
+ }
+ return rc;
+ }
+ return QLA_SUCCESS;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+ uint8_t *d_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t vp_idx;
+
+ if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
+ return NULL;
+
+ if (vha->d_id.b.al_pa == d_id[2])
+ return vha;
+
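+	/* Different al_pa: resolve the owning NPIV host through the per-al_pa vp map */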
+ BUG_ON(ha->tgt.tgt_vp_map == NULL);
+ vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
+ uint16_t vp_idx)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == vp_idx)
+ return vha;
+
+ BUG_ON(ha->tgt.tgt_vp_map == NULL);
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
+void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
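+	/* Route the packet to the vha that owns it (base port or NPIV port) before handling */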
+ switch (atio->u.raw.entry_type) {
+ case ATIO_TYPE7:
+ {
+ struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+ atio->u.isp24.fcp_hdr.d_id);
+ if (unlikely(NULL == host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03e,
+ "qla_target(%d): Received ATIO_TYPE7 "
+ "with unknown d_id %x:%x:%x\n", vha->vp_idx,
+ atio->u.isp24.fcp_hdr.d_id[0],
+ atio->u.isp24.fcp_hdr.d_id[1],
+ atio->u.isp24.fcp_hdr.d_id[2]);
+ break;
+ }
+ qlt_24xx_atio_pkt(host, atio);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct imm_ntfy_from_isp *entry =
+ (struct imm_ntfy_from_isp *)atio;
+
+ if ((entry->u.isp24.vp_index != 0xFF) &&
+ (entry->u.isp24.nport_handle != 0xFFFF)) {
+ host = qlt_find_host_by_vp_idx(vha,
+ entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03f,
+ "qla_target(%d): Received "
+ "ATIO (IMMED_NOTIFY_TYPE) "
+ "with unknown vp_index %d\n",
+ vha->vp_idx, entry->u.isp24.vp_index);
+ break;
+ }
+ }
+ qlt_24xx_atio_pkt(host, atio);
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe040,
+ "qla_target(%d): Received unknown ATIO atio "
+ "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+ break;
+ }
+
+ return;
+}
+
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+{
+ switch (pkt->entry_type) {
+ case CTIO_TYPE7:
+ {
+ struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe041,
+ "qla_target(%d): Response pkt (CTIO_TYPE7) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct imm_ntfy_from_isp *entry =
+ (struct imm_ntfy_from_isp *)pkt;
+
+ host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe042,
+ "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->u.isp24.vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case NOTIFY_ACK_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
+ if (0xFF != entry->u.isp24.vp_index) {
+ host = qlt_find_host_by_vp_idx(vha,
+ entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe043,
+ "qla_target(%d): Response "
+ "pkt (NOTIFY_ACK_TYPE) "
+ "received, with unknown "
+ "vp_index %d\n", vha->vp_idx,
+ entry->u.isp24.vp_index);
+ break;
+ }
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case ABTS_RECV_24XX:
+ {
+ struct abts_recv_from_24xx *entry =
+ (struct abts_recv_from_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe044,
+ "qla_target(%d): Response pkt "
+ "(ABTS_RECV_24XX) received, with unknown "
+ "vp_index %d\n", vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case ABTS_RESP_24XX:
+ {
+ struct abts_resp_to_24xx *entry =
+ (struct abts_resp_to_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe045,
+ "qla_target(%d): Response pkt "
+ "(ABTS_RECV_24XX) received, with unknown "
+ "vp_index %d\n", vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ default:
+ qlt_response_pkt(vha, pkt);
+ break;
+ }
+
+}
+
+static void qlt_free_session_done(struct work_struct *work)
+{
+ struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+ free_work);
+ struct qla_tgt *tgt = sess->tgt;
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ BUG_ON(!tgt);
+ /*
+ * Release the target session for FC Nexus from fabric module code.
+ */
+ if (sess->se_sess != NULL)
+ ha->tgt.tgt_ops->free_session(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ "Unregistration of sess %p finished\n", sess);
+
+ kfree(sess);
+ /*
+ * We need to protect against race, when tgt is freed before or
+	 * We need to protect against the race where tgt is freed before or
+	 * inside wake_up()
+ tgt->sess_count--;
+ if (tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+void qlt_unreg_sess(struct qla_tgt_sess *sess)
+{
+ struct scsi_qla_host *vha = sess->vha;
+
+ vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+ list_del(&sess->sess_list_entry);
+ if (sess->deleted)
+ list_del(&sess->del_list_entry);
+
+ INIT_WORK(&sess->free_work, qlt_free_session_done);
+ schedule_work(&sess->free_work);
+}
+EXPORT_SYMBOL(qlt_unreg_sess);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ uint32_t unpacked_lun, lun = 0;
+ uint16_t loop_id;
+ int res = 0;
+ struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+
+ loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+ if (loop_id == 0xFFFF) {
+#if 0 /* FIXME: Re-enable Global event handling.. */
+ /* Global event */
+ atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+ qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
+ if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+ sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+ typeof(*sess), sess_list_entry);
+ switch (mcmd) {
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ mcmd = QLA_TGT_NEXUS_LOSS;
+ break;
+ case QLA_TGT_ABORT_ALL_SESS:
+ mcmd = QLA_TGT_ABORT_ALL;
+ break;
+ case QLA_TGT_NEXUS_LOSS:
+ case QLA_TGT_ABORT_ALL:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe046,
+ "qla_target(%d): Not allowed "
+ "command %x in %s", vha->vp_idx,
+ mcmd, __func__);
+ sess = NULL;
+ break;
+ }
+ } else
+ sess = NULL;
+#endif
+ } else {
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe000,
+ "Using sess for qla_tgt_reset: %p\n", sess);
+ if (!sess) {
+ res = -ESRCH;
+ return res;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe047,
+ "scsi(%ld): resetting (session %p from port "
+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
+ "mcmd %x, loop_id %d)\n", vha->host_no, sess,
+ sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ mcmd, loop_id);
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
+ iocb, QLA24XX_MGMT_SEND_NACK);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+ bool immediate)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+
+ if (sess->deleted)
+ return;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion\n", sess);
+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+ sess->deleted = 1;
+
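+	/* Immediate deletion skips the dev_loss grace period */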
+ if (immediate)
+ dev_loss_tmo = 0;
+
+ sess->expires = jiffies + dev_loss_tmo * HZ;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+ "qla_target(%d): session for port %02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
+ "deletion in %u secs (expires: %lu) immed: %d\n",
+ sess->vha->vp_idx,
+ sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ sess->loop_id, dev_loss_tmo, sess->expires, immediate);
+
+ if (immediate)
+ schedule_delayed_work(&tgt->sess_del_work, 0);
+ else
+ schedule_delayed_work(&tgt->sess_del_work,
+		    sess->expires - jiffies);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
+{
+ struct qla_tgt_sess *sess;
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
+ qlt_schedule_sess_for_deletion(sess, true);
+
+ /* At this point tgt could be already dead */
+}
+
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+ uint16_t *loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ dma_addr_t gid_list_dma;
+ struct gid_list_info *gid_list;
+ char *id_iter;
+ int res, rc, i;
+ uint16_t entries;
+
+ gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
+		    "qla_target(%d): DMA allocation of %u bytes failed\n",
+ vha->vp_idx, qla2x00_gid_list_size(ha));
+ return -ENOMEM;
+ }
+
+ /* Get list of logged in devices */
+ rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
+ "qla_target(%d): get_id_list() failed: %x\n",
+ vha->vp_idx, rc);
+ res = -1;
+ goto out_free_id_list;
+ }
+
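+	/* Walk the returned list for an entry matching s_id (domain/area/al_pa) */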
+ id_iter = (char *)gid_list;
+ res = -1;
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+ if ((gid->al_pa == s_id[2]) &&
+ (gid->area == s_id[1]) &&
+ (gid->domain == s_id[0])) {
+ *loop_id = le16_to_cpu(gid->loop_id);
+ res = 0;
+ break;
+ }
+ id_iter += ha->gid_list_info_size;
+ }
+
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
+ return res;
+}
+
+static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
+ struct qla_tgt_sess *sess)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_port_24xx_data *pmap24;
+ bool res, found = false;
+ int rc, i;
+ uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
+ uint16_t entries;
+ void *pmap;
+ int pmap_len;
+ fc_port_t *fcport;
+ int global_resets;
+
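+	/* Snapshot the global reset counter; if a reset happens while querying firmware, retry */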
+retry:
+ global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+ rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
+ if (rc != QLA_SUCCESS) {
+ res = false;
+ goto out;
+ }
+
+ pmap24 = pmap;
+ entries = pmap_len/sizeof(*pmap24);
+
+ for (i = 0; i < entries; ++i) {
+ if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
+ loop_id = le16_to_cpu(pmap24[i].loop_id);
+ found = true;
+ break;
+ }
+ }
+
+ kfree(pmap);
+
+ if (!found) {
+ res = false;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
+ "qlt_check_fcport_exist(): loop_id %d", loop_id);
+
+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ if (fcport == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
+ "qla_target(%d): Allocation of tmp FC port failed",
+ vha->vp_idx);
+ res = false;
+ goto out;
+ }
+
+ fcport->loop_id = loop_id;
+
+ rc = qla2x00_get_port_database(vha, fcport, 0);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
+ "qla_target(%d): Failed to retrieve fcport "
+ "information -- get_port_database() returned %x "
+ "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+ res = false;
+ goto out_free_fcport;
+ }
+
+ if (global_resets !=
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
+ "qla_target(%d): global reset during session discovery"
+ " (counter was %d, new %d), retrying",
+ vha->vp_idx, global_resets,
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+ goto retry;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+	    "Updating sess %p (s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
+ "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
+ sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
+
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+
+ res = true;
+
+out_free_fcport:
+ kfree(fcport);
+
+out:
+ return res;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+{
+ BUG_ON(!sess->deleted);
+
+ list_del(&sess->del_list_entry);
+ sess->deleted = 0;
+}
+
+static void qlt_del_sess_work_fn(struct delayed_work *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt,
+ sess_del_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (!list_empty(&tgt->del_sess_list)) {
+ sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+ del_list_entry);
+ if (time_after_eq(jiffies, sess->expires)) {
+ bool cancel;
+
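+			/* dev_loss timer expired: re-check the port with firmware before tearing the session down */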
+ qlt_undelete_sess(sess);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ cancel = qlt_check_fcport_exist(vha, sess);
+
+ if (cancel) {
+ if (sess->deleted) {
+ /*
+ * sess was again deleted while we were
+ * discovering it
+ */
+ spin_lock_irqsave(&ha->hardware_lock,
+ flags);
+ continue;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
+ "qla_target(%d): cancel deletion of "
+ "session for port %02x:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x (loop ID %d), because "
+				    "it isn't deleted by firmware",
+ vha->vp_idx, sess->port_name[0],
+ sess->port_name[1], sess->port_name[2],
+ sess->port_name[3], sess->port_name[4],
+ sess->port_name[5], sess->port_name[6],
+ sess->port_name[7], sess->loop_id);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+ "Timeout: sess %p about to be deleted\n",
+ sess);
+ ha->tgt.tgt_ops->shutdown_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else {
+ schedule_delayed_work(&tgt->sess_del_work,
+			    sess->expires - jiffies);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
+ * Caller must put it.
+ */
+static struct qla_tgt_sess *qlt_create_sess(
+ struct scsi_qla_host *vha,
+ fc_port_t *fcport,
+ bool local)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+ unsigned char be_sid[3];
+
+ /* Check to avoid double sessions */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+ sess_list_entry) {
+ if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ "Double sess %p found (s_id %x:%x:%x, "
+ "loop_id %d), updating to d_id %x:%x:%x, "
+ "loop_id %d", sess, sess->s_id.b.domain,
+ sess->s_id.b.al_pa, sess->s_id.b.area,
+ sess->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.al_pa, fcport->d_id.b.area,
+ fcport->loop_id);
+
+ if (sess->deleted)
+ qlt_undelete_sess(sess);
+
+ kref_get(&sess->se_sess->sess_kref);
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ if (sess->local && !local)
+ sess->local = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return sess;
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
+ "qla_target(%u): session allocation failed, "
+ "all commands from port %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7]);
+
+ return NULL;
+ }
+ sess->tgt = ha->tgt.qla_tgt;
+ sess->vha = vha;
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->local = local;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+ "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+ sess, ha->tgt.qla_tgt);
+
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+ /*
+ * Determine if this fc_port->port_name is allowed to access
+	 * target mode using explicit NodeACLs+MappedLUNs, or using
+ * TPG demo mode. If this is successful a target mode FC nexus
+ * is created.
+ */
+ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+ &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
+ kfree(sess);
+ return NULL;
+ }
+ /*
+ * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+	 * access across ->hardware_lock reacquire.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
+ memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
+ ha->tgt.qla_tgt->sess_count++;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+ "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
+ " completion %ssupported) added\n",
+ vha->vp_idx, local ? "local " : "", fcport->port_name[0],
+ fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
+ fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
+ sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
+ "" : "not ");
+
+ return sess;
+}
+
+/*
+ * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_create_sess(vha, fcport, false);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+
+ if (sess->deleted) {
+ qlt_undelete_sess(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+ "qla_target(%u): %ssession for port %02x:"
+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
+ "reappeared\n", vha->vp_idx, sess->local ? "local "
+ : "", sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ sess->loop_id);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+ "Reappeared sess %p\n", sess);
+ }
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ }
+
+ if (sess && sess->local) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+ "qla_target(%u): local session for "
+ "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+ "(loop ID %d) became global\n", vha->vp_idx,
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7],
+ sess->loop_id);
+ sess->local = 0;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
+
+ sess->local = 1;
+ qlt_schedule_sess_for_deletion(sess, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+ int res;
+ /*
+	 * We need to protect against the race where tgt is freed before or
+	 * inside wake_up()
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
+ "tgt %p, empty(sess_list)=%d sess_count=%d\n",
+ tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+ res = (tgt->sess_count == 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase1(struct qla_tgt *tgt)
+{
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+
+ if (tgt->tgt_stop || tgt->tgt_stopped) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
+ "Already in tgt->tgt_stop or tgt_stopped state\n");
+ dump_stack();
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+ vha->host_no, vha);
+ /*
+ * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
+ * Lock is needed, because we still can get an incoming packet.
+ */
+ mutex_lock(&ha->tgt.tgt_mutex);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tgt->tgt_stop = 1;
+ qlt_clear_tgt_db(tgt, true);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ flush_delayed_work_sync(&tgt->sess_del_work);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
+ "Waiting for sess works (tgt %p)", tgt);
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ while (!list_empty(&tgt->sess_works_list)) {
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+ flush_scheduled_work();
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ }
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
+ "Waiting for tgt %p: list_empty(sess_list)=%d "
+ "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
+ tgt->sess_count);
+
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+
+ /* Big hammer */
+ if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+ qlt_disable_vha(vha);
+
+ /* Wait for sessions to clear out (just in case) */
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+}
+EXPORT_SYMBOL(qlt_stop_phase1);
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase2(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+
+ if (tgt->tgt_stopped) {
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+ "Already in tgt->tgt_stopped state\n");
+ dump_stack();
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+ "Waiting for %d IRQ commands to complete (tgt %p)",
+ tgt->irq_cmd_count, tgt);
+
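+	/* Busy-wait under the hardware lock until in-flight IRQ-context commands drain */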
+ mutex_lock(&ha->tgt.tgt_mutex);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (tgt->irq_cmd_count != 0) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ udelay(2);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ tgt->tgt_stop = 0;
+ tgt->tgt_stopped = 1;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+ tgt);
+}
+EXPORT_SYMBOL(qlt_stop_phase2);
+
+/* Called from qlt_remove_target() -> qla2x00_remove_one() */
+void qlt_release(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+
+ if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+ qlt_stop_phase2(tgt);
+
+ ha->tgt.qla_tgt = NULL;
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+ "Release of tgt %p finished\n", tgt);
+
+ kfree(tgt);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
+ const void *param, unsigned int param_size)
+{
+ struct qla_tgt_sess_work_param *prm;
+ unsigned long flags;
+
+ prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
+ if (!prm) {
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
+ "qla_target(%d): Unable to create session "
+ "work, command will be refused", 0);
+ return -ENOMEM;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
+ "Scheduling work (type %d, prm %p)"
+ " to find session for param %p (size %d, tgt %p)\n",
+ type, prm, param, param_size, tgt);
+
+ prm->type = type;
+ memcpy(&prm->tm_iocb, param, param_size);
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ schedule_work(&tgt->sess_work);
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy,
+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
+{
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ struct nack_to_isp *nack;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!pkt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe049,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return;
+ }
+
+ if (ha->tgt.qla_tgt != NULL)
+ ha->tgt.qla_tgt->notify_ack_expected++;
+
+ pkt->entry_type = NOTIFY_ACK_TYPE;
+ pkt->entry_count = 1;
+
+ nack = (struct nack_to_isp *)pkt;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
+ nack->u.isp24.srr_reject_code = srr_reject_code;
+ nack->u.isp24.srr_reject_code_expl = srr_explan;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe005,
+ "qla_target(%d): Sending 24xx Notify Ack %d\n",
+ vha->vp_idx, nack->u.isp24.status);
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, uint32_t status,
+ bool ids_reversed)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct abts_resp_to_24xx *resp;
+ uint32_t f_ctl;
+ uint8_t *p;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe006,
+	    "Sending task mgmt ABTS response (ha=%p, abts=%p, status=%x)\n",
+ ha, abts, status);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!resp) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet", vha->vp_idx, __func__);
+ return;
+ }
+
+ resp->entry_type = ABTS_RESP_24XX;
+ resp->entry_count = 1;
+ resp->nport_handle = abts->nport_handle;
+ resp->vp_index = vha->vp_idx;
+ resp->sof_type = abts->sof_type;
+ resp->exchange_address = abts->exchange_address;
+ resp->fcp_hdr_le = abts->fcp_hdr_le;
+ f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+ F_CTL_SEQ_INITIATIVE);
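+	/* Copy the three F_CTL bytes into the little-endian frame header */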
+ p = (uint8_t *)&f_ctl;
+ resp->fcp_hdr_le.f_ctl[0] = *p++;
+ resp->fcp_hdr_le.f_ctl[1] = *p++;
+ resp->fcp_hdr_le.f_ctl[2] = *p;
+ if (ids_reversed) {
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+ } else {
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+ }
+ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+ if (status == FCP_TMF_CMPL) {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+ resp->payload.ba_acct.low_seq_cnt = 0x0000;
+ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+ } else {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+ resp->payload.ba_rjt.reason_code =
+ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+ /* Other bytes are zero */
+ }
+
+ ha->tgt.qla_tgt->abts_resp_expected++;
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+ struct abts_resp_from_24xx_fw *entry)
+{
+ struct ctio7_to_24xx *ctio;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe007,
+ "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (ctio == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04b,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return;
+ }
+
+ /*
+	 * On entry we have the firmware's response to the ABTS response we
+	 * generated earlier, so the ID fields in it are reversed.
+ */
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->nport_handle = entry->nport_handle;
+ ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = vha->vp_idx;
+ ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+ ctio->exchange_addr = entry->exchange_addr_to_abort;
+ ctio->u.status1.flags =
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
+ ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+
+ qla2x00_start_iocbs(vha, vha->req);
+
+ qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+ FCP_TMF_CMPL, true);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ int rc;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+ "qla_target(%d): task abort (tag=%d)\n",
+ vha->vp_idx, abts->exchange_addr_to_abort);
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
+ "qla_target(%d): %s: Allocation of ABORT cmd failed",
+ vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+
+ mcmd->sess = sess;
+ memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+ abts->exchange_addr_to_abort);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+ "qla_target(%d): tgt_ops->handle_tmr()"
+ " failed: %d", vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ uint32_t tag = abts->exchange_addr_to_abort;
+ uint8_t s_id[3];
+ int rc;
+
+ if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
+ "qla_target(%d): ABTS: Abort Sequence not "
+ "supported\n", vha->vp_idx);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
+ "qla_target(%d): ABTS: Unknown Exchange "
+ "Address received\n", vha->vp_idx);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
+ "qla_target(%d): task abort (s_id=%x:%x:%x, "
+ "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
+ abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+ le32_to_cpu(abts->fcp_hdr_le.parameter));
+
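+	/* fcp_hdr_le carries the S_ID byte-reversed; restore domain/area/al_pa order */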
+ s_id[0] = abts->fcp_hdr_le.s_id[2];
+ s_id[1] = abts->fcp_hdr_le.s_id[1];
+ s_id[2] = abts->fcp_hdr_le.s_id[0];
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
+		    "qla_target(%d): task abort for non-existent session\n",
+ vha->vp_idx);
+ rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+ QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+ if (rc != 0) {
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
+ false);
+ }
+ return;
+ }
+
+ rc = __qlt_24xx_handle_abts(vha, abts, sess);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+ "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
+ vha->vp_idx, rc);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+ struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
+{
+ struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
+ struct ctio7_to_24xx *ctio;
+
+ ql_dbg(ql_dbg_tgt, ha, 0xe008,
+	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
+ ha, atio, resp_code);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
+ return;
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+ if (ctio == NULL) {
+ ql_dbg(ql_dbg_tgt, ha, 0xe04c,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", ha->vp_idx, __func__);
+ return;
+ }
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->nport_handle = mcmd->sess->loop_id;
+ ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = ha->vp_idx;
+ ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_SEND_STATUS);
+ ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.scsi_status =
+ __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+ ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
+
+ qla2x00_start_iocbs(ha, ha->req);
+}
+
+void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+}
+EXPORT_SYMBOL(qlt_free_mcmd);
+
+/* callback from target fabric module code */
+void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ struct scsi_qla_host *vha = mcmd->sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
+ "TM response mcmd (%p) status %#x state %#x",
+ mcmd, mcmd->fc_tm_rsp, mcmd->flags);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
+ qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+ 0, 0, 0, 0, 0, 0);
+ else {
+ if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+ mcmd->fc_tm_rsp, false);
+ else
+ qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+ mcmd->fc_tm_rsp);
+ }
+ /*
+ * Make the callback for ->free_mcmd() to queue_work() and invoke
+ * target_put_sess_cmd() to drop cmd_kref to 1. The final
+ * target_put_sess_cmd() call will be made from TFO->check_stop_free()
+ * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
+ * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
+ * qlt_xmit_tm_rsp() returns here..
+ */
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+EXPORT_SYMBOL(qlt_xmit_tm_rsp);
+
+/* No locks */
+static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
+{
+ struct qla_tgt_cmd *cmd = prm->cmd;
+
+ BUG_ON(cmd->sg_cnt == 0);
+
+ prm->sg = (struct scatterlist *)cmd->sg;
+ prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+ cmd->sg_cnt, cmd->dma_data_direction);
+ if (unlikely(prm->seg_cnt == 0))
+ goto out_err;
+
+ prm->cmd->sg_mapped = 1;
+
+ /*
+ * If greater than four sg entries then we need to allocate
+ * the continuation entries
+ */
+ if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+ prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+ prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
+
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
+ prm->seg_cnt, prm->req_cnt);
+ return 0;
+
+out_err:
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+ "qla_target(%d): PCI mapping failed: sg_cnt=%d",
+ 0, prm->cmd->sg_cnt);
+ return -1;
+}
+
+static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ BUG_ON(!cmd->sg_mapped);
+ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+ cmd->sg_mapped = 0;
+}
+
+static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+ uint32_t req_cnt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ha->iobase;
+ uint32_t cnt;
+
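+	/* Refresh the cached free-entry count from the hardware out pointer if it looks too low */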
+ if (vha->req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
+ "vha->req->cnt=%d, req_cnt=%d\n", cnt,
+ vha->req->ring_index, vha->req->cnt, req_cnt);
+ if (vha->req->ring_index < cnt)
+ vha->req->cnt = cnt - vha->req->ring_index;
+ else
+ vha->req->cnt = vha->req->length -
+ (vha->req->ring_index - cnt);
+ }
+
+ if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe00b,
+ "qla_target(%d): There is no room in the "
+ "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
+ "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
+ vha->req->cnt, req_cnt);
+ return -EAGAIN;
+ }
+ vha->req->cnt -= req_cnt;
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+{
+ /* Adjust ring index. */
+ vha->req->ring_index++;
+ if (vha->req->ring_index == vha->req->length) {
+ vha->req->ring_index = 0;
+ vha->req->ring_ptr = vha->req->ring;
+ } else {
+ vha->req->ring_ptr++;
+ }
+ return (cont_entry_t *)vha->req->ring_ptr;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t h;
+
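+	/* Handles are 1-based indices into ha->tgt.cmds[]; 0 and the skip value are reserved */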
+ h = ha->tgt.current_handle;
+ /* always increment cmd handle */
+ do {
+ ++h;
+ if (h > MAX_OUTSTANDING_COMMANDS)
+ h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
+ if (h == ha->tgt.current_handle) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+ "qla_target(%d): Ran out of "
+ "empty cmd slots in ha %p\n", vha->vp_idx, ha);
+ h = QLA_TGT_NULL_HANDLE;
+ break;
+ }
+ } while ((h == QLA_TGT_NULL_HANDLE) ||
+ (h == QLA_TGT_SKIP_HANDLE) ||
+ (ha->tgt.cmds[h-1] != NULL));
+
+ if (h != QLA_TGT_NULL_HANDLE)
+ ha->tgt.current_handle = h;
+
+ return h;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ uint32_t h;
+ struct ctio7_to_24xx *pkt;
+ struct qla_hw_data *ha = vha->hw;
+ struct atio_from_isp *atio = &prm->cmd->atio;
+
+ pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+ prm->pkt = pkt;
+ memset(pkt, 0, sizeof(*pkt));
+
+ pkt->entry_type = CTIO_TYPE7;
+ pkt->entry_count = (uint8_t)prm->req_cnt;
+ pkt->vp_index = vha->vp_idx;
+
+ h = qlt_make_handle(vha);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+ * CTIO type 7 from the firmware doesn't provide a way to
+ * know the initiator's LOOP ID, hence we can't find
+ * the session and, so, the command.
+ */
+ return -EAGAIN;
+ } else
+ ha->tgt.cmds[h-1] = prm->cmd;
+
+ pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->nport_handle = prm->cmd->loop_id;
+ pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ pkt->exchange_addr = atio->u.isp24.exchange_addr;
+ pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+ pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00c,
+ "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
+ vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
+ le16_to_cpu(pkt->u.status0.ox_id));
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there are enough free request entries, so nothing gets dropped.
+ */
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ int cnt;
+ uint32_t *dword_ptr;
+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+
+ /* Build continuation packets */
+ while (prm->seg_cnt > 0) {
+ cont_a64_entry_t *cont_pkt64 =
+ (cont_a64_entry_t *)qlt_get_req_pkt(vha);
+
+ /*
+		 * Make sure that none of cont_pkt64's 64-bit specific
+		 * fields are used for 32-bit addressing. Cast to
+		 * (cont_entry_t *) for that.
+ */
+
+ memset(cont_pkt64, 0, sizeof(*cont_pkt64));
+
+ cont_pkt64->entry_count = 1;
+ cont_pkt64->sys_define = 0;
+
+ if (enable_64bit_addressing) {
+ cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+ dword_ptr =
+ (uint32_t *)&cont_pkt64->dseg_0_address;
+ } else {
+ cont_pkt64->entry_type = CONTINUE_TYPE;
+ dword_ptr =
+ (uint32_t *)&((cont_entry_t *)
+ cont_pkt64)->dseg_0_address;
+ }
+
+ /* Load continuation entry data segments */
+ for (cnt = 0;
+ cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+ cnt++, prm->seg_cnt--) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32
+ (sg_dma_address(prm->sg)));
+ if (enable_64bit_addressing) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32
+ (sg_dma_address
+ (prm->sg)));
+ }
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00d,
+ "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
+ (long long unsigned int)
+ pci_dma_hi32(sg_dma_address(prm->sg)),
+ (long long unsigned int)
+ pci_dma_lo32(sg_dma_address(prm->sg)),
+ (int)sg_dma_len(prm->sg));
+
+ prm->sg = sg_next(prm->sg);
+ }
+ }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there are enough free request entries, so nothing gets dropped.
+ */
+static void qlt_load_data_segments(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ int cnt;
+ uint32_t *dword_ptr;
+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+ struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00e,
+ "iocb->scsi_status=%x, iocb->flags=%x\n",
+ le16_to_cpu(pkt24->u.status0.scsi_status),
+ le16_to_cpu(pkt24->u.status0.flags));
+
+ pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
+
+ /* Setup packet address segment pointer */
+ dword_ptr = pkt24->u.status0.dseg_0_address;
+
+ /* Set total data segment count */
+ if (prm->seg_cnt)
+ pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
+
+ if (prm->seg_cnt == 0) {
+ /* No data transfer */
+ *dword_ptr++ = 0;
+ *dword_ptr = 0;
+ return;
+ }
+
+ /* If scatter gather */
+ ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
+
+ /* Load command entry data segments */
+ for (cnt = 0;
+ (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+ cnt++, prm->seg_cnt--) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+ if (enable_64bit_addressing) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32(
+ sg_dma_address(prm->sg)));
+ }
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe010,
+ "S/G Segment phys_addr=%llx:%llx, len=%d\n",
+ (long long unsigned int)pci_dma_hi32(sg_dma_address(
+ prm->sg)),
+ (long long unsigned int)pci_dma_lo32(sg_dma_address(
+ prm->sg)),
+ (int)sg_dma_len(prm->sg));
+
+ prm->sg = sg_next(prm->sg);
+ }
+
+ qlt_load_cont_data_segments(prm, vha);
+}
+
+static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
+{
+ return cmd->bufflen > 0;
+}
+
+/*
+ * Called without ha->hardware_lock held
+ */
+static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+ struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
+ uint32_t *full_req_cnt)
+{
+ struct qla_tgt *tgt = cmd->tgt;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (unlikely(cmd->aborted)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): terminating exchange "
+ "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+ se_cmd, cmd->tag);
+
+ cmd->state = QLA_TGT_STATE_ABORTED;
+
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+
+ /* !! At this point cmd could be already freed !! */
+ return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
+ vha->vp_idx, cmd->tag);
+
+ prm->cmd = cmd;
+ prm->tgt = tgt;
+ prm->rq_result = scsi_status;
+ prm->sense_buffer = &cmd->sense_buffer[0];
+ prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
+ prm->sg = NULL;
+ prm->seg_cnt = -1;
+ prm->req_cnt = 1;
+ prm->add_status_pkt = 0;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
+ prm->rq_result, xmit_type);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+ return -EFAULT;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
+
+ if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
+ if (qlt_pci_map_calc_cnt(prm) != 0)
+ return -EAGAIN;
+ }
+
+ *full_req_cnt = prm->req_cnt;
+
+ if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ prm->residual = se_cmd->residual_count;
+ ql_dbg(ql_dbg_tgt, vha, 0xe014,
+ "Residual underflow: %d (tag %d, "
+ "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+ cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
+ prm->rq_result |= SS_RESIDUAL_UNDER;
+ } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ prm->residual = se_cmd->residual_count;
+ ql_dbg(ql_dbg_tgt, vha, 0xe015,
+ "Residual overflow: %d (tag %d, "
+ "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+ cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
+ prm->rq_result |= SS_RESIDUAL_OVER;
+ }
+
+ if (xmit_type & QLA_TGT_XMIT_STATUS) {
+ /*
+ * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
+ * ignored in *xmit_response() below
+ */
+ if (qlt_has_data(cmd)) {
+ if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
+ (IS_FWI2_CAPABLE(ha) &&
+ (prm->rq_result != 0))) {
+ prm->add_status_pkt = 1;
+ (*full_req_cnt)++;
+ }
+ }
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe016,
+ "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
+ prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
+
+ return 0;
+}
+
+static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
+ struct qla_tgt_cmd *cmd, int sending_sense)
+{
+ if (ha->tgt.enable_class_2)
+ return 0;
+
+ if (sending_sense)
+ return cmd->conf_compl_supported;
+ else
+ return ha->tgt.enable_explicit_conf &&
+ cmd->conf_compl_supported;
+}
+
+#ifdef CONFIG_QLA_TGT_DEBUG_SRR
+/*
+ * Original taken from the XFS code
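+ * (a Park-Miller "minimal standard" LCG evaluated with Schrage's method;
+ * used only by this debug code to randomly inject simulated SRR conditions)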
+ */
+static unsigned long qlt_srr_random(void)
+{
+ static int Inited;
+ static unsigned long RandomValue;
+ static DEFINE_SPINLOCK(lock);
+ /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+ register long rv;
+ register long lo;
+ register long hi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock, flags);
+ if (!Inited) {
+ RandomValue = jiffies;
+ Inited = 1;
+ }
+ rv = RandomValue;
+ hi = rv / 127773;
+ lo = rv % 127773;
+ rv = 16807 * lo - 2836 * hi;
+ if (rv <= 0)
+ rv += 2147483647;
+ RandomValue = rv;
+ spin_unlock_irqrestore(&lock, flags);
+ return rv;
+}
+
+static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{
+#if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
+ if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
+ == 50) {
+ *xmit_type &= ~QLA_TGT_XMIT_STATUS;
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
+ "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+ }
+#endif
+ /*
+ * It's currently not possible to simulate SRRs for FCP_WRITE without
+ * a physical link layer failure, so don't even try here.
+ */
+ if (cmd->dma_data_direction != DMA_FROM_DEVICE)
+ return;
+
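+ /*
+ * Debug fault injection: randomly truncate the S/G tail so that less
+ * data than expected is transferred, exercising the SRR handling paths.
+ */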
+ if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
+ ((qlt_srr_random() % 100) == 20)) {
+ int i, leave = 0;
+ unsigned int tot_len = 0;
+
+ while (leave == 0)
+ leave = qlt_srr_random() % cmd->sg_cnt;
+
+ for (i = 0; i < leave; i++)
+ tot_len += cmd->sg[i].length;
+
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
+ "Cutting cmd %p (tag %d) buffer"
+ " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
+ " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+ cmd->bufflen, cmd->sg_cnt);
+
+ cmd->bufflen = tot_len;
+ cmd->sg_cnt = leave;
+ }
+
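+ /*
+ * Debug fault injection: randomly cut the buffer head to simulate an
+ * SRR with a non-zero relative offset (or drop the data phase entirely
+ * when the random offset is 0).
+ */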
+ if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
+ unsigned int offset = qlt_srr_random() % cmd->bufflen;
+
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
+ "Cutting cmd %p (tag %d) buffer head "
+ "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+ cmd->bufflen);
+ if (offset == 0)
+ *xmit_type &= ~QLA_TGT_XMIT_DATA;
+ else if (qlt_set_data_offset(cmd, offset)) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
+ "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+ }
+ }
+}
+#else
+static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{}
+#endif
+
+static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
+ struct qla_tgt_prm *prm)
+{
+ prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
+ (uint32_t)sizeof(ctio->u.status1.sense_data));
+ ctio->u.status0.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+ if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+ ctio->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+ ctio->u.status0.residual = cpu_to_le32(prm->residual);
+ ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
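+ /* Switch to status mode 1; include the sense bytes when they are valid. */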
+ if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
+ int i;
+
+ if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+ if (prm->cmd->se_cmd.scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+ "Skipping EXPLICIT_CONFORM and "
+ "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
+ "non GOOD status\n");
+ goto skip_explict_conf;
+ }
+ ctio->u.status1.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+skip_explict_conf:
+ ctio->u.status1.flags &=
+ ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ctio->u.status1.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ ctio->u.status1.scsi_status |=
+ __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+ ctio->u.status1.sense_length =
+ cpu_to_le16(prm->sense_buffer_len);
+ for (i = 0; i < prm->sense_buffer_len/4; i++)
+ ((uint32_t *)ctio->u.status1.sense_data)[i] =
+ cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+#if 0
+ if (unlikely((prm->sense_buffer_len % 4) != 0)) {
+ static int q;
+ if (q < 10) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04f,
+ "qla_target(%d): %d bytes of sense "
+ "lost", prm->tgt->ha->vp_idx,
+ prm->sense_buffer_len % 4);
+ q++;
+ }
+ }
+#endif
+ } else {
+ ctio->u.status1.flags &=
+ ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ctio->u.status1.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ ctio->u.status1.sense_length = 0;
+ memset(ctio->u.status1.sense_data, 0,
+ sizeof(ctio->u.status1.sense_data));
+ }
+
+ /* Sense with len > 24, is it possible ??? */
+}
+
+/*
+ * Callback to set up a response for xmit_type QLA_TGT_XMIT_DATA and/or
+ * QLA_TGT_XMIT_STATUS for >= 24xx silicon
+ */
+int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ uint8_t scsi_status)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct ctio7_to_24xx *pkt;
+ struct qla_tgt_prm prm;
+ uint32_t full_req_cnt = 0;
+ unsigned long flags = 0;
+ int res;
+
+ memset(&prm, 0, sizeof(prm));
+ qlt_check_srr_debug(cmd, &xmit_type);
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
+ "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
+ "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
+ 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+
+ res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ &full_req_cnt);
+ if (unlikely(res != 0)) {
+ if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+ return 0;
+
+ return res;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Does the F/W have enough IOCBs for this request? */
+ res = qlt_check_reserve_free_req(vha, full_req_cnt);
+ if (unlikely(res))
+ goto out_unmap_unlock;
+
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ if (unlikely(res != 0))
+ goto out_unmap_unlock;
+
+
+ pkt = (struct ctio7_to_24xx *)prm.pkt;
+
+ if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
+ pkt->u.status0.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+ CTIO7_FLAGS_STATUS_MODE_0);
+
+ qlt_load_data_segments(&prm, vha);
+
+ if (prm.add_status_pkt == 0) {
+ if (xmit_type & QLA_TGT_XMIT_STATUS) {
+ pkt->u.status0.scsi_status =
+ cpu_to_le16(prm.rq_result);
+ pkt->u.status0.residual =
+ cpu_to_le32(prm.residual);
+ pkt->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_SEND_STATUS);
+ if (qlt_need_explicit_conf(ha, cmd, 0)) {
+ pkt->u.status0.flags |=
+ __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+ }
+
+ } else {
+ /*
+ * We have already made sure that there are enough
+ * request entries available, so the HW lock will not
+ * have to be dropped in qlt_get_req_pkt().
+ */
+ struct ctio7_to_24xx *ctio =
+ (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe019,
+ "Building additional status packet\n");
+
+ memcpy(ctio, pkt, sizeof(*ctio));
+ ctio->entry_count = 1;
+ ctio->dseg_count = 0;
+ ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+ CTIO7_FLAGS_DATA_IN);
+
+ /* Real finish is ctio_m1's finish */
+ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
+ pkt->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_DONT_RET_CTIO);
+ qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
+ &prm);
+ pr_debug("Status CTIO7: %p\n", ctio);
+ }
+ } else
+ qlt_24xx_init_ctio_to_isp(pkt, &prm);
+
+
+ cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01a,
+ "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
+ pkt, scsi_status);
+
+ qla2x00_start_iocbs(vha, vha->req);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return 0;
+
+out_unmap_unlock:
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(qlt_xmit_response);
+
+int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+{
+ struct ctio7_to_24xx *pkt;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = cmd->tgt;
+ struct qla_tgt_prm prm;
+ unsigned long flags;
+ int res = 0;
+
+ memset(&prm, 0, sizeof(prm));
+ prm.cmd = cmd;
+ prm.tgt = tgt;
+ prm.sg = NULL;
+ prm.req_cnt = 1;
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+ return -EIO;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
+ (int)vha->vp_idx);
+
+ /* Calculate number of entries and segments required */
+ if (qlt_pci_map_calc_cnt(&prm) != 0)
+ return -EAGAIN;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Does the F/W have enough IOCBs for this request? */
+ res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+ if (res != 0)
+ goto out_unlock_free_unmap;
+
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ if (unlikely(res != 0))
+ goto out_unlock_free_unmap;
+ pkt = (struct ctio7_to_24xx *)prm.pkt;
+ pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+ CTIO7_FLAGS_STATUS_MODE_0);
+ qlt_load_data_segments(&prm, vha);
+
+ cmd->state = QLA_TGT_STATE_NEED_DATA;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+
+out_unlock_free_unmap:
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd,
+ struct atio_from_isp *atio)
+{
+ struct ctio7_to_24xx *ctio24;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ int ret = 0;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (pkt == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe050,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+
+ if (cmd != NULL) {
+ if (cmd->state < QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe051,
+ "qla_target(%d): Terminating cmd %p with "
+ "incorrect state %d\n", vha->vp_idx, cmd,
+ cmd->state);
+ } else
+ ret = 1;
+ }
+
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ ctio24 = (struct ctio7_to_24xx *)pkt;
+ ctio24->entry_type = CTIO_TYPE7;
+ ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->vp_index = vha->vp_idx;
+ ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
+ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+
+ /* Most likely, it isn't needed */
+ ctio24->u.status1.residual = get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]);
+ if (ctio24->u.status1.residual != 0)
+ ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ return ret;
+}
+
+static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+{
+ unsigned long flags;
+ int rc;
+
+ if (qlt_issue_marker(vha, ha_locked) < 0)
+ return;
+
+ if (ha_locked) {
+ rc = __qlt_send_term_exchange(vha, cmd, atio);
+ goto done;
+ }
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ rc = __qlt_send_term_exchange(vha, cmd, atio);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+done:
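+ /*
+ * rc == 1 means the command had already reached at least
+ * QLA_TGT_STATE_PROCESSED, so it is safe to free it here.
+ */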
+ if (rc == 1) {
+ if (!ha_locked && !in_interrupt())
+ msleep(250); /* just in case */
+
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ }
+}
+
+void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+{
+ BUG_ON(cmd->sg_mapped);
+
+ if (unlikely(cmd->free_sg))
+ kfree(cmd->sg);
+ kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+}
+EXPORT_SYMBOL(qlt_free_cmd);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd, void *ctio)
+{
+ struct qla_tgt_srr_ctio *sc;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_srr_imm *imm;
+
+ tgt->ctio_srr_id++;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+ "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
+
+ if (!ctio) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
+ "qla_target(%d): SRR CTIO, but ctio is NULL\n",
+ vha->vp_idx);
+ return -EINVAL;
+ }
+
+ sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
+ if (sc != NULL) {
+ sc->cmd = cmd;
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ sc->srr_id = tgt->ctio_srr_id;
+ list_add_tail(&sc->srr_list_entry,
+ &tgt->srr_ctio_list);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
+ "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+ int found = 0;
+ list_for_each_entry(imm, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (imm->srr_id == sc->srr_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
+ "Scheduling srr work\n");
+ schedule_work(&tgt->srr_work);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
+ "qla_target(%d): imm_srr_id "
+ "== ctio_srr_id (%d), but there is no "
+ "corresponding SRR IMM, deleting CTIO "
+ "SRR %p\n", vha->vp_idx,
+ tgt->ctio_srr_id, sc);
+ list_del(&sc->srr_list_entry);
+ spin_unlock(&tgt->srr_lock);
+
+ kfree(sc);
+ return -EINVAL;
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ } else {
+ struct qla_tgt_srr_imm *ti;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
+ "qla_target(%d): Unable to allocate SRR CTIO entry\n",
+ vha->vp_idx);
+ spin_lock(&tgt->srr_lock);
+ list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (imm->srr_id == tgt->ctio_srr_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
+ "IMM SRR %p deleted (id %d)\n",
+ imm, imm->srr_id);
+ list_del(&imm->srr_list_entry);
+ qlt_reject_free_srr_imm(vha, imm, 1);
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+ struct qla_tgt_cmd *cmd, uint32_t status)
+{
+ int term = 0;
+
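+ /*
+ * Terminate the exchange only if the firmware has not already terminated
+ * it (OF_TERM_EXCH clear), or if no CTIO was provided at all.
+ */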
+ if (ctio != NULL) {
+ struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+ term = !(c->flags &
+ __constant_cpu_to_le16(OF_TERM_EXCH));
+ } else
+ term = 1;
+
+ if (term)
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+
+ return term;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
+ uint32_t handle)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ handle--;
+ if (ha->tgt.cmds[handle] != NULL) {
+ struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
+ ha->tgt.cmds[handle] = NULL;
+ return cmd;
+ } else
+ return NULL;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+ uint32_t handle, void *ctio)
+{
+ struct qla_tgt_cmd *cmd = NULL;
+
+ /* Clear out internal marks */
+ handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
+ CTIO_INTERMEDIATE_HANDLE_MARK);
+
+ if (handle != QLA_TGT_NULL_HANDLE) {
+ if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
+ "SKIP_HANDLE CTIO\n");
+ return NULL;
+ }
+ /* handle-1 is actually used */
+ if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe052,
+ "qla_target(%d): Wrong handle %x received\n",
+ vha->vp_idx, handle);
+ return NULL;
+ }
+ cmd = qlt_get_cmd(vha, handle);
+ if (unlikely(cmd == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe053,
+ "qla_target(%d): Suspicious: unable to "
+ "find the command with handle %x\n", vha->vp_idx,
+ handle);
+ return NULL;
+ }
+ } else if (ctio != NULL) {
+ /* We can't get loop ID from CTIO7 */
+ ql_dbg(ql_dbg_tgt, vha, 0xe054,
+ "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
+ "support NULL handles\n", vha->vp_idx);
+ return NULL;
+ }
+
+ return cmd;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
+ uint32_t status, void *ctio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd;
+ struct target_core_fabric_ops *tfo;
+ struct qla_tgt_cmd *cmd;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01e,
+ "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
+ vha->vp_idx, ctio, status, handle);
+
+ if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+ /* That could happen only in case of an error/reset/abort */
+ if (status != CTIO_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
+ "Intermediate CTIO received"
+ " (status %x)\n", status);
+ }
+ return;
+ }
+
+ cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+ if (cmd == NULL)
+ return;
+
+ se_cmd = &cmd->se_cmd;
+ tfo = se_cmd->se_tfo;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ if (unlikely(status != CTIO_SUCCESS)) {
+ switch (status & 0xFFFF) {
+ case CTIO_LIP_RESET:
+ case CTIO_TARGET_RESET:
+ case CTIO_ABORTED:
+ case CTIO_TIMEOUT:
+ case CTIO_INVALID_RX_ID:
+ /* They are OK */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
+ "qla_target(%d): CTIO with "
+ "status %#x received, state %x, se_cmd %p, "
+ "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
+ "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
+ status, cmd->state, se_cmd);
+ break;
+
+ case CTIO_PORT_LOGGED_OUT:
+ case CTIO_PORT_UNAVAILABLE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
+ "qla_target(%d): CTIO with PORT LOGGED "
+ "OUT (29) or PORT UNAVAILABLE (28) status %x "
+ "received (state %x, se_cmd %p)\n", vha->vp_idx,
+ status, cmd->state, se_cmd);
+ break;
+
+ case CTIO_SRR_RECEIVED:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
+ "qla_target(%d): CTIO with SRR_RECEIVED"
+ " status %x received (state %x, se_cmd %p)\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
+ break;
+ else
+ return;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+ "qla_target(%d): CTIO with error status "
+ "0x%x received (state %x, se_cmd %p\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ break;
+ }
+
+ if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+ if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+ return;
+ }
+
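+ /*
+ * Advance the command state machine: PROCESSED commands are finished,
+ * NEED_DATA commands move to DATA_IN and are handed to the upper layer,
+ * ABORTED commands are finished as well.
+ */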
+ if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+ } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ int rx_status = 0;
+
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+
+ if (unlikely(status != CTIO_SUCCESS))
+ rx_status = -EIO;
+ else
+ cmd->write_data_transferred = 1;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe020,
+ "Data received, context %x, rx_status %d\n",
+ 0x0, rx_status);
+
+ ha->tgt.tgt_ops->handle_data(cmd);
+ return;
+ } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
+ "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
+ "qla_target(%d): A command in state (%d) should "
+ "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+ }
+
+ if (unlikely(status != CTIO_SUCCESS)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
+ dump_stack();
+ }
+
+ ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+ if (likely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe021,
+ "CTIO, but target mode not enabled"
+ " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
+ return;
+ }
+
+ tgt->irq_cmd_count++;
+ qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
+ tgt->irq_cmd_count--;
+}
+
+static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
+ uint8_t task_codes)
+{
+ int fcp_task_attr;
+
+ switch (task_codes) {
+ case ATIO_SIMPLE_QUEUE:
+ fcp_task_attr = MSG_SIMPLE_TAG;
+ break;
+ case ATIO_HEAD_OF_QUEUE:
+ fcp_task_attr = MSG_HEAD_TAG;
+ break;
+ case ATIO_ORDERED_QUEUE:
+ fcp_task_attr = MSG_ORDERED_TAG;
+ break;
+ case ATIO_ACA_QUEUE:
+ fcp_task_attr = MSG_ACA_TAG;
+ break;
+ case ATIO_UNTAGGED:
+ fcp_task_attr = MSG_SIMPLE_TAG;
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
+ "qla_target: unknown task code %x, use ORDERED instead\n",
+ task_codes);
+ fcp_task_attr = MSG_ORDERED_TAG;
+ break;
+ }
+
+ return fcp_task_attr;
+}
+
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+ uint8_t *);
+/*
+ * Process context for I/O path into tcm_qla2xxx code
+ */
+static void qlt_do_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ scsi_qla_host_t *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess = NULL;
+ struct atio_from_isp *atio = &cmd->atio;
+ unsigned char *cdb;
+ unsigned long flags;
+ uint32_t data_length;
+ int ret, fcp_task_attr, data_dir, bidi = 0;
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ atio->u.isp24.fcp_hdr.s_id);
+ if (sess) {
+ if (unlikely(sess->tearing_down)) {
+ sess = NULL;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ goto out_term;
+ } else {
+ /*
+ * Do the extra kref_get() before dropping
+ * qla_hw_data->hardware_lock.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (unlikely(!sess)) {
+ uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+ "qla_target(%d): Unable to find wwn login"
+ " (s_id %x:%x:%x), trying to create it manually\n",
+ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+ if (atio->u.raw.entry_count > 1) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+ "Dropping multy entry cmd %p\n", cmd);
+ goto out_term;
+ }
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has an extra creation ref. */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ if (!sess)
+ goto out_term;
+ }
+
+ cmd->sess = sess;
+ cmd->loop_id = sess->loop_id;
+ cmd->conf_compl_supported = sess->conf_compl_supported;
+
+ cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+ cmd->tag = atio->u.isp24.exchange_addr;
+ cmd->unpacked_lun = scsilun_to_int(
+ (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+
+ if (atio->u.isp24.fcp_cmnd.rddata &&
+ atio->u.isp24.fcp_cmnd.wrdata) {
+ bidi = 1;
+ data_dir = DMA_TO_DEVICE;
+ } else if (atio->u.isp24.fcp_cmnd.rddata)
+ data_dir = DMA_FROM_DEVICE;
+ else if (atio->u.isp24.fcp_cmnd.wrdata)
+ data_dir = DMA_TO_DEVICE;
+ else
+ data_dir = DMA_NONE;
+
+ fcp_task_attr = qlt_get_fcp_task_attr(vha,
+ atio->u.isp24.fcp_cmnd.task_attr);
+ data_length = be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe022,
+ "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
+ cmd, cmd->unpacked_lun, cmd->tag);
+
+ ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+ fcp_task_attr, data_dir, bidi);
+ if (ret != 0)
+ goto out_term;
+ /*
+ * Drop the extra session reference taken earlier in this function
+ * (kref_get() above or the local session creation ref).
+ */
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+ /*
+ * cmd has not sent to target yet, so pass NULL as the second
+ * argument to qlt_send_term_exchange() and free the memory here.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+ kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_cmd *cmd;
+
+ if (unlikely(tgt->tgt_stop)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ "New command while device %p is shutting down\n", tgt);
+ return -EFAULT;
+ }
+
+ cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+ if (!cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+ "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&cmd->cmd_list);
+
+ memcpy(&cmd->atio, atio, sizeof(*atio));
+ cmd->state = QLA_TGT_STATE_NEW;
+ cmd->tgt = ha->tgt.qla_tgt;
+ cmd->vha = vha;
+
+ INIT_WORK(&cmd->work, qlt_do_work);
+ queue_work(qla_tgt_wq, &cmd->work);
+ return 0;
+
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ int fn, void *iocb, int flags)
+{
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ int res;
+ uint8_t tmr_func;
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (!mcmd) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
+ "qla_target(%d): Allocation of management "
+ "command failed, some commands and their data could "
+ "leak\n", vha->vp_idx);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+ mcmd->sess = sess;
+
+ if (iocb) {
+ memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+ sizeof(mcmd->orig_iocb.imm_ntfy));
+ }
+ mcmd->tmr_func = fn;
+ mcmd->flags = flags;
+
+ switch (fn) {
+ case QLA_TGT_CLEAR_ACA:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
+ "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
+ tmr_func = TMR_CLEAR_ACA;
+ break;
+
+ case QLA_TGT_TARGET_RESET:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
+ "qla_target(%d): TARGET_RESET received\n",
+ sess->vha->vp_idx);
+ tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+
+ case QLA_TGT_LUN_RESET:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+ "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+ tmr_func = TMR_LUN_RESET;
+ break;
+
+ case QLA_TGT_CLEAR_TS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
+ "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
+ tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+
+ case QLA_TGT_ABORT_TS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
+ "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
+ tmr_func = TMR_ABORT_TASK_SET;
+ break;
+#if 0
+ case QLA_TGT_ABORT_ALL:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
+ "qla_target(%d): Doing ABORT_ALL_TASKS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_ABORT_ALL_SESS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
+ "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
+ "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_NEXUS_LOSS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
+ "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+#endif
+ default:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
+ "qla_target(%d): Unknown task mgmt fn 0x%x\n",
+ sess->vha->vp_idx, fn);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -ENOSYS;
+ }
+
+ res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+ if (res != 0) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
+ "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
+ sess->vha->vp_idx, res);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+{
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt;
+ struct qla_tgt_sess *sess;
+ uint32_t lun, unpacked_lun;
+ int lun_size, fn;
+
+ tgt = ha->tgt.qla_tgt;
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
+ fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ a->u.isp24.fcp_hdr.s_id);
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
+ "qla_target(%d): task mgmt fn 0x%x for "
+ "non-existant session\n", vha->vp_idx, fn);
+ return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
+ sizeof(struct atio_from_isp));
+ }
+
+ return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_abort_task(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+{
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ uint32_t lun, unpacked_lun;
+ int rc;
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
+ "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
+ vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+
+ mcmd->sess = sess;
+ memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+ sizeof(mcmd->orig_iocb.imm_ntfy));
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+ le16_to_cpu(iocb->u.isp2x.seq_id));
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
+ "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+ vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_abort_task(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ int loop_id;
+
+ loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ if (sess == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
+ "qla_target(%d): task abort for unexisting "
+ "session\n", vha->vp_idx);
+ return qlt_sched_sess_work(ha->tgt.qla_tgt,
+ QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
+ }
+
+ return __qlt_abort_task(vha, iocb, sess);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int res = 0;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+ "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
+ " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
+ iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
+ iocb->u.isp24.status_subcode);
+
+ switch (iocb->u.isp24.status_subcode) {
+ case ELS_PLOGI:
+ case ELS_FLOGI:
+ case ELS_PRLI:
+ case ELS_LOGO:
+ case ELS_PRLO:
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+ break;
+ case ELS_PDISC:
+ case ELS_ADISC:
+ {
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ res = 1; /* send notify ack */
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+ "qla_target(%d): Unsupported ELS command %x "
+ "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+ break;
+ }
+
+ return res;
+}
+
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+ struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
+ size_t first_offset = 0, rem_offset = offset, tmp = 0;
+ int i, sg_srr_cnt, bufflen = 0;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
+ "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
+ "cmd->sg_cnt: %u, direction: %d\n",
+ cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+
+ /*
+ * FIXME: Reject non zero SRR relative offset until we can test
+ * this code properly.
+ */
+ pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
+ return -1;
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
+ "Missing cmd->sg or zero cmd->sg_cnt in"
+ " qla_tgt_set_data_offset\n");
+ return -EINVAL;
+ }
+ /*
+ * Walk the current cmd->sg list until we locate the new sg_srr_start
+ */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
+ "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
+
+ if ((sg->length + tmp) > offset) {
+ first_offset = rem_offset;
+ sg_srr_start = sg;
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
+ "Found matching sg[%d], using %p as sg_srr_start, "
+ "and using first_offset: %zu\n", i, sg,
+ first_offset);
+ break;
+ }
+ tmp += sg->length;
+ rem_offset -= sg->length;
+ }
+
+ if (!sg_srr_start) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
+ "Unable to locate sg_srr_start for offset: %u\n", offset);
+ return -EINVAL;
+ }
+ sg_srr_cnt = (cmd->sg_cnt - i);
+
+ sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
+ if (!sg_srr) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
+ "Unable to allocate sgp\n");
+ return -ENOMEM;
+ }
+ sg_init_table(sg_srr, sg_srr_cnt);
+ sgp = &sg_srr[0];
+ /*
+ * Walk the remaining list for sg_srr_start, mapping to the newly
+ * allocated sg_srr taking first_offset into account.
+ */
+ for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
+ if (first_offset) {
+ sg_set_page(sgp, sg_page(sg),
+ (sg->length - first_offset), first_offset);
+ first_offset = 0;
+ } else {
+ sg_set_page(sgp, sg_page(sg), sg->length, 0);
+ }
+ bufflen += sgp->length;
+
+ sgp = sg_next(sgp);
+ if (!sgp)
+ break;
+ }
+
+ cmd->sg = sg_srr;
+ cmd->sg_cnt = sg_srr_cnt;
+ cmd->bufflen = bufflen;
+ cmd->offset += offset;
+ cmd->free_sg = 1;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
+ cmd->sg_cnt);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
+ cmd->bufflen);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
+ cmd->offset);
+
+ if (cmd->sg_cnt < 0)
+ BUG();
+
+ if (cmd->bufflen < 0)
+ BUG();
+
+ return 0;
+}
+
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+ uint32_t srr_rel_offs, int *xmit_type)
+{
+ int res = 0, rel_offs;
+
+ rel_offs = srr_rel_offs - cmd->offset;
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
+ srr_rel_offs, rel_offs);
+
+ *xmit_type = QLA_TGT_XMIT_ALL;
+
+ if (rel_offs < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
+ "qla_target(%d): SRR rel_offs (%d) < 0",
+ cmd->vha->vp_idx, rel_offs);
+ res = -1;
+ } else if (rel_offs == cmd->bufflen)
+ *xmit_type = QLA_TGT_XMIT_STATUS;
+ else if (rel_offs > 0)
+ res = qlt_set_data_offset(cmd, rel_offs);
+
+ return res;
+}
+
+/* No locks, thread context */
+static void qlt_handle_srr(struct scsi_qla_host *vha,
+ struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
+{
+ struct imm_ntfy_from_isp *ntfy =
+ (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_cmd *cmd = sctio->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
+ int xmit_type = 0, resp = 0;
+ uint32_t offset;
+ uint16_t srr_ui;
+
+ offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
+ srr_ui = ntfy->u.isp24.srr_ui;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
+ cmd, srr_ui);
+
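+ /*
+ * SRR_IU_STATUS: retransmit status only; SRR_IU_DATA_IN: retransmit
+ * (part of) the read data; SRR_IU_DATA_OUT: re-request (part of) the
+ * write data from the initiator.
+ */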
+ switch (srr_ui) {
+ case SRR_IU_STATUS:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ xmit_type = QLA_TGT_XMIT_STATUS;
+ resp = 1;
+ break;
+ case SRR_IU_DATA_IN:
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
+ "Unable to process SRR_IU_DATA_IN due to"
+ " missing cmd->sg, state: %d\n", cmd->state);
+ dump_stack();
+ goto out_reject;
+ }
+ if (se_cmd->scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe02a,
+ "Rejecting SRR_IU_DATA_IN with non GOOD "
+ "scsi_status\n");
+ goto out_reject;
+ }
+ cmd->bufflen = se_cmd->data_length;
+
+ if (qlt_has_data(cmd)) {
+ if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ resp = 1;
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
+ "qla_target(%d): SRR for in data for cmd "
+ "without them (tag %d, SCSI status %d), "
+ "reject", vha->vp_idx, cmd->tag,
+ cmd->se_cmd.scsi_status);
+ goto out_reject;
+ }
+ break;
+ case SRR_IU_DATA_OUT:
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
+ "Unable to process SRR_IU_DATA_OUT due to"
+ " missing cmd->sg\n");
+ dump_stack();
+ goto out_reject;
+ }
+ if (se_cmd->scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe02b,
+ "Rejecting SRR_IU_DATA_OUT"
+ " with non GOOD scsi_status\n");
+ goto out_reject;
+ }
+ cmd->bufflen = se_cmd->data_length;
+
+ if (qlt_has_data(cmd)) {
+ if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (xmit_type & QLA_TGT_XMIT_DATA)
+ qlt_rdy_to_xfer(cmd);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
+ "qla_target(%d): SRR for out data for cmd "
+ "without them (tag %d, SCSI status %d), "
+ "reject", vha->vp_idx, cmd->tag,
+ cmd->se_cmd.scsi_status);
+ goto out_reject;
+ }
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
+ "qla_target(%d): Unknown srr_ui value %x",
+ vha->vp_idx, srr_ui);
+ goto out_reject;
+ }
+
+ /* Transmit the response for the status and data-in cases */
+ if (resp)
+ qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+
+ return;
+
+out_reject:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ dump_stack();
+ } else
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
+ struct qla_tgt_srr_imm *imm, int ha_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(imm);
+}
+
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_tgt_srr_ctio *sctio;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
+ tgt);
+
+restart:
+ spin_lock_irqsave(&tgt->srr_lock, flags);
+ list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
+ struct qla_tgt_srr_imm *imm, *i, *ti;
+ struct qla_tgt_cmd *cmd;
+ struct se_cmd *se_cmd;
+
+ imm = NULL;
+ list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (i->srr_id == sctio->srr_id) {
+ list_del(&i->srr_list_entry);
+ if (imm) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
+ "qla_target(%d): There must be "
+ "only one IMM SRR per CTIO SRR "
+ "(IMM SRR %p, id %d, CTIO %p\n",
+ vha->vp_idx, i, i->srr_id, sctio);
+ qlt_reject_free_srr_imm(tgt->vha, i, 0);
+ } else
+ imm = i;
+ }
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
+ "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
+ sctio->srr_id);
+
+ if (imm == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
+ "Not found matching IMM for SRR CTIO (id %d)\n",
+ sctio->srr_id);
+ continue;
+ } else
+ list_del(&sctio->srr_list_entry);
+
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+
+ cmd = sctio->cmd;
+ /*
+ * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
+ * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
+ * logic..
+ */
+ cmd->offset = 0;
+ if (cmd->free_sg) {
+ kfree(cmd->sg);
+ cmd->sg = NULL;
+ cmd->free_sg = 0;
+ }
+ se_cmd = &cmd->se_cmd;
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
+ "SRR cmd %p (se_cmd %p, tag %d, op %x), "
+ "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
+ se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+
+ qlt_handle_srr(vha, sctio, imm);
+
+ kfree(imm);
+ kfree(sctio);
+ goto restart;
+ }
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_tgt_srr_imm *imm;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_srr_ctio *sctio;
+
+ tgt->imm_srr_id++;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+ vha->vp_idx);
+
+ imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
+ if (imm != NULL) {
+ memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
+
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ imm->srr_id = tgt->imm_srr_id;
+ list_add_tail(&imm->srr_list_entry,
+ &tgt->srr_imm_list);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
+ "IMM NTFY SRR %p added (id %d, ui %x)\n",
+ imm, imm->srr_id, iocb->u.isp24.srr_ui);
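+ /*
+ * If the matching CTIO SRR has already arrived, schedule the
+ * SRR work to pair it with this immediate notify SRR.
+ */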
+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+ int found = 0;
+ list_for_each_entry(sctio, &tgt->srr_ctio_list,
+ srr_list_entry) {
+ if (sctio->srr_id == imm->srr_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
+ "Scheduling srr work\n");
+ schedule_work(&tgt->srr_work);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
+ "qla_target(%d): imm_srr_id "
+ "== ctio_srr_id (%d), but there is no "
+ "corresponding SRR CTIO, deleting IMM "
+ "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
+ imm);
+ list_del(&imm->srr_list_entry);
+
+ kfree(imm);
+
+ spin_unlock(&tgt->srr_lock);
+ goto out_reject;
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ } else {
+ struct qla_tgt_srr_ctio *ts;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
+ "qla_target(%d): Unable to allocate SRR IMM "
+ "entry, SRR request will be rejected\n", vha->vp_idx);
+
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
+ srr_list_entry) {
+ if (sctio->srr_id == tgt->imm_srr_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
+ "CTIO SRR %p deleted (id %d)\n",
+ sctio, sctio->srr_id);
+ list_del(&sctio->srr_list_entry);
+ qlt_send_term_exchange(vha, sctio->cmd,
+ &sctio->cmd->atio, 1);
+ kfree(sctio);
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ goto out_reject;
+ }
+
+ return;
+
+out_reject:
+ qlt_send_notify_ack(vha, iocb, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t add_flags = 0;
+ int send_notify_ack = 1;
+ uint16_t status;
+
+ status = le16_to_cpu(iocb->u.isp2x.status);
+ switch (status) {
+ case IMM_NTFY_LIP_RESET:
+ {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
+ "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
+ vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+
+ if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+ send_notify_ack = 0;
+ break;
+ }
+
+ case IMM_NTFY_LIP_LINK_REINIT:
+ {
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
+ "qla_target(%d): LINK REINIT (loop %#x, "
+ "subcode %x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ }
+ memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
+ tgt->link_reinit_iocb_pending = 1;
+ /*
+ * QLogic requires waiting after LINK REINIT for possible
+ * PDISC or ADISC ELS commands
+ */
+ send_notify_ack = 0;
+ break;
+ }
+
+ case IMM_NTFY_PORT_LOGOUT:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
+ "qla_target(%d): Port logout (loop "
+ "%#x, subcode %x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_TPRLO:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
+ "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_PORT_CONFIG:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
+ "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
+ status);
+ if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_LOGO:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
+ "qla_target(%d): Link failure detected\n",
+ vha->vp_idx);
+ /* I_T nexus loss */
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_IOCB_OVERFLOW:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
+ "qla_target(%d): Cannot provide requested "
+ "capability (IOCB overflowed the immediate notify "
+ "resource count)\n", vha->vp_idx);
+ break;
+
+ case IMM_NTFY_ABORT_TASK:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
+ "qla_target(%d): Abort Task (S %08x I %#x -> "
+ "L %#x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp2x.seq_id),
+ GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
+ le16_to_cpu(iocb->u.isp2x.lun));
+ if (qlt_abort_task(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_RESOURCE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
+ "qla_target(%d): Out of resources, host %ld\n",
+ vha->vp_idx, vha->host_no);
+ break;
+
+ case IMM_NTFY_MSG_RX:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
+ "qla_target(%d): Immediate notify task %x\n",
+ vha->vp_idx, iocb->u.isp2x.task_flags);
+ if (qlt_handle_task_mgmt(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_ELS:
+ if (qlt_24xx_handle_els(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_SRR:
+ qlt_prepare_srr_imm(vha, iocb);
+ send_notify_ack = 0;
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
+ "qla_target(%d): Received unknown immediate "
+ "notify status %x\n", vha->vp_idx, status);
+ break;
+ }
+
+ if (send_notify_ack)
+ qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
+ * This function sends busy to ISP 2xxx or 24xx.
+ */
+static void qlt_send_busy(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status)
+{
+ struct ctio7_to_24xx *ctio24;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ struct qla_tgt_sess *sess = NULL;
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ atio->u.isp24.fcp_hdr.s_id);
+ if (!sess) {
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+ return;
+ }
+ /* Sending a marker isn't necessary, since we're called from the ISR */
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!pkt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet", vha->vp_idx, __func__);
+ return;
+ }
+
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ ctio24 = (struct ctio7_to_24xx *)pkt;
+ ctio24->entry_type = CTIO_TYPE7;
+ ctio24->nport_handle = sess->loop_id;
+ ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->vp_index = vha->vp_idx;
+ ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(
+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
+ CTIO7_FLAGS_DONT_RET_CTIO);
+ /*
+ * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
+ * if explicit confirmation is used.
+ */
+ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.scsi_status = cpu_to_le16(status);
+ ctio24->u.status1.residual = get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]);
+ if (ctio24->u.status1.residual != 0)
+ ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ int rc;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+ "ATIO pkt, but no tgt (ha %p)", ha);
+ return;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe02c,
+ "qla_target(%d): ATIO pkt %p: type %02x count %02x",
+ vha->vp_idx, atio, atio->u.raw.entry_type,
+ atio->u.raw.entry_count);
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (atio->u.raw.entry_type) {
+ case ATIO_TYPE7:
+ ql_dbg(ql_dbg_tgt, vha, 0xe02d,
+ "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
+ "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+ vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
+ atio->u.isp24.fcp_cmnd.rddata,
+ atio->u.isp24.fcp_cmnd.wrdata,
+ atio->u.isp24.fcp_cmnd.add_cdb_len,
+ be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len])),
+ atio->u.isp24.fcp_hdr.s_id[0],
+ atio->u.isp24.fcp_hdr.s_id[1],
+ atio->u.isp24.fcp_hdr.s_id[2]);
+
+ if (unlikely(atio->u.isp24.exchange_addr ==
+ ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe058,
+ "qla_target(%d): ATIO_TYPE7 "
+ "received with UNKNOWN exchange address, "
+ "sending QUEUE_FULL\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+ break;
+ }
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+ rc = qlt_handle_cmd_for_atio(vha, atio);
+ else
+ rc = qlt_handle_task_mgmt(vha, atio);
+ if (unlikely(rc != 0)) {
+ if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+ qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+#else
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+ } else {
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe059,
+ "qla_target: Unable to send "
+ "command to target for req, "
+ "ignoring.\n");
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05a,
+ "qla_target(%d): Unable to send "
+ "command to target, sending BUSY "
+ "status.\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ }
+ }
+ }
+ break;
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ if (unlikely(atio->u.isp2x.entry_status != 0)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05b,
+ "qla_target(%d): Received ATIO packet %x "
+ "with error status %x\n", vha->vp_idx,
+ atio->u.raw.entry_type,
+ atio->u.isp2x.entry_status);
+ break;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+ qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05c,
+ "qla_target(%d): Received unknown ATIO atio "
+ "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05d,
+ "qla_target(%d): Response pkt %x received, but no "
+ "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe02f,
+ "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
+ "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
+ pkt->entry_count, pkt->entry_status, pkt->handle);
+
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (pkt->entry_type) {
+ case CTIO_TYPE7:
+ {
+ struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
+ vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case ACCEPT_TGT_IO_TYPE:
+ {
+ struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
+ int rc;
+ ql_dbg(ql_dbg_tgt, vha, 0xe031,
+ "ACCEPT_TGT_IO instance %d status %04x "
+ "lun %04x read/write %d data_length %04x "
+ "target_id %02x rx_id %04x\n ", vha->vp_idx,
+ le16_to_cpu(atio->u.isp2x.status),
+ le16_to_cpu(atio->u.isp2x.lun),
+ atio->u.isp2x.execution_codes,
+ le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
+ atio), atio->u.isp2x.rx_id);
+ if (atio->u.isp2x.status !=
+ __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05e,
+ "qla_target(%d): ATIO with error "
+ "status %x received\n", vha->vp_idx,
+ le16_to_cpu(atio->u.isp2x.status));
+ break;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe032,
+ "FCP CDB: 0x%02x, sizeof(cdb): %lu",
+ atio->u.isp2x.cdb[0], (unsigned long
+ int)sizeof(atio->u.isp2x.cdb));
+
+ rc = qlt_handle_cmd_for_atio(vha, atio);
+ if (unlikely(rc != 0)) {
+ if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+ qlt_send_busy(vha, atio, 0);
+#else
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+ } else {
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send "
+ "command to target, sending TERM "
+ "EXCHANGE for rsp\n");
+ qlt_send_term_exchange(vha, NULL,
+ atio, 1);
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send "
+ "command to target, sending BUSY "
+ "status\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, 0);
+ }
+ }
+ }
+ }
+ break;
+
+ case CONTINUE_TGT_IO_TYPE:
+ {
+ struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe033,
+ "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case CTIO_A64_TYPE:
+ {
+ struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
+ vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
+ qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
+ break;
+
+ case NOTIFY_ACK_TYPE:
+ if (tgt->notify_ack_expected > 0) {
+ struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe036,
+ "NOTIFY_ACK seq %08x status %x\n",
+ le16_to_cpu(entry->u.isp2x.seq_id),
+ le16_to_cpu(entry->u.isp2x.status));
+ tgt->notify_ack_expected--;
+ if (entry->u.isp2x.status !=
+ __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe061,
+ "qla_target(%d): NOTIFY_ACK "
+ "failed %x\n", vha->vp_idx,
+ le16_to_cpu(entry->u.isp2x.status));
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe062,
+ "qla_target(%d): Unexpected NOTIFY_ACK received\n",
+ vha->vp_idx);
+ }
+ break;
+
+ case ABTS_RECV_24XX:
+ ql_dbg(ql_dbg_tgt, vha, 0xe037,
+ "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
+ qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
+ break;
+
+ case ABTS_RESP_24XX:
+ if (tgt->abts_resp_expected > 0) {
+ struct abts_resp_from_24xx_fw *entry =
+ (struct abts_resp_from_24xx_fw *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe038,
+ "ABTS_RESP_24XX: compl_status %x\n",
+ entry->compl_status);
+ tgt->abts_resp_expected--;
+ if (le16_to_cpu(entry->compl_status) !=
+ ABTS_RESP_COMPL_SUCCESS) {
+ if ((entry->error_subcode1 == 0x1E) &&
+ (entry->error_subcode2 == 0)) {
+ /*
+ * We've got a race here: the aborted
+ * exchange was not terminated, i.e. the
+ * response for the aborted command was
+ * sent between the time the abort
+ * request was received and the time it
+ * was processed.
+ * Unfortunately, the firmware has a
+ * silly requirement that all aborted
+ * exchanges must be explicitly
+ * terminated, otherwise it refuses to
+ * send responses for the abort
+ * requests. So, we have to
+ * (re)terminate the exchange and retry
+ * the abort response.
+ */
+ qlt_24xx_retry_term_exchange(vha,
+ entry);
+ } else
+ ql_dbg(ql_dbg_tgt, vha, 0xe063,
+ "qla_target(%d): ABTS_RESP_24XX "
+ "failed %x (subcode %x:%x)",
+ vha->vp_idx, entry->compl_status,
+ entry->error_subcode1,
+ entry->error_subcode2);
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe064,
+ "qla_target(%d): Unexpected ABTS_RESP_24XX "
+ "received\n", vha->vp_idx);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe065,
+ "qla_target(%d): Received unknown response pkt "
+ "type %x\n", vha->vp_idx, pkt->entry_type);
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
+ */
+void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
+ uint16_t *mailbox)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ int login_code;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe039,
+ "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
+ vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
+ ha->operating_mode, ha->current_topology);
+
+ if (!ha->tgt.tgt_ops)
+ return;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03a,
+ "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
+ return;
+ }
+
+ if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
+ IS_QLA2100(ha))
+ return;
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (code) {
+ case MBA_RESET: /* Reset */
+ case MBA_SYSTEM_ERR: /* System Error */
+ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
+ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
+ "qla_target(%d): System error async event %#x "
+ "occured", vha->vp_idx, code);
+ break;
+ case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MBA_LOOP_UP:
+ {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
+ "qla_target(%d): Async LOOP_UP occured "
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ break;
+ }
+
+ case MBA_LIP_OCCURRED:
+ case MBA_LOOP_DOWN:
+ case MBA_LIP_RESET:
+ case MBA_RSCN_UPDATE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
+ "qla_target(%d): Async event %#x occured "
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ break;
+
+ case MBA_PORT_UPDATE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
+ "qla_target(%d): Port update async event %#x "
+ "occured: updating the ports database (m[0]=%x, m[1]=%x, "
+ "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+ login_code = le16_to_cpu(mailbox[2]);
+ if (login_code == 0x4)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
+ "Async MB 2: Got PLOGI Complete\n");
+ else if (login_code == 0x7)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
+ "Async MB 2: Port Logged Out\n");
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
+ "qla_target(%d): Async event %#x occured: "
+ "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+ uint16_t loop_id)
+{
+ fc_port_t *fcport;
+ int rc;
+
+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ if (!fcport) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+ "qla_target(%d): Allocation of tmp FC port failed",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
+
+ fcport->loop_id = loop_id;
+
+ rc = qla2x00_get_port_database(vha, fcport, 0);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
+ "qla_target(%d): Failed to retrieve fcport "
+ "information -- get_port_database() returned %x "
+ "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+ kfree(fcport);
+ return NULL;
+ }
+
+ return fcport;
+}
+
+/* Must be called under tgt_mutex */
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+ uint8_t *s_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ fc_port_t *fcport = NULL;
+ int rc, global_resets;
+ uint16_t loop_id = 0;
+
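+ /*
+ * Snapshot the global reset counter before the firmware lookups; if a
+ * global reset happens while the port is being looked up, the data may
+ * be stale and the whole lookup is retried from here.
+ */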
+retry:
+ global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+ rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+ if (rc != 0) {
+ if ((s_id[0] == 0xFF) &&
+ (s_id[1] == 0xFC)) {
+ /*
+ * This is Domain Controller, so it should be
+ * OK to drop SCSI commands from it.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+ "Unable to find initiator with S_ID %x:%x:%x",
+ s_id[0], s_id[1], s_id[2]);
+ } else
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
+ "qla_target(%d): Unable to find "
+ "initiator with S_ID %x:%x:%x",
+ vha->vp_idx, s_id[0], s_id[1],
+ s_id[2]);
+ return NULL;
+ }
+
+ fcport = qlt_get_port_database(vha, loop_id);
+ if (!fcport)
+ return NULL;
+
+ if (global_resets !=
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+ "qla_target(%d): global reset during session discovery "
+ "(counter was %d, new %d), retrying", vha->vp_idx,
+ global_resets,
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+ goto retry;
+ }
+
+ sess = qlt_create_sess(vha, fcport, true);
+
+ kfree(fcport);
+ return sess;
+}
+
+static void qlt_abort_work(struct qla_tgt *tgt,
+ struct qla_tgt_sess_work_param *prm)
+{
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ uint32_t be_s_id;
+ uint8_t s_id[3];
+ int rc;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
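+ /*
+ * struct fcp_hdr_le carries the S_ID bytes in reversed (little-endian)
+ * order, so swap them back into wire order before the session lookup.
+ */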
+ s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
+ s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
+ s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ (unsigned char *)&be_s_id);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has got an extra creation ref */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!sess)
+ goto out_term;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+ if (rc != 0)
+ goto out_term;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+static void qlt_tmr_work(struct qla_tgt *tgt,
+ struct qla_tgt_sess_work_param *prm)
+{
+ struct atio_from_isp *a = &prm->tm_iocb2;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ uint8_t *s_id = NULL; /* to hide compiler warnings */
+ int rc;
+ uint32_t lun, unpacked_lun;
+ int lun_size, fn;
+ void *iocb;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has got an extra creation ref */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!sess)
+ goto out_term;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+ iocb = a;
+ lun = a->u.isp24.fcp_cmnd.lun;
+ lun_size = sizeof(lun);
+ fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+ if (rc != 0)
+ goto out_term;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+static void qlt_sess_work_fn(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ while (!list_empty(&tgt->sess_works_list)) {
+ struct qla_tgt_sess_work_param *prm = list_entry(
+ tgt->sess_works_list.next, typeof(*prm),
+ sess_works_list_entry);
+
+ /*
+ * This work can be scheduled on several CPUs at a time, so we
+ * must delete the entry to eliminate double processing.
+ */
+ list_del(&prm->sess_works_list_entry);
+
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ switch (prm->type) {
+ case QLA_TGT_SESS_WORK_ABORT:
+ qlt_abort_work(tgt, prm);
+ break;
+ case QLA_TGT_SESS_WORK_TM:
+ qlt_tmr_work(tgt, prm);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+
+ kfree(prm);
+ }
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+{
+ struct qla_tgt *tgt;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
+ "Registering target for host %ld(%p)", base_vha->host_no, ha);
+
+ BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+
+ tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+ "Unable to allocate struct qla_tgt\n");
+ return -ENOMEM;
+ }
+
+ if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
+ base_vha->host->hostt->supported_mode |= MODE_TARGET;
+
+ tgt->ha = ha;
+ tgt->vha = base_vha;
+ init_waitqueue_head(&tgt->waitQ);
+ INIT_LIST_HEAD(&tgt->sess_list);
+ INIT_LIST_HEAD(&tgt->del_sess_list);
+ INIT_DELAYED_WORK(&tgt->sess_del_work,
+ (void (*)(struct work_struct *))qlt_del_sess_work_fn);
+ spin_lock_init(&tgt->sess_work_lock);
+ INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+ INIT_LIST_HEAD(&tgt->sess_works_list);
+ spin_lock_init(&tgt->srr_lock);
+ INIT_LIST_HEAD(&tgt->srr_ctio_list);
+ INIT_LIST_HEAD(&tgt->srr_imm_list);
+ INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+ atomic_set(&tgt->tgt_global_resets_count, 0);
+
+ ha->tgt.qla_tgt = tgt;
+
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
+ "qla_target(%d): using 64 Bit PCI addressing",
+ base_vha->vp_idx);
+ tgt->tgt_enable_64bit_addr = 1;
+ /* 3 is reserved */
+ tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
+ tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
+ tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+
+ mutex_lock(&qla_tgt_mutex);
+ list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
+ mutex_unlock(&qla_tgt_mutex);
+
+ return 0;
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
+{
+ if (!ha->tgt.qla_tgt)
+ return 0;
+
+ mutex_lock(&qla_tgt_mutex);
+ list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+ mutex_unlock(&qla_tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
+ vha->host_no, ha);
+ qlt_release(ha->tgt.qla_tgt);
+
+ return 0;
+}
+
+static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
+ unsigned char *b)
+{
+ int i;
+
+ pr_debug("qla2xxx HW vha->node_name: ");
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", vha->node_name[i]);
+ pr_debug("\n");
+ pr_debug("qla2xxx HW vha->port_name: ");
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", vha->port_name[i]);
+ pr_debug("\n");
+
+ pr_debug("qla2xxx passed configfs WWPN: ");
+ put_unaligned_be64(wwpn, b);
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", b[i]);
+ pr_debug("\n");
+}
+
+/**
+ * qla_tgt_lport_register - register lport with external module
+ *
+ * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
+ * @wwpn: Passed FC target WWPN
+ * @callback: lport initialization callback for tcm_qla2xxx code
+ * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ */
+int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
+ int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+{
+ struct qla_tgt *tgt;
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int rc;
+ u8 b[WWN_SIZE];
+
+ mutex_lock(&qla_tgt_mutex);
+ list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
+ vha = tgt->vha;
+ ha = vha->hw;
+
+ host = vha->host;
+ if (!host)
+ continue;
+
+ if (ha->tgt.tgt_ops != NULL)
+ continue;
+
+ if (!(host->hostt->supported_mode & MODE_TARGET))
+ continue;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (host->active_mode & MODE_TARGET) {
+ pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (!scsi_host_get(host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe068,
+ "Unable to scsi_host_get() for"
+ " qla2xxx scsi_host\n");
+ continue;
+ }
+ qlt_lport_dump(vha, wwpn, b);
+
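+ /*
+ * qlt_lport_dump() also copied the configfs WWPN into b[] in big-endian
+ * byte order; only claim this host if its port name matches that WWPN.
+ */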
+ if (memcmp(vha->port_name, b, WWN_SIZE)) {
+ scsi_host_put(host);
+ continue;
+ }
+ /*
+ * Setup passed parameters ahead of invoking callback
+ */
+ ha->tgt.tgt_ops = qla_tgt_ops;
+ ha->tgt.target_lport_ptr = target_lport_ptr;
+ rc = (*callback)(vha);
+ if (rc != 0) {
+ ha->tgt.tgt_ops = NULL;
+ ha->tgt.target_lport_ptr = NULL;
+ }
+ mutex_unlock(&qla_tgt_mutex);
+ return rc;
+ }
+ mutex_unlock(&qla_tgt_mutex);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(qlt_lport_register);
+
+/**
+ * qla_tgt_lport_deregister - Deregister lport
+ *
+ * @vha: Registered scsi_qla_host pointer
+ */
+void qlt_lport_deregister(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct Scsi_Host *sh = vha->host;
+ /*
+ * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
+ */
+ ha->tgt.target_lport_ptr = NULL;
+ ha->tgt.tgt_ops = NULL;
+ /*
+ * Release the Scsi_Host reference for the underlying qla2xxx host
+ */
+ scsi_host_put(sh);
+}
+EXPORT_SYMBOL(qlt_lport_deregister);
+
+/* Must be called under HW lock */
+void qlt_set_mode(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ switch (ql2x_ini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->host->active_mode = MODE_TARGET;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ vha->host->active_mode |= MODE_TARGET;
+ break;
+ default:
+ break;
+ }
+
+ if (ha->tgt.ini_mode_force_reverse)
+ qla_reverse_ini_mode(vha);
+}
+
+/* Must be called under HW lock */
+void qlt_clear_mode(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ switch (ql2x_ini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ vha->host->active_mode = MODE_UNKNOWN;
+ break;
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->host->active_mode = MODE_INITIATOR;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ vha->host->active_mode &= ~MODE_TARGET;
+ break;
+ default:
+ break;
+ }
+
+ if (ha->tgt.ini_mode_force_reverse)
+ qla_reverse_ini_mode(vha);
+}
+
+/*
+ * qla_tgt_enable_vha - NO LOCK HELD
+ *
+ * host_reset, bring up w/ Target Mode Enabled
+ */
+void
+qlt_enable_vha(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ unsigned long flags;
+
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe069,
+ "Unable to locate qla_tgt pointer from"
+ " struct qla_hw_data\n");
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tgt->tgt_stopped = 0;
+ qlt_set_mode(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+}
+EXPORT_SYMBOL(qlt_enable_vha);
+
+/*
+ * qla_tgt_disable_vha - NO LOCK HELD
+ *
+ * Disable Target Mode and reset the adapter
+ */
+void
+qlt_disable_vha(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ unsigned long flags;
+
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe06a,
+ "Unable to locate qla_tgt pointer from"
+ " struct qla_hw_data\n");
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_clear_mode(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+}
+
+/*
+ * Called from qla_init.c:qla24xx_vport_create() context to set up
+ * the target mode specific struct scsi_qla_host and struct qla_hw_data
+ * members.
+ */
+void
+qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
+{
+ if (!qla_tgt_mode_enabled(vha))
+ return;
+
+ mutex_init(&ha->tgt.tgt_mutex);
+ mutex_init(&ha->tgt.tgt_host_action_mutex);
+
+ qlt_clear_mode(vha);
+
+ /*
+ * NOTE: Currently the value is kept the same for <24xx and
+ * >=24xx ISPs. If it is necessary to change it,
+ * the check should be added for specific ISPs,
+ * assigning the value appropriately.
+ */
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+}
+
+void
+qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+{
+ /*
+ * FC-4 Feature bit 0 indicates target functionality to the name server.
+ */
+ if (qla_tgt_mode_enabled(vha)) {
+ if (qla_ini_mode_enabled(vha))
+ ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ else
+ ct_req->req.rff_id.fc4_feature = BIT_0;
+ } else if (qla_ini_mode_enabled(vha)) {
+ ct_req->req.rff_id.fc4_feature = BIT_1;
+ }
+}
+
+/*
+ * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
+ * @ha: HA context
+ *
+ * Beginning of ATIO ring has initialization control block already built
+ * by nvram config routine.
+ *
+ * Returns 0 on success.
+ */
+void
+qlt_init_atio_q_entries(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t cnt;
+ struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
+
+ if (!qla_tgt_mode_enabled(vha))
+ return;
+
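+ /*
+ * Pre-mark every ring entry with the ATIO_PROCESSED signature so that
+ * qlt_24xx_process_atio_queue() can tell entries newly written by the
+ * firmware from empty or already consumed ones.
+ */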
+ for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
+ pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt++;
+ }
+
+}
+
+/*
+ * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
+ * @ha: SCSI driver HA context
+ */
+void
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct atio_from_isp *pkt;
+ int cnt, i;
+
+ if (!vha->flags.online)
+ return;
+
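+ /*
+ * Consume ring entries until one still carrying the ATIO_PROCESSED
+ * signature is reached (nothing new has been written there). A packet
+ * may span entry_count ring slots; every consumed slot is re-marked as
+ * processed.
+ */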
+ while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+ pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+ cnt = pkt->u.raw.entry_count;
+
+ qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+
+ for (i = 0; i < cnt; i++) {
+ ha->tgt.atio_ring_index++;
+ if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+ ha->tgt.atio_ring_index = 0;
+ ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+ } else
+ ha->tgt.atio_ring_ptr++;
+
+ pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+ }
+ wmb();
+ }
+
+ /* Adjust ring index */
+ WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+}
+
+void
+qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+/* FIXME: atio_q in/out for ha->mqenable=1..? */
+ if (ha->mqenable) {
+#if 0
+ WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
+ WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
+ RD_REG_DWORD(&reg->isp25mq.atio_q_out);
+#endif
+ } else {
+ /* Setup ATIO registers for target mode */
+ WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
+ WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
+ RD_REG_DWORD(&reg->isp24.atio_q_out);
+ }
+}
+
+void
+qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
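+ /*
+ * The first time target mode is enabled the original NVRAM firmware
+ * options are saved, so they can be restored below when target mode is
+ * turned off again.
+ */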
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ /* Enable FC tapes support */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_24xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+int
+qlt_24xx_process_response_error(struct scsi_qla_host *vha,
+ struct sts_entry_24xx *pkt)
+{
+ switch (pkt->entry_type) {
+ case ABTS_RECV_24XX:
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case NOTIFY_ACK_TYPE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+void
+qlt_modify_vp_config(struct scsi_qla_host *vha,
+ struct vp_config_entry_24xx *vpmod)
+{
+ if (qla_tgt_mode_enabled(vha))
+ vpmod->options_idx1 &= ~BIT_5;
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ vpmod->options_idx1 &= ~BIT_4;
+}
+
+void
+qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ mutex_init(&ha->tgt.tgt_mutex);
+ mutex_init(&ha->tgt.tgt_host_action_mutex);
+ qlt_clear_mode(base_vha);
+}
+
+int
+qlt_mem_alloc(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
+ MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+ if (!ha->tgt.tgt_vp_map)
+ return -ENOMEM;
+
+ ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
+ (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
+ &ha->tgt.atio_dma, GFP_KERNEL);
+ if (!ha->tgt.atio_ring) {
+ kfree(ha->tgt.tgt_vp_map);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void
+qlt_mem_free(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.atio_ring) {
+ dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
+ sizeof(struct atio_from_isp), ha->tgt.atio_ring,
+ ha->tgt.atio_dma);
+ }
+ kfree(ha->tgt.tgt_vp_map);
+}
+
+/* vport_slock to be held by the caller */
+void
+qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ switch (cmd) {
+ case SET_VP_IDX:
+ vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
+ break;
+ case SET_AL_PA:
+ vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+ break;
+ case RESET_VP_IDX:
+ vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
+ break;
+ case RESET_AL_PA:
+ vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+ break;
+ }
+}
+
+static int __init qlt_parse_ini_mode(void)
+{
+ if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+ else
+ return false;
+
+ return true;
+}
+
+int __init qlt_init(void)
+{
+ int ret;
+
+ if (!qlt_parse_ini_mode()) {
+ ql_log(ql_log_fatal, NULL, 0xe06b,
+ "qlt_parse_ini_mode() failed\n");
+ return -EINVAL;
+ }
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
+ sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
+ NULL);
+ if (!qla_tgt_cmd_cachep) {
+ ql_log(ql_log_fatal, NULL, 0xe06c,
+ "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
+ return -ENOMEM;
+ }
+
+ qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
+ sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
+ qla_tgt_mgmt_cmd), 0, NULL);
+ if (!qla_tgt_mgmt_cmd_cachep) {
+ ql_log(ql_log_fatal, NULL, 0xe06d,
+ "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
+ mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
+ if (!qla_tgt_mgmt_cmd_mempool) {
+ ql_log(ql_log_fatal, NULL, 0xe06e,
+ "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
+ ret = -ENOMEM;
+ goto out_mgmt_cmd_cachep;
+ }
+
+ qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
+ if (!qla_tgt_wq) {
+ ql_log(ql_log_fatal, NULL, 0xe06f,
+ "alloc_workqueue for qla_tgt_wq failed\n");
+ ret = -ENOMEM;
+ goto out_cmd_mempool;
+ }
+ /*
+ * Return 1 to signal that initiator-mode is being disabled
+ */
+ return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
+
+out_cmd_mempool:
+ mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_mgmt_cmd_cachep:
+ kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+out:
+ kmem_cache_destroy(qla_tgt_cmd_cachep);
+ return ret;
+}
+
+void qlt_exit(void)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ destroy_workqueue(qla_tgt_wq);
+ mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+ kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+ kmem_cache_destroy(qla_tgt_cmd_cachep);
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 000000000000..9f9ef1644fd9
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1004 @@
+/*
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ * Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * Additional file for the target driver support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * This is the global def file that is useful for including from the
+ * target portion.
+ */
+
+#ifndef __QLA_TARGET_H
+#define __QLA_TARGET_H
+
+#include "qla_def.h"
+
+/*
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
+#define QLA2XXX_TARGET_MAGIC 269
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
+#define QLA2XXX_INITIATOR_MAGIC 57222
+
+#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
+#define QLA2XXX_INI_MODE_STR_DISABLED "disabled"
+#define QLA2XXX_INI_MODE_STR_ENABLED "enabled"
+
+#define QLA2XXX_INI_MODE_EXCLUSIVE 0
+#define QLA2XXX_INI_MODE_DISABLED 1
+#define QLA2XXX_INI_MODE_ENABLED 2
+
+#define QLA2XXX_COMMAND_COUNT_INIT 250
+#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
+
+/*
+ * Used to mark which completion handles (for RIO Statuses) are for CTIOs
+ * vs. regular (non-target) info. This is checked in
+ * qla2x00_process_response_queue() to see if a handle coming back in a
+ * multi-complete should go to the tgt driver or be handled by qla2xxx itself.
+ */
+#define CTIO_COMPLETION_HANDLE_MARK BIT_29
+#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#endif
+#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
+
+/* Used to mark CTIO as intermediate */
+#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
+
+#ifndef OF_SS_MODE_0
+/*
+ * ISP target entries - Flags bit definitions.
+ */
+#define OF_SS_MODE_0 0
+#define OF_SS_MODE_1 1
+#define OF_SS_MODE_2 2
+#define OF_SS_MODE_3 3
+
+#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */
+#define OF_DATA_IN BIT_6 /* Data in to initiator */
+ /* (data from target to initiator) */
+#define OF_DATA_OUT BIT_7 /* Data out from initiator */
+ /* (data from initiator to target) */
+#define OF_NO_DATA (BIT_7 | BIT_6)
+#define OF_INC_RC BIT_8 /* Increment command resource count */
+#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */
+#define OF_CONF_REQ BIT_13 /* Confirmation Requested */
+#define OF_TERM_EXCH BIT_14 /* Terminate exchange */
+#define OF_SSTS BIT_15 /* Send SCSI status */
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD32
+#define QLA_TGT_DATASEGS_PER_CMD32 3
+#define QLA_TGT_DATASEGS_PER_CONT32 7
+#define QLA_TGT_MAX_SG32(ql) \
+ (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
+ QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
+
+#define QLA_TGT_DATASEGS_PER_CMD64 2
+#define QLA_TGT_DATASEGS_PER_CONT64 5
+#define QLA_TGT_MAX_SG64(ql) \
+ (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
+ QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
+#endif
+
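+/*
+ * Maximum scatter/gather segments usable by one command on a 24xx queue of
+ * (ql) entries: one segment fits in the CTIO7 IOCB itself and each
+ * continuation IOCB adds five more, capped at 1270 segments.
+ */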
+#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
+#define QLA_TGT_DATASEGS_PER_CMD_24XX 1
+#define QLA_TGT_DATASEGS_PER_CONT_24XX 5
+#define QLA_TGT_MAX_SG_24XX(ql) \
+ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+#endif
+#endif
+
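+/*
+ * Pull the initiator ID out of a 2xxx-series IOCB: a 16-bit extended ID
+ * when extended IDs are enabled, otherwise the 8-bit standard ID.
+ */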
+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
+ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
+ : (uint16_t)(iocb)->u.isp2x.target.id.standard)
+
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
+/*
+ * ISP queue - immediate notify entry structure definition.
+ * This is sent by the ISP to the Target driver.
+ * This IOCB carries reports of events sent by the
+ * initiator that need to be handled by the target
+ * driver immediately.
+ */
+struct imm_ntfy_from_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t lun;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t status_modifier;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+#define SRR_IU_DATA_IN 0x1
+#define SRR_IU_DATA_OUT 0x5
+#define SRR_IU_STATUS 0x7
+ uint16_t srr_ox_id;
+ uint8_t reserved_2[28];
+ } isp2x;
+ struct {
+ uint32_t reserved;
+ uint16_t nport_handle;
+ uint16_t reserved_2;
+ uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t reserved_3;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_ox_id;
+ uint8_t reserved_4[19];
+ uint8_t vp_index;
+ uint32_t reserved_5;
+ uint8_t port_id[3];
+ uint8_t reserved_6;
+ } isp24;
+ } u;
+ uint16_t reserved_7;
+ uint16_t ox_id;
+} __packed;
+#endif
+
+#ifndef NOTIFY_ACK_TYPE
+#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
+/*
+ * ISP queue - notify acknowledge entry structure definition.
+ * This is sent to the ISP from the target driver.
+ */
+struct nack_to_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t flags;
+ uint16_t resp_code;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_flags;
+ uint16_t srr_reject_code;
+ uint8_t srr_reject_vendor_uniq;
+ uint8_t srr_reject_code_expl;
+ uint8_t reserved_2[24];
+ } isp2x;
+ struct {
+ uint32_t handle;
+ uint16_t nport_handle;
+ uint16_t reserved_1;
+ uint16_t flags;
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t reserved_3;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_flags;
+ uint8_t reserved_4[19];
+ uint8_t vp_index;
+ uint8_t srr_reject_vendor_uniq;
+ uint8_t srr_reject_code_expl;
+ uint8_t srr_reject_code;
+ uint8_t reserved_5[5];
+ } isp24;
+ } u;
+ uint8_t reserved[2];
+ uint16_t ox_id;
+} __packed;
+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
+
+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
+
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
+
+#define NOTIFY_ACK_SUCCESS 0x01
+#endif
+
+#ifndef ACCEPT_TGT_IO_TYPE
+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
+#endif
+
+#ifndef CONTINUE_TGT_IO_TYPE
+#define CONTINUE_TGT_IO_TYPE 0x17
+/*
+ * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
+ * This structure is sent to the ISP 2xxx from the target driver.
+ */
+struct ctio_to_2xxx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ target_id_t target;
+ uint16_t rx_id;
+ uint16_t flags;
+ uint16_t status;
+ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ uint16_t dseg_count; /* Data segment count. */
+ uint32_t relative_offset;
+ uint32_t residual;
+ uint16_t reserved_1[3];
+ uint16_t scsi_status;
+ uint32_t transfer_length;
+ uint32_t dseg_0_address; /* Data segment 0 address. */
+ uint32_t dseg_0_length; /* Data segment 0 length. */
+ uint32_t dseg_1_address; /* Data segment 1 address. */
+ uint32_t dseg_1_length; /* Data segment 1 length. */
+ uint32_t dseg_2_address; /* Data segment 2 address. */
+ uint32_t dseg_2_length; /* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID 0x07
+#define ATIO_CANT_PROV_CAP 0x16
+#define ATIO_CDB_VALID 0x3D
+
+#define ATIO_EXEC_READ BIT_1
+#define ATIO_EXEC_WRITE BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS 0x01
+#define CTIO_ABORTED 0x02
+#define CTIO_INVALID_RX_ID 0x08
+#define CTIO_TIMEOUT 0x0B
+#define CTIO_LIP_RESET 0x0E
+#define CTIO_TARGET_RESET 0x17
+#define CTIO_PORT_UNAVAILABLE 0x28
+#define CTIO_PORT_LOGGED_OUT 0x29
+#define CTIO_PORT_CONF_CHANGED 0x2A
+#define CTIO_SRR_RECEIVED 0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+struct fcp_hdr {
+ uint8_t r_ctl;
+ uint8_t d_id[3];
+ uint8_t cs_ctl;
+ uint8_t s_id[3];
+ uint8_t type;
+ uint8_t f_ctl[3];
+ uint8_t seq_id;
+ uint8_t df_ctl;
+ uint16_t seq_cnt;
+ uint16_t ox_id;
+ uint16_t rx_id;
+ uint32_t parameter;
+} __packed;
+
+struct fcp_hdr_le {
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+ uint8_t f_ctl[3];
+ uint8_t type;
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
+#define F_CTL_LAST_SEQ BIT_20
+#define F_CTL_END_SEQ BIT_19
+#define F_CTL_SEQ_INITIATIVE BIT_16
+
+#define R_CTL_BASIC_LINK_SERV 0x80
+#define R_CTL_B_ACC 0x4
+#define R_CTL_B_RJT 0x5
+
+struct atio7_fcp_cmnd {
+ uint64_t lun;
+ uint8_t cmnd_ref;
+ uint8_t task_attr:3;
+ uint8_t reserved:5;
+ uint8_t task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET 5
+#define FCP_CMND_TASK_MGMT_LU_RESET 4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1
+ uint8_t wrdata:1;
+ uint8_t rddata:1;
+ uint8_t add_cdb_len:6;
+ uint8_t cdb[16];
+ /*
+ * add_cdb is optional and can be absent from struct atio7_fcp_cmnd. Its size
+ * of 4 is only to make sizeof(struct atio7_fcp_cmnd) be as expected by
+ * BUILD_BUG_ON in qlt_init().
+ */
+ uint8_t add_cdb[4];
+ /* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
+ * This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+ union {
+ struct {
+ uint16_t entry_hdr;
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t rx_id;
+ uint16_t flags;
+ uint16_t status;
+ uint8_t command_ref;
+ uint8_t task_codes;
+ uint8_t task_flags;
+ uint8_t execution_codes;
+ uint8_t cdb[MAX_CMDSZ];
+ uint32_t data_length;
+ uint16_t lun;
+ uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
+ uint16_t reserved_32[6];
+ uint16_t ox_id;
+ } isp2x;
+ struct {
+ uint16_t entry_hdr;
+ uint8_t fcp_cmnd_len_low;
+ uint8_t fcp_cmnd_len_high:4;
+ uint8_t attr:4;
+ uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
+ struct fcp_hdr fcp_hdr;
+ struct atio7_fcp_cmnd fcp_cmnd;
+ } isp24;
+ struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t data[58];
+ uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+ } raw;
+ } u;
+} __packed;
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
+ * This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t add_flags;
+ uint8_t initiator_id[3];
+ uint8_t reserved;
+ uint32_t exchange_addr;
+ union {
+ struct {
+ uint16_t reserved1;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t scsi_status;
+ uint32_t relative_offset;
+ uint32_t reserved2;
+ uint32_t transfer_length;
+ uint32_t reserved3;
+ /* Data segment 0 address. */
+ uint32_t dseg_0_address[2];
+ /* Data segment 0 length. */
+ uint32_t dseg_0_length;
+ } status0;
+ struct {
+ uint16_t sense_length;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t scsi_status;
+ uint16_t response_len;
+ uint16_t reserved;
+ uint8_t sense_data[24];
+ } status1;
+ } u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 from ISP 24xx to target driver
+ * returned entry structure.
+ */
+struct ctio7_from_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint16_t status;
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t reserved1[5];
+ uint32_t exchange_address;
+ uint16_t reserved2;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t reserved3;
+ uint32_t relative_offset;
+ uint8_t reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS BIT_15
+#define CTIO7_FLAGS_TERMINATE BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0 0
+#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
+#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
+#define CTIO7_FLAGS_DSD_PTR BIT_2
+#define CTIO7_FLAGS_DATA_IN BIT_1
+#define CTIO7_FLAGS_DATA_OUT BIT_0
+
+#define ELS_PLOGI 0x3
+#define ELS_FLOGI 0x4
+#define ELS_LOGO 0x5
+#define ELS_PRLI 0x20
+#define ELS_PRLO 0x21
+#define ELS_TPRLO 0x24
+#define ELS_PDISC 0x50
+#define ELS_ADISC 0x52
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX 0x55 /* ABTS response (for 24xx) */
+
+/*
+ * ISP queue - ABTS received IOCB entry structure definition for 24xx.
+ * The ABTS BLS received from the wire is sent to the
+ * target driver by the ISP 24xx.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint8_t reserved_1[6];
+ uint16_t nport_handle;
+ uint8_t reserved_2[2];
+ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ uint8_t reserved_4[16];
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+#define ABTS_PARAM_ABORT_SEQ BIT_0
+
+struct ba_acc_le {
+ uint16_t reserved;
+ uint8_t seq_id_last;
+ uint8_t seq_id_valid;
+#define SEQ_ID_VALID 0x80
+#define SEQ_ID_INVALID 0x00
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint16_t high_seq_cnt;
+ uint16_t low_seq_cnt;
+} __packed;
+
+struct ba_rjt_le {
+ uint8_t vendor_uniq;
+ uint8_t reason_expl;
+ uint8_t reason_code;
+#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1
+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9
+ uint8_t reserved;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
+ * The ABTS response to the ABTS received is sent by the
+ * target driver to the ISP 24xx.
+ * The IOCB is placed on the request queue.
+ */
+struct abts_resp_to_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle;
+ uint16_t reserved_1;
+ uint16_t nport_handle;
+ uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
+ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ union {
+ struct ba_acc_le ba_acct;
+ struct ba_rjt_le ba_rjt;
+ } __packed payload;
+ uint32_t reserved_4;
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
+ * The ABTS response with completion status to the ABTS response
+ * (sent by the target driver to the ISP 24xx) is sent by the
+ * ISP24xx firmware to the target driver.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_resp_from_24xx_fw {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle;
+ uint16_t compl_status;
+#define ABTS_RESP_COMPL_SUCCESS 0
+#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
+ uint16_t nport_handle;
+ uint16_t reserved_1;
+ uint8_t reserved_2;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ uint8_t reserved_4[8];
+ uint32_t error_subcode1;
+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
+ uint32_t error_subcode2;
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+/********************************************************************\
+ * Type Definitions used by initiator & target halves
+\********************************************************************/
+
+struct qla_tgt_mgmt_cmd;
+struct qla_tgt_sess;
+
+/*
+ * This structure provides a template of function calls that the
+ * target driver (from within qla_target.c) can issue to the
+ * target module (tcm_qla2xxx).
+ */
+struct qla_tgt_func_tmpl {
+
+ int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+ unsigned char *, uint32_t, int, int, int);
+ int (*handle_data)(struct qla_tgt_cmd *);
+ int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+ uint32_t);
+ void (*free_cmd)(struct qla_tgt_cmd *);
+ void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+ void (*free_session)(struct qla_tgt_sess *);
+
+ int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+ void *, uint8_t *, uint16_t);
+ struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+ const uint16_t);
+ struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+ const uint8_t *);
+ void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
+ void (*put_sess)(struct qla_tgt_sess *);
+ void (*shutdown_sess)(struct qla_tgt_sess *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT 10 /* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET 0x000E
+#define IMM_NTFY_LIP_LINK_REINIT 0x000F
+#define IMM_NTFY_IOCB_OVERFLOW 0x0016
+#define IMM_NTFY_ABORT_TASK 0x0020
+#define IMM_NTFY_PORT_LOGOUT 0x0029
+#define IMM_NTFY_PORT_CONFIG 0x002A
+#define IMM_NTFY_GLBL_TPRLO 0x002D
+#define IMM_NTFY_GLBL_LOGO 0x002E
+#define IMM_NTFY_RESOURCE 0x0034
+#define IMM_NTFY_MSG_RX 0x0036
+#define IMM_NTFY_SRR 0x0045
+#define IMM_NTFY_ELS 0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT 8
+
+#define QLA_TGT_CLEAR_ACA 0x40
+#define QLA_TGT_TARGET_RESET 0x20
+#define QLA_TGT_LUN_RESET 0x10
+#define QLA_TGT_CLEAR_TS 0x04
+#define QLA_TGT_ABORT_TS 0x02
+#define QLA_TGT_ABORT_ALL_SESS 0xFFFF
+#define QLA_TGT_ABORT_ALL 0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
+#define QLA_TGT_NEXUS_LOSS 0xFFFC
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW 0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
+#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
+
+/* Special handles */
+#define QLA_TGT_NULL_HANDLE 0
+#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE 0
+#define ATIO_HEAD_OF_QUEUE 1
+#define ATIO_ORDERED_QUEUE 2
+#define ATIO_ACA_QUEUE 4
+#define ATIO_UNTAGGED 5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define FC_TM_SUCCESS 0
+#define FC_TM_BAD_FCP_DATA 1
+#define FC_TM_BAD_CMD 2
+#define FC_TM_FCP_DATA_MISMATCH 3
+#define FC_TM_REJECT 4
+#define FC_TM_FAILED 5
+
+/*
+ * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
+ * terminated, so no further action is needed and success should be returned
+ * to target.
+ */
+#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
+
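+/*
+ * Split 64-bit DMA addresses for IOCB address fields. The high half is
+ * extracted with two 16-bit shifts so the expression stays valid when
+ * dma_addr_t is only 32 bits wide.
+ */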
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) (a & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
+
+#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
+ (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
+
+struct qla_port_24xx_data {
+ uint8_t port_name[WWN_SIZE];
+ uint16_t loop_id;
+ uint16_t reserved;
+};
+
+struct qla_tgt {
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+
+ /*
+ * To sync between IRQ handlers and qlt_target_release(). Needed,
+ * because req_pkt() can drop/reacquire the HW lock inside. Protected by
+ * HW lock.
+ */
+ int irq_cmd_count;
+
+ int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+
+ /* Target's flags, serialized by pha->hardware_lock */
+ unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
+ unsigned int link_reinit_iocb_pending:1;
+
+ /*
+ * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+ * OR hardware_lock for reading.
+ */
+ int tgt_stop; /* the target mode driver is being stopped */
+ int tgt_stopped; /* the target mode driver has been stopped */
+
+ /* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
+ int sess_count;
+
+ /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
+ struct list_head sess_list;
+
+ /* Protected by hardware_lock */
+ struct list_head del_sess_list;
+ struct delayed_work sess_del_work;
+
+ spinlock_t sess_work_lock;
+ struct list_head sess_works_list;
+ struct work_struct sess_work;
+
+ struct imm_ntfy_from_isp link_reinit_iocb;
+ wait_queue_head_t waitQ;
+ int notify_ack_expected;
+ int abts_resp_expected;
+ int modify_lun_expected;
+
+ int ctio_srr_id;
+ int imm_srr_id;
+ spinlock_t srr_lock;
+ struct list_head srr_ctio_list;
+ struct list_head srr_imm_list;
+ struct work_struct srr_work;
+
+ atomic_t tgt_global_resets_count;
+
+ struct list_head tgt_list_entry;
+};
+
+/*
+ * Equivalent to IT Nexus (Initiator-Target)
+ */
+struct qla_tgt_sess {
+ uint16_t loop_id;
+ port_id_t s_id;
+
+ unsigned int conf_compl_supported:1;
+ unsigned int deleted:1;
+ unsigned int local:1;
+ unsigned int tearing_down:1;
+
+ struct se_session *se_sess;
+ struct scsi_qla_host *vha;
+ struct qla_tgt *tgt;
+
+ struct list_head sess_list_entry;
+ unsigned long expires;
+ struct list_head del_list_entry;
+
+ uint8_t port_name[WWN_SIZE];
+ struct work_struct free_work;
+};
+
+struct qla_tgt_cmd {
+ struct qla_tgt_sess *sess;
+ int state;
+ struct se_cmd se_cmd;
+ struct work_struct free_work;
+ struct work_struct work;
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+ /* to save extra sess dereferences */
+ unsigned int conf_compl_supported:1;
+ unsigned int sg_mapped:1;
+ unsigned int free_sg:1;
+ unsigned int aborted:1; /* Needed in case of SRR */
+ unsigned int write_data_transferred:1;
+
+ struct scatterlist *sg; /* cmd data buffer SG vector */
+ int sg_cnt; /* SG segments count */
+ int bufflen; /* cmd buffer length */
+ int offset;
+ uint32_t tag;
+ uint32_t unpacked_lun;
+ enum dma_data_direction dma_data_direction;
+
+ uint16_t loop_id; /* to save extra sess dereferences */
+ struct qla_tgt *tgt; /* to save extra sess dereferences */
+ struct scsi_qla_host *vha;
+ struct list_head cmd_list;
+
+ struct atio_from_isp atio;
+};
+
+struct qla_tgt_sess_work_param {
+ struct list_head sess_works_list_entry;
+
+#define QLA_TGT_SESS_WORK_ABORT 1
+#define QLA_TGT_SESS_WORK_TM 2
+ int type;
+
+ union {
+ struct abts_recv_from_24xx abts;
+ struct imm_ntfy_from_isp tm_iocb;
+ struct atio_from_isp tm_iocb2;
+ };
+};
+
+struct qla_tgt_mgmt_cmd {
+ uint8_t tmr_func;
+ uint8_t fc_tm_rsp;
+ struct qla_tgt_sess *sess;
+ struct se_cmd se_cmd;
+ struct work_struct free_work;
+ unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK 1
+ union {
+ struct atio_from_isp atio;
+ struct imm_ntfy_from_isp imm_ntfy;
+ struct abts_recv_from_24xx abts;
+ } __packed orig_iocb;
+};
+
+struct qla_tgt_prm {
+ struct qla_tgt_cmd *cmd;
+ struct qla_tgt *tgt;
+ void *pkt;
+ struct scatterlist *sg; /* cmd data buffer SG vector */
+ int seg_cnt;
+ int req_cnt;
+ uint16_t rq_result;
+ uint16_t scsi_status;
+ unsigned char *sense_buffer;
+ int sense_buffer_len;
+ int residual;
+ int add_status_pkt;
+};
+
+struct qla_tgt_srr_imm {
+ struct list_head srr_list_entry;
+ int srr_id;
+ struct imm_ntfy_from_isp imm_ntfy;
+};
+
+struct qla_tgt_srr_ctio {
+ struct list_head srr_list_entry;
+ int srr_id;
+ struct qla_tgt_cmd *cmd;
+};
+
+#define QLA_TGT_XMIT_DATA 1
+#define QLA_TGT_XMIT_STATUS 2
+#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+
+
+extern struct qla_tgt_data qla_target;
+/*
+ * Internal function prototypes
+ */
+void qlt_disable_vha(struct scsi_qla_host *);
+
+/*
+ * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
+ */
+extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
+ int (*callback)(struct scsi_qla_host *), void *);
+extern void qlt_lport_deregister(struct scsi_qla_host *);
+extern void qlt_unreg_sess(struct qla_tgt_sess *);
+extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_set_mode(struct scsi_qla_host *ha);
+extern void qlt_clear_mode(struct scsi_qla_host *ha);
+extern int __init qlt_init(void);
+extern void qlt_exit(void);
+extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+
+/*
+ * This macro is used during early initializations when host->active_mode
+ * is not set. Right now, ha value is ignored.
+ */
+#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
+static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
+{
+ return ha->host->active_mode & MODE_TARGET;
+}
+
+static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
+{
+ return ha->host->active_mode & MODE_INITIATOR;
+}
+
+static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+{
+ if (ha->host->active_mode & MODE_INITIATOR)
+ ha->host->active_mode &= ~MODE_INITIATOR;
+ else
+ ha->host->active_mode |= MODE_INITIATOR;
+}
+
+/*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code.
+ */
+extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
+ struct atio_from_isp *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *,
+ device_reg_t __iomem *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_24xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+ struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+ struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern void qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 000000000000..6e64314dbbb3
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1919 @@
+/*******************************************************************************
+ * This file contains tcm implementation using v4 configfs fabric infrastructure
+ * for QLogic target mode HBAs
+ *
+ * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL)
+ * version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+struct workqueue_struct *tcm_qla2xxx_free_wq;
+struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by tcm_qla2xxx_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+ const char *cp;
+ char c;
+ u32 nibble;
+ u32 byte = 0;
+ u32 pos = 0;
+ u32 err;
+
+ *wwn = 0;
+ for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+ c = *cp;
+ if (c == '\n' && cp[1] == '\0')
+ continue;
+ if (strict && pos++ == 2 && byte++ < 7) {
+ pos = 0;
+ if (c == ':')
+ continue;
+ err = 1;
+ goto fail;
+ }
+ if (c == '\0') {
+ err = 2;
+ if (strict && byte != 8)
+ goto fail;
+ return cp - name;
+ }
+ err = 3;
+ if (isdigit(c))
+ nibble = c - '0';
+ else if (isxdigit(c) && (islower(c) || !strict))
+ nibble = tolower(c) - 'a' + 10;
+ else
+ goto fail;
+ *wwn = (*wwn << 4) | nibble;
+ }
+ err = 4;
+fail:
+ pr_debug("err %u len %zu pos %u byte %u\n",
+ err, cp - name, pos, byte);
+ return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+ u8 b[8];
+
+ put_unaligned_be64(wwn, b);
+ return snprintf(buf, len,
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+ return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+ unsigned int i, j;
+ u8 wwn[8];
+
+ memset(wwn, 0, sizeof(wwn));
+
+ /* Validate and store the new name */
+ for (i = 0, j = 0; i < 16; i++) {
+ int value;
+
+ value = hex_to_bin(*ns++);
+ if (value >= 0)
+ j = (j << 4) | value;
+ else
+ return -EINVAL;
+
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+
+ *nm = wwn_to_u64(wwn);
+ return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
+static int tcm_qla2xxx_npiv_parse_wwn(
+ const char *name,
+ size_t count,
+ u64 *wwpn,
+ u64 *wwnn)
+{
+ unsigned int cnt = count;
+ int rc;
+
+ *wwpn = 0;
+ *wwnn = 0;
+
+ /* count may include a LF at end of string */
+ if (name[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16+1+16)) || (name[16] != ':'))
+ return -EINVAL;
+
+ rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+ if (rc != 0)
+ return rc;
+
+ rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
+ u64 wwpn, u64 wwnn)
+{
+ u8 b[8], b2[8];
+
+ put_unaligned_be64(wwpn, b);
+ put_unaligned_be64(wwnn, b2);
+ return snprintf(buf, len,
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+ b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+ return "qla2xxx_npiv";
+}
+
+static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ u8 proto_id;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ proto_id = fc_get_fabric_proto_ident(se_tpg);
+ break;
+ }
+
+ return proto_id;
+}
+
+static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+ return &lport->lport_name[0];
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+ return &lport->lport_npiv_name[0];
+}
+
+static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ return tpg->lport_tpgt;
+}
+
+static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ int ret = 0;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ int ret = 0;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ break;
+ }
+
+ return ret;
+}
+
+static char *tcm_qla2xxx_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ char *tid = NULL;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ break;
+ }
+
+ return tid;
+}
+
+static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+ if (!nacl) {
+		pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void tcm_qla2xxx_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+ struct qla_tgt_mgmt_cmd, free_work);
+
+ transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+ queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+ INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+ return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
+/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release the underlying
+ * fabric descriptor for @se_cmd
+ */
+static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd;
+
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+ struct qla_tgt_mgmt_cmd, se_cmd);
+ qlt_free_mcmd(mcmd);
+ return;
+ }
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ qlt_free_cmd(cmd);
+}
+
+static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+
+ BUG_ON(!sess);
+ vha = sess->vha;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ sess->tearing_down = 1;
+ target_splice_sess_cmd_list(se_sess);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ return 1;
+}
+
+static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+
+ BUG_ON(!sess);
+ vha = sess->vha;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ qlt_unreg_sess(sess);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ). However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
+static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
+{
+ if (se_cmd->se_cmd_flags & SCF_BIDI)
+ return DMA_BIDIRECTIONAL;
+
+ switch (se_cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return DMA_FROM_DEVICE;
+ case DMA_FROM_DEVICE:
+ return DMA_TO_DEVICE;
+ case DMA_NONE:
+ default:
+ return DMA_NONE;
+ }
+}
+
+static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+
+ /*
+ * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+ * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+ */
+ return qlt_rdy_to_xfer(cmd);
+}
+
+static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+ /*
+ * Check for WRITE_PENDING status to determine if we need to wait for
+ * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+ */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+ se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+ wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
+ 3000);
+ return 0;
+ }
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ return 0;
+}
+
+static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ return cmd->tag;
+}
+
+static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qlt_do_work() code
+ */
+static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+ unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+ int data_dir, int bidi)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct se_session *se_sess;
+ struct qla_tgt_sess *sess;
+ int flags = TARGET_SCF_ACK_KREF;
+
+ if (bidi)
+ flags |= TARGET_SCF_BIDI_OP;
+
+ sess = cmd->sess;
+ if (!sess) {
+ pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+ return -EINVAL;
+ }
+
+ se_sess = sess->se_sess;
+ if (!se_sess) {
+ pr_err("Unable to locate active struct se_session\n");
+ return -EINVAL;
+ }
+
+ target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+ cmd->unpacked_lun, data_length, fcp_task_attr,
+ data_dir, flags);
+ return 0;
+}
+
+static void tcm_qla2xxx_do_rsp(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ /*
+ * Dispatch ->queue_status from workqueue process context
+ */
+ transport_generic_request_failure(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
+ /*
+ * Ensure that the complete FCP WRITE payload has been received.
+ * Otherwise return an exception via CHECK_CONDITION status.
+ */
+ if (!cmd->write_data_transferred) {
+ /*
+ * Check if se_cmd has already been aborted via LUN_RESET, and
+ * waiting upon completion in tcm_qla2xxx_write_pending_status()
+ */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (se_cmd->transport_state & CMD_T_ABORTED) {
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+ complete(&se_cmd->t_transport_stop_comp);
+ return 0;
+ }
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+ INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ return 0;
+ }
+ /*
+ * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
+ * status to the backstore processing thread.
+ */
+ return transport_generic_handle_data(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ */
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+ uint8_t tmr_func, uint32_t tag)
+{
+ struct qla_tgt_sess *sess = mcmd->sess;
+ struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+ return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+ tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+}
+
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+ cmd->offset = 0;
+
+ /*
+	 * Now queue the completed DATA_IN to the qla2xxx LLD and response ring
+ */
+ return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+ se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+ int xmit_type = QLA_TGT_XMIT_STATUS;
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->sg = NULL;
+ cmd->sg_cnt = 0;
+ cmd->offset = 0;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+ if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ /*
+ * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+ * for qla_tgt_xmit_response LLD code
+ */
+ se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ se_cmd->residual_count = se_cmd->data_length;
+
+ cmd->bufflen = 0;
+ }
+ /*
+ * Now queue status response to qla2xxx LLD code and response ring
+ */
+ return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+ struct qla_tgt_mgmt_cmd, se_cmd);
+
+ pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+ mcmd, se_tmr->function, se_tmr->response);
+ /*
+ * Do translation between TCM TM response codes and
+ * QLA2xxx FC TM response codes.
+ */
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+ break;
+ case TMR_TASK_DOES_NOT_EXIST:
+ mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+ break;
+ case TMR_FUNCTION_REJECTED:
+ mcmd->fc_tm_rsp = FC_TM_REJECT;
+ break;
+ case TMR_LUN_DOES_NOT_EXIST:
+ default:
+ mcmd->fc_tm_rsp = FC_TM_FAILED;
+ break;
+ }
+ /*
+ * Queue the TM response to QLA2xxx LLD to build a
+ * CTIO response packet.
+ */
+ qlt_xmit_tm_rsp(mcmd);
+
+ return 0;
+}
+
+static u16 tcm_qla2xxx_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
+ u32 sense_length)
+{
+ return 0;
+}
+
+/* Local pointer to allocated TCM configfs fabric module */
+struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+ struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+{
+ struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ void *node;
+
+ pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+ node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+ WARN_ON(node && (node != se_nacl));
+
+ pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+ se_nacl, nacl->nport_wwnn, nacl->nport_id);
+ /*
+ * Now clear the se_nacl and session pointers from our HW lport lookup
+ * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
+ *
+ * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
+ * target_wait_for_sess_cmds() before the session waits for outstanding
+ * I/O to complete, to avoid a race between session shutdown execution
+	 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
+ */
+ tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
+}
+
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+ struct se_session *se_sess = container_of(kref,
+ struct se_session, sess_kref);
+
+ qlt_unreg_sess(se_sess->fabric_sess_ptr);
+}
+
+static void tcm_qla2xxx_put_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct qla_hw_data *ha = sess->vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
+{
+ tcm_qla2xxx_put_session(sess->se_sess);
+}
+
+static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+{
+ tcm_qla2xxx_shutdown_session(sess->se_sess);
+}
+
+static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct tcm_qla2xxx_nacl *nacl;
+ u64 wwnn;
+ u32 qla2xxx_nexus_depth;
+
+ if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
+ qla2xxx_nexus_depth = 1;
+
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, qla2xxx_nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+ /*
+ * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
+ */
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ nacl->nport_wwnn = wwnn;
+ tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+
+ return se_nacl;
+}
+
+static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct se_portal_group *se_tpg = se_acl->se_tpg;
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+
+ core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
+#define DEF_QLA_TPG_ATTRIB(name) \
+ \
+static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
+ struct tcm_qla2xxx_tpg, se_tpg); \
+ \
+ return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
+} \
+ \
+static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
+ struct tcm_qla2xxx_tpg, se_tpg); \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with" \
+ " ret: %d\n", ret); \
+ return -EINVAL; \
+ } \
+ ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
+ \
+ return (!ret) ? count : -EINVAL; \
+}
+
+#define DEF_QLA_TPG_ATTR_BOOL(_name) \
+ \
+static int tcm_qla2xxx_set_attrib_##_name( \
+ struct tcm_qla2xxx_tpg *tpg, \
+ unsigned long val) \
+{ \
+ struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
+ \
+ if ((val != 0) && (val != 1)) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->_name = val; \
+ return 0; \
+}
+
+#define QLA_TPG_ATTR(_name, _mode) \
+ TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+ &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
+ &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
+ &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
+ &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+ NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+static ssize_t tcm_qla2xxx_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ atomic_read(&tpg->lport_tpg_enabled));
+}
+
+static ssize_t tcm_qla2xxx_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+
+ if (op) {
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
+ } else {
+ if (!ha->tgt.qla_tgt) {
+			pr_err("struct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+ return -ENODEV;
+ }
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(ha->tgt.qla_tgt);
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+ &tcm_qla2xxx_tpg_enable.attr,
+ NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (!lport->qla_npiv_vp && (tpgt != 1)) {
+ pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tpg->lport = lport;
+ tpg->lport_tpgt = tpgt;
+ /*
+ * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+ * NodeACLs
+ */
+ QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
+ QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
+ QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+
+ ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ /*
+ * Setup local TPG=1 pointer for non NPIV mode.
+ */
+ if (lport->qla_npiv_vp == NULL)
+ lport->tpg_1 = tpg;
+
+ return &tpg->se_tpg;
+}
+
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ /*
+	 * Call into qla_target.c LLD logic to shut down the active
+ * FC Nexuses and disable target mode operation for this qla_hw_data
+ */
+ if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
+ qlt_stop_phase1(ha->tgt.qla_tgt);
+
+ core_tpg_deregister(se_tpg);
+ /*
+ * Clear local TPG=1 pointer for non NPIV mode.
+ */
+ if (lport->qla_npiv_vp == NULL)
+ lport->tpg_1 = NULL;
+
+ kfree(tpg);
+}
+
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tpg->lport = lport;
+ tpg->lport_tpgt = tpgt;
+
+ ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+ scsi_qla_host_t *vha,
+ const uint8_t *s_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_nacl *nacl;
+ u32 key;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return NULL;
+ }
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+ se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+ if (!se_nacl) {
+ pr_debug("Unable to locate s_id: 0x%06x\n", key);
+ return NULL;
+ }
+ pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
+ se_nacl, se_nacl->initiatorname);
+
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ if (!nacl->qla_tgt_sess) {
+ pr_err("Unable to locate struct qla_tgt_sess\n");
+ return NULL;
+ }
+
+ return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_s_id(
+ struct tcm_qla2xxx_lport *lport,
+ struct se_node_acl *new_se_nacl,
+ struct tcm_qla2xxx_nacl *nacl,
+ struct se_session *se_sess,
+ struct qla_tgt_sess *qla_tgt_sess,
+ uint8_t *s_id)
+{
+ u32 key;
+ void *slot;
+ int rc;
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ pr_debug("set_sess_by_s_id: %06x\n", key);
+
+ slot = btree_lookup32(&lport->lport_fcport_map, key);
+ if (!slot) {
+ if (new_se_nacl) {
+ pr_debug("Setting up new fc_port entry to new_se_nacl\n");
+ nacl->nport_id = key;
+ rc = btree_insert32(&lport->lport_fcport_map, key,
+ new_se_nacl, GFP_ATOMIC);
+ if (rc)
+ printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
+ (int)key);
+ } else {
+			pr_debug("Wiping nonexistent fc_port entry\n");
+ }
+
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (nacl->qla_tgt_sess) {
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+ btree_remove32(&lport->lport_fcport_map, key);
+ nacl->qla_tgt_sess = NULL;
+ return;
+ }
+ pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+ btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing existing fc_port entry\n");
+ btree_remove32(&lport->lport_fcport_map, key);
+ return;
+ }
+
+ pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+ btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+
+ pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+ scsi_qla_host_t *vha,
+ const uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_nacl *nacl;
+ struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return NULL;
+ }
+
+ pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+ fc_loopid = lport->lport_loopid_map + loop_id;
+ se_nacl = fc_loopid->se_nacl;
+ if (!se_nacl) {
+ pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
+ loop_id);
+ return NULL;
+ }
+
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+ if (!nacl->qla_tgt_sess) {
+ pr_err("Unable to locate struct qla_tgt_sess\n");
+ return NULL;
+ }
+
+ return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+ struct tcm_qla2xxx_lport *lport,
+ struct se_node_acl *new_se_nacl,
+ struct tcm_qla2xxx_nacl *nacl,
+ struct se_session *se_sess,
+ struct qla_tgt_sess *qla_tgt_sess,
+ uint16_t loop_id)
+{
+ struct se_node_acl *saved_nacl;
+ struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+ pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+ fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
+ lport->lport_loopid_map)[loop_id];
+
+ saved_nacl = fc_loopid->se_nacl;
+ if (!saved_nacl) {
+ pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (nacl->qla_tgt_sess) {
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = NULL;
+ nacl->qla_tgt_sess = NULL;
+ return;
+ }
+
+ pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = NULL;
+ return;
+ }
+
+ pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+
+ pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Should always be called with qla_hw_data->hardware_lock held.
+ */
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
+ struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ unsigned char be_sid[3];
+
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+
+ tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+ sess, be_sid);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+ sess, sess->loop_id);
+}
+
+static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ struct qla_hw_data *ha = tgt->ha;
+ struct se_session *se_sess;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_nacl *nacl;
+
+ BUG_ON(in_interrupt());
+
+ se_sess = sess->se_sess;
+ if (!se_sess) {
+ pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+ dump_stack();
+ return;
+ }
+ se_nacl = se_sess->se_node_acl;
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return;
+ }
+ target_wait_for_sess_cmds(se_sess, 0);
+
+ transport_deregister_session_configfs(sess->se_sess);
+ transport_deregister_session(sess->se_sess);
+}
+
+/*
+ * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
+ * to locate struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+ scsi_qla_host_t *vha,
+ unsigned char *fc_wwpn,
+ void *qla_tgt_sess,
+ uint8_t *s_id,
+ uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_tpg *tpg;
+ struct tcm_qla2xxx_nacl *nacl;
+ struct se_portal_group *se_tpg;
+ struct se_node_acl *se_nacl;
+ struct se_session *se_sess;
+ struct qla_tgt_sess *sess = qla_tgt_sess;
+ unsigned char port_name[36];
+ unsigned long flags;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return -EINVAL;
+ }
+ /*
+	 * Locate the TPG=1 reference.
+ */
+ tpg = lport->tpg_1;
+ if (!tpg) {
+		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
+ return -EINVAL;
+ }
+ se_tpg = &tpg->se_tpg;
+
+ se_sess = transport_init_session();
+ if (IS_ERR(se_sess)) {
+ pr_err("Unable to initialize struct se_session\n");
+ return PTR_ERR(se_sess);
+ }
+ /*
+	 * Format the FCP Initiator port_name into colon-separated values to
+	 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
+ */
+ memset(&port_name, 0, 36);
+ snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
+ fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+ /*
+	 * Locate our struct se_node_acl either from an explicit NodeACL created
+ * via ConfigFS, or via running in TPG demo mode.
+ */
+ se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
+ port_name);
+ if (!se_sess->se_node_acl) {
+ transport_free_session(se_sess);
+ return -EINVAL;
+ }
+ se_nacl = se_sess->se_node_acl;
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ /*
+ * And now setup the new se_nacl and session pointers into our HW lport
+ * mappings for fabric S_ID and LOOP_ID.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
+ qla_tgt_sess, s_id);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
+ qla_tgt_sess, loop_id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ /*
+ * Finally register the new FC Nexus with TCM
+ */
+ __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+
+ return 0;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+ .handle_cmd = tcm_qla2xxx_handle_cmd,
+ .handle_data = tcm_qla2xxx_handle_data,
+ .handle_tmr = tcm_qla2xxx_handle_tmr,
+ .free_cmd = tcm_qla2xxx_free_cmd,
+ .free_mcmd = tcm_qla2xxx_free_mcmd,
+ .free_session = tcm_qla2xxx_free_session,
+ .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+ .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
+ .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
+ .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+ .put_sess = tcm_qla2xxx_put_sess,
+ .shutdown_sess = tcm_qla2xxx_shutdown_sess,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+ int rc;
+
+ rc = btree_init32(&lport->lport_fcport_map);
+ if (rc) {
+ pr_err("Unable to initialize lport->lport_fcport_map btree\n");
+ return rc;
+ }
+
+ lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+ 65536);
+ if (!lport->lport_loopid_map) {
+ pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
+ sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ btree_destroy32(&lport->lport_fcport_map);
+ return -ENOMEM;
+ }
+ memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+ * 65536);
+ pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
+ sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ return 0;
+}
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ /*
+ * Setup local pointer to vha, NPIV VP pointer (if present) and
+ * vha->tcm_lport pointer
+ */
+ lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+ lport->qla_vha = vha;
+
+ return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_make_lport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport;
+ u64 wwpn;
+ int ret = -ENODEV;
+
+ if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+ if (!lport) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ lport->lport_wwpn = wwpn;
+ tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
+ wwpn);
+
+ ret = tcm_qla2xxx_init_lport(lport);
+ if (ret != 0)
+ goto out;
+
+ ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
+ tcm_qla2xxx_lport_register_cb, lport);
+ if (ret != 0)
+ goto out_lport;
+
+ return &lport->lport_wwn;
+out_lport:
+ vfree(lport->lport_loopid_map);
+ btree_destroy32(&lport->lport_fcport_map);
+out:
+ kfree(lport);
+ return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct se_node_acl *node;
+ u32 key = 0;
+
+ /*
+	 * Call into qla_target.c LLD logic to complete the
+	 * shutdown of struct qla_tgt after the call to
+	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.
+ */
+ if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
+ qlt_stop_phase2(ha->tgt.qla_tgt);
+
+ qlt_lport_deregister(vha);
+
+ vfree(lport->lport_loopid_map);
+ btree_for_each_safe32(&lport->lport_fcport_map, key, node)
+ btree_remove32(&lport->lport_fcport_map, key);
+ btree_destroy32(&lport->lport_fcport_map);
+ kfree(lport);
+}
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport;
+ u64 npiv_wwpn, npiv_wwnn;
+ int ret;
+
+ if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
+ &npiv_wwpn, &npiv_wwnn) < 0)
+ return ERR_PTR(-EINVAL);
+
+ lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+ if (!lport) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ lport->lport_npiv_wwpn = npiv_wwpn;
+ lport->lport_npiv_wwnn = npiv_wwnn;
+ tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
+ TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+
+/* FIXME: tcm_qla2xxx_npiv_make_lport */
+ ret = -ENOSYS;
+ if (ret != 0)
+ goto out;
+
+ return &lport->lport_wwn;
+out:
+ kfree(lport);
+ return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct Scsi_Host *sh = vha->host;
+ /*
+ * Notify libfc that we want to release the lport->npiv_vport
+ */
+ fc_vport_terminate(lport->npiv_vport);
+
+ scsi_host_put(sh);
+ kfree(lport);
+}
+
+
+static ssize_t tcm_qla2xxx_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page,
+ "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
+ UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_qla2xxx, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+ &tcm_qla2xxx_wwn_version.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+ .get_fabric_name = tcm_qla2xxx_get_fabric_name,
+ .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
+ .tpg_get_tag = tcm_qla2xxx_get_tag,
+ .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ tcm_qla2xxx_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ tcm_qla2xxx_check_prod_write_protect,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
+ .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .new_cmd_map = NULL,
+ .check_stop_free = tcm_qla2xxx_check_stop_free,
+ .release_cmd = tcm_qla2xxx_release_cmd,
+ .put_session = tcm_qla2xxx_put_session,
+ .shutdown_session = tcm_qla2xxx_shutdown_session,
+ .close_session = tcm_qla2xxx_close_session,
+ .sess_get_index = tcm_qla2xxx_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_qla2xxx_write_pending,
+ .write_pending_status = tcm_qla2xxx_write_pending_status,
+ .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
+ .get_task_tag = tcm_qla2xxx_get_task_tag,
+ .get_cmd_state = tcm_qla2xxx_get_cmd_state,
+ .queue_data_in = tcm_qla2xxx_queue_data_in,
+ .queue_status = tcm_qla2xxx_queue_status,
+ .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
+ .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_qla2xxx_make_lport,
+ .fabric_drop_wwn = tcm_qla2xxx_drop_lport,
+ .fabric_make_tpg = tcm_qla2xxx_make_tpg,
+ .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+ .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
+ .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
+ .tpg_get_tag = tcm_qla2xxx_get_tag,
+ .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_false,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
+ .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
+ .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
+ .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .release_cmd = tcm_qla2xxx_release_cmd,
+ .put_session = tcm_qla2xxx_put_session,
+ .shutdown_session = tcm_qla2xxx_shutdown_session,
+ .close_session = tcm_qla2xxx_close_session,
+ .sess_get_index = tcm_qla2xxx_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_qla2xxx_write_pending,
+ .write_pending_status = tcm_qla2xxx_write_pending_status,
+ .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
+ .get_task_tag = tcm_qla2xxx_get_task_tag,
+ .get_cmd_state = tcm_qla2xxx_get_cmd_state,
+ .queue_data_in = tcm_qla2xxx_queue_data_in,
+ .queue_status = tcm_qla2xxx_queue_status,
+ .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
+ .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
+ .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
+ .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
+ .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric, *npiv_fabric;
+ int ret;
+
+ pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
+ UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine);
+ /*
+ * Register the top level struct config_item_type with TCM core
+ */
+ fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ return PTR_ERR(fabric);
+ }
+ /*
+ * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
+ */
+ fabric->tf_ops = tcm_qla2xxx_ops;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+ tcm_qla2xxx_tpg_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+ return ret;
+ }
+ /*
+ * Setup our local pointer to *fabric
+ */
+ tcm_qla2xxx_fabric_configfs = fabric;
+ pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
+
+ /*
+ * Register the top level struct config_item_type for NPIV with TCM core
+ */
+ npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
+ if (IS_ERR(npiv_fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ ret = PTR_ERR(npiv_fabric);
+ goto out_fabric;
+ }
+ /*
+ * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
+ */
+ npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
+ /*
+ * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the npiv_fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(npiv_fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+ goto out_fabric;
+ }
+ /*
+ * Setup our local pointer to *npiv_fabric
+ */
+ tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
+ pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
+
+ tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+ WQ_MEM_RECLAIM, 0);
+ if (!tcm_qla2xxx_free_wq) {
+ ret = -ENOMEM;
+ goto out_fabric_npiv;
+ }
+
+ tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+ if (!tcm_qla2xxx_cmd_wq) {
+ ret = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+ target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+out_fabric:
+ target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+ return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+ destroy_workqueue(tcm_qla2xxx_cmd_wq);
+ destroy_workqueue(tcm_qla2xxx_free_wq);
+
+ target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+ tcm_qla2xxx_fabric_configfs = NULL;
+ pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
+
+ target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+ tcm_qla2xxx_npiv_fabric_configfs = NULL;
+ pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+ int ret;
+
+ ret = tcm_qla2xxx_register_configfs();
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+ tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 000000000000..825498103352
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,82 @@
+#include <target/target_core_base.h>
+#include <linux/btree.h>
+
+#define TCM_QLA2XXX_VERSION "v0.1"
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN 32
+/* length of ASCII NPIV 'WWPN+WWNN' including pad */
+#define TCM_QLA2XXX_NPIV_NAMELEN 66
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+ /* From libfc struct fc_rport->port_id */
+ u32 nport_id;
+ /* Binary World Wide unique Node Name for remote FC Initiator Nport */
+ u64 nport_wwnn;
+ /* ASCII formatted WWPN for FC Initiator Nport */
+ char nport_name[TCM_QLA2XXX_NAMELEN];
+ /* Pointer to qla_tgt_sess */
+ struct qla_tgt_sess *qla_tgt_sess;
+ /* Pointer to TCM FC nexus */
+ struct se_session *nport_nexus;
+ /* Returned by tcm_qla2xxx_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+ int generate_node_acls;
+ int cache_dynamic_acls;
+ int demo_mode_write_protect;
+ int prod_mode_write_protect;
+};
+
+struct tcm_qla2xxx_tpg {
+ /* FC lport target portal group tag for TCM */
+ u16 lport_tpgt;
+ /* Atomic bit to determine TPG active status */
+ atomic_t lport_tpg_enabled;
+ /* Pointer back to tcm_qla2xxx_lport */
+ struct tcm_qla2xxx_lport *lport;
+ /* Used by tcm_qla2xxx_tpg_attrib_cit */
+ struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+ /* Returned by tcm_qla2xxx_make_tpg() */
+ struct se_portal_group se_tpg;
+};
+
+#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
+
+struct tcm_qla2xxx_fc_loopid {
+ struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+ /* SCSI protocol the lport is providing */
+ u8 lport_proto_id;
+ /* Binary World Wide unique Port Name for FC Target Lport */
+ u64 lport_wwpn;
+ /* Binary World Wide unique Port Name for FC NPIV Target Lport */
+ u64 lport_npiv_wwpn;
+ /* Binary World Wide unique Node Name for FC NPIV Target Lport */
+ u64 lport_npiv_wwnn;
+ /* ASCII formatted WWPN for FC Target Lport */
+ char lport_name[TCM_QLA2XXX_NAMELEN];
+ /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
+ char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
+ /* map for fc_port pointers in 24-bit FC Port ID space */
+ struct btree_head32 lport_fcport_map;
+ /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+ struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
+ /* Pointer to struct scsi_qla_host from qla2xxx LLD */
+ struct scsi_qla_host *qla_vha;
+ /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
+ struct scsi_qla_host *qla_npiv_vp;
+ /* Embedded struct qla_tgt for this lport */
+ struct qla_tgt lport_qla_tgt;
+ /* Pointer to struct fc_vport for NPIV vport from libfc */
+ struct fc_vport *npiv_vport;
+ /* Pointer to TPG=1 for non NPIV mode */
+ struct tcm_qla2xxx_tpg *tpg_1;
+ /* Returned by tcm_qla2xxx_make_lport() */
+ struct se_wwn lport_wwn;
+};
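For reference, the 24-bit FC port-ID map declared above (lport_fcport_map) is a btree from <linux/btree.h>, while 16-bit loop IDs get a flat vmalloc'd table. The sketch below is illustrative only and is not part of this patch; it assumes the btree_insert32()/btree_lookup32() helpers and a head already set up with btree_init32():

/* Illustrative sketch, not part of the patch: driving the sparse
 * 24-bit port-ID map with the <linux/btree.h> 32-bit key helpers. */
static int example_map_fcport(struct tcm_qla2xxx_lport *lport,
			      u32 port_id, void *fc_port)
{
	return btree_insert32(&lport->lport_fcport_map, port_id,
			      fc_port, GFP_ATOMIC);
}

static void *example_find_fcport(struct tcm_qla2xxx_lport *lport,
				 u32 port_id)
{
	return btree_lookup32(&lport->lport_fcport_map, port_id);
}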
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 0b0a7d42137d..c681b2a355e1 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -9,6 +9,140 @@
#include "ql4_glbl.h"
#include "ql4_dbg.h"
+static ssize_t
+qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+
+ if (!is_qla8022(ha))
+ return -EINVAL;
+
+ if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ ha->fw_dump_size);
+}
+
+static ssize_t
+qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ uint32_t dev_state;
+ long reading;
+ int ret = 0;
+
+ if (!is_qla8022(ha))
+ return -EINVAL;
+
+ if (off != 0)
+ return ret;
+
+ buf[1] = 0;
+ ret = kstrtol(buf, 10, &reading);
+ if (ret) {
+ ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ switch (reading) {
+ case 0:
+ /* clear dump collection flags */
+ if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ /* Reload minidump template */
+ qla4xxx_alloc_fw_dump(ha);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Firmware template reloaded\n"));
+ }
+ break;
+ case 1:
+ /* Set flag to read dump */
+ if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
+ !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+ set_bit(AF_82XX_DUMP_READING, &ha->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Raw firmware dump ready for read on (%ld).\n",
+ ha->host_no));
+ }
+ break;
+ case 2:
+ /* Reset HBA */
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_READY) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: Setting Need reset, reset_owner is 0x%x.\n",
+ __func__, ha->func_num);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_NEED_RESET);
+ set_bit(AF_82XX_RST_OWNER, &ha->flags);
+ } else
+ ql4_printk(KERN_INFO, ha,
+ "%s: Reset not performed as device state is 0x%x\n",
+ __func__, dev_state);
+
+ qla4_8xxx_idc_unlock(ha);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+ .attr = {
+ .name = "fw_dump",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla4_8xxx_sysfs_read_fw_dump,
+ .write = qla4_8xxx_sysfs_write_fw_dump,
+};
+
+static struct sysfs_entry {
+ char *name;
+ struct bin_attribute *attr;
+} bin_file_entries[] = {
+ { "fw_dump", &sysfs_fw_dump_attr },
+ { NULL },
+};
+
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
+{
+ struct Scsi_Host *host = ha->host;
+ struct sysfs_entry *iter;
+ int ret;
+
+ for (iter = bin_file_entries; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ ql4_printk(KERN_ERR, ha,
+ "Unable to create sysfs %s binary attribute (%d).\n",
+ iter->name, ret);
+ }
+}
+
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
+{
+ struct Scsi_Host *host = ha->host;
+ struct sysfs_entry *iter;
+
+ for (iter = bin_file_entries; iter->name; iter++)
+ sysfs_remove_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+}
+
/* Scsi_Host attributes. */
static ssize_t
qla4xxx_fw_version_show(struct device *dev,
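As a usage note for the new binary attribute: writing "1" arms reading of a captured dump, a subsequent read returns the raw blob, writing "0" clears the flags and reloads the template, and writing "2" requests an adapter reset. A minimal user-space sketch follows; it is not part of this patch, and the sysfs path is an assumption that varies with the host number:

/* User-space sketch: arm and read the 8xxx firmware minidump via the
 * new "fw_dump" sysfs binary attribute.  Path is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/device/fw_dump";
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "1" sets AF_82XX_DUMP_READING if a dump has been captured */
	if (write(fd, "1", 1) != 1)
		perror("arm dump read");

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* raw dump to stdout */

	/* "0" clears the flags and reloads the minidump template;
	 * the write handler only accepts offset 0, hence the lseek. */
	lseek(fd, 0, SEEK_SET);
	if (write(fd, "0", 1) != 1)
		perror("clear dump flags");
	close(fd);
	return 0;
}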
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7f2492e88be7..96a5616a8fda 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -398,6 +398,16 @@ struct isp_operations {
int (*get_sys_info) (struct scsi_qla_host *);
};
+struct ql4_mdump_size_table {
+ uint32_t size;
+ uint32_t size_cmask_02;
+ uint32_t size_cmask_04;
+ uint32_t size_cmask_08;
+ uint32_t size_cmask_10;
+ uint32_t size_cmask_FF;
+ uint32_t version;
+};
+
/*qla4xxx ipaddress configuration details */
struct ipaddress_config {
uint16_t ipv4_options;
@@ -485,6 +495,10 @@ struct scsi_qla_host {
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
+#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
+#define AF_82XX_RST_OWNER 25 /* 0x02000000 */
+#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
+
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -662,6 +676,11 @@ struct scsi_qla_host {
uint32_t nx_dev_init_timeout;
uint32_t nx_reset_timeout;
+ void *fw_dump;
+ uint32_t fw_dump_size;
+ uint32_t fw_dump_capture_mask;
+ void *fw_dump_tmplt_hdr;
+ uint32_t fw_dump_tmplt_size;
struct completion mbx_intr_comp;
@@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
#define PROCESS_ALL_AENS 0
#define FLUSH_DDB_CHANGED_AENS 1
+/* Defines for udev events */
+#define QL4_UEVENT_CODE_FW_DUMP 0
+
#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 210cd1d64475..7240948fb929 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -385,6 +385,11 @@ struct qla_flt_region {
#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
+#define MBOX_CMD_MINIDUMP 0x0129
+
+/* Minidump subcommand */
+#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
+#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
@@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
uint8_t reserved2[264]; /* 0x0308 - 0x040F */
};
+#define QLA82XX_DBG_STATE_ARRAY_LEN 16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
+
+struct qla4_8xxx_minidump_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info_word2;
+ uint32_t driver_info_word3;
+ uint32_t driver_info_word4;
+
+ uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+};
+
#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 910536667cf5..20b49d019043 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+ dma_addr_t phys_addr);
+int qla4xxx_req_template_size(struct scsi_qla_host *ha);
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
+extern int ql4xmdcapmask;
+extern int ql4xenablemd;
extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90ee5d8fa731..bf36723b84e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
return ipv4_wait|ipv6_wait;
}
+/**
+ * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
+ * @ha: pointer to host adapter structure.
+ **/
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
+{
+ int status;
+ uint32_t capture_debug_level;
+ int hdr_entry_bit, k;
+ void *md_tmp;
+ dma_addr_t md_tmp_dma;
+ struct qla4_8xxx_minidump_template_hdr *md_hdr;
+
+ if (ha->fw_dump) {
+ ql4_printk(KERN_WARNING, ha,
+ "Firmware dump previously allocated.\n");
+ return;
+ }
+
+ status = qla4xxx_req_template_size(ha);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to get template size\n",
+ ha->host_no);
+ return;
+ }
+
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+
+ /* Allocate memory for saving the template */
+ md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+ &md_tmp_dma, GFP_KERNEL);
+
+ /* Request template */
+ status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to get minidump template\n",
+ ha->host_no);
+ goto alloc_cleanup;
+ }
+
+ md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+
+ capture_debug_level = md_hdr->capture_debug_level;
+
+ /* Get capture mask based on module loadtime setting. */
+ if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+ ha->fw_dump_capture_mask = ql4xmdcapmask;
+ else
+ ha->fw_dump_capture_mask = capture_debug_level;
+
+ md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
+ md_hdr->num_of_entries));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
+ ha->fw_dump_tmplt_size));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
+ ha->fw_dump_capture_mask));
+
+ /* Calculate fw_dump_size */
+ for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
+ hdr_entry_bit <<= 1, k++) {
+ if (hdr_entry_bit & ha->fw_dump_capture_mask)
+ ha->fw_dump_size += md_hdr->capture_size_array[k];
+ }
+
+ /* Total firmware dump size including command header */
+ ha->fw_dump_size += ha->fw_dump_tmplt_size;
+ ha->fw_dump = vmalloc(ha->fw_dump_size);
+ if (!ha->fw_dump)
+ goto alloc_cleanup;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Minidump Tempalate Size = 0x%x KB\n",
+ ha->fw_dump_tmplt_size));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
+
+ memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
+ ha->fw_dump_tmplt_hdr = ha->fw_dump;
+
+alloc_cleanup:
+ dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+ md_tmp, md_tmp_dma);
+}
+
static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
{
uint32_t timeout_count;
@@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
"control block\n", ha->host_no, __func__));
return status;
}
+
if (!qla4xxx_fw_ready(ha))
return status;
+ if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+ qla4xxx_alloc_fw_dump(ha);
+
return qla4xxx_get_firmware_status(ha);
}
@@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
+ ddb_entry->unblock_sess(ddb_entry->sess);
status = QLA_SUCCESS;
break;
case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
}
break;
case DDB_DS_SESSION_ACTIVE:
+ case DDB_DS_DISCOVERY:
switch (state) {
case DDB_DS_SESSION_FAILED:
/*
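The fw_dump sizing in qla4xxx_alloc_fw_dump() above can be read as: each capture-level bit in the mask selects one entry of capture_size_array[], starting at bit 0x02 (bit 0x01 is never consulted), and the template size is added on top, so the default mask of 0x1F pulls in indices 1 through 4. A stand-alone sketch of that arithmetic, for illustration only:

/* Sketch of the size computation performed in qla4xxx_alloc_fw_dump(). */
static uint32_t example_fw_dump_size(const uint32_t *capture_size_array,
				     uint32_t capture_mask,
				     uint32_t tmplt_size)
{
	uint32_t size = 0;
	uint32_t bit;
	int k;

	for (bit = 0x2, k = 1; bit & 0xFF; bit <<= 1, k++)
		if (bit & capture_mask)
			size += capture_size_array[k];

	return size + tmplt_size;	/* template header is always kept */
}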
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 7ac21dabbf22..cab8f665a41f 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
}
}
- if (is_qla8022(ha)) {
- if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
- DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
- "prematurely completing mbx cmd as firmware "
- "recovery detected\n", ha->host_no, __func__));
- return status;
- }
- /* Do not send any mbx cmd if h/w is in failed state*/
- qla4_8xxx_idc_lock(ha);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla4_8xxx_idc_unlock(ha);
- if (dev_state == QLA82XX_DEV_FAILED) {
- ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
- "failed state, do not send any mailbox commands\n",
- ha->host_no, __func__);
- return status;
- }
- }
-
if ((is_aer_supported(ha)) &&
(test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
+ if (is_qla8022(ha)) {
+ if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
+ ha->host_no, __func__));
+ goto mbox_exit;
+ }
+ /* Do not send any mbx cmd if h/w is in failed state*/
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_8xxx_idc_unlock(ha);
+ if (dev_state == QLA82XX_DEV_FAILED) {
+ ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
+ ha->host_no, __func__);
+ goto mbox_exit;
+ }
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@ mbox_exit:
return status;
}
+/**
+ * qla4xxx_get_minidump_template - Get the firmware template
+ * @ha: Pointer to host adapter structure.
+ * @phys_addr: dma address for template
+ *
+ * Obtain the minidump template from firmware during initialization
+ * as it may not be available when minidump is desired.
+ **/
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+ dma_addr_t phys_addr)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+ mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
+ mbox_cmd[2] = LSDW(phys_addr);
+ mbox_cmd[3] = MSDW(phys_addr);
+ mbox_cmd[4] = ha->fw_dump_tmplt_size;
+ mbox_cmd[5] = 0;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
+ ha->host_no, __func__, mbox_cmd[0],
+ mbox_sts[0], mbox_sts[1]));
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_req_template_size - Get minidump template size from firmware.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_req_template_size(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+ mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status == QLA_SUCCESS) {
+ ha->fw_dump_tmplt_size = mbox_sts[1];
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
+ __func__, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5], mbox_sts[6], mbox_sts[7]));
+ if (ha->fw_dump_tmplt_size == 0)
+ status = QLA_ERROR;
+ } else {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
+ __func__, mbox_sts[0], mbox_sts[1]);
+ status = QLA_ERROR;
+ }
+
+ return status;
+}
+
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
{
set_bit(AF_FW_RECOVERY, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e1e46b6dac75..228b67020d2c 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pci.h>
+#include <linux/ratelimit.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
@@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
return data;
}
+/* Minidump related functions */
+static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
+ u32 data, uint8_t flag)
+{
+ uint32_t win_read, off_value, rval = QLA_SUCCESS;
+
+ off_value = off & 0xFFFF0000;
+ writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != off_value) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+ __func__, off_value, win_read, off));
+ return QLA_ERROR;
+ }
+
+ off_value = off & 0x0000FFFF;
+
+ if (flag)
+ writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
+ else
+ rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
+
+ return rval;
+}
+
#define CRB_WIN_LOCK_TIMEOUT 100000000
int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
}
if (j >= MAX_CTL_CHECK) {
- if (printk_ratelimit())
- ql4_printk(KERN_ERR, ha,
- "failed to read through agent\n");
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n",
+ __func__);
break;
}
@@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
ql4_printk(KERN_ERR, ha,
- "failed to write through agent\n");
+ "%s: failed to read through agent\n",
+ __func__);
ret = -1;
break;
}
@@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_active |= (1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+ __func__, ha->host_no, drv_active);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}
@@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_active &= ~(1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+ __func__, ha->host_no, drv_active);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}
@@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_state |= (1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+ __func__, ha->host_no, drv_state);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_state &= ~(1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+ __func__, ha->host_no, drv_state);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
qla4_8xxx_rom_unlock(ha);
}
+static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_addr);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla82xx_minidump_entry_cache *cache_hdr;
+ int rval = QLA_ERROR;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+
+ if (c_value_w)
+ qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
+ 0, 0);
+ if ((c_value_r & p_mask) == 0) {
+ break;
+ } else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr)
+{
+ struct qla82xx_minidump_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
+ uint32_t crb_addr;
+ unsigned long wtime;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+ int i;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+
+ crb_addr = crb_entry->addr;
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+ if (opcode & QLA82XX_DBG_OPCODE_WR) {
+ qla4_8xxx_md_rw_32(ha, crb_addr,
+ crb_entry->value_1, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_RW) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_AND) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA82XX_DBG_OPCODE_AND;
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value |= crb_entry->value_3;
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+
+ do {
+ if ((read_value & crb_entry->value_2) ==
+ crb_entry->value_1)
+ break;
+ else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_ERROR;
+ break;
+ } else
+ read_value = qla4_8xxx_md_rw_32(ha,
+ crb_addr, 0, 0);
+ } while (1);
+ opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else {
+ read_value = crb_entry->value_1;
+ }
+
+ qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+ return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, r_stride, loop_cnt));
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
+ __func__, (loop_cnt * sizeof(uint32_t))));
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(s_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla82xx_minidump_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+ qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla82xx_minidump_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+
+static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value;
+ uint32_t i, loop_cnt;
+ struct qla82xx_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
+ r_addr = rom_hdr->read_addr;
+ loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, loop_cnt));
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+ (r_addr & 0xFFFF0000), 1);
+ r_value = qla4_8xxx_md_rw_32(ha,
+ MD_DIRECT_ROM_READ_BASE +
+ (r_addr & 0x0000FFFF), 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += sizeof(uint32_t);
+ }
+ *d_ptr = data_ptr;
+}
+
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla82xx_minidump_entry_rdmem *m_hdr;
+ unsigned long flags;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size));
+
+ if (r_addr & 0xf) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read addr 0x%x not 16 bytes alligned\n",
+ __func__, r_addr));
+ return QLA_ERROR;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+ __func__, m_hdr->read_data_size));
+ return QLA_ERROR;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt));
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+ r_value = 0;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+ r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
+ 0, 0);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n",
+ __func__);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return QLA_SUCCESS;
+ }
+
+ for (j = 0; j < 4; j++) {
+ r_data = qla4_8xxx_md_rw_32(ha,
+ MD_MIU_TEST_AGT_RDDATA[j],
+ 0, 0);
+ *data_ptr++ = cpu_to_le32(r_data);
+ }
+
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
+ __func__, (loop_cnt * 16)));
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ ha->host_no, index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask));
+}
+
+/**
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
+ * @ha: pointer to adapter structure
+ **/
+static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
+{
+ int num_entry_hdr = 0;
+ struct qla82xx_minidump_entry_hdr *entry_hdr;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t data_collected = 0;
+ int i, rval = QLA_ERROR;
+ uint64_t now;
+ uint32_t timestamp;
+
+ if (!ha->fw_dump) {
+ ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
+ __func__, ha->host_no);
+ return rval;
+ }
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
+ ha->fw_dump_tmplt_size);
+ data_collected += ha->fw_dump_tmplt_size;
+
+ num_entry_hdr = tmplt_hdr->num_of_entries;
+ ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
+ __func__, data_ptr);
+ ql4_printk(KERN_INFO, ha,
+ "[%s]: no of entry headers in Template: 0x%x\n",
+ __func__, num_entry_hdr);
+ ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
+ __func__, ha->fw_dump_capture_mask);
+ ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
+ __func__, ha->fw_dump_size, ha->fw_dump_size);
+
+ /* Update current timestamp before taking dump */
+ now = get_jiffies_64();
+ timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+ tmplt_hdr->driver_timestamp = timestamp;
+
+ entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+ (((uint8_t *)ha->fw_dump_tmplt_hdr) +
+ tmplt_hdr->first_entry_offset);
+
+ /* Walk through the entry headers - validate/perform required action */
+ for (i = 0; i < num_entry_hdr; i++) {
+ if (data_collected >= ha->fw_dump_size) {
+ ql4_printk(KERN_INFO, ha,
+ "Data collected: [0x%x], Total Dump size: [0x%x]\n",
+ data_collected, ha->fw_dump_size);
+ return rval;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ha->fw_dump_capture_mask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA82XX_DBG_SKIPPED_FLAG;
+ goto skip_nxt_entry;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected,
+ (ha->fw_dump_size - data_collected)));
+
+ /* Decode the entry type and take required action to capture
+ * debug data
+ */
+ switch (entry_hdr->entry_type) {
+ case QLA82XX_RDEND:
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA82XX_CNTRL:
+ rval = qla4_8xxx_minidump_process_control(ha,
+ entry_hdr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_RDCRB:
+ qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDMEM:
+ rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_BOARD:
+ case QLA82XX_RDROM:
+ qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_L2DTG:
+ case QLA82XX_L2ITG:
+ case QLA82XX_L2DAT:
+ case QLA82XX_L2INS:
+ rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_L1DAT:
+ case QLA82XX_L1INS:
+ qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDOCM:
+ qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDMUX:
+ qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_QUEUE:
+ qla4_8xxx_minidump_process_queue(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDNOP:
+ default:
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+
+ data_collected = (uint8_t *)data_ptr -
+ ((uint8_t *)((uint8_t *)ha->fw_dump +
+ ha->fw_dump_tmplt_size));
+skip_nxt_entry:
+ /* next entry in the template */
+ entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+ (((uint8_t *)entry_hdr) +
+ entry_hdr->entry_size);
+ }
+
+ if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
+ ql4_printk(KERN_INFO, ha,
+ "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
+ data_collected, ha->fw_dump_size);
+ goto md_failed;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
+ __func__, i));
+md_failed:
+ return rval;
+}
+
+/**
+ * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
+ * @ha: pointer to adapter structure
+ * @code: uevent code identifying the event (e.g. QL4_UEVENT_CODE_FW_DUMP)
+ **/
+static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
+{
+ char event_string[40];
+ char *envp[] = { event_string, NULL };
+
+ switch (code) {
+ case QL4_UEVENT_CODE_FW_DUMP:
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ ha->host_no);
+ break;
+ default:
+ /*do nothing*/
+ break;
+ }
+
+ kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
+}
+
/**
* qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
* @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
qla4_8xxx_idc_unlock(ha);
+ if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+ !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+ if (!qla4_8xxx_collect_md_data(ha)) {
+ qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+ } else {
+ ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ }
+ }
rval = qla4_8xxx_try_start_fw(ha);
qla4_8xxx_idc_lock(ha);
@@ -1686,6 +2360,7 @@ static void
qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
{
uint32_t dev_state, drv_state, drv_active;
+ uint32_t active_mask = 0xFFFFFFFF;
unsigned long reset_timeout;
ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
qla4_8xxx_idc_lock(ha);
}
- qla4_8xxx_set_rst_ready(ha);
+ if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s(%ld): reset acknowledged\n",
+ __func__, ha->host_no));
+ qla4_8xxx_set_rst_ready(ha);
+ } else {
+ active_mask = (~(1 << (ha->func_num * 4)));
+ }
/* wait for 10 seconds for reset ack from all functions */
reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
__func__, ha->host_no, drv_state, drv_active);
- while (drv_state != drv_active) {
+ while (drv_state != (drv_active & active_mask)) {
if (time_after_eq(jiffies, reset_timeout)) {
- printk("%s: RESET TIMEOUT!\n", DRIVER_NAME);
+ ql4_printk(KERN_INFO, ha,
+ "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+ DRIVER_NAME, drv_state, drv_active);
break;
}
+ /*
+ * When reset_owner times out, check which functions
+ * acked/did not ack
+ */
+ if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, ha->host_no, drv_state,
+ drv_active);
+ }
qla4_8xxx_idc_unlock(ha);
msleep(1000);
qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
}
+ /* Clear RESET OWNER as we are not going to use it any further */
+ clear_bit(AF_82XX_RST_OWNER, &ha->flags);
+
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA82XX_DEV_INITIALIZING) {
ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ qla4_8xxx_set_rst_ready(ha);
}
}
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
}
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown"));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
- ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Device Init Failed 0x%x = %s\n",
+ DRIVER_NAME,
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
}
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha,
- "2:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_NEED_RESET);
+ set_bit(AF_82XX_RST_OWNER, &ha->flags);
} else
ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
qla4_8xxx_clear_rst_ready(ha);
qla4_8xxx_idc_unlock(ha);
- if (rval == QLA_SUCCESS)
+ if (rval == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
clear_bit(AF_FW_RECOVERY, &ha->flags);
+ }
return rval;
}
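One detail worth spelling out from qla4_8xxx_md_rw_32() above: the upper 16 bits of a CRB offset select a 64 KB window (written to CRB_WINDOW_2M and read back to confirm it stuck), and the low 16 bits index into CRB_INDIRECT_2M. A small illustrative sketch of the split, with an example value:

/* Sketch only: how qla4_8xxx_md_rw_32() decomposes a CRB offset. */
static void example_crb_window_split(uint32_t off,
				     uint32_t *window, uint32_t *index)
{
	*window = off & 0xFFFF0000;	/* programmed into CRB_WINDOW_2M */
	*index  = off & 0x0000FFFF;	/* added to CRB_INDIRECT_2M      */

	/* e.g. off = 0x42110030 (MD_DIRECT_ROM_WINDOW) yields
	 * window = 0x42110000 and index = 0x0030. */
}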
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc7500e47b8b..30258479f100 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -792,4 +792,196 @@ struct crb_addr_pair {
#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
+/* Minidump related */
+
+/* Entry Type Defines */
+#define QLA82XX_RDNOP 0
+#define QLA82XX_RDCRB 1
+#define QLA82XX_RDMUX 2
+#define QLA82XX_QUEUE 3
+#define QLA82XX_BOARD 4
+#define QLA82XX_RDOCM 6
+#define QLA82XX_PREGS 7
+#define QLA82XX_L1DTG 8
+#define QLA82XX_L1ITG 9
+#define QLA82XX_L1DAT 11
+#define QLA82XX_L1INS 12
+#define QLA82XX_L2DTG 21
+#define QLA82XX_L2ITG 22
+#define QLA82XX_L2DAT 23
+#define QLA82XX_L2INS 24
+#define QLA82XX_RDROM 71
+#define QLA82XX_RDMEM 72
+#define QLA82XX_CNTRL 98
+#define QLA82XX_RDEND 255
+
+/* Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR 0x01
+#define QLA82XX_DBG_OPCODE_RW 0x02
+#define QLA82XX_DBG_OPCODE_AND 0x04
+#define QLA82XX_DBG_OPCODE_OR 0x08
+#define QLA82XX_DBG_OPCODE_POLL 0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
+
+/* Driver Flags */
+#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
+#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
+ * mismatch */
+
+/* driver_code is for the driver to write some info about the entry;
+ * currently unused.
+ */
+struct qla82xx_minidump_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+};
+
+/* Read CRB entry header */
+struct qla82xx_minidump_entry_crb {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+};
+
+struct qla82xx_minidump_entry_cache {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+};
+
+/* Read OCM */
+struct qla82xx_minidump_entry_rdocm {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+};
+
+/* Read Memory */
+struct qla82xx_minidump_entry_rdmem {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Read ROM */
+struct qla82xx_minidump_entry_rdrom {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Mux entry */
+struct qla82xx_minidump_entry_mux {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+};
+
+/* Queue entry */
+struct qla82xx_minidump_entry_queue {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+};
+
+#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
+#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
+#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
+#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
+#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
+#define QLA82XX_MINIDUMP_MEM_SIZE 0
+#define QLA82XX_MAX_ENTRY_HDR 4
+
+struct qla82xx_minidump {
+ uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
+ uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
+ uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
+ uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
+ uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
+ uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
+};
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
+#define RQST_TMPLT_SIZE 0x0
+#define RQST_TMPLT 0x1
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
+ 0x410000AC, 0x410000B8, 0x410000BC };
#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ee47820c30a6..cd15678f9ada 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
" Maximum queue depth to report for target devices.\n"
"\t\t Default: 32.");
+static int ql4xqfulltracking = 1;
+module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xqfulltracking,
+ " Enable or disable dynamic tracking and adjustment of\n"
+ "\t\t scsi device queue depth.\n"
+ "\t\t 0 - Disable.\n"
+ "\t\t 1 - Enable. (Default)");
+
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
" Target Session Recovery Timeout.\n"
"\t\t Default: 120 sec.");
+int ql4xmdcapmask = 0x1F;
+module_param(ql4xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xmdcapmask,
+ " Set the Minidump driver capture mask level.\n"
+ "\t\t Default is 0x1F.\n"
+ "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
+
+int ql4xenablemd = 1;
+module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemd,
+ " Set to enable minidump.\n"
+ "\t\t 0 - disable minidump\n"
+ "\t\t 1 - enable minidump (Default)");
+
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
* SCSI host template entry points
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+ int reason);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.slave_configure = qla4xxx_slave_configure,
.slave_alloc = qla4xxx_slave_alloc,
.slave_destroy = qla4xxx_slave_destroy,
+ .change_queue_depth = qla4xxx_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- unsigned long flags;
+ unsigned long flags, wtime;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t ddb_state;
+ int ret;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto destroy_session;
+ }
+
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto destroy_session;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto destroy_session;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+destroy_session:
qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
iscsi_session_teardown(cls_sess);
+
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+
ha->queues_len = 0;
ha->queues = NULL;
ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
ha->response_dma = 0;
ha->shadow_regs = NULL;
ha->shadow_regs_dma = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dump_size = 0;
/* Free srb pool. */
if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
set_bit(AF_INIT_DONE, &ha->flags);
+ qla4_8xxx_alloc_sysfs_attr(ha);
+
printk(KERN_INFO
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
iscsi_boot_destroy_kset(ha->boot_kset);
qla4xxx_destroy_fw_ddb_session(ha);
+ qla4_8xxx_free_sysfs_attr(ha);
scsi_remove_host(ha->host);
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
scsi_deactivate_tcq(sdev, 1);
}
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+ int reason)
+{
+ if (!ql4xqfulltracking)
+ return -EOPNOTSUPP;
+
+ return iscsi_change_queue_depth(sdev, qdepth, reason);
+}
+
/**
* qla4xxx_del_from_active_array - returns an active srb
* @ha: Pointer to host adapter structure.
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 97b30c108e36..cc1cc3518b87 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61c82a345f82..bbbc9c918d4c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level;
EXPORT_SYMBOL(scsi_logging_level);
#endif
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
-/* sd and scsi_pm need to coordinate flushing async actions */
+/* sd, scsi core and power management need to coordinate flushing async actions */
LIST_HEAD(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);
-#endif
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 386f0c53bea7..d0f71e5d065f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -664,7 +664,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
}
/**
- * scsi_eh_prep_cmnd - Save a scsi command info as part of error recory
+ * scsi_eh_prep_cmnd - Save a scsi command info as part of error recovery
* @scmd: SCSI command structure to hijack
* @ses: structure to save restore information
* @cmnd: CDB to send. Can be NULL if no new cmnd is needed
@@ -739,7 +739,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
EXPORT_SYMBOL(scsi_eh_prep_cmnd);
/**
- * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory
+ * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recovery
* @scmd: SCSI command structure to restore
 * @ses: saved information from a corresponding call to scsi_eh_prep_cmnd
*
@@ -762,7 +762,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
EXPORT_SYMBOL(scsi_eh_restore_cmnd);
/**
- * scsi_send_eh_cmnd - submit a scsi command as part of error recory
+ * scsi_send_eh_cmnd - submit a scsi command as part of error recovery
* @scmd: SCSI command structure to hijack
* @cmnd: CDB to send
* @cmnd_size: size in bytes of @cmnd
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ddfd31d4ce..6dfb9785d345 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- struct scsi_target *starget;
if (!sdev)
return 0;
shost = sdev->host;
- starget = scsi_target(sdev);
- if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
- scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+ /*
+ * Ignore host/starget busy state.
+	 * Since the block layer does not have a concept of fairness across
+	 * multiple queues, congestion of the host/starget needs to be handled
+	 * at the SCSI layer.
+ */
+ if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
return 1;
return 0;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index f661a41fa4c6..d4201ded3b22 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
drv = dev->driver;
- if (drv && drv->suspend)
+ if (drv && drv->suspend) {
err = drv->suspend(dev, msg);
+ if (err)
+ scsi_device_resume(to_scsi_device(dev));
+ }
}
dev_dbg(dev, "scsi suspend: %d\n", err);
return err;
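The hunk above closes an asymmetry: scsi_device_quiesce() blocks normal I/O, so if the driver's legacy ->suspend() callback then fails, the device has to be resumed again or it would stay quiesced. A condensed sketch of the resulting pattern, using only calls visible in the hunk (the example_ function name is illustrative):

#include <linux/device.h>
#include <linux/pm.h>
#include <scsi/scsi_device.h>

static int example_dev_type_suspend(struct device *dev, pm_message_t msg)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct device_driver *drv = dev->driver;
	int err;

	err = scsi_device_quiesce(sdev);	/* stop issuing new requests */
	if (err)
		return err;

	if (drv && drv->suspend) {
		err = drv->suspend(dev, msg);
		if (err)
			scsi_device_resume(sdev); /* undo the quiesce on failure */
	}

	return err;
}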
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 01b03744f1f9..2e5fe584aad3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
do {
if (list_empty(&scanning_hosts))
- return 0;
+ goto out;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
@@ -179,8 +179,11 @@ int scsi_complete_async_scans(void)
}
done:
spin_unlock(&async_scan_lock);
-
kfree(data);
+
+ out:
+ async_synchronize_full_domain(&scsi_sd_probe_domain);
+
return 0;
}
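Both the scsi.c and the scsi_scan.c changes revolve around the async probe domain: scsi_sd_probe_domain is now declared unconditionally, and scsi_complete_async_scans() flushes it even when no host is currently scanning. A minimal sketch of the async-domain API as it exists in the kernel version this diff targets (where a domain is a plain list head); names other than the async_* calls are illustrative:

#include <linux/async.h>
#include <linux/list.h>

static LIST_HEAD(example_probe_domain);

static void example_probe_work(void *data, async_cookie_t cookie)
{
	/* long-running probe work, run asynchronously */
}

static void example_run_and_flush(void)
{
	async_schedule_domain(example_probe_work, NULL, &example_probe_domain);

	/* wait only for the work queued in this domain, as sd/scsi_scan do */
	async_synchronize_full_domain(&example_probe_domain);
}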
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 74708fcaf82f..072734538876 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/device.h>
-#include <scsi/scsi_scan.h>
+#include "scsi_priv.h"
static int __init wait_scan_init(void)
{
@@ -22,11 +22,6 @@ static int __init wait_scan_init(void)
* and might not yet have reached the scsi async scanning
*/
wait_for_device_probe();
- /*
- * and then we wait for the actual asynchronous scsi scan
- * to finish.
- */
- scsi_complete_async_scans();
return 0;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6f0a4c612b3b..6f72b80121a0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1899,6 +1899,8 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
{
if (sdp->host->max_cmd_len < 16)
return 0;
+ if (sdp->try_rc_10_first)
+ return 0;
if (sdp->scsi_level > SCSI_SPC_2)
return 1;
if (scsi_device_protection(sdp))
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 8ac6ce792b69..a318264a4ba1 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -17,7 +17,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*----------------------------------------------------------------------------
*
- * MCA card detection code by Trent McNair.
- * MCA card detection code by Trent McNair.
+ * MCA card detection code by Trent McNair. (now deleted)
* Fixes to not explicitly nul bss data from Xavier Bestel.
* Some multiboard fixes from Rolf Eike Beer.
* Auto probing of EISA config space from Trevor Hemsley.
@@ -32,7 +32,6 @@
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/init.h>
-#include <linux/mca.h>
#include <linux/eisa.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>
@@ -43,7 +42,7 @@
#include "53c700.h"
-/* Must be enough for both EISA and MCA */
+/* Must be enough for EISA */
#define MAX_SLOTS 8
static __u8 __initdata id_array[MAX_SLOTS] = { [0 ... MAX_SLOTS-1] = 7 };
@@ -89,7 +88,7 @@ param_setup(char *str)
__setup("sim710=", param_setup);
static struct scsi_host_template sim710_driver_template = {
- .name = "LSI (Symbios) 710 MCA/EISA",
+ .name = "LSI (Symbios) 710 EISA",
.proc_name = "sim710",
.this_id = 7,
.module = THIS_MODULE,
@@ -169,114 +168,6 @@ sim710_device_remove(struct device *dev)
return 0;
}
-#ifdef CONFIG_MCA
-
-/* CARD ID 01BB and 01BA use the same pos values */
-#define MCA_01BB_IO_PORTS { 0x0000, 0x0000, 0x0800, 0x0C00, 0x1000, 0x1400, \
- 0x1800, 0x1C00, 0x2000, 0x2400, 0x2800, \
- 0x2C00, 0x3000, 0x3400, 0x3800, 0x3C00, \
- 0x4000, 0x4400, 0x4800, 0x4C00, 0x5000 }
-
-#define MCA_01BB_IRQS { 3, 5, 11, 14 }
-
-/* CARD ID 004f */
-#define MCA_004F_IO_PORTS { 0x0000, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600 }
-#define MCA_004F_IRQS { 5, 9, 14 }
-
-static short sim710_mca_id_table[] = { 0x01bb, 0x01ba, 0x004f, 0};
-
-static __init int
-sim710_mca_probe(struct device *dev)
-{
- struct mca_device *mca_dev = to_mca_device(dev);
- int slot = mca_dev->slot;
- int pos[3];
- unsigned int base;
- int irq_vector;
- short id = sim710_mca_id_table[mca_dev->index];
- static int io_004f_by_pos[] = MCA_004F_IO_PORTS;
- static int irq_004f_by_pos[] = MCA_004F_IRQS;
- static int io_01bb_by_pos[] = MCA_01BB_IO_PORTS;
- static int irq_01bb_by_pos[] = MCA_01BB_IRQS;
- char *name;
- int clock;
-
- pos[0] = mca_device_read_stored_pos(mca_dev, 2);
- pos[1] = mca_device_read_stored_pos(mca_dev, 3);
- pos[2] = mca_device_read_stored_pos(mca_dev, 4);
-
- /*
- * 01BB & 01BA port base by bits 7,6,5,4,3,2 in pos[2]
- *
- * 000000 <disabled> 001010 0x2800
- * 000001 <invalid> 001011 0x2C00
- * 000010 0x0800 001100 0x3000
- * 000011 0x0C00 001101 0x3400
- * 000100 0x1000 001110 0x3800
- * 000101 0x1400 001111 0x3C00
- * 000110 0x1800 010000 0x4000
- * 000111 0x1C00 010001 0x4400
- * 001000 0x2000 010010 0x4800
- * 001001 0x2400 010011 0x4C00
- * 010100 0x5000
- *
- * 00F4 port base by bits 3,2,1 in pos[0]
- *
- * 000 <disabled> 001 0x200
- * 010 0x300 011 0x400
- * 100 0x500 101 0x600
- *
- * 01BB & 01BA IRQ is specified in pos[0] bits 7 and 6:
- *
- * 00 3 10 11
- * 01 5 11 14
- *
- * 00F4 IRQ specified by bits 6,5,4 in pos[0]
- *
- * 100 5 101 9
- * 110 14
- */
-
- if (id == 0x01bb || id == 0x01ba) {
- base = io_01bb_by_pos[(pos[2] & 0xFC) >> 2];
- irq_vector =
- irq_01bb_by_pos[((pos[0] & 0xC0) >> 6)];
-
- clock = 50;
- if (id == 0x01bb)
- name = "NCR 3360/3430 SCSI SubSystem";
- else
- name = "NCR Dual SIOP SCSI Host Adapter Board";
- } else if ( id == 0x004f ) {
- base = io_004f_by_pos[((pos[0] & 0x0E) >> 1)];
- irq_vector =
- irq_004f_by_pos[((pos[0] & 0x70) >> 4) - 4];
- clock = 50;
- name = "NCR 53c710 SCSI Host Adapter Board";
- } else {
- return -ENODEV;
- }
- mca_device_set_name(mca_dev, name);
- mca_device_set_claim(mca_dev, 1);
- base = mca_device_transform_ioport(mca_dev, base);
- irq_vector = mca_device_transform_irq(mca_dev, irq_vector);
-
- return sim710_probe_common(dev, base, irq_vector, clock,
- 0, id_array[slot]);
-}
-
-static struct mca_driver sim710_mca_driver = {
- .id_table = sim710_mca_id_table,
- .driver = {
- .name = "sim710",
- .bus = &mca_bus_type,
- .probe = sim710_mca_probe,
- .remove = __devexit_p(sim710_device_remove),
- },
-};
-
-#endif /* CONFIG_MCA */
-
#ifdef CONFIG_EISA
static struct eisa_device_id sim710_eisa_ids[] = {
{ "CPQ4410" },
@@ -344,10 +235,6 @@ static int __init sim710_init(void)
param_setup(sim710);
#endif
-#ifdef CONFIG_MCA
- err = mca_register_driver(&sim710_mca_driver);
-#endif
-
#ifdef CONFIG_EISA
err = eisa_driver_register(&sim710_eisa_driver);
#endif
@@ -361,11 +248,6 @@ static int __init sim710_init(void)
static void __exit sim710_exit(void)
{
-#ifdef CONFIG_MCA
- if (MCA_bus)
- mca_unregister_driver(&sim710_mca_driver);
-#endif
-
#ifdef CONFIG_EISA
eisa_driver_unregister(&sim710_eisa_driver);
#endif
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4e010b727818..6a4fd00117ca 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = pci_request_regions(pdev, UFSHCD);
if (err < 0) {
dev_err(&pdev->dev, "request regions failed\n");
- goto out_disable;
+ goto out_host_put;
}
hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
iounmap(hba->mmio_base);
out_release_regions:
pci_release_regions(pdev);
-out_disable:
+out_host_put:
scsi_host_put(host);
+out_disable:
pci_clear_master(pdev);
pci_disable_device(pdev);
out_error:
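The relabelling above restores the usual rule of unwinding in reverse order of acquisition: once the Scsi_Host has been allocated, a pci_request_regions() failure must drop the host reference before the PCI device is disabled. A generic hedged sketch of the idiom; the template and all example_ names are illustrative, not ufshcd's:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	host = scsi_host_alloc(&example_sht, 0);
	if (!host) {
		err = -ENOMEM;
		goto out_disable;
	}

	err = pci_request_regions(pdev, "example");
	if (err)
		goto out_host_put;	/* release in reverse order of acquisition */

	/* ... remaining setup ... */
	return 0;

out_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);
	return err;
}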
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 91b6d52f74eb..f0d015dd0fef 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -2,6 +2,7 @@
* Helper routines for SuperH Clock Pulse Generator blocks (CPG).
*
* Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -13,26 +14,44 @@
#include <linux/io.h>
#include <linux/sh_clk.h>
-static int sh_clk_mstp32_enable(struct clk *clk)
+static unsigned int sh_clk_read(struct clk *clk)
{
- iowrite32(ioread32(clk->mapped_reg) & ~(1 << clk->enable_bit),
- clk->mapped_reg);
+ if (clk->flags & CLK_ENABLE_REG_8BIT)
+ return ioread8(clk->mapped_reg);
+ else if (clk->flags & CLK_ENABLE_REG_16BIT)
+ return ioread16(clk->mapped_reg);
+
+ return ioread32(clk->mapped_reg);
+}
+
+static void sh_clk_write(int value, struct clk *clk)
+{
+ if (clk->flags & CLK_ENABLE_REG_8BIT)
+ iowrite8(value, clk->mapped_reg);
+ else if (clk->flags & CLK_ENABLE_REG_16BIT)
+ iowrite16(value, clk->mapped_reg);
+ else
+ iowrite32(value, clk->mapped_reg);
+}
+
+static int sh_clk_mstp_enable(struct clk *clk)
+{
+ sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
return 0;
}
-static void sh_clk_mstp32_disable(struct clk *clk)
+static void sh_clk_mstp_disable(struct clk *clk)
{
- iowrite32(ioread32(clk->mapped_reg) | (1 << clk->enable_bit),
- clk->mapped_reg);
+ sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}
-static struct sh_clk_ops sh_clk_mstp32_clk_ops = {
- .enable = sh_clk_mstp32_enable,
- .disable = sh_clk_mstp32_disable,
+static struct sh_clk_ops sh_clk_mstp_clk_ops = {
+ .enable = sh_clk_mstp_enable,
+ .disable = sh_clk_mstp_disable,
.recalc = followparent_recalc,
};
-int __init sh_clk_mstp32_register(struct clk *clks, int nr)
+int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
struct clk *clkp;
int ret = 0;
@@ -40,7 +59,7 @@ int __init sh_clk_mstp32_register(struct clk *clks, int nr)
for (k = 0; !ret && (k < nr); k++) {
clkp = clks + k;
- clkp->ops = &sh_clk_mstp32_clk_ops;
+ clkp->ops = &sh_clk_mstp_clk_ops;
ret |= clk_register(clkp);
}
@@ -72,7 +91,7 @@ static unsigned long sh_clk_div6_recalc(struct clk *clk)
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, NULL);
- idx = ioread32(clk->mapped_reg) & 0x003f;
+ idx = sh_clk_read(clk) & 0x003f;
return clk->freq_table[idx].frequency;
}
@@ -98,10 +117,10 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
if (ret < 0)
return ret;
- value = ioread32(clk->mapped_reg) &
+ value = sh_clk_read(clk) &
~(((1 << clk->src_width) - 1) << clk->src_shift);
- iowrite32(value | (i << clk->src_shift), clk->mapped_reg);
+ sh_clk_write(value | (i << clk->src_shift), clk);
/* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -119,10 +138,10 @@ static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
if (idx < 0)
return idx;
- value = ioread32(clk->mapped_reg);
+ value = sh_clk_read(clk);
value &= ~0x3f;
value |= idx;
- iowrite32(value, clk->mapped_reg);
+ sh_clk_write(value, clk);
return 0;
}
@@ -133,9 +152,9 @@ static int sh_clk_div6_enable(struct clk *clk)
ret = sh_clk_div6_set_rate(clk, clk->rate);
if (ret == 0) {
- value = ioread32(clk->mapped_reg);
+ value = sh_clk_read(clk);
value &= ~0x100; /* clear stop bit to enable clock */
- iowrite32(value, clk->mapped_reg);
+ sh_clk_write(value, clk);
}
return ret;
}
@@ -144,10 +163,10 @@ static void sh_clk_div6_disable(struct clk *clk)
{
unsigned long value;
- value = ioread32(clk->mapped_reg);
+ value = sh_clk_read(clk);
value |= 0x100; /* stop clock */
value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
- iowrite32(value, clk->mapped_reg);
+ sh_clk_write(value, clk);
}
static struct sh_clk_ops sh_clk_div6_clk_ops = {
@@ -182,7 +201,7 @@ static int __init sh_clk_init_parent(struct clk *clk)
return -EINVAL;
}
- val = (ioread32(clk->mapped_reg) >> clk->src_shift);
+ val = (sh_clk_read(clk) >> clk->src_shift);
val &= (1 << clk->src_width) - 1;
if (val >= clk->parent_num) {
@@ -252,7 +271,7 @@ static unsigned long sh_clk_div4_recalc(struct clk *clk)
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, &clk->arch_flags);
- idx = (ioread32(clk->mapped_reg) >> clk->enable_bit) & 0x000f;
+ idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;
return clk->freq_table[idx].frequency;
}
@@ -270,15 +289,15 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
*/
if (parent->flags & CLK_ENABLE_ON_INIT)
- value = ioread32(clk->mapped_reg) & ~(1 << 7);
+ value = sh_clk_read(clk) & ~(1 << 7);
else
- value = ioread32(clk->mapped_reg) | (1 << 7);
+ value = sh_clk_read(clk) | (1 << 7);
ret = clk_reparent(clk, parent);
if (ret < 0)
return ret;
- iowrite32(value, clk->mapped_reg);
+ sh_clk_write(value, clk);
	/* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -295,10 +314,10 @@ static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
if (idx < 0)
return idx;
- value = ioread32(clk->mapped_reg);
+ value = sh_clk_read(clk);
value &= ~(0xf << clk->enable_bit);
value |= (idx << clk->enable_bit);
- iowrite32(value, clk->mapped_reg);
+ sh_clk_write(value, clk);
if (d4t->kick)
d4t->kick(clk);
@@ -308,13 +327,13 @@ static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
static int sh_clk_div4_enable(struct clk *clk)
{
- iowrite32(ioread32(clk->mapped_reg) & ~(1 << 8), clk->mapped_reg);
+ sh_clk_write(sh_clk_read(clk) & ~(1 << 8), clk);
return 0;
}
static void sh_clk_div4_disable(struct clk *clk)
{
- iowrite32(ioread32(clk->mapped_reg) | (1 << 8), clk->mapped_reg);
+ sh_clk_write(sh_clk_read(clk) | (1 << 8), clk);
}
static struct sh_clk_ops sh_clk_div4_clk_ops = {
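With sh_clk_read()/sh_clk_write() dispatching on the register width, an SoC clock table can mark MSTP clocks whose enable registers are only 8 or 16 bits wide and still share the common ops. A usage sketch under stated assumptions: the register addresses, bit numbers and the .enable_reg field name are guesses based on the SH clock framework, not taken from this diff; only clk->flags, clk->enable_bit, CLK_ENABLE_REG_16BIT and sh_clk_mstp_register() appear in the hunks above.

#include <linux/sh_clk.h>

static struct clk example_mstp_clks[] = {
	/* 16-bit standby/enable register: accessed via ioread16/iowrite16 */
	[0] = {
		.enable_reg	= (void __iomem *)0xa4150030,	/* assumed address */
		.enable_bit	= 3,
		.flags		= CLK_ENABLE_REG_16BIT,
	},
	/* no width flag: keeps the original 32-bit accessors */
	[1] = {
		.enable_reg	= (void __iomem *)0xa4150034,	/* assumed address */
		.enable_bit	= 0,
	},
};

static int __init example_clk_setup(void)
{
	return sh_clk_mstp_register(example_mstp_clks,
				    ARRAY_SIZE(example_mstp_clks));
}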
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
index 5fea1ee8799a..14eb01ef5d72 100644
--- a/drivers/sh/intc/dynamic.c
+++ b/drivers/sh/intc/dynamic.c
@@ -55,11 +55,3 @@ void destroy_irq(unsigned int irq)
{
irq_free_desc(irq);
}
-
-void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
-{
- int i;
-
- for (i = 0; i < nr_vecs; i++)
- irq_reserve_irq(evt2irq(vectors[i].vect));
-}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 00c024039c97..cd2fe350e724 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -311,7 +311,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
+ depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
select S3C64XX_DMA if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index acc88b4d2869..249077e5cc48 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -216,9 +216,6 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
if (pdata) {
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
- } else {
- master->bus_num = -1;
- master->num_chipselect = 1;
}
sp->bitbang.master = spi_master_get(master);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 6eee64a5d240..b2d4b9e4e010 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -25,12 +25,12 @@
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
@@ -78,10 +78,7 @@ struct mcfqspi {
wait_queue_head_t waitq;
- struct work_struct work;
- struct workqueue_struct *workq;
- spinlock_t lock;
- struct list_head msgq;
+ struct device *dev;
};
static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
@@ -303,120 +300,80 @@ static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
}
}
-static void mcfqspi_work(struct work_struct *work)
+static int mcfqspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work);
- unsigned long flags;
-
- spin_lock_irqsave(&mcfqspi->lock, flags);
- while (!list_empty(&mcfqspi->msgq)) {
- struct spi_message *msg;
- struct spi_device *spi;
- struct spi_transfer *xfer;
- int status = 0;
-
- msg = container_of(mcfqspi->msgq.next, struct spi_message,
- queue);
-
- list_del_init(&msg->queue);
- spin_unlock_irqrestore(&mcfqspi->lock, flags);
-
- spi = msg->spi;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- bool cs_high = spi->mode & SPI_CS_HIGH;
- u16 qmr = MCFQSPI_QMR_MSTR;
-
- if (xfer->bits_per_word)
- qmr |= xfer->bits_per_word << 10;
- else
- qmr |= spi->bits_per_word << 10;
- if (spi->mode & SPI_CPHA)
- qmr |= MCFQSPI_QMR_CPHA;
- if (spi->mode & SPI_CPOL)
- qmr |= MCFQSPI_QMR_CPOL;
- if (xfer->speed_hz)
- qmr |= mcfqspi_qmr_baud(xfer->speed_hz);
- else
- qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
- mcfqspi_wr_qmr(mcfqspi, qmr);
-
- mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
-
- mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
- if ((xfer->bits_per_word ? xfer->bits_per_word :
- spi->bits_per_word) == 8)
- mcfqspi_transfer_msg8(mcfqspi, xfer->len,
- xfer->tx_buf,
- xfer->rx_buf);
- else
- mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2,
- xfer->tx_buf,
- xfer->rx_buf);
- mcfqspi_wr_qir(mcfqspi, 0);
-
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
- if (xfer->cs_change) {
- if (!list_is_last(&xfer->transfer_list,
- &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi,
- spi->chip_select,
- cs_high);
- } else {
- if (list_is_last(&xfer->transfer_list,
- &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi,
- spi->chip_select,
- cs_high);
- }
- msg->actual_length += xfer->len;
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *t;
+ int status = 0;
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ if (t->bits_per_word)
+ qmr |= t->bits_per_word << 10;
+ else
+ qmr |= spi->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ if (t->speed_hz)
+ qmr |= mcfqspi_qmr_baud(t->speed_hz);
+ else
+ qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if ((t->bits_per_word ? t->bits_per_word :
+ spi->bits_per_word) == 8)
+ mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
+ t->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
+ t->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+ if (t->cs_change) {
+ if (!list_is_last(&t->transfer_list, &msg->transfers))
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
+ cs_high);
+ } else {
+ if (list_is_last(&t->transfer_list, &msg->transfers))
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
+ cs_high);
}
- msg->status = status;
- msg->complete(msg->context);
-
- spin_lock_irqsave(&mcfqspi->lock, flags);
+ msg->actual_length += t->len;
}
- spin_unlock_irqrestore(&mcfqspi->lock, flags);
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+
}
-static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg)
+static int mcfqspi_prepare_transfer_hw(struct spi_master *master)
{
- struct mcfqspi *mcfqspi;
- struct spi_transfer *xfer;
- unsigned long flags;
-
- mcfqspi = spi_master_get_devdata(spi->master);
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (xfer->bits_per_word && ((xfer->bits_per_word < 8)
- || (xfer->bits_per_word > 16))) {
- dev_dbg(&spi->dev,
- "%d bits per word is not supported\n",
- xfer->bits_per_word);
- goto fail;
- }
- if (xfer->speed_hz) {
- u32 real_speed = MCFQSPI_BUSCLK /
- mcfqspi_qmr_baud(xfer->speed_hz);
- if (real_speed != xfer->speed_hz)
- dev_dbg(&spi->dev,
- "using speed %d instead of %d\n",
- real_speed, xfer->speed_hz);
- }
- }
- msg->status = -EINPROGRESS;
- msg->actual_length = 0;
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- spin_lock_irqsave(&mcfqspi->lock, flags);
- list_add_tail(&msg->queue, &mcfqspi->msgq);
- queue_work(mcfqspi->workq, &mcfqspi->work);
- spin_unlock_irqrestore(&mcfqspi->lock, flags);
+ pm_runtime_get_sync(mcfqspi->dev);
+
+ return 0;
+}
+
+static int mcfqspi_unprepare_transfer_hw(struct spi_master *master)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ pm_runtime_put_sync(mcfqspi->dev);
return 0;
-fail:
- msg->status = -EINVAL;
- return -EINVAL;
}
static int mcfqspi_setup(struct spi_device *spi)
@@ -502,21 +459,10 @@ static int __devinit mcfqspi_probe(struct platform_device *pdev)
}
clk_enable(mcfqspi->clk);
- mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent));
- if (!mcfqspi->workq) {
- dev_dbg(&pdev->dev, "create_workqueue failed\n");
- status = -ENOMEM;
- goto fail4;
- }
- INIT_WORK(&mcfqspi->work, mcfqspi_work);
- spin_lock_init(&mcfqspi->lock);
- INIT_LIST_HEAD(&mcfqspi->msgq);
- init_waitqueue_head(&mcfqspi->waitq);
-
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_dbg(&pdev->dev, "platform data is missing\n");
- goto fail5;
+ goto fail4;
}
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
@@ -525,28 +471,33 @@ static int __devinit mcfqspi_probe(struct platform_device *pdev)
status = mcfqspi_cs_setup(mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "error initializing cs_control\n");
- goto fail5;
+ goto fail4;
}
+ init_waitqueue_head(&mcfqspi->waitq);
+ mcfqspi->dev = &pdev->dev;
+
master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
master->setup = mcfqspi_setup;
- master->transfer = mcfqspi_transfer;
+ master->transfer_one_message = mcfqspi_transfer_one_message;
+ master->prepare_transfer_hardware = mcfqspi_prepare_transfer_hw;
+ master->unprepare_transfer_hardware = mcfqspi_unprepare_transfer_hw;
platform_set_drvdata(pdev, master);
status = spi_register_master(master);
if (status) {
dev_dbg(&pdev->dev, "spi_register_master failed\n");
- goto fail6;
+ goto fail5;
}
+ pm_runtime_enable(mcfqspi->dev);
+
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
return 0;
-fail6:
- mcfqspi_cs_teardown(mcfqspi);
fail5:
- destroy_workqueue(mcfqspi->workq);
+ mcfqspi_cs_teardown(mcfqspi);
fail4:
clk_disable(mcfqspi->clk);
clk_put(mcfqspi->clk);
@@ -570,12 +521,12 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pm_runtime_disable(mcfqspi->dev);
/* disable the hardware (set the baud rate to 0) */
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
platform_set_drvdata(pdev, NULL);
mcfqspi_cs_teardown(mcfqspi);
- destroy_workqueue(mcfqspi->workq);
clk_disable(mcfqspi->clk);
clk_put(mcfqspi->clk);
free_irq(mcfqspi->irq, mcfqspi);
@@ -587,11 +538,13 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int mcfqspi_suspend(struct device *dev)
{
- struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
clk_disable(mcfqspi->clk);
@@ -600,27 +553,47 @@ static int mcfqspi_suspend(struct device *dev)
static int mcfqspi_resume(struct device *dev)
{
- struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ spi_master_resume(master);
clk_enable(mcfqspi->clk);
return 0;
}
+#endif
-static struct dev_pm_ops mcfqspi_dev_pm_ops = {
- .suspend = mcfqspi_suspend,
- .resume = mcfqspi_resume,
-};
+#ifdef CONFIG_PM_RUNTIME
+static int mcfqspi_runtime_suspend(struct device *dev)
+{
+ struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
-#define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops)
-#else
-#define MCFQSPI_DEV_PM_OPS NULL
+ clk_disable(mcfqspi->clk);
+
+ return 0;
+}
+
+static int mcfqspi_runtime_resume(struct device *dev)
+{
+ struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+
+ clk_enable(mcfqspi->clk);
+
+ return 0;
+}
#endif
+static const struct dev_pm_ops mcfqspi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(mcfqspi_suspend, mcfqspi_resume)
+ SET_RUNTIME_PM_OPS(mcfqspi_runtime_suspend, mcfqspi_runtime_resume,
+ NULL)
+};
+
static struct platform_driver mcfqspi_driver = {
.driver.name = DRIVER_NAME,
.driver.owner = THIS_MODULE,
- .driver.pm = MCFQSPI_DEV_PM_OPS,
+ .driver.pm = &mcfqspi_pm,
.probe = mcfqspi_probe,
.remove = __devexit_p(mcfqspi_remove),
};
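The Coldfire conversion above moves the driver from its private workqueue onto the SPI core's message queue: roughly, the core calls prepare_transfer_hardware() before working through queued messages, transfer_one_message() once per message, and unprepare_transfer_hardware() when the queue goes idle. A minimal hedged sketch of that contract; the example_ driver data and names are illustrative, the spi_master callbacks and pm_runtime calls are the ones used in the hunk:

#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

struct example_spi {
	struct device *dev;
};

static int example_prepare_hw(struct spi_master *master)
{
	struct example_spi *es = spi_master_get_devdata(master);

	pm_runtime_get_sync(es->dev);		/* power up the controller */
	return 0;
}

static int example_transfer_one_message(struct spi_master *master,
					struct spi_message *msg)
{
	/* ... perform the transfers on msg->transfers here ... */
	msg->status = 0;
	spi_finalize_current_message(master);	/* hand the message back to the core */
	return 0;
}

static int example_unprepare_hw(struct spi_master *master)
{
	struct example_spi *es = spi_master_get_devdata(master);

	pm_runtime_put_sync(es->dev);		/* allow the controller to power down */
	return 0;
}

The probe path then only assigns these three callbacks to the spi_master, exactly as the mcfqspi hunk above does, and the spin lock, message list and workqueue become unnecessary.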
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 14f7cc9523f0..ff81abbb3066 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -164,18 +164,7 @@ static struct pci_driver dw_spi_driver = {
.resume = spi_resume,
};
-static int __init mrst_spi_init(void)
-{
- return pci_register_driver(&dw_spi_driver);
-}
-
-static void __exit mrst_spi_exit(void)
-{
- pci_unregister_driver(&dw_spi_driver);
-}
-
-module_init(mrst_spi_init);
-module_exit(mrst_spi_exit);
+module_pci_driver(dw_spi_driver);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
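module_pci_driver() generates the init/exit boilerplate that the hunk above deletes; in effect the single line expands to the equivalent of:

/* What module_pci_driver(dw_spi_driver) expands to, in effect: */
static int __init dw_spi_driver_init(void)
{
	return pci_register_driver(&dw_spi_driver);
}
module_init(dw_spi_driver_init);

static void __exit dw_spi_driver_exit(void)
{
	pci_unregister_driver(&dw_spi_driver);
}
module_exit(dw_spi_driver_exit);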
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index e8055073e84d..f97f1d248800 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -76,7 +76,6 @@
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
- * @irq: IRQ number used by the driver
* @min_rate: minimum clock rate (in Hz) supported by the controller
* @max_rate: maximum clock rate (in Hz) supported by the controller
* @running: is the queue running
@@ -114,7 +113,6 @@ struct ep93xx_spi {
struct clk *clk;
void __iomem *regs_base;
unsigned long sspdr_phys;
- int irq;
unsigned long min_rate;
unsigned long max_rate;
bool running;
@@ -1031,6 +1029,7 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
struct ep93xx_spi_info *info;
struct ep93xx_spi *espi;
struct resource *res;
+ int irq;
int error;
info = pdev->dev.platform_data;
@@ -1070,8 +1069,8 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
espi->pdev = pdev;
- espi->irq = platform_get_irq(pdev, 0);
- if (espi->irq < 0) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
error = -EBUSY;
dev_err(&pdev->dev, "failed to get irq resources\n");
goto fail_put_clock;
@@ -1084,26 +1083,20 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
goto fail_put_clock;
}
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- dev_err(&pdev->dev, "unable to request iomem resources\n");
- error = -EBUSY;
- goto fail_put_clock;
- }
-
espi->sspdr_phys = res->start + SSPDR;
- espi->regs_base = ioremap(res->start, resource_size(res));
+
+ espi->regs_base = devm_request_and_ioremap(&pdev->dev, res);
if (!espi->regs_base) {
dev_err(&pdev->dev, "failed to map resources\n");
error = -ENODEV;
- goto fail_free_mem;
+ goto fail_put_clock;
}
- error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
- "ep93xx-spi", espi);
+ error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
+ 0, "ep93xx-spi", espi);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto fail_unmap_regs;
+ goto fail_put_clock;
}
if (info->use_dma && ep93xx_spi_setup_dma(espi))
@@ -1128,7 +1121,7 @@ static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
- (unsigned long)res->start, espi->irq);
+ (unsigned long)res->start, irq);
return 0;
@@ -1136,11 +1129,6 @@ fail_free_queue:
destroy_workqueue(espi->wq);
fail_free_dma:
ep93xx_spi_release_dma(espi);
- free_irq(espi->irq, espi);
-fail_unmap_regs:
- iounmap(espi->regs_base);
-fail_free_mem:
- release_mem_region(res->start, resource_size(res));
fail_put_clock:
clk_put(espi->clk);
fail_release_master:
@@ -1154,7 +1142,6 @@ static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct resource *res;
spin_lock_irq(&espi->lock);
espi->running = false;
@@ -1180,10 +1167,6 @@ static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
spin_unlock_irq(&espi->lock);
ep93xx_spi_release_dma(espi);
- free_irq(espi->irq, espi);
- iounmap(espi->regs_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
clk_put(espi->clk);
platform_set_drvdata(pdev, NULL);
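The ep93xx changes lean on managed (devm_*) resources: the memory region, the ioremap and the IRQ are released automatically when probe fails or the device is removed, which is why the matching free_irq()/iounmap()/release_mem_region() calls and their error labels disappear. A hedged sketch of the pattern; all example_ names are illustrative:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __devinit example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* requests the region and ioremaps it; both are device-managed */
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EBUSY;

	/* freed automatically on driver detach -- no free_irq() in remove() */
	return devm_request_irq(&pdev->dev, irq, example_interrupt, 0,
				dev_name(&pdev->dev), NULL);
}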
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7523a2429d09..27bdc47b5250 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <sysdev/fsl_soc.h>
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 2674fad7f68a..1503574b215a 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -22,7 +22,7 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/of_platform.h>
-#include <linux/of_spi.h>
+#include <linux/spi/spi.h>
#include <sysdev/fsl_soc.h>
#include "spi-fsl-lib.h"
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 5f748c0d96bd..6a62934ca74c 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -933,7 +933,7 @@ err:
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
- struct device *dev = spi->dev.parent;
+ struct device *dev = spi->dev.parent->parent;
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
u16 cs = spi->chip_select;
int gpio = pinfo->gpios[cs];
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 69c9a6601f45..47877d687614 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -86,7 +86,8 @@ struct spi_imx_data {
struct completion xfer_done;
void __iomem *base;
int irq;
- struct clk *clk;
+ struct clk *clk_per;
+ struct clk *clk_ipg;
unsigned long spi_clk;
unsigned int count;
@@ -853,15 +854,22 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
goto out_free_irq;
}
- spi_imx->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(spi_imx->clk)) {
- dev_err(&pdev->dev, "unable to get clock\n");
- ret = PTR_ERR(spi_imx->clk);
+ spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(spi_imx->clk_ipg)) {
+ ret = PTR_ERR(spi_imx->clk_ipg);
goto out_free_irq;
}
- clk_enable(spi_imx->clk);
- spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
+ spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(spi_imx->clk_per)) {
+ ret = PTR_ERR(spi_imx->clk_per);
+ goto out_free_irq;
+ }
+
+ clk_prepare_enable(spi_imx->clk_per);
+ clk_prepare_enable(spi_imx->clk_ipg);
+
+ spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
spi_imx->devtype_data->reset(spi_imx);
@@ -879,8 +887,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
return ret;
out_clk_put:
- clk_disable(spi_imx->clk);
- clk_put(spi_imx->clk);
+ clk_disable_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
out_free_irq:
free_irq(spi_imx->irq, spi_imx);
out_iounmap:
@@ -908,8 +916,8 @@ static int __devexit spi_imx_remove(struct platform_device *pdev)
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable(spi_imx->clk);
- clk_put(spi_imx->clk);
+ clk_disable_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
free_irq(spi_imx->irq, spi_imx);
iounmap(spi_imx->base);
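The i.MX SPI block now takes two named clocks -- roughly, "ipg" clocks the register/bus interface and "per" feeds the SPI bit clock -- and uses the clk_prepare_enable()/clk_disable_unprepare() pairing required by the common clock framework. A hedged sketch of the acquisition step only (the example_ helper is illustrative; error unwinding is trimmed):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_get_clocks(struct platform_device *pdev,
			      struct clk **clk_ipg, struct clk **clk_per,
			      unsigned long *rate)
{
	*clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(*clk_ipg))
		return PTR_ERR(*clk_ipg);

	*clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(*clk_per))
		return PTR_ERR(*clk_per);

	clk_prepare_enable(*clk_per);
	clk_prepare_enable(*clk_ipg);

	*rate = clk_get_rate(*clk_per);	/* the SPI bitrate derives from "per" */
	return 0;
}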
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index 933eb9d9ddd4..0759b5db9883 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -219,9 +219,6 @@ static void spi_lm70llp_attach(struct parport *p)
}
pp = spi_master_get_devdata(master);
- master->bus_num = -1; /* dynamic alloc of a bus number */
- master->num_chipselect = 1;
-
/*
* SPI and bitbang hookup.
*/
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 57633d963456..cb3a3830b0a5 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -433,7 +433,6 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
goto err_alloc;
}
- master->bus_num = -1;
master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
@@ -479,8 +478,6 @@ static int __devinit mpc52xx_spi_probe(struct platform_device *op)
gpio_direction_output(gpio_cs, 1);
ms->gpio_cs[i] = gpio_cs;
}
- } else {
- master->num_chipselect = 1;
}
spin_lock_init(&ms->lock);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index bb9274c2526d..0c73dd4f43a0 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -44,9 +44,7 @@
#include <plat/mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
-
-/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */
-#define OMAP2_MCSPI_MAX_CTRL 4
+#define SPI_AUTOSUSPEND_TIMEOUT 2000
#define OMAP2_MCSPI_REVISION 0x00
#define OMAP2_MCSPI_SYSSTATUS 0x14
@@ -111,19 +109,25 @@ struct omap2_mcspi_dma {
#define DMA_MIN_BYTES 160
+/*
+ * Used for context save and restore, structure members to be updated whenever
+ * corresponding registers are modified.
+ */
+struct omap2_mcspi_regs {
+ u32 modulctrl;
+ u32 wakeupenable;
+ struct list_head cs;
+};
+
struct omap2_mcspi {
- struct work_struct work;
- /* lock protects queue and registers */
- spinlock_t lock;
- struct list_head msg_queue;
struct spi_master *master;
/* Virtual base address of the controller */
void __iomem *base;
unsigned long phys;
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
- struct device *dev;
- struct workqueue_struct *wq;
+ struct device *dev;
+ struct omap2_mcspi_regs ctx;
};
struct omap2_mcspi_cs {
@@ -135,17 +139,6 @@ struct omap2_mcspi_cs {
u32 chconf0;
};
-/* used for context save and restore, structure members to be updated whenever
- * corresponding registers are modified.
- */
-struct omap2_mcspi_regs {
- u32 modulctrl;
- u32 wakeupenable;
- struct list_head cs;
-};
-
-static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
-
#define MOD_REG_BIT(val, mask, set) do { \
if (set) \
val |= mask; \
@@ -236,9 +229,12 @@ static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
u32 l;
- /* setup when switching from (reset default) slave mode
+ /*
+ * Setup when switching from (reset default) slave mode
* to single-channel master mode
*/
l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
@@ -247,29 +243,26 @@ static void omap2_mcspi_set_master_mode(struct spi_master *master)
MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
- omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
+ ctx->modulctrl = l;
}
static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
- struct spi_master *spi_cntrl;
- struct omap2_mcspi_cs *cs;
- spi_cntrl = mcspi->master;
+ struct spi_master *spi_cntrl = mcspi->master;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
/* McSPI: context restore */
- mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
- omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);
-
- mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
- omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
- list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
- node)
+ list_for_each_entry(cs, &ctx->cs, node)
__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
- pm_runtime_put_sync(mcspi->dev);
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
}
static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
@@ -277,6 +270,23 @@ static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
return pm_runtime_get_sync(mcspi->dev);
}
+static int omap2_prepare_transfer(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(mcspi->dev);
+ return 0;
+}
+
+static int omap2_unprepare_transfer(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+ return 0;
+}
+
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
unsigned long timeout;
@@ -777,7 +787,8 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
static int omap2_mcspi_setup(struct spi_device *spi)
{
int ret;
- struct omap2_mcspi *mcspi;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs = spi->controller_state;
@@ -787,7 +798,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
return -EINVAL;
}
- mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
if (!cs) {
@@ -799,8 +809,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
cs->chconf0 = 0;
spi->controller_state = cs;
/* Link this to context save list */
- list_add_tail(&cs->node,
- &omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs);
+ list_add_tail(&cs->node, &ctx->cs);
}
if (mcspi_dma->dma_rx_channel == -1
@@ -833,7 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
cs = spi->controller_state;
list_del(&cs->node);
- kfree(spi->controller_state);
+ kfree(cs);
}
if (spi->chip_select < spi->master->num_chipselect) {
@@ -850,144 +859,122 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
}
}
-static void omap2_mcspi_work(struct work_struct *work)
+static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
{
- struct omap2_mcspi *mcspi;
-
- mcspi = container_of(work, struct omap2_mcspi, work);
-
- if (omap2_mcspi_enable_clocks(mcspi) < 0)
- return;
-
- spin_lock_irq(&mcspi->lock);
/* We only enable one channel at a time -- the one whose message is
- * at the head of the queue -- although this controller would gladly
+	 * being handled -- although this controller would gladly
* arbitrate among multiple channels. This corresponds to "single
* channel" master mode. As a side effect, we need to manage the
* chipselect with the FORCE bit ... CS != channel enable.
*/
- while (!list_empty(&mcspi->msg_queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- int cs_active = 0;
- struct omap2_mcspi_cs *cs;
- struct omap2_mcspi_device_config *cd;
- int par_override = 0;
- int status = 0;
- u32 chconf;
-
- m = container_of(mcspi->msg_queue.next, struct spi_message,
- queue);
-
- list_del_init(&m->queue);
- spin_unlock_irq(&mcspi->lock);
-
- spi = m->spi;
- cs = spi->controller_state;
- cd = spi->controller_data;
- omap2_mcspi_set_enable(spi, 1);
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
- status = -EINVAL;
- break;
- }
- if (par_override || t->speed_hz || t->bits_per_word) {
- par_override = 1;
- status = omap2_mcspi_setup_transfer(spi, t);
- if (status < 0)
- break;
- if (!t->speed_hz && !t->bits_per_word)
- par_override = 0;
- }
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ struct omap2_mcspi_cs *cs;
+ struct omap2_mcspi_device_config *cd;
+ int par_override = 0;
+ int status = 0;
+ u32 chconf;
- if (!cs_active) {
- omap2_mcspi_force_cs(spi, 1);
- cs_active = 1;
- }
+ spi = m->spi;
+ cs = spi->controller_state;
+ cd = spi->controller_data;
- chconf = mcspi_cached_chconf0(spi);
- chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
- chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+ omap2_mcspi_set_enable(spi, 1);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap2_mcspi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
- if (t->tx_buf == NULL)
- chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
- else if (t->rx_buf == NULL)
- chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
-
- if (cd && cd->turbo_mode && t->tx_buf == NULL) {
- /* Turbo mode is for more than one word */
- if (t->len > ((cs->word_len + 7) >> 3))
- chconf |= OMAP2_MCSPI_CHCONF_TURBO;
- }
+ if (!cs_active) {
+ omap2_mcspi_force_cs(spi, 1);
+ cs_active = 1;
+ }
- mcspi_write_chconf0(spi, chconf);
+ chconf = mcspi_cached_chconf0(spi);
+ chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+ chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
- if (t->len) {
- unsigned count;
+ if (t->tx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
+ else if (t->rx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
- /* RX_ONLY mode needs dummy data in TX reg */
- if (t->tx_buf == NULL)
- __raw_writel(0, cs->base
- + OMAP2_MCSPI_TX0);
+ if (cd && cd->turbo_mode && t->tx_buf == NULL) {
+ /* Turbo mode is for more than one word */
+ if (t->len > ((cs->word_len + 7) >> 3))
+ chconf |= OMAP2_MCSPI_CHCONF_TURBO;
+ }
- if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
- count = omap2_mcspi_txrx_dma(spi, t);
- else
- count = omap2_mcspi_txrx_pio(spi, t);
- m->actual_length += count;
+ mcspi_write_chconf0(spi, chconf);
- if (count != t->len) {
- status = -EIO;
- break;
- }
- }
+ if (t->len) {
+ unsigned count;
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ __raw_writel(0, cs->base
+ + OMAP2_MCSPI_TX0);
+
+ if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
+ m->actual_length += count;
- /* ignore the "leave it on after last xfer" hint */
- if (t->cs_change) {
- omap2_mcspi_force_cs(spi, 0);
- cs_active = 0;
+ if (count != t->len) {
+ status = -EIO;
+ break;
}
}
- /* Restore defaults if they were overriden */
- if (par_override) {
- par_override = 0;
- status = omap2_mcspi_setup_transfer(spi, NULL);
- }
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
- if (cs_active)
+ /* ignore the "leave it on after last xfer" hint */
+ if (t->cs_change) {
omap2_mcspi_force_cs(spi, 0);
+ cs_active = 0;
+ }
+ }
+	/* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap2_mcspi_setup_transfer(spi, NULL);
+ }
- omap2_mcspi_set_enable(spi, 0);
-
- m->status = status;
- m->complete(m->context);
+ if (cs_active)
+ omap2_mcspi_force_cs(spi, 0);
- spin_lock_irq(&mcspi->lock);
- }
+ omap2_mcspi_set_enable(spi, 0);
- spin_unlock_irq(&mcspi->lock);
+ m->status = status;
- omap2_mcspi_disable_clocks(mcspi);
}
-static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
+static int omap2_mcspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
{
struct omap2_mcspi *mcspi;
- unsigned long flags;
struct spi_transfer *t;
+ mcspi = spi_master_get_devdata(master);
m->actual_length = 0;
m->status = 0;
/* reject invalid messages and transfers */
- if (list_empty(&m->transfers) || !m->complete)
+ if (list_empty(&m->transfers))
return -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
const void *tx_buf = t->tx_buf;
@@ -999,7 +986,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
|| (t->bits_per_word &&
( t->bits_per_word < 4
|| t->bits_per_word > 32))) {
- dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+ dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
tx_buf ? "tx" : "",
@@ -1008,7 +995,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
return -EINVAL;
}
if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
- dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n",
+ dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
t->speed_hz,
OMAP2_MCSPI_MAX_FREQ >> 15);
return -EINVAL;
@@ -1018,51 +1005,46 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
continue;
if (tx_buf != NULL) {
- t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
+ t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
len, DMA_TO_DEVICE);
- if (dma_mapping_error(&spi->dev, t->tx_dma)) {
- dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+ if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
+ dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
'T', len);
return -EINVAL;
}
}
if (rx_buf != NULL) {
- t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
+ t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&spi->dev, t->rx_dma)) {
- dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+ if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
+ dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
'R', len);
if (tx_buf != NULL)
- dma_unmap_single(&spi->dev, t->tx_dma,
+ dma_unmap_single(mcspi->dev, t->tx_dma,
len, DMA_TO_DEVICE);
return -EINVAL;
}
}
}
- mcspi = spi_master_get_devdata(spi->master);
-
- spin_lock_irqsave(&mcspi->lock, flags);
- list_add_tail(&m->queue, &mcspi->msg_queue);
- queue_work(mcspi->wq, &mcspi->work);
- spin_unlock_irqrestore(&mcspi->lock, flags);
-
+ omap2_mcspi_work(mcspi, m);
+ spi_finalize_current_message(master);
return 0;
}
static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
- u32 tmp;
- int ret = 0;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ int ret = 0;
ret = omap2_mcspi_enable_clocks(mcspi);
if (ret < 0)
return ret;
- tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
- mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
- omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp;
+ mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
+ OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
omap2_mcspi_set_master_mode(master);
omap2_mcspi_disable_clocks(mcspi);
@@ -1102,14 +1084,13 @@ static const struct of_device_id omap_mcspi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
-static int __init omap2_mcspi_probe(struct platform_device *pdev)
+static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct omap2_mcspi_platform_config *pdata;
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
- char wq_name[20];
u32 regs_offset = 0;
static int bus_num = 1;
struct device_node *node = pdev->dev.of_node;
@@ -1125,7 +1106,9 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = omap2_mcspi_setup;
- master->transfer = omap2_mcspi_transfer;
+ master->prepare_transfer_hardware = omap2_prepare_transfer;
+ master->unprepare_transfer_hardware = omap2_unprepare_transfer;
+ master->transfer_one_message = omap2_mcspi_transfer_one_message;
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
@@ -1150,13 +1133,6 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
mcspi = spi_master_get_devdata(master);
mcspi->master = master;
- sprintf(wq_name, "omap2_mcspi/%d", master->bus_num);
- mcspi->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
- if (mcspi->wq == NULL) {
- status = -ENOMEM;
- goto free_master;
- }
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
@@ -1166,32 +1142,24 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
r->start += regs_offset;
r->end += regs_offset;
mcspi->phys = r->start;
- if (!request_mem_region(r->start, resource_size(r),
- dev_name(&pdev->dev))) {
- status = -EBUSY;
- goto free_master;
- }
- mcspi->base = ioremap(r->start, resource_size(r));
+ mcspi->base = devm_request_and_ioremap(&pdev->dev, r);
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
- goto release_region;
+ goto free_master;
}
mcspi->dev = &pdev->dev;
- INIT_WORK(&mcspi->work, omap2_mcspi_work);
- spin_lock_init(&mcspi->lock);
- INIT_LIST_HEAD(&mcspi->msg_queue);
- INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);
+ INIT_LIST_HEAD(&mcspi->ctx.cs);
mcspi->dma_channels = kcalloc(master->num_chipselect,
sizeof(struct omap2_mcspi_dma),
GFP_KERNEL);
if (mcspi->dma_channels == NULL)
- goto unmap_io;
+ goto free_master;
for (i = 0; i < master->num_chipselect; i++) {
char dma_ch_name[14];
@@ -1224,6 +1192,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
if (status < 0)
goto dma_chnl_free;
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
if (status || omap2_mcspi_master_setup(mcspi) < 0)
@@ -1241,23 +1211,17 @@ disable_pm:
pm_runtime_disable(&pdev->dev);
dma_chnl_free:
kfree(mcspi->dma_channels);
-unmap_io:
- iounmap(mcspi->base);
-release_region:
- release_mem_region(r->start, resource_size(r));
free_master:
kfree(master);
platform_set_drvdata(pdev, NULL);
return status;
}
-static int __exit omap2_mcspi_remove(struct platform_device *pdev)
+static int __devexit omap2_mcspi_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *dma_channels;
- struct resource *r;
- void __iomem *base;
master = dev_get_drvdata(&pdev->dev);
mcspi = spi_master_get_devdata(master);
@@ -1265,14 +1229,9 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
omap2_mcspi_disable_clocks(mcspi);
pm_runtime_disable(&pdev->dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
- base = mcspi->base;
spi_unregister_master(master);
- iounmap(base);
kfree(dma_channels);
- destroy_workqueue(mcspi->wq);
platform_set_drvdata(pdev, NULL);
return 0;
@@ -1291,13 +1250,12 @@ static int omap2_mcspi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
- struct omap2_mcspi_cs *cs;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
omap2_mcspi_enable_clocks(mcspi);
- list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs,
- node) {
+ list_for_each_entry(cs, &ctx->cs, node) {
if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
-
/*
			 * We need to toggle CS state for the OMAP to take this
			 * change into account.
@@ -1327,21 +1285,9 @@ static struct platform_driver omap2_mcspi_driver = {
.pm = &omap2_mcspi_pm_ops,
.of_match_table = omap_mcspi_of_match,
},
- .remove = __exit_p(omap2_mcspi_remove),
+ .probe = omap2_mcspi_probe,
+ .remove = __devexit_p(omap2_mcspi_remove),
};
-
-static int __init omap2_mcspi_init(void)
-{
- return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
-}
-subsys_initcall(omap2_mcspi_init);
-
-static void __exit omap2_mcspi_exit(void)
-{
- platform_driver_unregister(&omap2_mcspi_driver);
-
-}
-module_exit(omap2_mcspi_exit);
-
+module_platform_driver(omap2_mcspi_driver);
MODULE_LICENSE("GPL");
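Two things are worth calling out in the McSPI rework above: per-controller state (struct omap2_mcspi_regs inside struct omap2_mcspi) replaces the global context array indexed by bus number, and clock handling switches to runtime-PM autosuspend so the controller is only powered down after SPI_AUTOSUSPEND_TIMEOUT ms of inactivity. A hedged sketch of the autosuspend idiom used here; the example_ wrappers are illustrative, the pm_runtime_* calls are the ones in the hunks:

#include <linux/pm_runtime.h>

#define EXAMPLE_AUTOSUSPEND_TIMEOUT	2000	/* ms, as in the hunk above */

static void example_enable(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* resume if suspended, take a reference */
}

static void example_disable(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* restart the inactivity timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference; suspend after the delay */
}

static void example_setup(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, EXAMPLE_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(dev);
}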
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index e496f799b7a9..dfd04e91fa6d 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -16,8 +16,8 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
@@ -46,6 +46,7 @@ struct orion_spi {
unsigned int max_speed;
unsigned int min_speed;
struct orion_spi_info *spi_info;
+ struct clk *clk;
};
static struct workqueue_struct *orion_spi_wq;
@@ -104,7 +105,7 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
orion_spi = spi_master_get_devdata(spi->master);
- tclk_hz = orion_spi->spi_info->tclk;
+ tclk_hz = clk_get_rate(orion_spi->clk);
/*
* the supported rates are: 4,6,8...30
@@ -450,6 +451,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
struct orion_spi *spi;
struct resource *r;
struct orion_spi_info *spi_info;
+ unsigned long tclk_hz;
int status = 0;
spi_info = pdev->dev.platform_data;
@@ -476,19 +478,28 @@ static int __init orion_spi_probe(struct platform_device *pdev)
spi->master = master;
spi->spi_info = spi_info;
- spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4);
- spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30);
+ spi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ status = PTR_ERR(spi->clk);
+ goto out;
+ }
+
+ clk_prepare(spi->clk);
+ clk_enable(spi->clk);
+ tclk_hz = clk_get_rate(spi->clk);
+ spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
+ spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
- goto out;
+ goto out_rel_clk;
}
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
- goto out;
+ goto out_rel_clk;
}
spi->base = ioremap(r->start, SZ_1K);
@@ -508,7 +519,9 @@ static int __init orion_spi_probe(struct platform_device *pdev)
out_rel_mem:
release_mem_region(r->start, resource_size(r));
-
+out_rel_clk:
+ clk_disable_unprepare(spi->clk);
+ clk_put(spi->clk);
out:
spi_master_put(master);
return status;
@@ -526,6 +539,9 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
cancel_work_sync(&spi->work);
+ clk_disable_unprepare(spi->clk);
+ clk_put(spi->clk);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, resource_size(r));
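Instead of reading the SoC's TCLK rate from platform data, the Orion driver now asks the clock framework, which also means the clock must be prepared and enabled for the lifetime of the controller and released on the error and remove paths. A condensed sketch of the sequence added above (the example_ helpers are illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_attach_clock(struct platform_device *pdev, struct clk **out,
				unsigned long *tclk_hz)
{
	struct clk *clk;

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_prepare(clk);
	clk_enable(clk);
	*tclk_hz = clk_get_rate(clk);	/* baud-rate divisors derive from TCLK */

	*out = clk;
	return 0;
}

static void example_detach_clock(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* mirror of clk_prepare() + clk_enable() */
	clk_put(clk);
}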
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 98ec53285fc7..75ac9d48ef46 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -30,7 +30,6 @@
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/of_platform.h>
-#include <linux/of_spi.h>
#include <linux/of_gpio.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -467,9 +466,6 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
bbp->master->setup = spi_ppc4xx_setup;
bbp->master->cleanup = spi_ppc4xx_cleanup;
- /* Allocate bus num dynamically. */
- bbp->master->bus_num = -1;
-
/* the spi->mode bits understood by this driver: */
bbp->master->mode_bits =
SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 3fb44afe27b4..9f6ba34b172c 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -164,17 +164,7 @@ static struct pci_driver ce4100_spi_driver = {
.remove = __devexit_p(ce4100_spi_remove),
};
-static int __init ce4100_spi_init(void)
-{
- return pci_register_driver(&ce4100_spi_driver);
-}
-module_init(ce4100_spi_init);
-
-static void __exit ce4100_spi_exit(void)
-{
- pci_unregister_driver(&ce4100_spi_driver);
-}
-module_exit(ce4100_spi_exit);
+module_pci_driver(ce4100_spi_driver);
MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 354f170eab95..4894bde4bbff 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -31,7 +31,11 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
+#include <linux/spi/rspi.h>
#define RSPI_SPCR 0x00
#define RSPI_SSLP 0x01
@@ -141,6 +145,16 @@ struct rspi_data {
spinlock_t lock;
struct clk *clk;
unsigned char spsr;
+
+ /* for dmaengine */
+ struct sh_dmae_slave dma_tx;
+ struct sh_dmae_slave dma_rx;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ int irq;
+
+ unsigned dma_width_16bit:1;
+ unsigned dma_callbacked:1;
};
static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
@@ -265,11 +279,125 @@ static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
-static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static void rspi_dma_complete(void *arg)
+{
+ struct rspi_data *rspi = arg;
+
+ rspi->dma_callbacked = 1;
+ wake_up_interruptible(&rspi->wait);
+}
+
+static int rspi_dma_map_sg(struct scatterlist *sg, void *buf, unsigned len,
+ struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, buf, len);
+ sg_dma_len(sg) = len;
+ return dma_map_sg(chan->device->dev, sg, 1, dir);
+}
+
+static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ dma_unmap_sg(chan->device->dev, sg, 1, dir);
+}
+
+static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len)
+{
+ u16 *dst = buf;
+ const u8 *src = data;
+
+ while (len) {
+ *dst++ = (u16)(*src++);
+ len--;
+ }
+}
+
+static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
+{
+ u8 *dst = buf;
+ const u16 *src = data;
+
+ while (len) {
+ *dst++ = (u8)*src++;
+ len--;
+ }
+}
+
+static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
+{
+ struct scatterlist sg;
+ void *buf = NULL;
+ struct dma_async_tx_descriptor *desc;
+ unsigned len;
+ int ret = 0;
+
+ if (rspi->dma_width_16bit) {
+ /*
+ * If the DMAC bus width is 16 bits, the driver allocates a dummy
+ * buffer and converts the original data into the DMAC format
+ * as follows:
+ * original data: 1st byte, 2nd byte ...
+ * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
+ */
+ len = t->len * 2;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rspi_memory_to_8bit(buf, t->tx_buf, t->len);
+ } else {
+ len = t->len;
+ buf = (void *)t->tx_buf;
+ }
+
+ if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) {
+ ret = -EFAULT;
+ goto end_nomap;
+ }
+ desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ ret = -EIO;
+ goto end;
+ }
+
+ /*
+ * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
+ * called. So, this driver disables the IRQ during the DMA transfer.
+ */
+ disable_irq(rspi->irq);
+
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
+ rspi_enable_irq(rspi, SPCR_SPTIE);
+ rspi->dma_callbacked = 0;
+
+ desc->callback = rspi_dma_complete;
+ desc->callback_param = rspi;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(rspi->chan_tx);
+
+ ret = wait_event_interruptible_timeout(rspi->wait,
+ rspi->dma_callbacked, HZ);
+ if (ret > 0 && rspi->dma_callbacked)
+ ret = 0;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ rspi_disable_irq(rspi, SPCR_SPTIE);
+
+ enable_irq(rspi->irq);
+
+end:
+ rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
+end_nomap:
+ if (rspi->dma_width_16bit)
+ kfree(buf);
+
+ return ret;
+}
+
+static void rspi_receive_init(struct rspi_data *rspi)
{
- int remain = t->len;
- u8 *data;
unsigned char spsr;
spsr = rspi_read8(rspi, RSPI_SPSR);
@@ -278,6 +406,15 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
if (spsr & SPSR_OVRF)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
RSPI_SPCR);
+}
+
+static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int remain = t->len;
+ u8 *data;
+
+ rspi_receive_init(rspi);
data = (u8 *)t->rx_buf;
while (remain > 0) {
@@ -307,6 +444,120 @@ static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
return 0;
}
+static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
+{
+ struct scatterlist sg, sg_dummy;
+ void *dummy = NULL, *rx_buf = NULL;
+ struct dma_async_tx_descriptor *desc, *desc_dummy;
+ unsigned len;
+ int ret = 0;
+
+ if (rspi->dma_width_16bit) {
+ /*
+ * If the DMAC bus width is 16 bits, the driver allocates a dummy
+ * buffer and finally converts the DMAC data back into the actual
+ * data as follows:
+ * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
+ * actual data: 1st byte, 2nd byte ...
+ */
+ len = t->len * 2;
+ rx_buf = kmalloc(len, GFP_KERNEL);
+ if (!rx_buf)
+ return -ENOMEM;
+ } else {
+ len = t->len;
+ rx_buf = t->rx_buf;
+ }
+
+ /* prepare dummy transfer to generate SPI clocks */
+ dummy = kzalloc(len, GFP_KERNEL);
+ if (!dummy) {
+ ret = -ENOMEM;
+ goto end_nomap;
+ }
+ if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
+ DMA_TO_DEVICE)) {
+ ret = -EFAULT;
+ goto end_nomap;
+ }
+ desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
+ DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_dummy) {
+ ret = -EIO;
+ goto end_dummy_mapped;
+ }
+
+ /* prepare receive transfer */
+ if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
+ DMA_FROM_DEVICE)) {
+ ret = -EFAULT;
+ goto end_dummy_mapped;
+
+ }
+ desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ ret = -EIO;
+ goto end;
+ }
+
+ rspi_receive_init(rspi);
+
+ /*
+ * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
+ * called. So, this driver disables the IRQ during the DMA transfer.
+ */
+ disable_irq(rspi->irq);
+
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
+ rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
+ rspi->dma_callbacked = 0;
+
+ desc->callback = rspi_dma_complete;
+ desc->callback_param = rspi;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(rspi->chan_rx);
+
+ desc_dummy->callback = NULL; /* No callback */
+ dmaengine_submit(desc_dummy);
+ dma_async_issue_pending(rspi->chan_tx);
+
+ ret = wait_event_interruptible_timeout(rspi->wait,
+ rspi->dma_callbacked, HZ);
+ if (ret > 0 && rspi->dma_callbacked)
+ ret = 0;
+ else if (!ret)
+ ret = -ETIMEDOUT;
+ rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
+
+ enable_irq(rspi->irq);
+
+end:
+ rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
+end_dummy_mapped:
+ rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
+end_nomap:
+ if (rspi->dma_width_16bit) {
+ if (!ret)
+ rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
+ kfree(rx_buf);
+ }
+ kfree(dummy);
+
+ return ret;
+}
+
+static int rspi_is_dma(struct rspi_data *rspi, struct spi_transfer *t)
+{
+ if (t->tx_buf && rspi->chan_tx)
+ return 1;
+ /* If the module receives data by DMAC, it also needs TX DMAC */
+ if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
+ return 1;
+
+ return 0;
+}
+
static void rspi_work(struct work_struct *work)
{
struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
@@ -325,12 +576,18 @@ static void rspi_work(struct work_struct *work)
list_for_each_entry(t, &mesg->transfers, transfer_list) {
if (t->tx_buf) {
- ret = rspi_send_pio(rspi, mesg, t);
+ if (rspi_is_dma(rspi, t))
+ ret = rspi_send_dma(rspi, t);
+ else
+ ret = rspi_send_pio(rspi, mesg, t);
if (ret < 0)
goto error;
}
if (t->rx_buf) {
- ret = rspi_receive_pio(rspi, mesg, t);
+ if (rspi_is_dma(rspi, t))
+ ret = rspi_receive_dma(rspi, t);
+ else
+ ret = rspi_receive_pio(rspi, mesg, t);
if (ret < 0)
goto error;
}
@@ -406,11 +663,58 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
return ret;
}
+static bool rspi_filter(struct dma_chan *chan, void *filter_param)
+{
+ chan->private = filter_param;
+ return true;
+}
+
+static void __devinit rspi_request_dma(struct rspi_data *rspi,
+ struct platform_device *pdev)
+{
+ struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
+ dma_cap_mask_t mask;
+
+ if (!rspi_pd)
+ return;
+
+ rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
+
+ /* If the module receives data by DMAC, it also needs TX DMAC */
+ if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ rspi->dma_rx.slave_id = rspi_pd->dma_rx_id;
+ rspi->chan_rx = dma_request_channel(mask, rspi_filter,
+ &rspi->dma_rx);
+ if (rspi->chan_rx)
+ dev_info(&pdev->dev, "Use DMA when rx.\n");
+ }
+ if (rspi_pd->dma_tx_id) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ rspi->dma_tx.slave_id = rspi_pd->dma_tx_id;
+ rspi->chan_tx = dma_request_channel(mask, rspi_filter,
+ &rspi->dma_tx);
+ if (rspi->chan_tx)
+ dev_info(&pdev->dev, "Use DMA when tx\n");
+ }
+}
+
+static void __devexit rspi_release_dma(struct rspi_data *rspi)
+{
+ if (rspi->chan_tx)
+ dma_release_channel(rspi->chan_tx);
+ if (rspi->chan_rx)
+ dma_release_channel(rspi->chan_rx);
+}
+
static int __devexit rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
spi_unregister_master(rspi->master);
+ rspi_release_dma(rspi);
free_irq(platform_get_irq(pdev, 0), rspi);
clk_put(rspi->clk);
iounmap(rspi->addr);
@@ -483,6 +787,9 @@ static int __devinit rspi_probe(struct platform_device *pdev)
goto error3;
}
+ rspi->irq = irq;
+ rspi_request_dma(rspi, pdev);
+
ret = spi_register_master(master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
@@ -494,6 +801,7 @@ static int __devinit rspi_probe(struct platform_device *pdev)
return 0;
error4:
+ rspi_release_dma(rspi);
free_irq(irq, rspi);
error3:
clk_put(rspi->clk);
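
The new rspi DMA paths follow the standard dmaengine slave sequence: map a scatterlist, prepare a slave descriptor, attach a completion callback, submit, issue pending and wait. The driver-specific 8-to-16-bit widening is only needed when the DMAC bus is 16 bits wide. A compressed sketch of that sequence, assuming <linux/dmaengine.h>, <linux/dma-mapping.h>, <linux/scatterlist.h> and <linux/completion.h>; the foo_* names are hypothetical and error handling is abbreviated:

static void foo_dma_done(void *arg)
{
	complete(arg);			/* a struct completion supplied by the caller */
}

static int foo_dma_tx(struct dma_chan *chan, void *buf, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	int ret = 0;

	sg_init_one(&sg, buf, len);
	/* dma_map_sg() takes a dma_data_direction (DMA_TO_DEVICE)... */
	if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE))
		return -EFAULT;

	/* ...while the slave prep call takes a dma_transfer_direction. */
	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto out;
	}

	desc->callback = foo_dma_done;
	desc->callback_param = &done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&done, HZ))
		ret = -ETIMEDOUT;
out:
	dma_unmap_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
	return ret;
}
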
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 52fe495bb32a..ecc3d9763d10 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -19,7 +19,7 @@
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
#define DRIVER_NAME "sirfsoc_spi"
@@ -127,7 +127,7 @@ struct sirfsoc_spi {
void __iomem *base;
u32 ctrl_freq; /* SPI controller clock speed */
struct clk *clk;
- struct pinmux *pmx;
+ struct pinctrl *p;
/* rx & tx bufs from the spi_transfer */
const void *tx;
@@ -560,17 +560,15 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
- sspi->pmx = pinmux_get(&pdev->dev, NULL);
- ret = IS_ERR(sspi->pmx);
+ sspi->p = pinctrl_get_select_default(&pdev->dev);
+ ret = IS_ERR(sspi->p);
if (ret)
goto free_master;
- pinmux_enable(sspi->pmx);
-
sspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(sspi->clk)) {
ret = -EINVAL;
- goto free_pmx;
+ goto free_pin;
}
clk_enable(sspi->clk);
sspi->ctrl_freq = clk_get_rate(sspi->clk);
@@ -598,9 +596,8 @@ static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
free_clk:
clk_disable(sspi->clk);
clk_put(sspi->clk);
-free_pmx:
- pinmux_disable(sspi->pmx);
- pinmux_put(sspi->pmx);
+free_pin:
+ pinctrl_put(sspi->p);
free_master:
spi_master_put(master);
err_cs:
@@ -623,8 +620,7 @@ static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
}
clk_disable(sspi->clk);
clk_put(sspi->clk);
- pinmux_disable(sspi->pmx);
- pinmux_put(sspi->pmx);
+ pinctrl_put(sspi->p);
spi_master_put(master);
return 0;
}
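
spi-sirf now uses the pinctrl consumer interface instead of the removed pinmux calls: pinctrl_get_select_default() both acquires the handle and applies the "default" state, so there is no separate enable/disable step. A minimal sketch, assuming <linux/pinctrl/consumer.h>:

	struct pinctrl *p;

	p = pinctrl_get_select_default(&pdev->dev);	/* get handle + apply "default" state */
	if (IS_ERR(p))
		return PTR_ERR(p);
	/* ... hardware usable; on teardown simply: */
	pinctrl_put(p);
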
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index ec47d3bdfd13..cd56dcf46320 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1438,7 +1438,6 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
plat_dev->id, data->io_remap_addr);
/* initialize members of SPI master */
- master->bus_num = -1;
master->num_chipselect = PCH_MAX_CS;
master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
@@ -1779,7 +1778,7 @@ static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
- .remove = pch_spi_remove,
+ .remove = __devexit_p(pch_spi_remove),
.suspend = pch_spi_suspend,
.resume = pch_spi_resume,
};
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3d8f662e4fe9..1041cb83d67a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2,6 +2,7 @@
* SPI init/core code
*
* Copyright (C) 2005 David Brownell
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,15 +20,16 @@
*/
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
-#include <linux/of_spi.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched.h>
@@ -530,7 +532,7 @@ static void spi_pump_messages(struct kthread_work *work)
/* Lock queue and check for queue work */
spin_lock_irqsave(&master->queue_lock, flags);
if (list_empty(&master->queue) || !master->running) {
- if (master->busy) {
+ if (master->busy && master->unprepare_transfer_hardware) {
ret = master->unprepare_transfer_hardware(master);
if (ret) {
spin_unlock_irqrestore(&master->queue_lock, flags);
@@ -560,7 +562,7 @@ static void spi_pump_messages(struct kthread_work *work)
master->busy = true;
spin_unlock_irqrestore(&master->queue_lock, flags);
- if (!was_busy) {
+ if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
if (ret) {
dev_err(&master->dev,
@@ -798,6 +800,94 @@ err_init_queue:
/*-------------------------------------------------------------------------*/
+#if defined(CONFIG_OF) && !defined(CONFIG_SPARC)
+/**
+ * of_register_spi_devices() - Register child devices onto the SPI bus
+ * @master: Pointer to spi_master device
+ *
+ * Registers an spi_device for each child node of master node which has a 'reg'
+ * property.
+ */
+static void of_register_spi_devices(struct spi_master *master)
+{
+ struct spi_device *spi;
+ struct device_node *nc;
+ const __be32 *prop;
+ int rc;
+ int len;
+
+ if (!master->dev.of_node)
+ return;
+
+ for_each_child_of_node(master->dev.of_node, nc) {
+ /* Alloc an spi_device */
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "spi_device alloc error for %s\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ continue;
+ }
+
+ /* Select device driver */
+ if (of_modalias_node(nc, spi->modalias,
+ sizeof(spi->modalias)) < 0) {
+ dev_err(&master->dev, "cannot find modalias for %s\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ continue;
+ }
+
+ /* Device address */
+ prop = of_get_property(nc, "reg", &len);
+ if (!prop || len < sizeof(*prop)) {
+ dev_err(&master->dev, "%s has no 'reg' property\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ continue;
+ }
+ spi->chip_select = be32_to_cpup(prop);
+
+ /* Mode (clock phase/polarity/etc.) */
+ if (of_find_property(nc, "spi-cpha", NULL))
+ spi->mode |= SPI_CPHA;
+ if (of_find_property(nc, "spi-cpol", NULL))
+ spi->mode |= SPI_CPOL;
+ if (of_find_property(nc, "spi-cs-high", NULL))
+ spi->mode |= SPI_CS_HIGH;
+
+ /* Device speed */
+ prop = of_get_property(nc, "spi-max-frequency", &len);
+ if (!prop || len < sizeof(*prop)) {
+ dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ continue;
+ }
+ spi->max_speed_hz = be32_to_cpup(prop);
+
+ /* IRQ */
+ spi->irq = irq_of_parse_and_map(nc, 0);
+
+ /* Store a pointer to the node in the device structure */
+ of_node_get(nc);
+ spi->dev.of_node = nc;
+
+ /* Register the new device */
+ request_module(spi->modalias);
+ rc = spi_add_device(spi);
+ if (rc) {
+ dev_err(&master->dev, "spi_device register error %s\n",
+ nc->full_name);
+ spi_dev_put(spi);
+ }
+
+ }
+}
+#else
+static void of_register_spi_devices(struct spi_master *master) { }
+#endif
+
static void spi_master_release(struct device *dev)
{
struct spi_master *master;
@@ -846,6 +936,8 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
return NULL;
device_initialize(&master->dev);
+ master->bus_num = -1;
+ master->num_chipselect = 1;
master->dev.class = &spi_master_class;
master->dev.parent = get_device(dev);
spi_master_set_devdata(master, &master[1]);
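
With of_register_spi_devices() now called by the SPI core and spi_alloc_master() presetting bus_num (-1, dynamic) and num_chipselect (1), a device-tree-aware controller only has to point master->dev.of_node at its node; the core then creates an spi_device for each child that carries 'reg' and 'spi-max-frequency' properties. A hypothetical minimal probe illustrating this (the foo_* names and private struct are made up):

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	/* bus_num == -1 (dynamic) and num_chipselect == 1 are already set;
	 * override only where the hardware differs. */
	master->num_chipselect = 4;
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->dev.of_node = pdev->dev.of_node;	/* DT children registered by the core */

	ret = spi_register_master(master);
	if (ret)
		spi_master_put(master);
	return ret;
}
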
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index bad7ba517a1c..f551e5376147 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -29,6 +29,8 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4322) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43222) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4325) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) },
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index ed4124469a3a..e9d94968f394 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -178,6 +178,18 @@ err_pci:
#define SPEX(_outvar, _offset, _mask, _shift) \
SPEX16(_outvar, _offset, _mask, _shift)
+#define SPEX_ARRAY8(_field, _offset, _mask, _shift) \
+ do { \
+ SPEX(_field[0], _offset + 0, _mask, _shift); \
+ SPEX(_field[1], _offset + 2, _mask, _shift); \
+ SPEX(_field[2], _offset + 4, _mask, _shift); \
+ SPEX(_field[3], _offset + 6, _mask, _shift); \
+ SPEX(_field[4], _offset + 8, _mask, _shift); \
+ SPEX(_field[5], _offset + 10, _mask, _shift); \
+ SPEX(_field[6], _offset + 12, _mask, _shift); \
+ SPEX(_field[7], _offset + 14, _mask, _shift); \
+ } while (0)
+
static inline u8 ssb_crc8(u8 crc, u8 data)
{
@@ -360,8 +372,9 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
SPEX(et0mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0M, 14);
SPEX(et1mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1M, 15);
SPEX(board_rev, SSB_SPROM1_BINF, SSB_SPROM1_BINF_BREV, 0);
- SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE,
- SSB_SPROM1_BINF_CCODE_SHIFT);
+ if (out->revision == 1)
+ SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE,
+ SSB_SPROM1_BINF_CCODE_SHIFT);
SPEX(ant_available_a, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTA,
SSB_SPROM1_BINF_ANTA_SHIFT);
SPEX(ant_available_bg, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTBG,
@@ -387,6 +400,8 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
SPEX(boardflags_lo, SSB_SPROM1_BFLLO, 0xFFFF, 0);
if (out->revision >= 2)
SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0);
+ SPEX(alpha2[0], SSB_SPROM1_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM1_CCODE, 0x00ff, 0);
/* Extract the antenna gain values. */
out->antenna_gain.a0 = r123_extract_antgain(out->revision, in,
@@ -455,14 +470,17 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
SSB_SPROM4_ETHPHY_ET1A_SHIFT);
+ SPEX(board_rev, SSB_SPROM4_BOARDREV, 0xFFFF, 0);
if (out->revision == 4) {
- SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
+ SPEX(alpha2[0], SSB_SPROM4_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM4_CCODE, 0x00ff, 0);
SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0);
SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0);
} else {
- SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
+ SPEX(alpha2[0], SSB_SPROM5_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM5_CCODE, 0x00ff, 0);
SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0);
@@ -525,7 +543,9 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
v = in[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
}
- SPEX(country_code, SSB_SPROM8_CCODE, 0xFFFF, 0);
+ SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0);
+ SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
+ SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
SPEX(boardflags_lo, SSB_SPROM8_BFLLO, 0xFFFF, 0);
SPEX(boardflags_hi, SSB_SPROM8_BFLHI, 0xFFFF, 0);
SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, 0xFFFF, 0);
@@ -655,6 +675,63 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+ SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
+ SSB_SPROM8_LEDDC_ON_SHIFT);
+ SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
+ SSB_SPROM8_LEDDC_OFF_SHIFT);
+
+ SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
+ SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
+ SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
+ SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
+ SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
+ SSB_SPROM8_TXRXC_SWITCH_SHIFT);
+
+ SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);
+
+ SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
+ SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);
+
+ SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
+ SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
+ SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
+ SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
+ SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
+ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
+ SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
+ SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
+ SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
+ SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
+ SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
+ SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
+ SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
+ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
+ SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
+ SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
+ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
+ SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
+ SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
+ SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);
+
+ SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
+ SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
+ SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
+ SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);
+
+ SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
+ SSB_SPROM8_THERMAL_TRESH_SHIFT);
+ SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
+ SSB_SPROM8_THERMAL_OFFSET_SHIFT);
+ SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
+ SSB_SPROM8_TEMPDELTA_PHYCAL,
+ SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
+ SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
+ SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
+ SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
+ SSB_SPROM8_TEMPDELTA_HYSTERESIS,
+ SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
sprom_extract_r458(out, in);
/* TODO - get remaining rev 8 stuff needed */
@@ -784,7 +861,6 @@ static void ssb_pci_get_boardinfo(struct ssb_bus *bus,
{
bi->vendor = bus->host_pci->subsystem_vendor;
bi->type = bus->host_pci->subsystem_device;
- bi->rev = bus->host_pci->revision;
}
int ssb_pci_get_invariants(struct ssb_bus *bus,
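
The SPEX_ARRAY8() helper added above is purely mechanical: it reads eight consecutive 16-bit SPROM words into an array. For example, SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0) expands to:

	SPEX(mcs2gpo[0], SSB_SPROM8_2G_MCSPO + 0,  ~0, 0);
	SPEX(mcs2gpo[1], SSB_SPROM8_2G_MCSPO + 2,  ~0, 0);
	/* ... */
	SPEX(mcs2gpo[7], SSB_SPROM8_2G_MCSPO + 14, ~0, 0);
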
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 9f1f27e7c86e..e84dbecd0991 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
@@ -269,7 +270,7 @@ out:
return ret;
}
-static inline unsigned long calc_vm_may_flags(unsigned long prot)
+static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
_calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
@@ -363,11 +364,12 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
- struct inode *inode = range->asma->file->f_dentry->d_inode;
loff_t start = range->pgstart * PAGE_SIZE;
- loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+ loff_t end = (range->pgend + 1) * PAGE_SIZE;
- vmtruncate_range(inode, start, end);
+ do_fallocate(range->asma->file,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, end - start);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
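
The ashmem shrinker now punches holes instead of truncating: FALLOC_FL_PUNCH_HOLE releases the pages backing the range while FALLOC_FL_KEEP_SIZE leaves the file size untouched. The userspace analogue of the in-kernel do_fallocate() call, as a sketch (fd, start and end are placeholders):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>

	/* drop the pages backing [start, end) without shrinking the file */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      start, end - start) < 0)
		perror("fallocate");
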
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 1c3d6386ea36..aeac1caba3f9 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -30,6 +30,7 @@
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/errno.h>
+#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
@@ -981,6 +982,8 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
}
EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
+#if IS_ENABLED(CONFIG_USB)
+
static int comedi_old_usb_auto_config(struct usb_interface *intf,
struct comedi_driver *driver)
{
@@ -1043,3 +1046,5 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+#endif
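
IS_ENABLED() comes from the newly included <linux/kconfig.h> and is true when an option is built in or modular, so the guard above compiles the USB glue whenever any USB support exists:

	#if IS_ENABLED(CONFIG_USB)	/* CONFIG_USB=y or CONFIG_USB=m */
	/* comedi USB auto-config helpers */
	#endif
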
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 292af0f7f451..51665132c61b 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -104,7 +104,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
void netlink_exit(struct sock *sock)
{
- sock_release(sock->sk_socket);
+ netlink_kernel_release(sock);
}
int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 0338c7cd0a8b..f03fbd3bb454 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -29,8 +29,6 @@ Then fill in the following:
* info->driver_module:
Set to THIS_MODULE. Used to ensure correct ownership
of various resources allocate by the core.
- * info->num_interrupt_lines:
- Number of event triggering hardware lines the device has.
* info->event_attrs:
Attributes used to enable / disable hardware events.
* info->attrs:
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 2490dd25093b..8f1b3af02f29 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -13,6 +13,7 @@ config AD7291
config AD7298
tristate "Analog Devices AD7298 ADC driver"
depends on SPI
+ select IIO_KFIFO_BUF if IIO_BUFFER
help
Say yes here to build support for Analog Devices AD7298
8 Channel ADC with temperature sensor.
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 10ab6dc823b9..a13afff2dfe6 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -235,7 +235,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
.indexed = 1, \
.channel = num, \
.address = num, \
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.scan_index = num, \
.scan_type = IIO_ST('s', 16, 16, 0), \
}
diff --git a/drivers/staging/media/as102/as10x_cmd.c b/drivers/staging/media/as102/as10x_cmd.c
index 262bb94ad27e..a73df10982d0 100644
--- a/drivers/staging/media/as102/as10x_cmd.c
+++ b/drivers/staging/media/as102/as10x_cmd.c
@@ -31,7 +31,7 @@
*/
int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -54,8 +54,6 @@ int as10x_cmd_turn_on(struct as10x_bus_adapter_t *adap)
(uint8_t *) prsp,
sizeof(prsp->body.turn_on.rsp) +
HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -77,7 +75,7 @@ out:
*/
int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -99,8 +97,6 @@ int as10x_cmd_turn_off(struct as10x_bus_adapter_t *adap)
sizeof(pcmd->body.turn_off.req) + HEADER_SIZE,
(uint8_t *) prsp,
sizeof(prsp->body.turn_off.rsp) + HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -124,7 +120,7 @@ out:
int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
struct as10x_tune_args *ptune)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *preq, *prsp;
ENTER();
@@ -159,8 +155,6 @@ int as10x_cmd_set_tune(struct as10x_bus_adapter_t *adap,
(uint8_t *) prsp,
sizeof(prsp->body.set_tune.rsp)
+ HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -184,7 +178,7 @@ out:
int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
struct as10x_tune_status *pstatus)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *preq, *prsp;
ENTER();
@@ -208,8 +202,6 @@ int as10x_cmd_get_tune_status(struct as10x_bus_adapter_t *adap,
sizeof(preq->body.get_tune_status.req) + HEADER_SIZE,
(uint8_t *) prsp,
sizeof(prsp->body.get_tune_status.rsp) + HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -241,7 +233,7 @@ out:
*/
int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -266,8 +258,6 @@ int as10x_cmd_get_tps(struct as10x_bus_adapter_t *adap, struct as10x_tps *ptps)
(uint8_t *) prsp,
sizeof(prsp->body.get_tps.rsp) +
HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -305,7 +295,7 @@ out:
int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
struct as10x_demod_stats *pdemod_stats)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -330,8 +320,6 @@ int as10x_cmd_get_demod_stats(struct as10x_bus_adapter_t *adap,
(uint8_t *) prsp,
sizeof(prsp->body.get_demod_stats.rsp)
+ HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
@@ -370,7 +358,7 @@ out:
int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
uint8_t *is_ready)
{
- int error;
+ int error = AS10X_CMD_ERROR;
struct as10x_cmd_t *pcmd, *prsp;
ENTER();
@@ -395,8 +383,6 @@ int as10x_cmd_get_impulse_resp(struct as10x_bus_adapter_t *adap,
(uint8_t *) prsp,
sizeof(prsp->body.get_impulse_rsp.rsp)
+ HEADER_SIZE);
- } else {
- error = AS10X_CMD_ERROR;
}
if (error < 0)
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index 280c84ec4cc2..c365cdf714ea 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -898,6 +898,10 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&pd->dmaq);
mutex_init(&pd->mux);
pd->vdev->lock = &pd->mux; /* for locking v4l2_file_operations */
+ /* Locking in file operations other than ioctl should be done
+ by the driver, not the V4L2 core.
+ This driver needs auditing so that this flag can be removed. */
+ set_bit(V4L2_FL_LOCK_ALL_FOPS, &pd->vdev->flags);
spin_lock_init(&pd->lock);
pd->csr2 = csr2_init;
pd->config = config_init;
diff --git a/drivers/staging/media/easycap/easycap_main.c b/drivers/staging/media/easycap/easycap_main.c
index 6f83d362ab0d..a1c45e4dcdce 100644
--- a/drivers/staging/media/easycap/easycap_main.c
+++ b/drivers/staging/media/easycap/easycap_main.c
@@ -700,214 +700,7 @@ static int videodev_release(struct video_device *pvideo_device)
JOM(4, "ending successfully\n");
return 0;
}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-/*****************************************************************************/
-/*--------------------------------------------------------------------------*/
-/*
- * THIS FUNCTION IS CALLED FROM WITHIN easycap_usb_disconnect() AND IS
- * PROTECTED BY SEMAPHORES SET AND CLEARED BY easycap_usb_disconnect().
- *
- * BY THIS STAGE THE DEVICE HAS ALREADY BEEN PHYSICALLY UNPLUGGED, SO
- * peasycap->pusb_device IS NO LONGER VALID.
- */
-/*---------------------------------------------------------------------------*/
-static void easycap_delete(struct kref *pkref)
-{
- struct easycap *peasycap;
- struct data_urb *pdata_urb;
- struct list_head *plist_head, *plist_next;
- int k, m, gone, kd;
- int allocation_video_urb;
- int allocation_video_page;
- int allocation_video_struct;
- int allocation_audio_urb;
- int allocation_audio_page;
- int allocation_audio_struct;
- int registered_video, registered_audio;
-
- peasycap = container_of(pkref, struct easycap, kref);
- if (!peasycap) {
- SAM("ERROR: peasycap is NULL: cannot perform deletions\n");
- return;
- }
- kd = easycap_isdongle(peasycap);
-/*---------------------------------------------------------------------------*/
-/*
- * FREE VIDEO.
- */
-/*---------------------------------------------------------------------------*/
- if (peasycap->purb_video_head) {
- m = 0;
- list_for_each(plist_head, peasycap->purb_video_head) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb && pdata_urb->purb) {
- usb_free_urb(pdata_urb->purb);
- pdata_urb->purb = NULL;
- peasycap->allocation_video_urb--;
- m++;
- }
- }
-
- JOM(4, "%i video urbs freed\n", m);
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing video data_urb structures.\n");
- m = 0;
- list_for_each_safe(plist_head, plist_next,
- peasycap->purb_video_head) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb) {
- peasycap->allocation_video_struct -=
- sizeof(struct data_urb);
- kfree(pdata_urb);
- m++;
- }
- }
- JOM(4, "%i video data_urb structures freed\n", m);
- JOM(4, "setting peasycap->purb_video_head=NULL\n");
- peasycap->purb_video_head = NULL;
- }
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing video isoc buffers.\n");
- m = 0;
- for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
- if (peasycap->video_isoc_buffer[k].pgo) {
- free_pages((unsigned long)
- peasycap->video_isoc_buffer[k].pgo,
- VIDEO_ISOC_ORDER);
- peasycap->video_isoc_buffer[k].pgo = NULL;
- peasycap->allocation_video_page -=
- BIT(VIDEO_ISOC_ORDER);
- m++;
- }
- }
- JOM(4, "isoc video buffers freed: %i pages\n",
- m * (0x01 << VIDEO_ISOC_ORDER));
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing video field buffers.\n");
- gone = 0;
- for (k = 0; k < FIELD_BUFFER_MANY; k++) {
- for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
- if (peasycap->field_buffer[k][m].pgo) {
- free_page((unsigned long)
- peasycap->field_buffer[k][m].pgo);
- peasycap->field_buffer[k][m].pgo = NULL;
- peasycap->allocation_video_page -= 1;
- gone++;
- }
- }
- }
- JOM(4, "video field buffers freed: %i pages\n", gone);
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing video frame buffers.\n");
- gone = 0;
- for (k = 0; k < FRAME_BUFFER_MANY; k++) {
- for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
- if (peasycap->frame_buffer[k][m].pgo) {
- free_page((unsigned long)
- peasycap->frame_buffer[k][m].pgo);
- peasycap->frame_buffer[k][m].pgo = NULL;
- peasycap->allocation_video_page -= 1;
- gone++;
- }
- }
- }
- JOM(4, "video frame buffers freed: %i pages\n", gone);
-/*---------------------------------------------------------------------------*/
-/*
- * FREE AUDIO.
- */
-/*---------------------------------------------------------------------------*/
- if (peasycap->purb_audio_head) {
- JOM(4, "freeing audio urbs\n");
- m = 0;
- list_for_each(plist_head, (peasycap->purb_audio_head)) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb && pdata_urb->purb) {
- usb_free_urb(pdata_urb->purb);
- pdata_urb->purb = NULL;
- peasycap->allocation_audio_urb--;
- m++;
- }
- }
- JOM(4, "%i audio urbs freed\n", m);
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing audio data_urb structures.\n");
- m = 0;
- list_for_each_safe(plist_head, plist_next,
- peasycap->purb_audio_head) {
- pdata_urb = list_entry(plist_head,
- struct data_urb, list_head);
- if (pdata_urb) {
- peasycap->allocation_audio_struct -=
- sizeof(struct data_urb);
- kfree(pdata_urb);
- m++;
- }
- }
- JOM(4, "%i audio data_urb structures freed\n", m);
- JOM(4, "setting peasycap->purb_audio_head=NULL\n");
- peasycap->purb_audio_head = NULL;
- }
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing audio isoc buffers.\n");
- m = 0;
- for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
- if (peasycap->audio_isoc_buffer[k].pgo) {
- free_pages((unsigned long)
- (peasycap->audio_isoc_buffer[k].pgo),
- AUDIO_ISOC_ORDER);
- peasycap->audio_isoc_buffer[k].pgo = NULL;
- peasycap->allocation_audio_page -=
- BIT(AUDIO_ISOC_ORDER);
- m++;
- }
- }
- JOM(4, "easyoss_delete(): isoc audio buffers freed: %i pages\n",
- m * (0x01 << AUDIO_ISOC_ORDER));
-/*---------------------------------------------------------------------------*/
- JOM(4, "freeing easycap structure.\n");
- allocation_video_urb = peasycap->allocation_video_urb;
- allocation_video_page = peasycap->allocation_video_page;
- allocation_video_struct = peasycap->allocation_video_struct;
- registered_video = peasycap->registered_video;
- allocation_audio_urb = peasycap->allocation_audio_urb;
- allocation_audio_page = peasycap->allocation_audio_page;
- allocation_audio_struct = peasycap->allocation_audio_struct;
- registered_audio = peasycap->registered_audio;
-
- if (0 <= kd && DONGLE_MANY > kd) {
- if (mutex_lock_interruptible(&mutex_dongle)) {
- SAY("ERROR: cannot down mutex_dongle\n");
- } else {
- JOM(4, "locked mutex_dongle\n");
- easycapdc60_dongle[kd].peasycap = NULL;
- mutex_unlock(&mutex_dongle);
- JOM(4, "unlocked mutex_dongle\n");
- JOT(4, " null-->dongle[%i].peasycap\n", kd);
- allocation_video_struct -= sizeof(struct easycap);
- }
- } else {
- SAY("ERROR: cannot purge dongle[].peasycap");
- }
- kfree(peasycap);
-
-/*---------------------------------------------------------------------------*/
- SAY("%8i=video urbs after all deletions\n", allocation_video_urb);
- SAY("%8i=video pages after all deletions\n", allocation_video_page);
- SAY("%8i=video structs after all deletions\n", allocation_video_struct);
- SAY("%8i=video devices after all deletions\n", registered_video);
- SAY("%8i=audio urbs after all deletions\n", allocation_audio_urb);
- SAY("%8i=audio pages after all deletions\n", allocation_audio_page);
- SAY("%8i=audio structs after all deletions\n", allocation_audio_struct);
- SAY("%8i=audio devices after all deletions\n", registered_audio);
-
- JOT(4, "ending.\n");
- return;
-}
/*****************************************************************************/
static unsigned int easycap_poll(struct file *file, poll_table *wait)
{
@@ -2842,6 +2635,813 @@ static void easycap_complete(struct urb *purb)
return;
}
+static struct easycap *alloc_easycap(u8 bInterfaceNumber)
+{
+ struct easycap *peasycap;
+ int i;
+
+ peasycap = kzalloc(sizeof(struct easycap), GFP_KERNEL);
+ if (!peasycap) {
+ SAY("ERROR: Could not allocate peasycap\n");
+ return NULL;
+ }
+
+ if (mutex_lock_interruptible(&mutex_dongle)) {
+ SAY("ERROR: cannot lock mutex_dongle\n");
+ kfree(peasycap);
+ return NULL;
+ }
+
+ /* Find a free dongle in easycapdc60_dongle array */
+ for (i = 0; i < DONGLE_MANY; i++) {
+
+ if ((!easycapdc60_dongle[i].peasycap) &&
+ (!mutex_is_locked(&easycapdc60_dongle[i].mutex_video)) &&
+ (!mutex_is_locked(&easycapdc60_dongle[i].mutex_audio))) {
+
+ easycapdc60_dongle[i].peasycap = peasycap;
+ peasycap->isdongle = i;
+ JOM(8, "intf[%i]: peasycap-->easycap"
+ "_dongle[%i].peasycap\n",
+ bInterfaceNumber, i);
+ break;
+ }
+ }
+
+ mutex_unlock(&mutex_dongle);
+
+ if (i >= DONGLE_MANY) {
+ SAM("ERROR: too many dongles\n");
+ kfree(peasycap);
+ return NULL;
+ }
+
+ return peasycap;
+}
+
+static void free_easycap(struct easycap *peasycap)
+{
+ int allocation_video_urb;
+ int allocation_video_page;
+ int allocation_video_struct;
+ int allocation_audio_urb;
+ int allocation_audio_page;
+ int allocation_audio_struct;
+ int registered_video, registered_audio;
+ int kd;
+
+ JOM(4, "freeing easycap structure.\n");
+ allocation_video_urb = peasycap->allocation_video_urb;
+ allocation_video_page = peasycap->allocation_video_page;
+ allocation_video_struct = peasycap->allocation_video_struct;
+ registered_video = peasycap->registered_video;
+ allocation_audio_urb = peasycap->allocation_audio_urb;
+ allocation_audio_page = peasycap->allocation_audio_page;
+ allocation_audio_struct = peasycap->allocation_audio_struct;
+ registered_audio = peasycap->registered_audio;
+
+ kd = easycap_isdongle(peasycap);
+ if (0 <= kd && DONGLE_MANY > kd) {
+ if (mutex_lock_interruptible(&mutex_dongle)) {
+ SAY("ERROR: cannot down mutex_dongle\n");
+ } else {
+ JOM(4, "locked mutex_dongle\n");
+ easycapdc60_dongle[kd].peasycap = NULL;
+ mutex_unlock(&mutex_dongle);
+ JOM(4, "unlocked mutex_dongle\n");
+ JOT(4, " null-->dongle[%i].peasycap\n", kd);
+ allocation_video_struct -= sizeof(struct easycap);
+ }
+ } else {
+ SAY("ERROR: cannot purge dongle[].peasycap");
+ }
+
+ /* Free device structure */
+ kfree(peasycap);
+
+ SAY("%8i=video urbs after all deletions\n", allocation_video_urb);
+ SAY("%8i=video pages after all deletions\n", allocation_video_page);
+ SAY("%8i=video structs after all deletions\n", allocation_video_struct);
+ SAY("%8i=video devices after all deletions\n", registered_video);
+ SAY("%8i=audio urbs after all deletions\n", allocation_audio_urb);
+ SAY("%8i=audio pages after all deletions\n", allocation_audio_page);
+ SAY("%8i=audio structs after all deletions\n", allocation_audio_struct);
+ SAY("%8i=audio devices after all deletions\n", registered_audio);
+}
+
+/*
+ * FIXME: Identify the appropriate pointer peasycap for interfaces
+ * 1 and 2. The address of peasycap->pusb_device is reluctantly used
+ * for this purpose.
+ */
+static struct easycap *get_easycap(struct usb_device *usbdev,
+ u8 bInterfaceNumber)
+{
+ int i;
+ struct easycap *peasycap;
+
+ for (i = 0; i < DONGLE_MANY; i++) {
+ if (easycapdc60_dongle[i].peasycap->pusb_device == usbdev) {
+ peasycap = easycapdc60_dongle[i].peasycap;
+ JOT(8, "intf[%i]: dongle[%i].peasycap\n",
+ bInterfaceNumber, i);
+ break;
+ }
+ }
+ if (i >= DONGLE_MANY) {
+ SAY("ERROR: peasycap is unknown when probing interface %i\n",
+ bInterfaceNumber);
+ return NULL;
+ }
+ if (!peasycap) {
+ SAY("ERROR: peasycap is NULL when probing interface %i\n",
+ bInterfaceNumber);
+ return NULL;
+ }
+
+ return peasycap;
+}
+
+static void init_easycap(struct easycap *peasycap,
+ struct usb_device *usbdev,
+ struct usb_interface *intf,
+ u8 bInterfaceNumber)
+{
+ /* Save usb_device and usb_interface */
+ peasycap->pusb_device = usbdev;
+ peasycap->pusb_interface = intf;
+
+ peasycap->minor = -1;
+ kref_init(&peasycap->kref);
+ JOM(8, "intf[%i]: after kref_init(..._video) "
+ "%i=peasycap->kref.refcount.counter\n",
+ bInterfaceNumber, peasycap->kref.refcount.counter);
+
+ /* module params */
+ peasycap->gain = (s8)clamp(easycap_gain, 0, 31);
+
+ init_waitqueue_head(&peasycap->wq_video);
+ init_waitqueue_head(&peasycap->wq_audio);
+ init_waitqueue_head(&peasycap->wq_trigger);
+
+ peasycap->allocation_video_struct = sizeof(struct easycap);
+
+ peasycap->microphone = false;
+
+ peasycap->video_interface = -1;
+ peasycap->video_altsetting_on = -1;
+ peasycap->video_altsetting_off = -1;
+ peasycap->video_endpointnumber = -1;
+ peasycap->video_isoc_maxframesize = -1;
+ peasycap->video_isoc_buffer_size = -1;
+
+ peasycap->audio_interface = -1;
+ peasycap->audio_altsetting_on = -1;
+ peasycap->audio_altsetting_off = -1;
+ peasycap->audio_endpointnumber = -1;
+ peasycap->audio_isoc_maxframesize = -1;
+ peasycap->audio_isoc_buffer_size = -1;
+
+ peasycap->frame_buffer_many = FRAME_BUFFER_MANY;
+
+ peasycap->ntsc = easycap_ntsc;
+ JOM(8, "defaulting initially to %s\n",
+ easycap_ntsc ? "NTSC" : "PAL");
+}
+
+static int populate_inputset(struct easycap *peasycap)
+{
+ struct inputset *inputset;
+ struct easycap_format *peasycap_format;
+ struct v4l2_pix_format *pix;
+ int m, i, k, mask, fmtidx;
+ s32 value;
+
+ inputset = peasycap->inputset;
+
+ fmtidx = peasycap->ntsc ? NTSC_M : PAL_BGHIN;
+
+ m = 0;
+ mask = 0;
+ for (i = 0; easycap_standard[i].mask != 0xffff; i++) {
+ if (fmtidx == easycap_standard[i].v4l2_standard.index) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].standard_offset = i;
+ mask = easycap_standard[i].mask;
+ }
+ }
+
+ if (m != 1) {
+ SAM("ERROR: inputset->standard_offset unpopulated, %i=m\n", m);
+ return -ENOENT;
+ }
+
+ peasycap_format = &easycap_format[0];
+ m = 0;
+ for (i = 0; peasycap_format->v4l2_format.fmt.pix.width; i++) {
+ pix = &peasycap_format->v4l2_format.fmt.pix;
+ if (((peasycap_format->mask & 0x0F) == (mask & 0x0F))
+ && pix->field == V4L2_FIELD_NONE
+ && pix->pixelformat == V4L2_PIX_FMT_UYVY
+ && pix->width == 640 && pix->height == 480) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].format_offset = i;
+ break;
+ }
+ peasycap_format++;
+ }
+ if (m != 1) {
+ SAM("ERROR: inputset[]->format_offset unpopulated\n");
+ return -ENOENT;
+ }
+
+ m = 0;
+ for (i = 0; easycap_control[i].id != 0xffffffff; i++) {
+ value = easycap_control[i].default_value;
+ if (V4L2_CID_BRIGHTNESS == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].brightness = value;
+ } else if (V4L2_CID_CONTRAST == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].contrast = value;
+ } else if (V4L2_CID_SATURATION == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].saturation = value;
+ } else if (V4L2_CID_HUE == easycap_control[i].id) {
+ m++;
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].hue = value;
+ }
+ }
+
+ if (m != 4) {
+ SAM("ERROR: inputset[]->brightness underpopulated\n");
+ return -ENOENT;
+ }
+
+ for (k = 0; k < INPUT_MANY; k++)
+ inputset[k].input = k;
+ JOM(4, "populated inputset[]\n");
+
+ return 0;
+}
+
+static int alloc_framebuffers(struct easycap *peasycap)
+{
+ int i, j;
+ void *pbuf;
+
+ JOM(4, "allocating %i frame buffers of size %li\n",
+ FRAME_BUFFER_MANY, (long int)FRAME_BUFFER_SIZE);
+ JOM(4, ".... each scattered over %li pages\n",
+ FRAME_BUFFER_SIZE/PAGE_SIZE);
+
+ for (i = 0; i < FRAME_BUFFER_MANY; i++) {
+ for (j = 0; j < FRAME_BUFFER_SIZE/PAGE_SIZE; j++) {
+ if (peasycap->frame_buffer[i][j].pgo)
+ SAM("attempting to reallocate framebuffers\n");
+ else {
+ pbuf = (void *)__get_free_page(GFP_KERNEL);
+ if (!pbuf) {
+ SAM("ERROR: Could not allocate "
+ "framebuffer %i page %i\n", i, j);
+ return -ENOMEM;
+ }
+ peasycap->allocation_video_page += 1;
+ peasycap->frame_buffer[i][j].pgo = pbuf;
+ }
+ peasycap->frame_buffer[i][j].pto =
+ peasycap->frame_buffer[i][j].pgo;
+ }
+ }
+
+ peasycap->frame_fill = 0;
+ peasycap->frame_read = 0;
+ JOM(4, "allocation of frame buffers done: %i pages\n", i*j);
+
+ return 0;
+}
+
+static void free_framebuffers(struct easycap *peasycap)
+{
+ int k, m, gone;
+
+ JOM(4, "freeing video frame buffers.\n");
+ gone = 0;
+ for (k = 0; k < FRAME_BUFFER_MANY; k++) {
+ for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
+ if (peasycap->frame_buffer[k][m].pgo) {
+ free_page((unsigned long)
+ peasycap->frame_buffer[k][m].pgo);
+ peasycap->frame_buffer[k][m].pgo = NULL;
+ peasycap->allocation_video_page -= 1;
+ gone++;
+ }
+ }
+ }
+ JOM(4, "video frame buffers freed: %i pages\n", gone);
+}
+
+static int alloc_fieldbuffers(struct easycap *peasycap)
+{
+ int i, j;
+ void *pbuf;
+
+ JOM(4, "allocating %i field buffers of size %li\n",
+ FIELD_BUFFER_MANY, (long int)FIELD_BUFFER_SIZE);
+ JOM(4, ".... each scattered over %li pages\n",
+ FIELD_BUFFER_SIZE/PAGE_SIZE);
+
+ for (i = 0; i < FIELD_BUFFER_MANY; i++) {
+ for (j = 0; j < FIELD_BUFFER_SIZE/PAGE_SIZE; j++) {
+ if (peasycap->field_buffer[i][j].pgo) {
+ SAM("ERROR: attempting to reallocate "
+ "fieldbuffers\n");
+ } else {
+ pbuf = (void *) __get_free_page(GFP_KERNEL);
+ if (!pbuf) {
+ SAM("ERROR: Could not allocate "
+ "fieldbuffer %i page %i\n", i, j);
+ return -ENOMEM;
+ }
+ peasycap->allocation_video_page += 1;
+ peasycap->field_buffer[i][j].pgo = pbuf;
+ }
+ peasycap->field_buffer[i][j].pto =
+ peasycap->field_buffer[i][j].pgo;
+ }
+ /* TODO: Hardcoded 0x0200 meaning? */
+ peasycap->field_buffer[i][0].kount = 0x0200;
+ }
+ peasycap->field_fill = 0;
+ peasycap->field_page = 0;
+ peasycap->field_read = 0;
+ JOM(4, "allocation of field buffers done: %i pages\n", i*j);
+
+ return 0;
+}
+
+static void free_fieldbuffers(struct easycap *peasycap)
+{
+ int k, m, gone;
+
+ JOM(4, "freeing video field buffers.\n");
+ gone = 0;
+ for (k = 0; k < FIELD_BUFFER_MANY; k++) {
+ for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
+ if (peasycap->field_buffer[k][m].pgo) {
+ free_page((unsigned long)
+ peasycap->field_buffer[k][m].pgo);
+ peasycap->field_buffer[k][m].pgo = NULL;
+ peasycap->allocation_video_page -= 1;
+ gone++;
+ }
+ }
+ }
+ JOM(4, "video field buffers freed: %i pages\n", gone);
+}
+
+static int alloc_isocbuffers(struct easycap *peasycap)
+{
+ int i;
+ void *pbuf;
+
+ JOM(4, "allocating %i isoc video buffers of size %i\n",
+ VIDEO_ISOC_BUFFER_MANY,
+ peasycap->video_isoc_buffer_size);
+ JOM(4, ".... each occupying contiguous memory pages\n");
+
+ for (i = 0; i < VIDEO_ISOC_BUFFER_MANY; i++) {
+ pbuf = (void *)__get_free_pages(GFP_KERNEL,
+ VIDEO_ISOC_ORDER);
+ if (!pbuf) {
+ SAM("ERROR: Could not allocate isoc "
+ "video buffer %i\n", i);
+ return -ENOMEM;
+ }
+ peasycap->allocation_video_page += BIT(VIDEO_ISOC_ORDER);
+
+ peasycap->video_isoc_buffer[i].pgo = pbuf;
+ peasycap->video_isoc_buffer[i].pto =
+ pbuf + peasycap->video_isoc_buffer_size;
+ peasycap->video_isoc_buffer[i].kount = i;
+ }
+ JOM(4, "allocation of isoc video buffers done: %i pages\n",
+ i * (0x01 << VIDEO_ISOC_ORDER));
+ return 0;
+}
+
+static void free_isocbuffers(struct easycap *peasycap)
+{
+ int k, m;
+
+ JOM(4, "freeing video isoc buffers.\n");
+ m = 0;
+ for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
+ if (peasycap->video_isoc_buffer[k].pgo) {
+ free_pages((unsigned long)
+ peasycap->video_isoc_buffer[k].pgo,
+ VIDEO_ISOC_ORDER);
+ peasycap->video_isoc_buffer[k].pgo = NULL;
+ peasycap->allocation_video_page -=
+ BIT(VIDEO_ISOC_ORDER);
+ m++;
+ }
+ }
+ JOM(4, "isoc video buffers freed: %i pages\n",
+ m * (0x01 << VIDEO_ISOC_ORDER));
+}
+
+static int create_video_urbs(struct easycap *peasycap)
+{
+ struct urb *purb;
+ struct data_urb *pdata_urb;
+ int i, j;
+
+ JOM(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
+ JOM(4, "using %i=peasycap->video_isoc_framesperdesc\n",
+ peasycap->video_isoc_framesperdesc);
+ JOM(4, "using %i=peasycap->video_isoc_maxframesize\n",
+ peasycap->video_isoc_maxframesize);
+ JOM(4, "using %i=peasycap->video_isoc_buffer_sizen",
+ peasycap->video_isoc_buffer_size);
+
+ for (i = 0; i < VIDEO_ISOC_BUFFER_MANY; i++) {
+ purb = usb_alloc_urb(peasycap->video_isoc_framesperdesc,
+ GFP_KERNEL);
+ if (!purb) {
+ SAM("ERROR: usb_alloc_urb returned NULL for buffer "
+ "%i\n", i);
+ return -ENOMEM;
+ }
+
+ peasycap->allocation_video_urb += 1;
+ pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
+ if (!pdata_urb) {
+ SAM("ERROR: Could not allocate struct data_urb.\n");
+ return -ENOMEM;
+ }
+
+ peasycap->allocation_video_struct +=
+ sizeof(struct data_urb);
+
+ pdata_urb->purb = purb;
+ pdata_urb->isbuf = i;
+ pdata_urb->length = 0;
+ list_add_tail(&(pdata_urb->list_head),
+ peasycap->purb_video_head);
+
+ if (!i) {
+ JOM(4, "initializing video urbs thus:\n");
+ JOM(4, " purb->interval = 1;\n");
+ JOM(4, " purb->dev = peasycap->pusb_device;\n");
+ JOM(4, " purb->pipe = usb_rcvisocpipe"
+ "(peasycap->pusb_device,%i);\n",
+ peasycap->video_endpointnumber);
+ JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
+ JOM(4, " purb->transfer_buffer = peasycap->"
+ "video_isoc_buffer[.].pgo;\n");
+ JOM(4, " purb->transfer_buffer_length = %i;\n",
+ peasycap->video_isoc_buffer_size);
+ JOM(4, " purb->complete = easycap_complete;\n");
+ JOM(4, " purb->context = peasycap;\n");
+ JOM(4, " purb->start_frame = 0;\n");
+ JOM(4, " purb->number_of_packets = %i;\n",
+ peasycap->video_isoc_framesperdesc);
+ JOM(4, " for (j = 0; j < %i; j++)\n",
+ peasycap->video_isoc_framesperdesc);
+ JOM(4, " {\n");
+ JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",
+ peasycap->video_isoc_maxframesize);
+ JOM(4, " purb->iso_frame_desc[j].length = %i;\n",
+ peasycap->video_isoc_maxframesize);
+ JOM(4, " }\n");
+ }
+
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+ purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
+ peasycap->video_endpointnumber);
+
+ purb->transfer_flags = URB_ISO_ASAP;
+ purb->transfer_buffer = peasycap->video_isoc_buffer[i].pgo;
+ purb->transfer_buffer_length =
+ peasycap->video_isoc_buffer_size;
+
+ purb->complete = easycap_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
+ purb->number_of_packets = peasycap->video_isoc_framesperdesc;
+
+ for (j = 0; j < peasycap->video_isoc_framesperdesc; j++) {
+ purb->iso_frame_desc[j].offset =
+ j * peasycap->video_isoc_maxframesize;
+ purb->iso_frame_desc[j].length =
+ peasycap->video_isoc_maxframesize;
+ }
+ }
+ JOM(4, "allocation of %i struct urb done.\n", i);
+ return 0;
+}
+
+static void free_video_urbs(struct easycap *peasycap)
+{
+ struct list_head *plist_head, *plist_next;
+ struct data_urb *pdata_urb;
+ int m;
+
+ if (peasycap->purb_video_head) {
+ m = 0;
+ list_for_each(plist_head, peasycap->purb_video_head) {
+ pdata_urb = list_entry(plist_head,
+ struct data_urb, list_head);
+ if (pdata_urb && pdata_urb->purb) {
+ usb_free_urb(pdata_urb->purb);
+ pdata_urb->purb = NULL;
+ peasycap->allocation_video_urb--;
+ m++;
+ }
+ }
+
+ JOM(4, "%i video urbs freed\n", m);
+ JOM(4, "freeing video data_urb structures.\n");
+ m = 0;
+ list_for_each_safe(plist_head, plist_next,
+ peasycap->purb_video_head) {
+ pdata_urb = list_entry(plist_head,
+ struct data_urb, list_head);
+ if (pdata_urb) {
+ peasycap->allocation_video_struct -=
+ sizeof(struct data_urb);
+ kfree(pdata_urb);
+ m++;
+ }
+ }
+ JOM(4, "%i video data_urb structures freed\n", m);
+ JOM(4, "setting peasycap->purb_video_head=NULL\n");
+ peasycap->purb_video_head = NULL;
+ }
+}
+
+static int alloc_audio_buffers(struct easycap *peasycap)
+{
+ void *pbuf;
+ int k;
+
+ JOM(4, "allocating %i isoc audio buffers of size %i\n",
+ AUDIO_ISOC_BUFFER_MANY,
+ peasycap->audio_isoc_buffer_size);
+ JOM(4, ".... each occupying contiguous memory pages\n");
+
+ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ pbuf = (void *)__get_free_pages(GFP_KERNEL, AUDIO_ISOC_ORDER);
+ if (!pbuf) {
+ SAM("ERROR: Could not allocate isoc audio buffer %i\n",
+ k);
+ return -ENOMEM;
+ }
+ peasycap->allocation_audio_page += BIT(AUDIO_ISOC_ORDER);
+
+ peasycap->audio_isoc_buffer[k].pgo = pbuf;
+ peasycap->audio_isoc_buffer[k].pto =
+ pbuf + peasycap->audio_isoc_buffer_size;
+ peasycap->audio_isoc_buffer[k].kount = k;
+ }
+
+ JOM(4, "allocation of isoc audio buffers done.\n");
+ return 0;
+}
+
+static void free_audio_buffers(struct easycap *peasycap)
+{
+ int k, m;
+
+ JOM(4, "freeing audio isoc buffers.\n");
+ m = 0;
+ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ if (peasycap->audio_isoc_buffer[k].pgo) {
+ free_pages((unsigned long)
+ (peasycap->audio_isoc_buffer[k].pgo),
+ AUDIO_ISOC_ORDER);
+ peasycap->audio_isoc_buffer[k].pgo = NULL;
+ peasycap->allocation_audio_page -=
+ BIT(AUDIO_ISOC_ORDER);
+ m++;
+ }
+ }
+ JOM(4, "easyoss_delete(): isoc audio buffers freed: %i pages\n",
+ m * (0x01 << AUDIO_ISOC_ORDER));
+}
+
+static int create_audio_urbs(struct easycap *peasycap)
+{
+ struct urb *purb;
+ struct data_urb *pdata_urb;
+ int k, j;
+
+ JOM(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
+ JOM(4, "using %i=peasycap->audio_isoc_framesperdesc\n",
+ peasycap->audio_isoc_framesperdesc);
+ JOM(4, "using %i=peasycap->audio_isoc_maxframesize\n",
+ peasycap->audio_isoc_maxframesize);
+ JOM(4, "using %i=peasycap->audio_isoc_buffer_size\n",
+ peasycap->audio_isoc_buffer_size);
+
+ for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
+ purb = usb_alloc_urb(peasycap->audio_isoc_framesperdesc,
+ GFP_KERNEL);
+ if (!purb) {
+ SAM("ERROR: usb_alloc_urb returned NULL for buffer "
+ "%i\n", k);
+ return -ENOMEM;
+ }
+ peasycap->allocation_audio_urb += 1;
+ pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
+ if (!pdata_urb) {
+ usb_free_urb(purb);
+ SAM("ERROR: Could not allocate struct data_urb.\n");
+ return -ENOMEM;
+ }
+ peasycap->allocation_audio_struct +=
+ sizeof(struct data_urb);
+
+ pdata_urb->purb = purb;
+ pdata_urb->isbuf = k;
+ pdata_urb->length = 0;
+ list_add_tail(&(pdata_urb->list_head),
+ peasycap->purb_audio_head);
+
+ if (!k) {
+ JOM(4, "initializing audio urbs thus:\n");
+ JOM(4, " purb->interval = 1;\n");
+ JOM(4, " purb->dev = peasycap->pusb_device;\n");
+ JOM(4, " purb->pipe = usb_rcvisocpipe(peasycap->"
+ "pusb_device,%i);\n",
+ peasycap->audio_endpointnumber);
+ JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
+ JOM(4, " purb->transfer_buffer = "
+ "peasycap->audio_isoc_buffer[.].pgo;\n");
+ JOM(4, " purb->transfer_buffer_length = %i;\n",
+ peasycap->audio_isoc_buffer_size);
+ JOM(4, " purb->complete = easycap_alsa_complete;\n");
+ JOM(4, " purb->context = peasycap;\n");
+ JOM(4, " purb->start_frame = 0;\n");
+ JOM(4, " purb->number_of_packets = %i;\n",
+ peasycap->audio_isoc_framesperdesc);
+ JOM(4, " for (j = 0; j < %i; j++)\n",
+ peasycap->audio_isoc_framesperdesc);
+ JOM(4, " {\n");
+ JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",
+ peasycap->audio_isoc_maxframesize);
+ JOM(4, " purb->iso_frame_desc[j].length = %i;\n",
+ peasycap->audio_isoc_maxframesize);
+ JOM(4, " }\n");
+ }
+
+ purb->interval = 1;
+ purb->dev = peasycap->pusb_device;
+ purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
+ peasycap->audio_endpointnumber);
+ purb->transfer_flags = URB_ISO_ASAP;
+ purb->transfer_buffer = peasycap->audio_isoc_buffer[k].pgo;
+ purb->transfer_buffer_length =
+ peasycap->audio_isoc_buffer_size;
+ purb->complete = easycap_alsa_complete;
+ purb->context = peasycap;
+ purb->start_frame = 0;
+ purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
+ for (j = 0; j < peasycap->audio_isoc_framesperdesc; j++) {
+ purb->iso_frame_desc[j].offset =
+ j * peasycap->audio_isoc_maxframesize;
+ purb->iso_frame_desc[j].length =
+ peasycap->audio_isoc_maxframesize;
+ }
+ }
+ JOM(4, "allocation of %i struct urb done.\n", k);
+ return 0;
+}
+
+static void free_audio_urbs(struct easycap *peasycap)
+{
+ struct list_head *plist_head, *plist_next;
+ struct data_urb *pdata_urb;
+ int m;
+
+ if (peasycap->purb_audio_head) {
+ JOM(4, "freeing audio urbs\n");
+ m = 0;
+ list_for_each(plist_head, (peasycap->purb_audio_head)) {
+ pdata_urb = list_entry(plist_head,
+ struct data_urb, list_head);
+ if (pdata_urb && pdata_urb->purb) {
+ usb_free_urb(pdata_urb->purb);
+ pdata_urb->purb = NULL;
+ peasycap->allocation_audio_urb--;
+ m++;
+ }
+ }
+ JOM(4, "%i audio urbs freed\n", m);
+ JOM(4, "freeing audio data_urb structures.\n");
+ m = 0;
+ list_for_each_safe(plist_head, plist_next,
+ peasycap->purb_audio_head) {
+ pdata_urb = list_entry(plist_head,
+ struct data_urb, list_head);
+ if (pdata_urb) {
+ peasycap->allocation_audio_struct -=
+ sizeof(struct data_urb);
+ kfree(pdata_urb);
+ m++;
+ }
+ }
+ JOM(4, "%i audio data_urb structures freed\n", m);
+ JOM(4, "setting peasycap->purb_audio_head=NULL\n");
+ peasycap->purb_audio_head = NULL;
+ }
+}
+
+static void config_easycap(struct easycap *peasycap,
+ u8 bInterfaceNumber,
+ u8 bInterfaceClass,
+ u8 bInterfaceSubClass)
+{
+ if ((USB_CLASS_VIDEO == bInterfaceClass) ||
+ (USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
+ if (-1 == peasycap->video_interface) {
+ peasycap->video_interface = bInterfaceNumber;
+ JOM(4, "setting peasycap->video_interface=%i\n",
+ peasycap->video_interface);
+ } else {
+ if (peasycap->video_interface != bInterfaceNumber) {
+ SAM("ERROR: attempting to reset "
+ "peasycap->video_interface\n");
+ SAM("...... continuing with "
+ "%i=peasycap->video_interface\n",
+ peasycap->video_interface);
+ }
+ }
+ } else if ((USB_CLASS_AUDIO == bInterfaceClass) &&
+ (USB_SUBCLASS_AUDIOSTREAMING == bInterfaceSubClass)) {
+ if (-1 == peasycap->audio_interface) {
+ peasycap->audio_interface = bInterfaceNumber;
+ JOM(4, "setting peasycap->audio_interface=%i\n",
+ peasycap->audio_interface);
+ } else {
+ if (peasycap->audio_interface != bInterfaceNumber) {
+ SAM("ERROR: attempting to reset "
+ "peasycap->audio_interface\n");
+ SAM("...... continuing with "
+ "%i=peasycap->audio_interface\n",
+ peasycap->audio_interface);
+ }
+ }
+ }
+}
+
+/*
+ * This function is called from within easycap_usb_disconnect() and is
+ * protected by semaphores set and cleared by easycap_usb_disconnect().
+ * By this stage the device has already been physically unplugged,
+ * so peasycap->pusb_device is no longer valid.
+ */
+static void easycap_delete(struct kref *pkref)
+{
+ struct easycap *peasycap;
+
+ peasycap = container_of(pkref, struct easycap, kref);
+ if (!peasycap) {
+ SAM("ERROR: peasycap is NULL: cannot perform deletions\n");
+ return;
+ }
+
+ /* Free video urbs */
+ free_video_urbs(peasycap);
+
+ /* Free video isoc buffers */
+ free_isocbuffers(peasycap);
+
+ /* Free video field buffers */
+ free_fieldbuffers(peasycap);
+
+ /* Free video frame buffers */
+ free_framebuffers(peasycap);
+
+ /* Free audio urbs */
+ free_audio_urbs(peasycap);
+
+ /* Free audio isoc buffers */
+ free_audio_buffers(peasycap);
+
+ free_easycap(peasycap);
+
+ JOT(4, "ending.\n");
+}
+
static const struct v4l2_file_operations v4l2_fops = {
.owner = THIS_MODULE,
.open = easycap_open_noinode,
@@ -2850,6 +3450,36 @@ static const struct v4l2_file_operations v4l2_fops = {
.mmap = easycap_mmap,
};
+static int easycap_register_video(struct easycap *peasycap)
+{
+ /*
+ * FIXME: This is believed to be harmless,
+ * but may well be unnecessary or wrong.
+ */
+ peasycap->video_device.v4l2_dev = NULL;
+
+ strcpy(&peasycap->video_device.name[0], "easycapdc60");
+ peasycap->video_device.fops = &v4l2_fops;
+ peasycap->video_device.minor = -1;
+ peasycap->video_device.release = (void *)(&videodev_release);
+
+ video_set_drvdata(&(peasycap->video_device), (void *)peasycap);
+
+ if (0 != (video_register_device(&(peasycap->video_device),
+ VFL_TYPE_GRABBER, -1))) {
+ videodev_release(&(peasycap->video_device));
+ return -ENODEV;
+ }
+
+ peasycap->registered_video++;
+
+ SAM("registered with videodev: %i=minor\n",
+ peasycap->video_device.minor);
+ peasycap->minor = peasycap->video_device.minor;
+
+ return 0;
+}
+
/*
* When the device is plugged, this function is called three times,
* one for each interface.
@@ -2861,24 +3491,15 @@ static int easycap_usb_probe(struct usb_interface *intf,
struct usb_host_interface *alt;
struct usb_endpoint_descriptor *ep;
struct usb_interface_descriptor *interface;
- struct urb *purb;
struct easycap *peasycap;
- int ndong;
- struct data_urb *pdata_urb;
- int i, j, k, m, rc;
+ int i, j, rc;
u8 bInterfaceNumber;
u8 bInterfaceClass;
u8 bInterfaceSubClass;
- void *pbuf;
int okalt[8], isokalt;
int okepn[8];
int okmps[8];
int maxpacketsize;
- u16 mask;
- s32 value;
- struct easycap_format *peasycap_format;
- int fmtidx;
- struct inputset *inputset;
usbdev = interface_to_usbdev(intf);
@@ -2916,76 +3537,16 @@ static int easycap_usb_probe(struct usb_interface *intf,
* interfaces 1 and 2 are probed.
*/
if (0 == bInterfaceNumber) {
- peasycap = kzalloc(sizeof(struct easycap), GFP_KERNEL);
- if (!peasycap) {
- SAY("ERROR: Could not allocate peasycap\n");
- return -ENOMEM;
- }
-
- /* Perform urgent initializations */
- peasycap->minor = -1;
- kref_init(&peasycap->kref);
- JOM(8, "intf[%i]: after kref_init(..._video) "
- "%i=peasycap->kref.refcount.counter\n",
- bInterfaceNumber, peasycap->kref.refcount.counter);
-
- /* module params */
- peasycap->gain = (s8)clamp(easycap_gain, 0, 31);
-
- init_waitqueue_head(&peasycap->wq_video);
- init_waitqueue_head(&peasycap->wq_audio);
- init_waitqueue_head(&peasycap->wq_trigger);
-
- if (mutex_lock_interruptible(&mutex_dongle)) {
- SAY("ERROR: cannot down mutex_dongle\n");
- return -ERESTARTSYS;
- }
-
- for (ndong = 0; ndong < DONGLE_MANY; ndong++) {
- if ((!easycapdc60_dongle[ndong].peasycap) &&
- (!mutex_is_locked(&easycapdc60_dongle
- [ndong].mutex_video)) &&
- (!mutex_is_locked(&easycapdc60_dongle
- [ndong].mutex_audio))) {
- easycapdc60_dongle[ndong].peasycap = peasycap;
- peasycap->isdongle = ndong;
- JOM(8, "intf[%i]: peasycap-->easycap"
- "_dongle[%i].peasycap\n",
- bInterfaceNumber, ndong);
- break;
- }
- }
-
- if (DONGLE_MANY <= ndong) {
- SAM("ERROR: too many dongles\n");
- mutex_unlock(&mutex_dongle);
+ /*
+ * Allocate the structure and save it in a free slot
+ * in the easycapdc60_dongle array.
+ */
+ peasycap = alloc_easycap(bInterfaceNumber);
+ if (!peasycap)
return -ENOMEM;
- }
- mutex_unlock(&mutex_dongle);
-
- peasycap->allocation_video_struct = sizeof(struct easycap);
-
- /* and further initialize the structure */
- peasycap->pusb_device = usbdev;
- peasycap->pusb_interface = intf;
- peasycap->microphone = false;
-
- peasycap->video_interface = -1;
- peasycap->video_altsetting_on = -1;
- peasycap->video_altsetting_off = -1;
- peasycap->video_endpointnumber = -1;
- peasycap->video_isoc_maxframesize = -1;
- peasycap->video_isoc_buffer_size = -1;
-
- peasycap->audio_interface = -1;
- peasycap->audio_altsetting_on = -1;
- peasycap->audio_altsetting_off = -1;
- peasycap->audio_endpointnumber = -1;
- peasycap->audio_isoc_maxframesize = -1;
- peasycap->audio_isoc_buffer_size = -1;
-
- peasycap->frame_buffer_many = FRAME_BUFFER_MANY;
+ /* Perform basic struct initialization */
+ init_easycap(peasycap, usbdev, intf, bInterfaceNumber);
/* Dynamically fill in the available formats */
rc = easycap_video_fillin_formats();
@@ -2996,136 +3557,19 @@ static int easycap_usb_probe(struct usb_interface *intf,
JOM(4, "%i formats available\n", rc);
/* Populate easycap.inputset[] */
- inputset = peasycap->inputset;
- fmtidx = peasycap->ntsc ? NTSC_M : PAL_BGHIN;
- m = 0;
- mask = 0;
- for (i = 0; 0xFFFF != easycap_standard[i].mask; i++) {
- if (fmtidx == easycap_standard[i].v4l2_standard.index) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].standard_offset = i;
-
- mask = easycap_standard[i].mask;
- }
- }
- if (1 != m) {
- SAM("ERROR: "
- "inputset->standard_offset unpopulated, %i=m\n", m);
- return -ENOENT;
- }
-
- peasycap_format = &easycap_format[0];
- m = 0;
- for (i = 0; peasycap_format->v4l2_format.fmt.pix.width; i++) {
- struct v4l2_pix_format *pix =
- &peasycap_format->v4l2_format.fmt.pix;
- if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) &&
- pix->field == V4L2_FIELD_NONE &&
- pix->pixelformat == V4L2_PIX_FMT_UYVY &&
- pix->width == 640 && pix->height == 480) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].format_offset = i;
- break;
- }
- peasycap_format++;
- }
- if (1 != m) {
- SAM("ERROR: inputset[]->format_offset unpopulated\n");
- return -ENOENT;
- }
-
- m = 0;
- for (i = 0; 0xFFFFFFFF != easycap_control[i].id; i++) {
- value = easycap_control[i].default_value;
- if (V4L2_CID_BRIGHTNESS == easycap_control[i].id) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].brightness = value;
- } else if (V4L2_CID_CONTRAST == easycap_control[i].id) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].contrast = value;
- } else if (V4L2_CID_SATURATION == easycap_control[i].id) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].saturation = value;
- } else if (V4L2_CID_HUE == easycap_control[i].id) {
- m++;
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].hue = value;
- }
- }
-
- if (4 != m) {
- SAM("ERROR: inputset[]->brightness underpopulated\n");
- return -ENOENT;
- }
- for (k = 0; k < INPUT_MANY; k++)
- inputset[k].input = k;
- JOM(4, "populated inputset[]\n");
+ rc = populate_inputset(peasycap);
+ if (rc < 0)
+ return rc;
JOM(4, "finished initialization\n");
} else {
-
- /*
- * FIXME: Identify the appropriate pointer
- * peasycap for interfaces 1 and 2.
- * The address of peasycap->pusb_device
- * is reluctantly used for this purpose.
- */
- for (ndong = 0; ndong < DONGLE_MANY; ndong++) {
- if (usbdev == easycapdc60_dongle[ndong].peasycap->
- pusb_device) {
- peasycap = easycapdc60_dongle[ndong].peasycap;
- JOT(8, "intf[%i]: dongle[%i].peasycap\n",
- bInterfaceNumber, ndong);
- break;
- }
- }
- if (DONGLE_MANY <= ndong) {
- SAY("ERROR: peasycap is unknown when probing interface %i\n",
- bInterfaceNumber);
+ peasycap = get_easycap(usbdev, bInterfaceNumber);
+ if (!peasycap)
return -ENODEV;
- }
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL when probing interface %i\n",
- bInterfaceNumber);
- return -ENODEV;
- }
}
- if ((USB_CLASS_VIDEO == bInterfaceClass) ||
- (USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
- if (-1 == peasycap->video_interface) {
- peasycap->video_interface = bInterfaceNumber;
- JOM(4, "setting peasycap->video_interface=%i\n",
- peasycap->video_interface);
- } else {
- if (peasycap->video_interface != bInterfaceNumber) {
- SAM("ERROR: attempting to reset "
- "peasycap->video_interface\n");
- SAM("...... continuing with "
- "%i=peasycap->video_interface\n",
- peasycap->video_interface);
- }
- }
- } else if ((USB_CLASS_AUDIO == bInterfaceClass) &&
- (USB_SUBCLASS_AUDIOSTREAMING == bInterfaceSubClass)) {
- if (-1 == peasycap->audio_interface) {
- peasycap->audio_interface = bInterfaceNumber;
- JOM(4, "setting peasycap->audio_interface=%i\n",
- peasycap->audio_interface);
- } else {
- if (peasycap->audio_interface != bInterfaceNumber) {
- SAM("ERROR: attempting to reset "
- "peasycap->audio_interface\n");
- SAM("...... continuing with "
- "%i=peasycap->audio_interface\n",
- peasycap->audio_interface);
- }
- }
- }
+ config_easycap(peasycap, bInterfaceNumber,
+ bInterfaceClass,
+ bInterfaceSubClass);
/*
* Investigate all altsettings. This is done in detail
@@ -3368,173 +3812,23 @@ static int easycap_usb_probe(struct usb_interface *intf,
*/
INIT_LIST_HEAD(&(peasycap->urb_video_head));
peasycap->purb_video_head = &(peasycap->urb_video_head);
- JOM(4, "allocating %i frame buffers of size %li\n",
- FRAME_BUFFER_MANY, (long int)FRAME_BUFFER_SIZE);
- JOM(4, ".... each scattered over %li pages\n",
- FRAME_BUFFER_SIZE/PAGE_SIZE);
-
- for (k = 0; k < FRAME_BUFFER_MANY; k++) {
- for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++) {
- if (peasycap->frame_buffer[k][m].pgo)
- SAM("attempting to reallocate frame "
- " buffers\n");
- else {
- pbuf = (void *)__get_free_page(GFP_KERNEL);
- if (!pbuf) {
- SAM("ERROR: Could not allocate frame "
- "buffer %i page %i\n", k, m);
- return -ENOMEM;
- }
-
- peasycap->allocation_video_page += 1;
- peasycap->frame_buffer[k][m].pgo = pbuf;
- }
- peasycap->frame_buffer[k][m].pto =
- peasycap->frame_buffer[k][m].pgo;
- }
- }
-
- peasycap->frame_fill = 0;
- peasycap->frame_read = 0;
- JOM(4, "allocation of frame buffers done: %i pages\n", k *
- m);
- JOM(4, "allocating %i field buffers of size %li\n",
- FIELD_BUFFER_MANY, (long int)FIELD_BUFFER_SIZE);
- JOM(4, ".... each scattered over %li pages\n",
- FIELD_BUFFER_SIZE/PAGE_SIZE);
-
- for (k = 0; k < FIELD_BUFFER_MANY; k++) {
- for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++) {
- if (peasycap->field_buffer[k][m].pgo) {
- SAM("ERROR: attempting to reallocate "
- "field buffers\n");
- } else {
- pbuf = (void *) __get_free_page(GFP_KERNEL);
- if (!pbuf) {
- SAM("ERROR: Could not allocate field"
- " buffer %i page %i\n", k, m);
- return -ENOMEM;
- }
-
- peasycap->allocation_video_page += 1;
- peasycap->field_buffer[k][m].pgo = pbuf;
- }
- peasycap->field_buffer[k][m].pto =
- peasycap->field_buffer[k][m].pgo;
- }
- peasycap->field_buffer[k][0].kount = 0x0200;
- }
- peasycap->field_fill = 0;
- peasycap->field_page = 0;
- peasycap->field_read = 0;
- JOM(4, "allocation of field buffers done: %i pages\n", k *
- m);
- JOM(4, "allocating %i isoc video buffers of size %i\n",
- VIDEO_ISOC_BUFFER_MANY,
- peasycap->video_isoc_buffer_size);
- JOM(4, ".... each occupying contiguous memory pages\n");
-
- for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
- pbuf = (void *)__get_free_pages(GFP_KERNEL,
- VIDEO_ISOC_ORDER);
- if (!pbuf) {
- SAM("ERROR: Could not allocate isoc video buffer "
- "%i\n", k);
- return -ENOMEM;
- }
- peasycap->allocation_video_page +=
- BIT(VIDEO_ISOC_ORDER);
- peasycap->video_isoc_buffer[k].pgo = pbuf;
- peasycap->video_isoc_buffer[k].pto =
- pbuf + peasycap->video_isoc_buffer_size;
- peasycap->video_isoc_buffer[k].kount = k;
- }
- JOM(4, "allocation of isoc video buffers done: %i pages\n",
- k * (0x01 << VIDEO_ISOC_ORDER));
-
- /* Allocate and initialize multiple struct usb */
- JOM(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
- JOM(4, "using %i=peasycap->video_isoc_framesperdesc\n",
- peasycap->video_isoc_framesperdesc);
- JOM(4, "using %i=peasycap->video_isoc_maxframesize\n",
- peasycap->video_isoc_maxframesize);
- JOM(4, "using %i=peasycap->video_isoc_buffer_sizen",
- peasycap->video_isoc_buffer_size);
-
- for (k = 0; k < VIDEO_ISOC_BUFFER_MANY; k++) {
- purb = usb_alloc_urb(peasycap->video_isoc_framesperdesc,
- GFP_KERNEL);
- if (!purb) {
- SAM("ERROR: usb_alloc_urb returned NULL for buffer "
- "%i\n", k);
- return -ENOMEM;
- }
+ rc = alloc_framebuffers(peasycap);
+ if (rc < 0)
+ return rc;
- peasycap->allocation_video_urb += 1;
- pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
- if (!pdata_urb) {
- SAM("ERROR: Could not allocate struct data_urb.\n");
- return -ENOMEM;
- }
+ rc = alloc_fieldbuffers(peasycap);
+ if (rc < 0)
+ return rc;
- peasycap->allocation_video_struct +=
- sizeof(struct data_urb);
+ rc = alloc_isocbuffers(peasycap);
+ if (rc < 0)
+ return rc;
- pdata_urb->purb = purb;
- pdata_urb->isbuf = k;
- pdata_urb->length = 0;
- list_add_tail(&(pdata_urb->list_head),
- peasycap->purb_video_head);
-
- /* Initialize allocated urbs */
- if (!k) {
- JOM(4, "initializing video urbs thus:\n");
- JOM(4, " purb->interval = 1;\n");
- JOM(4, " purb->dev = peasycap->pusb_device;\n");
- JOM(4, " purb->pipe = usb_rcvisocpipe"
- "(peasycap->pusb_device,%i);\n",
- peasycap->video_endpointnumber);
- JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
- JOM(4, " purb->transfer_buffer = peasycap->"
- "video_isoc_buffer[.].pgo;\n");
- JOM(4, " purb->transfer_buffer_length = %i;\n",
- peasycap->video_isoc_buffer_size);
- JOM(4, " purb->complete = easycap_complete;\n");
- JOM(4, " purb->context = peasycap;\n");
- JOM(4, " purb->start_frame = 0;\n");
- JOM(4, " purb->number_of_packets = %i;\n",
- peasycap->video_isoc_framesperdesc);
- JOM(4, " for (j = 0; j < %i; j++)\n",
- peasycap->video_isoc_framesperdesc);
- JOM(4, " {\n");
- JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",
- peasycap->video_isoc_maxframesize);
- JOM(4, " purb->iso_frame_desc[j].length = %i;\n",
- peasycap->video_isoc_maxframesize);
- JOM(4, " }\n");
- }
-
- purb->interval = 1;
- purb->dev = peasycap->pusb_device;
- purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
- peasycap->video_endpointnumber);
- purb->transfer_flags = URB_ISO_ASAP;
- purb->transfer_buffer = peasycap->video_isoc_buffer[k].pgo;
- purb->transfer_buffer_length =
- peasycap->video_isoc_buffer_size;
- purb->complete = easycap_complete;
- purb->context = peasycap;
- purb->start_frame = 0;
- purb->number_of_packets = peasycap->video_isoc_framesperdesc;
- for (j = 0; j < peasycap->video_isoc_framesperdesc; j++) {
- purb->iso_frame_desc[j].offset = j *
- peasycap->video_isoc_maxframesize;
- purb->iso_frame_desc[j].length =
- peasycap->video_isoc_maxframesize;
- }
- }
- JOM(4, "allocation of %i struct urb done.\n", k);
+ /* Allocate and initialize video urbs */
+ rc = create_video_urbs(peasycap);
+ if (rc < 0)
+ return rc;
/* Save pointer peasycap in this interface */
usb_set_intfdata(intf, peasycap);
@@ -3545,9 +3839,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
* because some udev rules triggers easycap_open()
* immediately after registration, causing a clash.
*/
- peasycap->ntsc = easycap_ntsc;
- JOM(8, "defaulting initially to %s\n",
- easycap_ntsc ? "NTSC" : "PAL");
rc = reset(peasycap);
if (rc) {
SAM("ERROR: reset() rc = %i\n", rc);
@@ -3562,33 +3853,12 @@ static int easycap_usb_probe(struct usb_interface *intf,
JOM(4, "registered device instance: %s\n",
peasycap->v4l2_device.name);
- /*
- * FIXME: This is believed to be harmless,
- * but may well be unnecessary or wrong.
- */
- peasycap->video_device.v4l2_dev = NULL;
-
-
- strcpy(&peasycap->video_device.name[0], "easycapdc60");
- peasycap->video_device.fops = &v4l2_fops;
- peasycap->video_device.minor = -1;
- peasycap->video_device.release = (void *)(&videodev_release);
-
- video_set_drvdata(&(peasycap->video_device), (void *)peasycap);
-
- if (0 != (video_register_device(&(peasycap->video_device),
- VFL_TYPE_GRABBER, -1))) {
+ rc = easycap_register_video(peasycap);
+ if (rc < 0) {
dev_err(&intf->dev,
"Not able to register with videodev\n");
- videodev_release(&(peasycap->video_device));
return -ENODEV;
}
-
- peasycap->registered_video++;
- SAM("registered with videodev: %i=minor\n",
- peasycap->video_device.minor);
- peasycap->minor = peasycap->video_device.minor;
-
break;
}
/* 1: Audio control */
@@ -3711,109 +3981,14 @@ static int easycap_usb_probe(struct usb_interface *intf,
INIT_LIST_HEAD(&(peasycap->urb_audio_head));
peasycap->purb_audio_head = &(peasycap->urb_audio_head);
- JOM(4, "allocating %i isoc audio buffers of size %i\n",
- AUDIO_ISOC_BUFFER_MANY,
- peasycap->audio_isoc_buffer_size);
- JOM(4, ".... each occupying contiguous memory pages\n");
-
- for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
- pbuf = (void *)__get_free_pages(GFP_KERNEL,
- AUDIO_ISOC_ORDER);
- if (!pbuf) {
- SAM("ERROR: Could not allocate isoc audio buffer "
- "%i\n", k);
- return -ENOMEM;
- }
- peasycap->allocation_audio_page +=
- BIT(AUDIO_ISOC_ORDER);
-
- peasycap->audio_isoc_buffer[k].pgo = pbuf;
- peasycap->audio_isoc_buffer[k].pto = pbuf +
- peasycap->audio_isoc_buffer_size;
- peasycap->audio_isoc_buffer[k].kount = k;
- }
- JOM(4, "allocation of isoc audio buffers done.\n");
+ rc = alloc_audio_buffers(peasycap);
+ if (rc < 0)
+ return rc;
/* Allocate and initialize urbs */
- JOM(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
- JOM(4, "using %i=peasycap->audio_isoc_framesperdesc\n",
- peasycap->audio_isoc_framesperdesc);
- JOM(4, "using %i=peasycap->audio_isoc_maxframesize\n",
- peasycap->audio_isoc_maxframesize);
- JOM(4, "using %i=peasycap->audio_isoc_buffer_size\n",
- peasycap->audio_isoc_buffer_size);
-
- for (k = 0; k < AUDIO_ISOC_BUFFER_MANY; k++) {
- purb = usb_alloc_urb(peasycap->audio_isoc_framesperdesc,
- GFP_KERNEL);
- if (!purb) {
- SAM("ERROR: usb_alloc_urb returned NULL for buffer "
- "%i\n", k);
- return -ENOMEM;
- }
- peasycap->allocation_audio_urb += 1 ;
- pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
- if (!pdata_urb) {
- usb_free_urb(purb);
- SAM("ERROR: Could not allocate struct data_urb.\n");
- return -ENOMEM;
- }
- peasycap->allocation_audio_struct +=
- sizeof(struct data_urb);
-
- pdata_urb->purb = purb;
- pdata_urb->isbuf = k;
- pdata_urb->length = 0;
- list_add_tail(&(pdata_urb->list_head),
- peasycap->purb_audio_head);
-
- if (!k) {
- JOM(4, "initializing audio urbs thus:\n");
- JOM(4, " purb->interval = 1;\n");
- JOM(4, " purb->dev = peasycap->pusb_device;\n");
- JOM(4, " purb->pipe = usb_rcvisocpipe(peasycap->"
- "pusb_device,%i);\n",
- peasycap->audio_endpointnumber);
- JOM(4, " purb->transfer_flags = URB_ISO_ASAP;\n");
- JOM(4, " purb->transfer_buffer = "
- "peasycap->audio_isoc_buffer[.].pgo;\n");
- JOM(4, " purb->transfer_buffer_length = %i;\n",
- peasycap->audio_isoc_buffer_size);
- JOM(4, " purb->complete = easycap_alsa_complete;\n");
- JOM(4, " purb->context = peasycap;\n");
- JOM(4, " purb->start_frame = 0;\n");
- JOM(4, " purb->number_of_packets = %i;\n",
- peasycap->audio_isoc_framesperdesc);
- JOM(4, " for (j = 0; j < %i; j++)\n",
- peasycap->audio_isoc_framesperdesc);
- JOM(4, " {\n");
- JOM(4, " purb->iso_frame_desc[j].offset = j*%i;\n",
- peasycap->audio_isoc_maxframesize);
- JOM(4, " purb->iso_frame_desc[j].length = %i;\n",
- peasycap->audio_isoc_maxframesize);
- JOM(4, " }\n");
- }
-
- purb->interval = 1;
- purb->dev = peasycap->pusb_device;
- purb->pipe = usb_rcvisocpipe(peasycap->pusb_device,
- peasycap->audio_endpointnumber);
- purb->transfer_flags = URB_ISO_ASAP;
- purb->transfer_buffer = peasycap->audio_isoc_buffer[k].pgo;
- purb->transfer_buffer_length =
- peasycap->audio_isoc_buffer_size;
- purb->complete = easycap_alsa_complete;
- purb->context = peasycap;
- purb->start_frame = 0;
- purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
- for (j = 0; j < peasycap->audio_isoc_framesperdesc; j++) {
- purb->iso_frame_desc[j].offset = j *
- peasycap->audio_isoc_maxframesize;
- purb->iso_frame_desc[j].length =
- peasycap->audio_isoc_maxframesize;
- }
- }
- JOM(4, "allocation of %i struct urb done.\n", k);
+ rc = create_audio_urbs(peasycap);
+ if (rc < 0)
+ return rc;
/* Save pointer peasycap in this interface */
usb_set_intfdata(intf, peasycap);
@@ -3843,15 +4018,13 @@ static int easycap_usb_probe(struct usb_interface *intf,
SAM("ends successfully for interface %i\n", bInterfaceNumber);
return 0;
}
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
+
/*
- * WHEN THIS FUNCTION IS CALLED THE EasyCAP HAS ALREADY BEEN PHYSICALLY
- * UNPLUGGED. HENCE peasycap->pusb_device IS NO LONGER VALID.
- *
- * THIS FUNCTION AFFECTS ALSA. BEWARE.
+ * When this function is called, the device has already been
+ * physically unplugged.
+ * Hence, peasycap->pusb_device is no longer valid.
+ * This function affects ALSA.
*/
-/*---------------------------------------------------------------------------*/
static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
{
struct usb_host_interface *pusb_host_interface;
@@ -3876,6 +4049,7 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
minor = pusb_interface->minor;
JOT(4, "intf[%i]: minor=%i\n", bInterfaceNumber, minor);
+ /* There is nothing to do for Interface Number 1 */
if (1 == bInterfaceNumber)
return;
@@ -3884,11 +4058,8 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
SAY("ERROR: peasycap is NULL\n");
return;
}
-/*---------------------------------------------------------------------------*/
-/*
- * IF THE WAIT QUEUES ARE NOT CLEARED A DEADLOCK IS POSSIBLE. BEWARE.
-*/
-/*---------------------------------------------------------------------------*/
+
+ /* If the wait queues are not cleared, a deadlock is possible */
peasycap->video_eof = 1;
peasycap->audio_eof = 1;
wake_up_interruptible(&(peasycap->wq_video));
@@ -3904,15 +4075,14 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
default:
break;
}
-/*--------------------------------------------------------------------------*/
-/*
- * DEREGISTER
- *
- * THIS PROCEDURE WILL BLOCK UNTIL easycap_poll(), VIDEO IOCTL AND AUDIO
- * IOCTL ARE ALL UNLOCKED. IF THIS IS NOT DONE AN Oops CAN OCCUR WHEN
- * AN EasyCAP IS UNPLUGGED WHILE THE URBS ARE RUNNING. BEWARE.
- */
-/*--------------------------------------------------------------------------*/
+
+ /*
+ * Deregister
+ * This procedure will block until easycap_poll() and the
+ * video and audio ioctls are all unlocked.
+ * If this is not done, an oops can occur when an EasyCAP
+ * is unplugged while the URBs are running.
+ */
kd = easycap_isdongle(peasycap);
switch (bInterfaceNumber) {
case 0: {
@@ -3929,7 +4099,6 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
} else {
SAY("ERROR: %i=kd is bad: cannot lock dongle\n", kd);
}
-/*---------------------------------------------------------------------------*/
if (!peasycap->v4l2_device.name[0]) {
SAM("ERROR: peasycap->v4l2_device.name is empty\n");
if (0 <= kd && DONGLE_MANY > kd)
@@ -3945,7 +4114,6 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
JOM(4, "intf[%i]: video_unregister_device() minor=%i\n",
bInterfaceNumber, minor);
peasycap->registered_video--;
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
if (0 <= kd && DONGLE_MANY > kd) {
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
@@ -3981,12 +4149,12 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
default:
break;
}
-/*---------------------------------------------------------------------------*/
-/*
- * CALL easycap_delete() IF NO REMAINING REFERENCES TO peasycap
- * (ALSO WHEN ALSA HAS BEEN IN USE)
- */
-/*---------------------------------------------------------------------------*/
+
+ /*
+ * If there are no remaining references to peasycap,
+ * call easycap_delete() (also when ALSA has been in use).
+ */
if (!peasycap->kref.refcount.counter) {
SAM("ERROR: peasycap->kref.refcount.counter is zero "
"so cannot call kref_put()\n");
@@ -4021,17 +4189,11 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
JOT(4, "unlocked dongle[%i].mutex_video\n", kd);
}
-/*---------------------------------------------------------------------------*/
JOM(4, "ends\n");
return;
}
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
-/*
- * PARAMETERS APPLICABLE TO ENTIRE DRIVER, I.E. BOTH VIDEO AND AUDIO
- */
-/*---------------------------------------------------------------------------*/
+/* Devices supported by this driver */
static struct usb_device_id easycap_usb_device_id_table[] = {
{USB_DEVICE(USB_EASYCAP_VENDOR_ID, USB_EASYCAP_PRODUCT_ID)},
{ }
@@ -4066,14 +4228,11 @@ static int __init easycap_module_init(void)
return rc;
}
-/*****************************************************************************/
+
static void __exit easycap_module_exit(void)
{
usb_deregister(&easycap_usb_driver);
}
-/*****************************************************************************/
module_init(easycap_module_init);
module_exit(easycap_module_exit);
-
-/*****************************************************************************/
diff --git a/drivers/staging/media/go7007/go7007-v4l2.c b/drivers/staging/media/go7007/go7007-v4l2.c
index 3ef4cd8b4de3..c184ad30fbd8 100644
--- a/drivers/staging/media/go7007/go7007-v4l2.c
+++ b/drivers/staging/media/go7007/go7007-v4l2.c
@@ -76,7 +76,6 @@ static void abort_queued(struct go7007 *go)
static int go7007_streamoff(struct go7007 *go)
{
- int retval = -EINVAL;
unsigned long flags;
mutex_lock(&go->hw_lock);
@@ -87,7 +86,6 @@ static int go7007_streamoff(struct go7007 *go)
abort_queued(go);
spin_unlock_irqrestore(&go->spinlock, flags);
go7007_reset_encoder(go);
- retval = 0;
}
mutex_unlock(&go->hw_lock);
return 0;
diff --git a/drivers/staging/media/go7007/s2250-loader.c b/drivers/staging/media/go7007/s2250-loader.c
index 7c5af4f289b6..f1bd159ac195 100644
--- a/drivers/staging/media/go7007/s2250-loader.c
+++ b/drivers/staging/media/go7007/s2250-loader.c
@@ -165,3 +165,5 @@ module_usb_driver(s2250loader_driver);
MODULE_AUTHOR("");
MODULE_DESCRIPTION("firmware loader for Sensoray 2250/2251");
MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(S2250_LOADER_FIRMWARE);
+MODULE_FIRMWARE(S2250_FIRMWARE);
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index d7cf5ef076a5..2944fde89f44 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -70,10 +70,6 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
static int ir_open(void *data);
static void ir_close(void *data);
-/* Driver init/exit prototypes */
-static int __init imon_init(void);
-static void __exit imon_exit(void);
-
/*** G L O B A L S ***/
#define IMON_DATA_BUF_SZ 35
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 352a20229ca2..f4e4d9003f38 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -80,10 +80,6 @@ static ssize_t vfd_write(struct file *file, const char *buf,
static int ir_open(void *data);
static void ir_close(void *data);
-/* Driver init/exit prototypes */
-static int __init sasem_init(void);
-static void __exit sasem_exit(void);
-
/*** G L O B A L S ***/
#define SASEM_DATA_BUF_SZ 32
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 3295ea63f3eb..97ef67036e3f 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -129,6 +129,7 @@ static void send_space_homebrew(long length);
static struct lirc_serial hardware[] = {
[LIRC_HOMEBREW] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
.signal_pin = UART_MSR_DCD,
.signal_pin_change = UART_MSR_DDCD,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -145,6 +146,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IRDEO] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = UART_MCR_OUT2,
@@ -156,6 +158,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IRDEO_REMOTE] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
@@ -167,6 +170,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_ANIMAX] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
.signal_pin = UART_MSR_DCD,
.signal_pin_change = UART_MSR_DDCD,
.on = 0,
@@ -177,6 +181,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IGOR] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -201,6 +206,7 @@ static struct lirc_serial hardware[] = {
* See also http://www.nslu2-linux.org for this device
*/
[LIRC_NSLU2] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_NSLU2].lock),
.signal_pin = UART_MSR_CTS,
.signal_pin_change = UART_MSR_DCTS,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 490a7f15604b..8b864afb40b6 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -36,12 +36,6 @@ struct omap_crtc {
struct drm_framebuffer *old_fb;
};
-static void omap_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
-{
- /* not supported.. at least not yet */
-}
-
static void omap_crtc_destroy(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -198,7 +192,6 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
- .gamma_set = omap_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = omap_crtc_page_flip_locked,
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 0d2acca376ca..4beab9447ceb 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -58,7 +58,7 @@ static void omap_fb_output_poll_changed(struct drm_device *dev)
}
}
-static struct drm_mode_config_funcs omap_mode_config_funcs = {
+static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.output_poll_changed = omap_fb_output_poll_changed,
};
@@ -726,7 +726,7 @@ static void dev_irq_uninstall(struct drm_device *dev)
DBG("irq_uninstall: dev=%p", dev);
}
-static struct vm_operations_struct omap_gem_vm_ops = {
+static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 11acd4c35ed2..8c6ed3b0c6f6 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -208,7 +208,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
*/
ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
if (ret) {
- dev_err(dev->dev, "could not map (paddr)!\n");
+ dev_err(dev->dev,
+ "could not map (paddr)! Skipping framebuffer alloc\n");
ret = -ENOMEM;
goto fail;
}
@@ -388,8 +389,11 @@ void omap_fbdev_free(struct drm_device *dev)
fbi = helper->fbdev;
- unregister_framebuffer(fbi);
- framebuffer_release(fbi);
+ /* only cleanup framebuffer if it is present */
+ if (fbi) {
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+ }
drm_fb_helper_fini(helper);
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 4e7ef0e6b79c..d46764b5aaba 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
return oid;
}
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
/* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored)
}
static struct frontswap_ops zcache_frontswap_ops = {
- .put_page = zcache_frontswap_put_page,
- .get_page = zcache_frontswap_get_page,
+ .store = zcache_frontswap_store,
+ .load = zcache_frontswap_load,
.invalidate_page = zcache_frontswap_flush_page,
.invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bd18e2d0513..69f616c6964e 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
/* - */
{USB_DEVICE(0x20F4, 0x646B)},
{USB_DEVICE(0x083A, 0xC512)},
+ {USB_DEVICE(0x25D4, 0x4CA1)},
+ {USB_DEVICE(0x25D4, 0x4CAB)},
/* RTL8191SU */
/* Realtek */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 2734dacacbaf..784c796b9848 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1;
* Swizzling increases objects per swaptype, increasing tmem concurrency
* for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
* Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
+ * frontswap_load(), but has side-effects. Hence using 8.
*/
#define SWIZ_BITS 8
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
return oid;
}
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
/* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored)
}
static struct frontswap_ops zcache_frontswap_ops = {
- .put_page = zcache_frontswap_put_page,
- .get_page = zcache_frontswap_get_page,
+ .store = zcache_frontswap_store,
+ .load = zcache_frontswap_load,
.invalidate_page = zcache_frontswap_flush_page,
.invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index b28794b72125..18303686eb58 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -32,5 +32,6 @@ config TCM_PSCSI
source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
source "drivers/target/iscsi/Kconfig"
+source "drivers/target/sbp/Kconfig"
endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 62e54053bcd8..61648d84fbb6 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
obj-$(CONFIG_TCM_FC) += tcm_fc/
obj-$(CONFIG_ISCSI_TARGET) += iscsi/
+obj-$(CONFIG_SBP_TARGET) += sbp/
diff --git a/drivers/target/sbp/Kconfig b/drivers/target/sbp/Kconfig
new file mode 100644
index 000000000000..132da544eafc
--- /dev/null
+++ b/drivers/target/sbp/Kconfig
@@ -0,0 +1,11 @@
+config SBP_TARGET
+ tristate "FireWire SBP-2 fabric module"
+ depends on FIREWIRE && EXPERIMENTAL
+ help
+ Say Y or M here to enable SCSI target functionality over FireWire.
+ This enables you to expose SCSI devices to other nodes on the FireWire
+ bus, for example hard disks. Similar to FireWire Target Disk mode on
+ many Apple computers.
+
+ To compile this driver as a module, say M here: the module will be
+ called sbp-target.
diff --git a/drivers/target/sbp/Makefile b/drivers/target/sbp/Makefile
new file mode 100644
index 000000000000..27747ad054c4
--- /dev/null
+++ b/drivers/target/sbp/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SBP_TARGET) += sbp_target.o
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
new file mode 100644
index 000000000000..7e6136e2ce81
--- /dev/null
+++ b/drivers/target/sbp/sbp_target.c
@@ -0,0 +1,2621 @@
+/*
+ * SBP2 target driver (SCSI over IEEE1394 in target mode)
+ *
+ * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define KMSG_COMPONENT "sbp_target"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <asm/unaligned.h>
+
+#include "sbp_target.h"
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *sbp_fabric_configfs;
+
+/* FireWire address region for management and command block address handlers */
+static const struct fw_address_region sbp_register_region = {
+ .start = CSR_REGISTER_BASE + 0x10000,
+ .end = 0x1000000000000ULL,
+};
+
+static const u32 sbp_unit_directory_template[] = {
+ 0x1200609e, /* unit_specifier_id: NCITS/T10 */
+ 0x13010483, /* unit_sw_version: 1155D Rev 4 */
+ 0x3800609e, /* command_set_specifier_id: NCITS/T10 */
+ 0x390104d8, /* command_set: SPC-2 */
+ 0x3b000000, /* command_set_revision: 0 */
+ 0x3c000001, /* firmware_revision: 1 */
+};
+
+#define SESSION_MAINTENANCE_INTERVAL HZ
+
+static atomic_t login_id = ATOMIC_INIT(0);
+
+static void session_maintenance_work(struct work_struct *);
+static int sbp_run_transaction(struct fw_card *, int, int, int, int,
+ unsigned long long, void *, size_t);
+
+static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
+{
+ int ret;
+ __be32 high, low;
+
+ ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
+ req->node_addr, req->generation, req->speed,
+ (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
+ &high, sizeof(high));
+ if (ret != RCODE_COMPLETE)
+ return ret;
+
+ ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
+ req->node_addr, req->generation, req->speed,
+ (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
+ &low, sizeof(low));
+ if (ret != RCODE_COMPLETE)
+ return ret;
+
+ *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
+
+ return RCODE_COMPLETE;
+}
+
+static struct sbp_session *sbp_session_find_by_guid(
+ struct sbp_tpg *tpg, u64 guid)
+{
+ struct se_session *se_sess;
+ struct sbp_session *sess, *found = NULL;
+
+ spin_lock_bh(&tpg->se_tpg.session_lock);
+ list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+ sess = se_sess->fabric_sess_ptr;
+ if (sess->guid == guid)
+ found = sess;
+ }
+ spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+ return found;
+}
+
+static struct sbp_login_descriptor *sbp_login_find_by_lun(
+ struct sbp_session *session, struct se_lun *lun)
+{
+ struct sbp_login_descriptor *login, *found = NULL;
+
+ spin_lock_bh(&session->lock);
+ list_for_each_entry(login, &session->login_list, link) {
+ if (login->lun == lun)
+ found = login;
+ }
+ spin_unlock_bh(&session->lock);
+
+ return found;
+}
+
+static int sbp_login_count_all_by_lun(
+ struct sbp_tpg *tpg,
+ struct se_lun *lun,
+ int exclusive)
+{
+ struct se_session *se_sess;
+ struct sbp_session *sess;
+ struct sbp_login_descriptor *login;
+ int count = 0;
+
+ spin_lock_bh(&tpg->se_tpg.session_lock);
+ list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+ sess = se_sess->fabric_sess_ptr;
+
+ spin_lock_bh(&sess->lock);
+ list_for_each_entry(login, &sess->login_list, link) {
+ if (login->lun != lun)
+ continue;
+
+ if (!exclusive || login->exclusive)
+ count++;
+ }
+ spin_unlock_bh(&sess->lock);
+ }
+ spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+ return count;
+}
+
+static struct sbp_login_descriptor *sbp_login_find_by_id(
+ struct sbp_tpg *tpg, int login_id)
+{
+ struct se_session *se_sess;
+ struct sbp_session *sess;
+ struct sbp_login_descriptor *login, *found = NULL;
+
+ spin_lock_bh(&tpg->se_tpg.session_lock);
+ list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
+ sess = se_sess->fabric_sess_ptr;
+
+ spin_lock_bh(&sess->lock);
+ list_for_each_entry(login, &sess->login_list, link) {
+ if (login->login_id == login_id)
+ found = login;
+ }
+ spin_unlock_bh(&sess->lock);
+ }
+ spin_unlock_bh(&tpg->se_tpg.session_lock);
+
+ return found;
+}
+
+static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
+{
+ struct se_portal_group *se_tpg = &tpg->se_tpg;
+ struct se_lun *se_lun;
+
+ if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
+ return ERR_PTR(-EINVAL);
+
+ spin_lock(&se_tpg->tpg_lun_lock);
+ se_lun = se_tpg->tpg_lun_list[lun];
+
+ if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+ se_lun = ERR_PTR(-ENODEV);
+
+ spin_unlock(&se_tpg->tpg_lun_lock);
+
+ return se_lun;
+}
+
+static struct sbp_session *sbp_session_create(
+ struct sbp_tpg *tpg,
+ u64 guid)
+{
+ struct sbp_session *sess;
+ int ret;
+ char guid_str[17];
+ struct se_node_acl *se_nacl;
+
+ sess = kmalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ pr_err("failed to allocate session descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sess->se_sess = transport_init_session();
+ if (IS_ERR(sess->se_sess)) {
+ pr_err("failed to init se_session\n");
+
+ ret = PTR_ERR(sess->se_sess);
+ kfree(sess);
+ return ERR_PTR(ret);
+ }
+
+ snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
+
+ se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
+ if (!se_nacl) {
+ pr_warn("Node ACL not found for %s\n", guid_str);
+
+ transport_free_session(sess->se_sess);
+ kfree(sess);
+
+ return ERR_PTR(-EPERM);
+ }
+
+ sess->se_sess->se_node_acl = se_nacl;
+
+ spin_lock_init(&sess->lock);
+ INIT_LIST_HEAD(&sess->login_list);
+ INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+
+ sess->guid = guid;
+
+ transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
+
+ return sess;
+}
+
+static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
+{
+ spin_lock_bh(&sess->lock);
+ if (!list_empty(&sess->login_list)) {
+ spin_unlock_bh(&sess->lock);
+ return;
+ }
+ spin_unlock_bh(&sess->lock);
+
+ if (cancel_work)
+ cancel_delayed_work_sync(&sess->maint_work);
+
+ transport_deregister_session_configfs(sess->se_sess);
+ transport_deregister_session(sess->se_sess);
+
+ if (sess->card)
+ fw_card_put(sess->card);
+
+ kfree(sess);
+}
+
+static void sbp_target_agent_unregister(struct sbp_target_agent *);
+
+static void sbp_login_release(struct sbp_login_descriptor *login,
+ bool cancel_work)
+{
+ struct sbp_session *sess = login->sess;
+
+ /* FIXME: abort/wait on tasks */
+
+ sbp_target_agent_unregister(login->tgt_agt);
+
+ if (sess) {
+ spin_lock_bh(&sess->lock);
+ list_del(&login->link);
+ spin_unlock_bh(&sess->lock);
+
+ sbp_session_release(sess, cancel_work);
+ }
+
+ kfree(login);
+}
+
+static struct sbp_target_agent *sbp_target_agent_register(
+ struct sbp_login_descriptor *);
+
+static void sbp_management_request_login(
+ struct sbp_management_agent *agent, struct sbp_management_request *req,
+ int *status_data_size)
+{
+ struct sbp_tport *tport = agent->tport;
+ struct sbp_tpg *tpg = tport->tpg;
+ struct se_lun *se_lun;
+ int ret;
+ u64 guid;
+ struct sbp_session *sess;
+ struct sbp_login_descriptor *login;
+ struct sbp_login_response_block *response;
+ int login_response_len;
+
+ se_lun = sbp_get_lun_from_tpg(tpg,
+ LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
+ if (IS_ERR(se_lun)) {
+ pr_notice("login to unknown LUN: %d\n",
+ LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
+ return;
+ }
+
+ ret = read_peer_guid(&guid, req);
+ if (ret != RCODE_COMPLETE) {
+ pr_warn("failed to read peer GUID: %d\n", ret);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ return;
+ }
+
+ pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
+ se_lun->unpacked_lun, guid);
+
+ sess = sbp_session_find_by_guid(tpg, guid);
+ if (sess) {
+ login = sbp_login_find_by_lun(sess, se_lun);
+ if (login) {
+ pr_notice("initiator already logged-in\n");
+
+ /*
+ * SBP-2 R4 says we should return access denied, but
+ * that can confuse initiators. Instead we need to
+ * treat this like a reconnect, but send the login
+ * response block like a fresh login.
+ *
+ * This is required particularly in the case of Apple
+ * devices booting off the FireWire target, where
+ * the firmware has an active login to the target. When
+ * the OS takes control of the session it issues its own
+ * LOGIN rather than a RECONNECT. To avoid the machine
+ * waiting until the reconnect_hold expires, we can skip
+ * the ACCESS_DENIED errors to speed things up.
+ */
+
+ goto already_logged_in;
+ }
+ }
+
+ /*
+ * check exclusive bit in login request
+ * reject with access_denied if any logins present
+ */
+ if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
+ sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
+ pr_warn("refusing exclusive login with other active logins\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ /*
+ * check exclusive bit in any existing login descriptor
+ * reject with access_denied if any exclusive logins present
+ */
+ if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
+ pr_warn("refusing login while another exclusive login present\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ /*
+ * check we haven't exceeded the number of allowed logins
+ * reject with resources_unavailable if we have
+ */
+ if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
+ tport->max_logins_per_lun) {
+ pr_warn("max number of logins reached\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+ return;
+ }
+
+ if (!sess) {
+ sess = sbp_session_create(tpg, guid);
+ if (IS_ERR(sess)) {
+ switch (PTR_ERR(sess)) {
+ case -EPERM:
+ ret = SBP_STATUS_ACCESS_DENIED;
+ break;
+ default:
+ ret = SBP_STATUS_RESOURCES_UNAVAIL;
+ break;
+ }
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(ret));
+ return;
+ }
+
+ sess->node_id = req->node_addr;
+ sess->card = fw_card_get(req->card);
+ sess->generation = req->generation;
+ sess->speed = req->speed;
+
+ schedule_delayed_work(&sess->maint_work,
+ SESSION_MAINTENANCE_INTERVAL);
+ }
+
+ /* only take the latest reconnect_hold into account */
+ sess->reconnect_hold = min(
+ 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
+ tport->max_reconnect_timeout) - 1;
+
+ login = kmalloc(sizeof(*login), GFP_KERNEL);
+ if (!login) {
+ pr_err("failed to allocate login descriptor\n");
+
+ sbp_session_release(sess, true);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+ return;
+ }
+
+ login->sess = sess;
+ login->lun = se_lun;
+ login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
+ login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
+ login->login_id = atomic_inc_return(&login_id);
+
+ login->tgt_agt = sbp_target_agent_register(login);
+ if (IS_ERR(login->tgt_agt)) {
+ ret = PTR_ERR(login->tgt_agt);
+ pr_err("failed to map command block handler: %d\n", ret);
+
+ sbp_session_release(sess, true);
+ kfree(login);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+ return;
+ }
+
+ spin_lock_bh(&sess->lock);
+ list_add_tail(&login->link, &sess->login_list);
+ spin_unlock_bh(&sess->lock);
+
+already_logged_in:
+ response = kzalloc(sizeof(*response), GFP_KERNEL);
+ if (!response) {
+ pr_err("failed to allocate login response block\n");
+
+ sbp_login_release(login, true);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
+ return;
+ }
+
+ login_response_len = clamp_val(
+ LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
+ 12, sizeof(*response));
+ response->misc = cpu_to_be32(
+ ((login_response_len & 0xffff) << 16) |
+ (login->login_id & 0xffff));
+ response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
+ addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
+ &response->command_block_agent);
+
+ ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
+ sess->node_id, sess->generation, sess->speed,
+ sbp2_pointer_to_addr(&req->orb.ptr2), response,
+ login_response_len);
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("failed to write login response block: %x\n", ret);
+
+ kfree(response);
+ sbp_login_release(login, true);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ return;
+ }
+
+ kfree(response);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static void sbp_management_request_query_logins(
+ struct sbp_management_agent *agent, struct sbp_management_request *req,
+ int *status_data_size)
+{
+ pr_notice("QUERY LOGINS not implemented\n");
+ /* FIXME: implement */
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+}
+
+static void sbp_management_request_reconnect(
+ struct sbp_management_agent *agent, struct sbp_management_request *req,
+ int *status_data_size)
+{
+ struct sbp_tport *tport = agent->tport;
+ struct sbp_tpg *tpg = tport->tpg;
+ int ret;
+ u64 guid;
+ struct sbp_login_descriptor *login;
+
+ ret = read_peer_guid(&guid, req);
+ if (ret != RCODE_COMPLETE) {
+ pr_warn("failed to read peer GUID: %d\n", ret);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ return;
+ }
+
+ pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
+
+ login = sbp_login_find_by_id(tpg,
+ RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
+
+ if (!login) {
+ pr_err("mgt_agent RECONNECT unknown login ID\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ if (login->sess->guid != guid) {
+ pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ spin_lock_bh(&login->sess->lock);
+ if (login->sess->card)
+ fw_card_put(login->sess->card);
+
+ /* update the node details */
+ login->sess->generation = req->generation;
+ login->sess->node_id = req->node_addr;
+ login->sess->card = fw_card_get(req->card);
+ login->sess->speed = req->speed;
+ spin_unlock_bh(&login->sess->lock);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static void sbp_management_request_logout(
+ struct sbp_management_agent *agent, struct sbp_management_request *req,
+ int *status_data_size)
+{
+ struct sbp_tport *tport = agent->tport;
+ struct sbp_tpg *tpg = tport->tpg;
+ int id;
+ struct sbp_login_descriptor *login;
+
+ id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+
+ login = sbp_login_find_by_id(tpg, id);
+ if (!login) {
+ pr_warn("cannot find login: %d\n", id);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
+ return;
+ }
+
+ pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
+ login->lun->unpacked_lun, login->login_id);
+
+ if (req->node_addr != login->sess->node_id) {
+ pr_warn("logout from different node ID\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
+ return;
+ }
+
+ sbp_login_release(login, true);
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static void session_check_for_reset(struct sbp_session *sess)
+{
+ bool card_valid = false;
+
+ spin_lock_bh(&sess->lock);
+
+ if (sess->card) {
+ spin_lock_irq(&sess->card->lock);
+ card_valid = (sess->card->local_node != NULL);
+ spin_unlock_irq(&sess->card->lock);
+
+ if (!card_valid) {
+ fw_card_put(sess->card);
+ sess->card = NULL;
+ }
+ }
+
+ if (!card_valid || (sess->generation != sess->card->generation)) {
+ pr_info("Waiting for reconnect from node: %016llx\n",
+ sess->guid);
+
+ sess->node_id = -1;
+ sess->reconnect_expires = get_jiffies_64() +
+ ((sess->reconnect_hold + 1) * HZ);
+ }
+
+ spin_unlock_bh(&sess->lock);
+}
+
+static void session_reconnect_expired(struct sbp_session *sess)
+{
+ struct sbp_login_descriptor *login, *temp;
+ LIST_HEAD(login_list);
+
+ pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
+
+ spin_lock_bh(&sess->lock);
+ list_for_each_entry_safe(login, temp, &sess->login_list, link) {
+ login->sess = NULL;
+ list_del(&login->link);
+ list_add_tail(&login->link, &login_list);
+ }
+ spin_unlock_bh(&sess->lock);
+
+ list_for_each_entry_safe(login, temp, &login_list, link) {
+ list_del(&login->link);
+ sbp_login_release(login, false);
+ }
+
+ sbp_session_release(sess, false);
+}
+
+static void session_maintenance_work(struct work_struct *work)
+{
+ struct sbp_session *sess = container_of(work, struct sbp_session,
+ maint_work.work);
+
+ /* could be called while tearing down the session */
+ spin_lock_bh(&sess->lock);
+ if (list_empty(&sess->login_list)) {
+ spin_unlock_bh(&sess->lock);
+ return;
+ }
+ spin_unlock_bh(&sess->lock);
+
+ if (sess->node_id != -1) {
+ /* check for bus reset and make node_id invalid */
+ session_check_for_reset(sess);
+
+ schedule_delayed_work(&sess->maint_work,
+ SESSION_MAINTENANCE_INTERVAL);
+ } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
+ /* still waiting for reconnect */
+ schedule_delayed_work(&sess->maint_work,
+ SESSION_MAINTENANCE_INTERVAL);
+ } else {
+ /* reconnect timeout has expired */
+ session_reconnect_expired(sess);
+ }
+}
+
+static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
+ struct sbp_target_agent *agent)
+{
+ __be32 state;
+
+ switch (tcode) {
+ case TCODE_READ_QUADLET_REQUEST:
+ pr_debug("tgt_agent AGENT_STATE READ\n");
+
+ spin_lock_bh(&agent->lock);
+ state = cpu_to_be32(agent->state);
+ spin_unlock_bh(&agent->lock);
+ memcpy(data, &state, sizeof(state));
+
+ return RCODE_COMPLETE;
+
+ case TCODE_WRITE_QUADLET_REQUEST:
+ /* ignored */
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
+static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
+ struct sbp_target_agent *agent)
+{
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ pr_debug("tgt_agent AGENT_RESET\n");
+ spin_lock_bh(&agent->lock);
+ agent->state = AGENT_STATE_RESET;
+ spin_unlock_bh(&agent->lock);
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
+static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
+ struct sbp_target_agent *agent)
+{
+ struct sbp2_pointer *ptr = data;
+
+ switch (tcode) {
+ case TCODE_WRITE_BLOCK_REQUEST:
+ spin_lock_bh(&agent->lock);
+ if (agent->state != AGENT_STATE_SUSPENDED &&
+ agent->state != AGENT_STATE_RESET) {
+ spin_unlock_bh(&agent->lock);
+ pr_notice("Ignoring ORB_POINTER write while active.\n");
+ return RCODE_CONFLICT_ERROR;
+ }
+ agent->state = AGENT_STATE_ACTIVE;
+ spin_unlock_bh(&agent->lock);
+
+ agent->orb_pointer = sbp2_pointer_to_addr(ptr);
+ agent->doorbell = false;
+
+ pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
+ agent->orb_pointer);
+
+ queue_work(system_unbound_wq, &agent->work);
+
+ return RCODE_COMPLETE;
+
+ case TCODE_READ_BLOCK_REQUEST:
+ pr_debug("tgt_agent ORB_POINTER READ\n");
+ spin_lock_bh(&agent->lock);
+ addr_to_sbp2_pointer(agent->orb_pointer, ptr);
+ spin_unlock_bh(&agent->lock);
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
+static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
+ struct sbp_target_agent *agent)
+{
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ spin_lock_bh(&agent->lock);
+ if (agent->state != AGENT_STATE_SUSPENDED) {
+ spin_unlock_bh(&agent->lock);
+ pr_debug("Ignoring DOORBELL while active.\n");
+ return RCODE_CONFLICT_ERROR;
+ }
+ agent->state = AGENT_STATE_ACTIVE;
+ spin_unlock_bh(&agent->lock);
+
+ agent->doorbell = true;
+
+ pr_debug("tgt_agent DOORBELL\n");
+
+ queue_work(system_unbound_wq, &agent->work);
+
+ return RCODE_COMPLETE;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
+static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
+ int tcode, void *data, struct sbp_target_agent *agent)
+{
+ switch (tcode) {
+ case TCODE_WRITE_QUADLET_REQUEST:
+ pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
+ /* ignored as we don't send unsolicited status */
+ return RCODE_COMPLETE;
+
+ case TCODE_READ_QUADLET_REQUEST:
+ return RCODE_COMPLETE;
+
+ default:
+ return RCODE_TYPE_ERROR;
+ }
+}
+
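+/*
+ * Command block agent register map (offsets from the start of the
+ * handler's address range): 0x00 AGENT_STATE, 0x04 AGENT_RESET,
+ * 0x08 ORB_POINTER (8 bytes), 0x10 DOORBELL,
+ * 0x14 UNSOLICITED_STATUS_ENABLE.
+ */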
+static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source, int generation,
+ unsigned long long offset, void *data, size_t length,
+ void *callback_data)
+{
+ struct sbp_target_agent *agent = callback_data;
+ struct sbp_session *sess = agent->login->sess;
+ int sess_gen, sess_node, rcode;
+
+ spin_lock_bh(&sess->lock);
+ sess_gen = sess->generation;
+ sess_node = sess->node_id;
+ spin_unlock_bh(&sess->lock);
+
+ if (generation != sess_gen) {
+ pr_notice("ignoring request with wrong generation\n");
+ rcode = RCODE_TYPE_ERROR;
+ goto out;
+ }
+
+ if (source != sess_node) {
+ pr_notice("ignoring request from foreign node (%x != %x)\n",
+ source, sess_node);
+ rcode = RCODE_TYPE_ERROR;
+ goto out;
+ }
+
+ /* turn offset into the offset from the start of the block */
+ offset -= agent->handler.offset;
+
+ if (offset == 0x00 && length == 4) {
+ /* AGENT_STATE */
+ rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
+ } else if (offset == 0x04 && length == 4) {
+ /* AGENT_RESET */
+ rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
+ } else if (offset == 0x08 && length == 8) {
+ /* ORB_POINTER */
+ rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
+ } else if (offset == 0x10 && length == 4) {
+ /* DOORBELL */
+ rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
+ } else if (offset == 0x14 && length == 4) {
+ /* UNSOLICITED_STATUS_ENABLE */
+ rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
+ data, agent);
+ } else {
+ rcode = RCODE_ADDRESS_ERROR;
+ }
+
+out:
+ fw_send_response(card, request, rcode);
+}
+
+static void sbp_handle_command(struct sbp_target_request *);
+static int sbp_send_status(struct sbp_target_request *);
+static void sbp_free_request(struct sbp_target_request *);
+
+static void tgt_agent_process_work(struct work_struct *work)
+{
+ struct sbp_target_request *req =
+ container_of(work, struct sbp_target_request, work);
+
+ pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
+ req->orb_pointer,
+ sbp2_pointer_to_addr(&req->orb.next_orb),
+ sbp2_pointer_to_addr(&req->orb.data_descriptor),
+ be32_to_cpu(req->orb.misc));
+
+ if (req->orb_pointer >> 32)
+ pr_debug("ORB with high bits set\n");
+
+ switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
+ case 0: /* Format specified by this standard */
+ sbp_handle_command(req);
+ return;
+ case 1: /* Reserved for future standardization */
+ case 2: /* Vendor-dependent */
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(
+ SBP_STATUS_REQ_TYPE_NOTSUPP));
+ sbp_send_status(req);
+ sbp_free_request(req);
+ return;
+ case 3: /* Dummy ORB */
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(
+ SBP_STATUS_DUMMY_ORB_COMPLETE));
+ sbp_send_status(req);
+ sbp_free_request(req);
+ return;
+ default:
+ BUG();
+ }
+}
+
+/* used to double-check we haven't been issued an AGENT_RESET */
+static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
+{
+ bool active;
+
+ spin_lock_bh(&agent->lock);
+ active = (agent->state == AGENT_STATE_ACTIVE);
+ spin_unlock_bh(&agent->lock);
+
+ return active;
+}
+
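+/*
+ * Fetch ORBs from the initiator, following the next_ORB chain until it
+ * ends or the agent leaves the ACTIVE state.  When the fetch was kicked
+ * off by a doorbell, the first ORB has already been processed, so only
+ * its next_ORB field is examined.
+ */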
+static void tgt_agent_fetch_work(struct work_struct *work)
+{
+ struct sbp_target_agent *agent =
+ container_of(work, struct sbp_target_agent, work);
+ struct sbp_session *sess = agent->login->sess;
+ struct sbp_target_request *req;
+ int ret;
+ bool doorbell = agent->doorbell;
+ u64 next_orb = agent->orb_pointer;
+
+ while (next_orb && tgt_agent_check_active(agent)) {
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ spin_lock_bh(&agent->lock);
+ agent->state = AGENT_STATE_DEAD;
+ spin_unlock_bh(&agent->lock);
+ return;
+ }
+
+ req->login = agent->login;
+ req->orb_pointer = next_orb;
+
+ req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
+ req->orb_pointer >> 32));
+ req->status.orb_low = cpu_to_be32(
+ req->orb_pointer & 0xfffffffc);
+
+ /* read in the ORB */
+ ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
+ sess->node_id, sess->generation, sess->speed,
+ req->orb_pointer, &req->orb, sizeof(req->orb));
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("tgt_orb fetch failed: %x\n", ret);
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_SRC(
+ STATUS_SRC_ORB_FINISHED) |
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(1) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(
+ SBP_STATUS_UNSPECIFIED_ERROR));
+ spin_lock_bh(&agent->lock);
+ agent->state = AGENT_STATE_DEAD;
+ spin_unlock_bh(&agent->lock);
+
+ sbp_send_status(req);
+ sbp_free_request(req);
+ return;
+ }
+
+ /* check the next_ORB field */
+ if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
+ next_orb = 0;
+ req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
+ STATUS_SRC_ORB_FINISHED));
+ } else {
+ next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
+ req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
+ STATUS_SRC_ORB_CONTINUING));
+ }
+
+ if (tgt_agent_check_active(agent) && !doorbell) {
+ INIT_WORK(&req->work, tgt_agent_process_work);
+ queue_work(system_unbound_wq, &req->work);
+ } else {
+ /* don't process this request, just check next_ORB */
+ sbp_free_request(req);
+ }
+
+ spin_lock_bh(&agent->lock);
+ doorbell = agent->doorbell = false;
+
+ /* check if we should carry on processing */
+ if (next_orb)
+ agent->orb_pointer = next_orb;
+ else
+ agent->state = AGENT_STATE_SUSPENDED;
+
+ spin_unlock_bh(&agent->lock);
+ }
+}
+
+static struct sbp_target_agent *sbp_target_agent_register(
+ struct sbp_login_descriptor *login)
+{
+ struct sbp_target_agent *agent;
+ int ret;
+
+ agent = kmalloc(sizeof(*agent), GFP_KERNEL);
+ if (!agent)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&agent->lock);
+
+ agent->handler.length = 0x20;
+ agent->handler.address_callback = tgt_agent_rw;
+ agent->handler.callback_data = agent;
+
+ agent->login = login;
+ agent->state = AGENT_STATE_RESET;
+ INIT_WORK(&agent->work, tgt_agent_fetch_work);
+ agent->orb_pointer = 0;
+ agent->doorbell = false;
+
+ ret = fw_core_add_address_handler(&agent->handler,
+ &sbp_register_region);
+ if (ret < 0) {
+ kfree(agent);
+ return ERR_PTR(ret);
+ }
+
+ return agent;
+}
+
+static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
+{
+ fw_core_remove_address_handler(&agent->handler);
+ cancel_work_sync(&agent->work);
+ kfree(agent);
+}
+
+/*
+ * Simple wrapper around fw_run_transaction that retries the transaction
+ * several times in case of failure, with an increasing delay between
+ * attempts.
+ */
+static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
+ int generation, int speed, unsigned long long offset,
+ void *payload, size_t length)
+{
+ int attempt, ret, delay;
+
+ for (attempt = 1; attempt <= 5; attempt++) {
+ ret = fw_run_transaction(card, tcode, destination_id,
+ generation, speed, offset, payload, length);
+
+ switch (ret) {
+ case RCODE_COMPLETE:
+ case RCODE_TYPE_ERROR:
+ case RCODE_ADDRESS_ERROR:
+ case RCODE_GENERATION:
+ return ret;
+
+ default:
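+ /* transient failure: back off (5, 20, 45, ... microseconds) and retry */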
+ delay = 5 * attempt * attempt;
+ usleep_range(delay, delay * 2);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Wrapper around sbp_run_transaction that gets the card, destination,
+ * generation and speed out of the request's session.
+ */
+static int sbp_run_request_transaction(struct sbp_target_request *req,
+ int tcode, unsigned long long offset, void *payload,
+ size_t length)
+{
+ struct sbp_login_descriptor *login = req->login;
+ struct sbp_session *sess = login->sess;
+ struct fw_card *card;
+ int node_id, generation, speed, ret;
+
+ spin_lock_bh(&sess->lock);
+ card = fw_card_get(sess->card);
+ node_id = sess->node_id;
+ generation = sess->generation;
+ speed = sess->speed;
+ spin_unlock_bh(&sess->lock);
+
+ ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
+ offset, payload, length);
+
+ fw_card_put(card);
+
+ return ret;
+}
+
+static int sbp_fetch_command(struct sbp_target_request *req)
+{
+ int ret, cmd_len, copy_len;
+
+ cmd_len = scsi_command_size(req->orb.command_block);
+
+ req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
+ if (!req->cmd_buf)
+ return -ENOMEM;
+
+ memcpy(req->cmd_buf, req->orb.command_block,
+ min_t(int, cmd_len, sizeof(req->orb.command_block)));
+
+ if (cmd_len > sizeof(req->orb.command_block)) {
+ pr_debug("sbp_fetch_command: filling in long command\n");
+ copy_len = cmd_len - sizeof(req->orb.command_block);
+
+ ret = sbp_run_request_transaction(req,
+ TCODE_READ_BLOCK_REQUEST,
+ req->orb_pointer + sizeof(req->orb),
+ req->cmd_buf + sizeof(req->orb.command_block),
+ copy_len);
+ if (ret != RCODE_COMPLETE)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sbp_fetch_page_table(struct sbp_target_request *req)
+{
+ int pg_tbl_sz, ret;
+ struct sbp_page_table_entry *pg_tbl;
+
+ if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
+ return 0;
+
+ pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
+ sizeof(struct sbp_page_table_entry);
+
+ pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+
+ ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
+ sbp2_pointer_to_addr(&req->orb.data_descriptor),
+ pg_tbl, pg_tbl_sz);
+ if (ret != RCODE_COMPLETE) {
+ kfree(pg_tbl);
+ return -EIO;
+ }
+
+ req->pg_tbl = pg_tbl;
+ return 0;
+}
+
+static void sbp_calc_data_length_direction(struct sbp_target_request *req,
+ u32 *data_len, enum dma_data_direction *data_dir)
+{
+ int data_size, direction, idx;
+
+ data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
+ direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
+
+ if (!data_size) {
+ *data_len = 0;
+ *data_dir = DMA_NONE;
+ return;
+ }
+
+ *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ if (req->pg_tbl) {
+ *data_len = 0;
+ for (idx = 0; idx < data_size; idx++) {
+ *data_len += be16_to_cpu(
+ req->pg_tbl[idx].segment_length);
+ }
+ } else {
+ *data_len = data_size;
+ }
+}
+
+static void sbp_handle_command(struct sbp_target_request *req)
+{
+ struct sbp_login_descriptor *login = req->login;
+ struct sbp_session *sess = login->sess;
+ int ret, unpacked_lun;
+ u32 data_length;
+ enum dma_data_direction data_dir;
+
+ ret = sbp_fetch_command(req);
+ if (ret) {
+ pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ sbp_free_request(req);
+ return;
+ }
+
+ ret = sbp_fetch_page_table(req);
+ if (ret) {
+ pr_debug("sbp_handle_command: fetch page table failed: %d\n",
+ ret);
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ sbp_free_request(req);
+ return;
+ }
+
+ unpacked_lun = req->login->lun->unpacked_lun;
+ sbp_calc_data_length_direction(req, &data_length, &data_dir);
+
+ pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
+ req->orb_pointer, unpacked_lun, data_length, data_dir);
+
+ target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+ req->sense_buf, unpacked_lun, data_length,
+ MSG_SIMPLE_TAG, data_dir, 0);
+}
+
+/*
+ * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
+ * DMA_FROM_DEVICE = write to initiator (SCSI READ)
+ */
+static int sbp_rw_data(struct sbp_target_request *req)
+{
+ struct sbp_session *sess = req->login->sess;
+ int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
+ generation, num_pte, length, tfr_length,
+ rcode = RCODE_COMPLETE;
+ struct sbp_page_table_entry *pte;
+ unsigned long long offset;
+ struct fw_card *card;
+ struct sg_mapping_iter iter;
+
+ if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
+ tcode = TCODE_WRITE_BLOCK_REQUEST;
+ sg_miter_flags = SG_MITER_FROM_SG;
+ } else {
+ tcode = TCODE_READ_BLOCK_REQUEST;
+ sg_miter_flags = SG_MITER_TO_SG;
+ }
+
+ max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
+ speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
+
+ pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
+ if (pg_size) {
+ pr_err("sbp_run_transaction: page size ignored\n");
+ pg_size = 0x100 << pg_size;
+ }
+
+ spin_lock_bh(&sess->lock);
+ card = fw_card_get(sess->card);
+ node_id = sess->node_id;
+ generation = sess->generation;
+ spin_unlock_bh(&sess->lock);
+
+ if (req->pg_tbl) {
+ pte = req->pg_tbl;
+ num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
+
+ offset = 0;
+ length = 0;
+ } else {
+ pte = NULL;
+ num_pte = 0;
+
+ offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
+ length = req->se_cmd.data_length;
+ }
+
+ sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
+ sg_miter_flags);
+
+ while (length || num_pte) {
+ if (!length) {
+ offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
+ be32_to_cpu(pte->segment_base_lo);
+ length = be16_to_cpu(pte->segment_length);
+
+ pte++;
+ num_pte--;
+ }
+
+ sg_miter_next(&iter);
+
+ tfr_length = min3(length, max_payload, (int)iter.length);
+
+ /* FIXME: take page_size into account */
+
+ rcode = sbp_run_transaction(card, tcode, node_id,
+ generation, speed,
+ offset, iter.addr, tfr_length);
+
+ if (rcode != RCODE_COMPLETE)
+ break;
+
+ length -= tfr_length;
+ offset += tfr_length;
+ iter.consumed = tfr_length;
+ }
+
+ sg_miter_stop(&iter);
+ fw_card_put(card);
+
+ if (rcode == RCODE_COMPLETE) {
+ WARN_ON(length != 0);
+ return 0;
+ } else {
+ return -EIO;
+ }
+}
+
+static int sbp_send_status(struct sbp_target_request *req)
+{
+ int ret, length;
+ struct sbp_login_descriptor *login = req->login;
+
+ length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
+
+ ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+ login->status_fifo_addr, &req->status, length);
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
+ return -EIO;
+ }
+
+ pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
+ req->orb_pointer);
+
+ return 0;
+}
+
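+/*
+ * Repack fixed-format SCSI sense data and the SCSI status byte into the
+ * sense fields of the SBP status block.
+ */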
+static void sbp_sense_mangle(struct sbp_target_request *req)
+{
+ struct se_cmd *se_cmd = &req->se_cmd;
+ u8 *sense = req->sense_buf;
+ u8 *status = req->status.data;
+
+ WARN_ON(se_cmd->scsi_sense_length < 18);
+
+ switch (sense[0] & 0x7f) { /* sfmt */
+ case 0x70: /* current, fixed */
+ status[0] = 0 << 6;
+ break;
+ case 0x71: /* deferred, fixed */
+ status[0] = 1 << 6;
+ break;
+ case 0x72: /* current, descriptor */
+ case 0x73: /* deferred, descriptor */
+ default:
+ /*
+ * TODO: SBP-3 specifies what we should do with descriptor
+ * format sense data
+ */
+ pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
+ sense[0]);
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
+ return;
+ }
+
+ status[0] |= se_cmd->scsi_status & 0x3f;/* status */
+ status[1] =
+ (sense[0] & 0x80) | /* valid */
+ ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
+ (sense[2] & 0x0f); /* sense_key */
+ status[2] = se_cmd->scsi_asc; /* sense_code */
+ status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
+
+ /* information */
+ status[4] = sense[3];
+ status[5] = sense[4];
+ status[6] = sense[5];
+ status[7] = sense[6];
+
+ /* CDB-dependent */
+ status[8] = sense[8];
+ status[9] = sense[9];
+ status[10] = sense[10];
+ status[11] = sense[11];
+
+ /* fru */
+ status[12] = sense[14];
+
+ /* sense_key-dependent */
+ status[13] = sense[15];
+ status[14] = sense[16];
+ status[15] = sense[17];
+
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(5) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+}
+
+static int sbp_send_sense(struct sbp_target_request *req)
+{
+ struct se_cmd *se_cmd = &req->se_cmd;
+
+ if (se_cmd->scsi_sense_length) {
+ sbp_sense_mangle(req);
+ } else {
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
+ }
+
+ return sbp_send_status(req);
+}
+
+static void sbp_free_request(struct sbp_target_request *req)
+{
+ kfree(req->pg_tbl);
+ kfree(req->cmd_buf);
+ kfree(req);
+}
+
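+/*
+ * Fetch the management ORB written by the initiator, dispatch it to the
+ * matching handler and write the resulting status block back to the
+ * initiator's status FIFO.
+ */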
+static void sbp_mgt_agent_process(struct work_struct *work)
+{
+ struct sbp_management_agent *agent =
+ container_of(work, struct sbp_management_agent, work);
+ struct sbp_management_request *req = agent->request;
+ int ret;
+ int status_data_len = 0;
+
+ /* fetch the ORB from the initiator */
+ ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
+ req->node_addr, req->generation, req->speed,
+ agent->orb_offset, &req->orb, sizeof(req->orb));
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("mgt_orb fetch failed: %x\n", ret);
+ goto out;
+ }
+
+ pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
+ sbp2_pointer_to_addr(&req->orb.ptr1),
+ sbp2_pointer_to_addr(&req->orb.ptr2),
+ be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
+ sbp2_pointer_to_addr(&req->orb.status_fifo));
+
+ if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
+ ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
+ pr_err("mgt_orb bad request\n");
+ goto out;
+ }
+
+ switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
+ case MANAGEMENT_ORB_FUNCTION_LOGIN:
+ sbp_management_request_login(agent, req, &status_data_len);
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
+ sbp_management_request_query_logins(agent, req,
+ &status_data_len);
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_RECONNECT:
+ sbp_management_request_reconnect(agent, req, &status_data_len);
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
+ pr_notice("SET PASSWORD not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_LOGOUT:
+ sbp_management_request_logout(agent, req, &status_data_len);
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
+ pr_notice("ABORT TASK not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
+ pr_notice("ABORT TASK SET not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
+ pr_notice("LOGICAL UNIT RESET not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
+ pr_notice("TARGET RESET not implemented\n");
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+
+ default:
+ pr_notice("unknown management function 0x%x\n",
+ MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
+
+ req->status.status = cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
+
+ break;
+ }
+
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
+ STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
+ STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
+ req->status.orb_low = cpu_to_be32(agent->orb_offset);
+
+ /* write the status block back to the initiator */
+ ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
+ req->node_addr, req->generation, req->speed,
+ sbp2_pointer_to_addr(&req->orb.status_fifo),
+ &req->status, 8 + status_data_len);
+ if (ret != RCODE_COMPLETE) {
+ pr_debug("mgt_orb status write failed: %x\n", ret);
+ goto out;
+ }
+
+out:
+ fw_card_put(req->card);
+ kfree(req);
+
+ spin_lock_bh(&agent->lock);
+ agent->state = MANAGEMENT_AGENT_STATE_IDLE;
+ spin_unlock_bh(&agent->lock);
+}
+
+static void sbp_mgt_agent_rw(struct fw_card *card,
+ struct fw_request *request, int tcode, int destination, int source,
+ int generation, unsigned long long offset, void *data, size_t length,
+ void *callback_data)
+{
+ struct sbp_management_agent *agent = callback_data;
+ struct sbp2_pointer *ptr = data;
+ int rcode = RCODE_ADDRESS_ERROR;
+
+ if (!agent->tport->enable)
+ goto out;
+
+ if ((offset != agent->handler.offset) || (length != 8))
+ goto out;
+
+ if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
+ struct sbp_management_request *req;
+ int prev_state;
+
+ spin_lock_bh(&agent->lock);
+ prev_state = agent->state;
+ agent->state = MANAGEMENT_AGENT_STATE_BUSY;
+ spin_unlock_bh(&agent->lock);
+
+ if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
+ pr_notice("ignoring management request while busy\n");
+ rcode = RCODE_CONFLICT_ERROR;
+ goto out;
+ }
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ rcode = RCODE_CONFLICT_ERROR;
+ goto out;
+ }
+
+ req->card = fw_card_get(card);
+ req->generation = generation;
+ req->node_addr = source;
+ req->speed = fw_get_request_speed(request);
+
+ agent->orb_offset = sbp2_pointer_to_addr(ptr);
+ agent->request = req;
+
+ queue_work(system_unbound_wq, &agent->work);
+ rcode = RCODE_COMPLETE;
+ } else if (tcode == TCODE_READ_BLOCK_REQUEST) {
+ addr_to_sbp2_pointer(agent->orb_offset, ptr);
+ rcode = RCODE_COMPLETE;
+ } else {
+ rcode = RCODE_TYPE_ERROR;
+ }
+
+out:
+ fw_send_response(card, request, rcode);
+}
+
+static struct sbp_management_agent *sbp_management_agent_register(
+ struct sbp_tport *tport)
+{
+ int ret;
+ struct sbp_management_agent *agent;
+
+ agent = kmalloc(sizeof(*agent), GFP_KERNEL);
+ if (!agent)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&agent->lock);
+ agent->tport = tport;
+ agent->handler.length = 0x08;
+ agent->handler.address_callback = sbp_mgt_agent_rw;
+ agent->handler.callback_data = agent;
+ agent->state = MANAGEMENT_AGENT_STATE_IDLE;
+ INIT_WORK(&agent->work, sbp_mgt_agent_process);
+ agent->orb_offset = 0;
+ agent->request = NULL;
+
+ ret = fw_core_add_address_handler(&agent->handler,
+ &sbp_register_region);
+ if (ret < 0) {
+ kfree(agent);
+ return ERR_PTR(ret);
+ }
+
+ return agent;
+}
+
+static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
+{
+ fw_core_remove_address_handler(&agent->handler);
+ cancel_work_sync(&agent->work);
+ kfree(agent);
+}
+
+static int sbp_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int sbp_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *sbp_get_fabric_name(void)
+{
+ return "sbp";
+}
+
+static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+
+ return &tport->tport_name[0];
+}
+
+static u16 sbp_get_tag(struct se_portal_group *se_tpg)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ return tpg->tport_tpgt;
+}
+
+static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+ struct sbp_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
+ if (!nacl) {
+ pr_err("Unable to alocate struct sbp_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void sbp_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct sbp_nacl *nacl =
+ container_of(se_nacl, struct sbp_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void sbp_release_cmd(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+
+ sbp_free_request(req);
+}
+
+static int sbp_shutdown_session(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void sbp_close_session(struct se_session *se_sess)
+{
+ return;
+}
+
+static u32 sbp_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int sbp_write_pending(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+ int ret;
+
+ ret = sbp_rw_data(req);
+ if (ret) {
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(
+ STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(
+ SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ return ret;
+ }
+
+ transport_generic_process_write(se_cmd);
+
+ return 0;
+}
+
+static int sbp_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+
+ /* only used for printk until we do TMRs */
+ return (u32)req->orb_pointer;
+}
+
+static int sbp_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int sbp_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+ int ret;
+
+ ret = sbp_rw_data(req);
+ if (ret) {
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ return ret;
+ }
+
+ return sbp_send_sense(req);
+}
+
+/*
+ * Called after command (no data transfer) or after the write (to device)
+ * operation is completed
+ */
+static int sbp_queue_status(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+
+ return sbp_send_sense(req);
+}
+
+static int sbp_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static u16 sbp_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+ return 0;
+}
+
+static u16 sbp_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static int sbp_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
+
+ transport_generic_free_cmd(&req->se_cmd, 0);
+ return 1;
+}
+
+/*
+ * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
+ */
+static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ /*
+ * Return the IEEE 1394 SCSI Protocol identifier for SBP operations.
+ * This is defined in section 7.5.1 Table 362 in spc4r17.
+ */
+ return SCSI_PROTOCOL_SBP;
+}
+
+static u32 sbp_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ int ret;
+
+ /*
+ * Set PROTOCOL IDENTIFIER to 3h for SBP
+ */
+ buf[0] = SCSI_PROTOCOL_SBP;
+ /*
+ * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
+ * over IEEE 1394
+ */
+ ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
+ if (ret < 0)
+ pr_debug("sbp transport_id: invalid hex string\n");
+
+ /*
+ * The IEEE 1394 TransportID has a hardcoded length of 24 bytes
+ */
+ return 24;
+}
+
+static u32 sbp_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ /*
+ * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
+ * over IEEE 1394
+ *
+ * The SBP TransportID has a hardcoded length of 24 bytes
+ */
+ return 24;
+}
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+static char *sbp_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ /*
+ * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
+ * for initiator ports using SCSI over IEEE 1394.
+ *
+ * The TransportID for an IEEE 1394 initiator port has a fixed size of
+ * 24 bytes, and IEEE 1394 does not have an I_T nexus identifier,
+ * so we return *port_nexus_ptr set to NULL.
+ */
+ *port_nexus_ptr = NULL;
+ *out_tid_len = 24;
+
+ return (char *)&buf[8];
+}
+
+static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
+{
+ int i, count = 0;
+
+ spin_lock(&tpg->tpg_lun_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ struct se_lun *se_lun = tpg->tpg_lun_list[i];
+
+ if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
+ continue;
+
+ count++;
+ }
+ spin_unlock(&tpg->tpg_lun_lock);
+
+ return count;
+}
+
+static int sbp_update_unit_directory(struct sbp_tport *tport)
+{
+ int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
+ u32 *data;
+
+ if (tport->unit_directory.data) {
+ fw_core_remove_descriptor(&tport->unit_directory);
+ kfree(tport->unit_directory.data);
+ tport->unit_directory.data = NULL;
+ }
+
+ if (!tport->enable || !tport->tpg)
+ return 0;
+
+ num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);
+
+ /*
+ * Number of entries in the final unit directory:
+ * - all of those in the template
+ * - management_agent
+ * - unit_characteristics
+ * - reconnect_timeout
+ * - unit unique ID
+ * - one for each LUN
+ *
+ * MUST NOT include leaf or sub-directory entries
+ */
+ num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;
+
+ if (tport->directory_id != -1)
+ num_entries++;
+
+ /* allocate num_entries + 4 for the header and unique ID leaf */
+ data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* directory_length */
+ data[idx++] = num_entries << 16;
+
+ /* directory_id */
+ if (tport->directory_id != -1)
+ data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;
+
+ /* unit directory template */
+ memcpy(&data[idx], sbp_unit_directory_template,
+ sizeof(sbp_unit_directory_template));
+ idx += ARRAY_SIZE(sbp_unit_directory_template);
+
+ /* management_agent */
+ mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
+ data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);
+
+ /* unit_characteristics */
+ data[idx++] = 0x3a000000 |
+ (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
+ SBP_ORB_FETCH_SIZE;
+
+ /* reconnect_timeout */
+ data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);
+
+ /* unit unique ID (leaf is just after LUNs) */
+ data[idx++] = 0x8d000000 | (num_luns + 1);
+
+ spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
+ struct se_device *dev;
+ int type;
+
+ if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
+ continue;
+
+ spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
+
+ dev = se_lun->lun_se_dev;
+ type = dev->transport->get_device_type(dev);
+
+ /* logical_unit_number */
+ data[idx++] = 0x14000000 |
+ ((type << 16) & 0x1f0000) |
+ (se_lun->unpacked_lun & 0xffff);
+
+ spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
+ }
+ spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
+
+ /* unit unique ID leaf */
+ data[idx++] = 2 << 16;
+ data[idx++] = tport->guid >> 32;
+ data[idx++] = tport->guid;
+
+ tport->unit_directory.length = idx;
+ tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
+ tport->unit_directory.data = data;
+
+ ret = fw_core_add_descriptor(&tport->unit_directory);
+ if (ret < 0) {
+ kfree(tport->unit_directory.data);
+ tport->unit_directory.data = NULL;
+ }
+
+ return ret;
+}
+
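+/*
+ * Parse a 16-digit hex EUI-64, e.g. "0123456789abcdef"; when strict,
+ * hex letters must be lower-case.  Returns the number of characters
+ * consumed, or -1 on error.
+ */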
+static ssize_t sbp_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+ const char *cp;
+ char c, nibble;
+ int pos = 0, err;
+
+ *wwn = 0;
+ for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
+ c = *cp;
+ if (c == '\n' && cp[1] == '\0')
+ continue;
+ if (c == '\0') {
+ err = 2;
+ if (pos != 16)
+ goto fail;
+ return cp - name;
+ }
+ err = 3;
+ if (isdigit(c))
+ nibble = c - '0';
+ else if (isxdigit(c) && (islower(c) || !strict))
+ nibble = tolower(c) - 'a' + 10;
+ else
+ goto fail;
+ *wwn = (*wwn << 4) | nibble;
+ pos++;
+ }
+ err = 4;
+fail:
+ pr_info("err %u len %zu pos %u\n", err, cp - name, pos);
+ return -1;
+}
+
+static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
+{
+ return snprintf(buf, len, "%016llx", wwn);
+}
+
+static struct se_node_acl *sbp_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct sbp_nacl *nacl;
+ u64 guid = 0;
+ u32 nexus_depth = 1;
+
+ if (sbp_parse_wwn(name, &guid, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ sbp_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+
+ nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
+ nacl->guid = guid;
+ sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
+
+ return se_nacl;
+}
+
+static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct sbp_nacl *nacl =
+ container_of(se_acl, struct sbp_nacl, se_node_acl);
+
+ core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
+static int sbp_post_link_lun(
+ struct se_portal_group *se_tpg,
+ struct se_lun *se_lun)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+
+ return sbp_update_unit_directory(tpg->tport);
+}
+
+static void sbp_pre_unlink_lun(
+ struct se_portal_group *se_tpg,
+ struct se_lun *se_lun)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ int ret;
+
+ if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
+ tport->enable = 0;
+
+ ret = sbp_update_unit_directory(tport);
+ if (ret < 0)
+ pr_err("unlink LUN: failed to update unit directory\n");
+}
+
+static struct se_portal_group *sbp_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct sbp_tport *tport =
+ container_of(wwn, struct sbp_tport, tport_wwn);
+
+ struct sbp_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (tport->tpg) {
+ pr_err("Only one TPG per Unit is possible.\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct sbp_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ tpg->tport = tport;
+ tpg->tport_tpgt = tpgt;
+ tport->tpg = tpg;
+
+ /* default attribute values */
+ tport->enable = 0;
+ tport->directory_id = -1;
+ tport->mgt_orb_timeout = 15;
+ tport->max_reconnect_timeout = 5;
+ tport->max_logins_per_lun = 1;
+
+ tport->mgt_agt = sbp_management_agent_register(tport);
+ if (IS_ERR(tport->mgt_agt)) {
+ ret = PTR_ERR(tport->mgt_agt);
+ kfree(tpg);
+ return ERR_PTR(ret);
+ }
+
+ ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, (void *)tpg,
+ TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ sbp_management_agent_unregister(tport->mgt_agt);
+ kfree(tpg);
+ return ERR_PTR(ret);
+ }
+
+ return &tpg->se_tpg;
+}
+
+static void sbp_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+
+ core_tpg_deregister(se_tpg);
+ sbp_management_agent_unregister(tport->mgt_agt);
+ tport->tpg = NULL;
+ kfree(tpg);
+}
+
+static struct se_wwn *sbp_make_tport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct sbp_tport *tport;
+ u64 guid = 0;
+
+ if (sbp_parse_wwn(name, &guid, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ tport = kzalloc(sizeof(*tport), GFP_KERNEL);
+ if (!tport) {
+ pr_err("Unable to allocate struct sbp_tport\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ tport->guid = guid;
+ sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
+
+ return &tport->tport_wwn;
+}
+
+static void sbp_drop_tport(struct se_wwn *wwn)
+{
+ struct sbp_tport *tport =
+ container_of(wwn, struct sbp_tport, tport_wwn);
+
+ kfree(tport);
+}
+
+static ssize_t sbp_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
+}
+
+TF_WWN_ATTR_RO(sbp, version);
+
+static struct configfs_attribute *sbp_wwn_attrs[] = {
+ &sbp_wwn_version.attr,
+ NULL,
+};
+
+static ssize_t sbp_tpg_show_directory_id(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+
+ if (tport->directory_id == -1)
+ return sprintf(page, "implicit\n");
+ else
+ return sprintf(page, "%06x\n", tport->directory_id);
+}
+
+static ssize_t sbp_tpg_store_directory_id(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+
+ if (tport->enable) {
+ pr_err("Cannot change the directory_id on an active target.\n");
+ return -EBUSY;
+ }
+
+ if (strstr(page, "implicit") == page) {
+ tport->directory_id = -1;
+ } else {
+ if (kstrtoul(page, 16, &val) < 0)
+ return -EINVAL;
+ if (val > 0xffffff)
+ return -EINVAL;
+
+ tport->directory_id = val;
+ }
+
+ return count;
+}
+
+static ssize_t sbp_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ return sprintf(page, "%d\n", tport->enable);
+}
+
+static ssize_t sbp_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(page, 0, &val) < 0)
+ return -EINVAL;
+ if ((val != 0) && (val != 1))
+ return -EINVAL;
+
+ if (tport->enable == val)
+ return count;
+
+ if (val) {
+ if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
+ pr_err("Cannot enable a target with no LUNs!\n");
+ return -EINVAL;
+ }
+ } else {
+ /* XXX: force-shutdown sessions instead? */
+ spin_lock_bh(&se_tpg->session_lock);
+ if (!list_empty(&se_tpg->tpg_sess_list)) {
+ spin_unlock_bh(&se_tpg->session_lock);
+ return -EBUSY;
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+ }
+
+ tport->enable = val;
+
+ ret = sbp_update_unit_directory(tport);
+ if (ret < 0) {
+ pr_err("Could not update Config ROM\n");
+ return ret;
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
+TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *sbp_tpg_base_attrs[] = {
+ &sbp_tpg_directory_id.attr,
+ &sbp_tpg_enable.attr,
+ NULL,
+};
+
+static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ return sprintf(page, "%d\n", tport->mgt_orb_timeout);
+}
+
+static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(page, 0, &val) < 0)
+ return -EINVAL;
+ if ((val < 1) || (val > 127))
+ return -EINVAL;
+
+ if (tport->mgt_orb_timeout == val)
+ return count;
+
+ tport->mgt_orb_timeout = val;
+
+ ret = sbp_update_unit_directory(tport);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ return sprintf(page, "%d\n", tport->max_reconnect_timeout);
+}
+
+static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(page, 0, &val) < 0)
+ return -EINVAL;
+ if ((val < 1) || (val > 32767))
+ return -EINVAL;
+
+ if (tport->max_reconnect_timeout == val)
+ return count;
+
+ tport->max_reconnect_timeout = val;
+
+ ret = sbp_update_unit_directory(tport);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ return sprintf(page, "%d\n", tport->max_logins_per_lun);
+}
+
+static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
+ struct sbp_tport *tport = tpg->tport;
+ unsigned long val;
+
+ if (kstrtoul(page, 0, &val) < 0)
+ return -EINVAL;
+ if ((val < 1) || (val > 127))
+ return -EINVAL;
+
+ /* XXX: also check against current count? */
+
+ tport->max_logins_per_lun = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
+TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
+TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
+ &sbp_tpg_attrib_mgt_orb_timeout.attr,
+ &sbp_tpg_attrib_max_reconnect_timeout.attr,
+ &sbp_tpg_attrib_max_logins_per_lun.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops sbp_ops = {
+ .get_fabric_name = sbp_get_fabric_name,
+ .get_fabric_proto_ident = sbp_get_fabric_proto_ident,
+ .tpg_get_wwn = sbp_get_fabric_wwn,
+ .tpg_get_tag = sbp_get_tag,
+ .tpg_get_default_depth = sbp_get_default_depth,
+ .tpg_get_pr_transport_id = sbp_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = sbp_check_true,
+ .tpg_check_demo_mode_cache = sbp_check_true,
+ .tpg_check_demo_mode_write_protect = sbp_check_false,
+ .tpg_check_prod_mode_write_protect = sbp_check_false,
+ .tpg_alloc_fabric_acl = sbp_alloc_fabric_acl,
+ .tpg_release_fabric_acl = sbp_release_fabric_acl,
+ .tpg_get_inst_index = sbp_tpg_get_inst_index,
+ .release_cmd = sbp_release_cmd,
+ .shutdown_session = sbp_shutdown_session,
+ .close_session = sbp_close_session,
+ .sess_get_index = sbp_sess_get_index,
+ .write_pending = sbp_write_pending,
+ .write_pending_status = sbp_write_pending_status,
+ .set_default_node_attributes = sbp_set_default_node_attrs,
+ .get_task_tag = sbp_get_task_tag,
+ .get_cmd_state = sbp_get_cmd_state,
+ .queue_data_in = sbp_queue_data_in,
+ .queue_status = sbp_queue_status,
+ .queue_tm_rsp = sbp_queue_tm_rsp,
+ .get_fabric_sense_len = sbp_get_fabric_sense_len,
+ .set_fabric_sense_len = sbp_set_fabric_sense_len,
+ .check_stop_free = sbp_check_stop_free,
+
+ .fabric_make_wwn = sbp_make_tport,
+ .fabric_drop_wwn = sbp_drop_tport,
+ .fabric_make_tpg = sbp_make_tpg,
+ .fabric_drop_tpg = sbp_drop_tpg,
+ .fabric_post_link = sbp_post_link_lun,
+ .fabric_pre_unlink = sbp_pre_unlink_lun,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = sbp_make_nodeacl,
+ .fabric_drop_nodeacl = sbp_drop_nodeacl,
+};
+
+static int sbp_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
+ if (!fabric) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ return -ENOMEM;
+ }
+
+ fabric->tf_ops = sbp_ops;
+
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed for SBP\n");
+ return ret;
+ }
+
+ sbp_fabric_configfs = fabric;
+
+ return 0;
+}
+
+static void sbp_deregister_configfs(void)
+{
+ if (!sbp_fabric_configfs)
+ return;
+
+ target_fabric_configfs_deregister(sbp_fabric_configfs);
+ sbp_fabric_configfs = NULL;
+}
+
+static int __init sbp_init(void)
+{
+ int ret;
+
+ ret = sbp_register_configfs();
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void sbp_exit(void)
+{
+ sbp_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("FireWire SBP fabric driver");
+MODULE_LICENSE("GPL");
+module_init(sbp_init);
+module_exit(sbp_exit);
diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h
new file mode 100644
index 000000000000..6d0d74a2c545
--- /dev/null
+++ b/drivers/target/sbp/sbp_target.h
@@ -0,0 +1,251 @@
+#ifndef _SBP_BASE_H
+#define _SBP_BASE_H
+
+#include <linux/firewire.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <target/target_core_base.h>
+
+#define SBP_VERSION "v0.1"
+#define SBP_NAMELEN 32
+
+#define SBP_ORB_FETCH_SIZE 8
+
+#define MANAGEMENT_AGENT_STATE_IDLE 0
+#define MANAGEMENT_AGENT_STATE_BUSY 1
+
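+/* field extractors for the big-endian words of the various ORBs */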
+#define ORB_NOTIFY(v) (((v) >> 31) & 0x01)
+#define ORB_REQUEST_FORMAT(v) (((v) >> 29) & 0x03)
+
+#define MANAGEMENT_ORB_FUNCTION(v) (((v) >> 16) & 0x0f)
+
+#define MANAGEMENT_ORB_FUNCTION_LOGIN 0x0
+#define MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS 0x1
+#define MANAGEMENT_ORB_FUNCTION_RECONNECT 0x3
+#define MANAGEMENT_ORB_FUNCTION_SET_PASSWORD 0x4
+#define MANAGEMENT_ORB_FUNCTION_LOGOUT 0x7
+#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK 0xb
+#define MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET 0xc
+#define MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET 0xe
+#define MANAGEMENT_ORB_FUNCTION_TARGET_RESET 0xf
+
+#define LOGIN_ORB_EXCLUSIVE(v) (((v) >> 28) & 0x01)
+#define LOGIN_ORB_RESERVED(v) (((v) >> 24) & 0x0f)
+#define LOGIN_ORB_RECONNECT(v) (((v) >> 20) & 0x0f)
+#define LOGIN_ORB_LUN(v) (((v) >> 0) & 0xffff)
+#define LOGIN_ORB_PASSWORD_LENGTH(v) (((v) >> 16) & 0xffff)
+#define LOGIN_ORB_RESPONSE_LENGTH(v) (((v) >> 0) & 0xffff)
+
+#define RECONNECT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff)
+#define LOGOUT_ORB_LOGIN_ID(v) (((v) >> 0) & 0xffff)
+
+#define CMDBLK_ORB_DIRECTION(v) (((v) >> 27) & 0x01)
+#define CMDBLK_ORB_SPEED(v) (((v) >> 24) & 0x07)
+#define CMDBLK_ORB_MAX_PAYLOAD(v) (((v) >> 20) & 0x0f)
+#define CMDBLK_ORB_PG_TBL_PRESENT(v) (((v) >> 19) & 0x01)
+#define CMDBLK_ORB_PG_SIZE(v) (((v) >> 16) & 0x07)
+#define CMDBLK_ORB_DATA_SIZE(v) (((v) >> 0) & 0xffff)
+
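+/* builders for the fields of the first status block quadlet */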
+#define STATUS_BLOCK_SRC(v) (((v) & 0x03) << 30)
+#define STATUS_BLOCK_RESP(v) (((v) & 0x03) << 28)
+#define STATUS_BLOCK_DEAD(v) (((v) ? 1 : 0) << 27)
+#define STATUS_BLOCK_LEN(v) (((v) & 0x07) << 24)
+#define STATUS_BLOCK_SBP_STATUS(v) (((v) & 0xff) << 16)
+#define STATUS_BLOCK_ORB_OFFSET_HIGH(v) (((v) & 0xffff) << 0)
+
+#define STATUS_SRC_ORB_CONTINUING 0
+#define STATUS_SRC_ORB_FINISHED 1
+#define STATUS_SRC_UNSOLICITED 2
+
+#define STATUS_RESP_REQUEST_COMPLETE 0
+#define STATUS_RESP_TRANSPORT_FAILURE 1
+#define STATUS_RESP_ILLEGAL_REQUEST 2
+#define STATUS_RESP_VENDOR_DEPENDENT 3
+
+#define SBP_STATUS_OK 0
+#define SBP_STATUS_REQ_TYPE_NOTSUPP 1
+#define SBP_STATUS_SPEED_NOTSUPP 2
+#define SBP_STATUS_PAGE_SIZE_NOTSUPP 3
+#define SBP_STATUS_ACCESS_DENIED 4
+#define SBP_STATUS_LUN_NOTSUPP 5
+#define SBP_STATUS_PAYLOAD_TOO_SMALL 6
+/* 7 is reserved */
+#define SBP_STATUS_RESOURCES_UNAVAIL 8
+#define SBP_STATUS_FUNCTION_REJECTED 9
+#define SBP_STATUS_LOGIN_ID_UNKNOWN 10
+#define SBP_STATUS_DUMMY_ORB_COMPLETE 11
+#define SBP_STATUS_REQUEST_ABORTED 12
+#define SBP_STATUS_UNSPECIFIED_ERROR 0xff
+
+#define AGENT_STATE_RESET 0
+#define AGENT_STATE_ACTIVE 1
+#define AGENT_STATE_SUSPENDED 2
+#define AGENT_STATE_DEAD 3
+
+struct sbp2_pointer {
+ __be32 high;
+ __be32 low;
+};
+
+struct sbp_command_block_orb {
+ struct sbp2_pointer next_orb;
+ struct sbp2_pointer data_descriptor;
+ __be32 misc;
+ u8 command_block[12];
+};
+
+struct sbp_page_table_entry {
+ __be16 segment_length;
+ __be16 segment_base_hi;
+ __be32 segment_base_lo;
+};
+
+struct sbp_management_orb {
+ struct sbp2_pointer ptr1;
+ struct sbp2_pointer ptr2;
+ __be32 misc;
+ __be32 length;
+ struct sbp2_pointer status_fifo;
+};
+
+struct sbp_status_block {
+ __be32 status;
+ __be32 orb_low;
+ u8 data[24];
+};
+
+struct sbp_login_response_block {
+ __be32 misc;
+ struct sbp2_pointer command_block_agent;
+ __be32 reconnect_hold;
+};
+
+struct sbp_login_descriptor {
+ struct sbp_session *sess;
+ struct list_head link;
+
+ struct se_lun *lun;
+
+ u64 status_fifo_addr;
+ int exclusive;
+ u16 login_id;
+
+ struct sbp_target_agent *tgt_agt;
+};
+
+struct sbp_session {
+ spinlock_t lock;
+ struct se_session *se_sess;
+ struct list_head login_list;
+ struct delayed_work maint_work;
+
+ u64 guid; /* login_owner_EUI_64 */
+ int node_id; /* login_owner_ID */
+
+ struct fw_card *card;
+ int generation;
+ int speed;
+
+ int reconnect_hold;
+ u64 reconnect_expires;
+};
+
+struct sbp_nacl {
+ /* Initiator EUI-64 */
+ u64 guid;
+ /* ASCII formatted GUID for SBP Initiator port */
+ char iport_name[SBP_NAMELEN];
+ /* Returned by sbp_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct sbp_tpg {
+ /* Target portal group tag for TCM */
+ u16 tport_tpgt;
+ /* Pointer back to sbp_tport */
+ struct sbp_tport *tport;
+ /* Returned by sbp_make_tpg() */
+ struct se_portal_group se_tpg;
+};
+
+struct sbp_tport {
+ /* Target Unit Identifier (EUI-64) */
+ u64 guid;
+ /* Target port name */
+ char tport_name[SBP_NAMELEN];
+ /* Returned by sbp_make_tport() */
+ struct se_wwn tport_wwn;
+
+ struct sbp_tpg *tpg;
+
+ /* FireWire unit directory */
+ struct fw_descriptor unit_directory;
+
+ /* SBP Management Agent */
+ struct sbp_management_agent *mgt_agt;
+
+ /* Parameters */
+ int enable;
+ s32 directory_id;
+ int mgt_orb_timeout;
+ int max_reconnect_timeout;
+ int max_logins_per_lun;
+};
+
+static inline u64 sbp2_pointer_to_addr(const struct sbp2_pointer *ptr)
+{
+ return (u64)(be32_to_cpu(ptr->high) & 0x0000ffff) << 32 |
+ (be32_to_cpu(ptr->low) & 0xfffffffc);
+}
+
+static inline void addr_to_sbp2_pointer(u64 addr, struct sbp2_pointer *ptr)
+{
+ ptr->high = cpu_to_be32(addr >> 32);
+ ptr->low = cpu_to_be32(addr);
+}
+
+struct sbp_target_agent {
+ spinlock_t lock;
+ struct fw_address_handler handler;
+ struct sbp_login_descriptor *login;
+ int state;
+ struct work_struct work;
+ u64 orb_pointer;
+ bool doorbell;
+};
+
+struct sbp_target_request {
+ struct sbp_login_descriptor *login;
+ u64 orb_pointer;
+ struct sbp_command_block_orb orb;
+ struct sbp_status_block status;
+ struct work_struct work;
+
+ struct se_cmd se_cmd;
+ struct sbp_page_table_entry *pg_tbl;
+ void *cmd_buf;
+
+ unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
+};
+
+struct sbp_management_agent {
+ spinlock_t lock;
+ struct sbp_tport *tport;
+ struct fw_address_handler handler;
+ int state;
+ struct work_struct work;
+ u64 orb_offset;
+ struct sbp_management_request *request;
+};
+
+struct sbp_management_request {
+ struct sbp_management_orb orb;
+ struct sbp_status_block status;
+ struct fw_card *card;
+ int generation;
+ int node_addr;
+ int speed;
+};
+
+#endif
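The helpers above split a 48-bit FireWire CSR address across two big-endian words, masking the low two bits. A minimal user-space sketch of the same round trip follows, using htonl()/ntohl() as stand-ins for the kernel's cpu_to_be32()/be32_to_cpu(); the sample address and the _demo names are made up, and the program is an illustration only, not part of the patch.

/* Hypothetical stand-alone check of the sbp2_pointer packing; htonl()/ntohl()
 * stand in for the kernel's cpu_to_be32()/be32_to_cpu(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct sbp2_pointer_demo {
	uint32_t high;	/* big-endian on the wire */
	uint32_t low;
};

static uint64_t demo_pointer_to_addr(const struct sbp2_pointer_demo *ptr)
{
	return (uint64_t)(ntohl(ptr->high) & 0x0000ffff) << 32 |
	       (ntohl(ptr->low) & 0xfffffffc);
}

static void demo_addr_to_pointer(uint64_t addr, struct sbp2_pointer_demo *ptr)
{
	ptr->high = htonl(addr >> 32);
	ptr->low = htonl((uint32_t)addr);
}

int main(void)
{
	struct sbp2_pointer_demo p;
	uint64_t addr = 0x0000fffff0000234ULL;	/* made-up CSR address */

	demo_addr_to_pointer(addr, &p);
	assert(demo_pointer_to_addr(&p) == addr);	/* round-trips; bits 1:0 are masked */
	printf("0x%012llx\n", (unsigned long long)demo_pointer_to_addr(&p));
	return 0;
}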
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index e624b836469c..91799973081a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
out:
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
- return 0;
+ if (!rc)
+ target_complete_cmd(cmd, GOOD);
+ return rc;
}
static inline int core_alua_state_nonoptimized(
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 9888693a18fe..664f6e775d0e 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -1095,7 +1095,7 @@ int target_emulate_write_same(struct se_cmd *cmd)
if (num_blocks != 0)
range = num_blocks;
else
- range = (dev->transport->get_blocks(dev) - lba);
+ range = (dev->transport->get_blocks(dev) - lba) + 1;
pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
(unsigned long long)lba, (unsigned long long)range);
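A quick sanity check of the off-by-one fixed above, assuming dev->transport->get_blocks() reports the last addressable LBA: the old expression dropped the final block when num_blocks was zero, while the added "+ 1" makes the range cover lba through the end of the device. Toy numbers, as a stand-alone C snippet:

#include <stdio.h>

int main(void)
{
	/* Toy numbers: last addressable LBA 999, WRITE_SAME w/ UNMAP from LBA 990. */
	unsigned long long last_lba = 999, lba = 990;

	printf("old range: %llu blocks\n", last_lba - lba);        /* 9: stops short of LBA 999 */
	printf("new range: %llu blocks\n", (last_lba - lba) + 1);  /* 10: covers LBA 990..999 */
	return 0;
}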
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 686dba189f8e..9f99d0404908 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice(
ret = PTR_ERR(dev_p);
goto fail;
}
-
- /* O_DIRECT too? */
- flags = O_RDWR | O_CREAT | O_LARGEFILE;
-
/*
- * If fd_buffered_io=1 has not been set explicitly (the default),
- * use O_SYNC to force FILEIO writes to disk.
+ * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+ * of pure timestamp updates.
*/
- if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
- flags |= O_SYNC;
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) {
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
}
}
-static void fd_emulate_write_fua(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- struct fd_dev *fd_dev = dev->dev_ptr;
- loff_t start = cmd->t_task_lba *
- dev->se_sub_dev->se_dev_attrib.block_size;
- loff_t end = start + cmd->data_length;
- int ret;
-
- pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
- cmd->t_task_lba, cmd->data_length);
-
- ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
- if (ret != 0)
- pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
-}
-
static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction)
{
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
ret = fd_do_readv(cmd, sgl, sgl_nents);
} else {
ret = fd_do_writev(cmd, sgl, sgl_nents);
-
+ /*
+ * Perform implicit vfs_fsync_range() for fd_do_writev() ops
+ * for SCSI WRITEs with Forced Unit Access (FUA) set.
+ * Allow this to happen independent of WCE=0 setting.
+ */
if (ret > 0 &&
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)) {
- /*
- * We might need to be a bit smarter here
- * and return some sense data to let the initiator
- * know the FUA WRITE cache sync failed..?
- */
- fd_emulate_write_fua(cmd);
- }
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ loff_t start = cmd->t_task_lba *
+ dev->se_sub_dev->se_dev_attrib.block_size;
+ loff_t end = start + cmd->data_length;
+ vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+ }
}
if (ret < 0) {
@@ -442,7 +422,6 @@ enum {
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
- {Opt_fd_buffered_io, "fd_buffered_io=%d"},
{Opt_err, NULL}
};
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params(
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, arg, token;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params(
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
- case Opt_fd_buffered_io:
- match_int(args, &arg);
- if (arg != 1) {
- pr_err("bogus fd_buffered_io=%d value\n", arg);
- ret = -EINVAL;
- goto out;
- }
-
- pr_debug("FILEIO: Using buffered I/O"
- " operations for struct fd_dev\n");
-
- fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
- break;
default:
break;
}
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params(
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
- bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
- fd_dev->fd_dev_name, fd_dev->fd_dev_size,
- (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
- "Buffered" : "Synchronous");
+ bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
+ fd_dev->fd_dev_name, fd_dev->fd_dev_size);
return bl;
}
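The FUA handling folded into fd_execute_cmd() above syncs only the byte span the WRITE touched, computed from the starting LBA and the block size. A stand-alone sketch of that arithmetic with hypothetical values (the 512-byte block size and the sample LBA are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: 512-byte blocks, FUA WRITE at LBA 2048 for 4 KiB. */
	uint32_t block_size = 512;
	uint64_t t_task_lba = 2048;
	uint32_t data_length = 4096;

	uint64_t start = t_task_lba * block_size;	/* byte offset handed to vfs_fsync_range() */
	uint64_t end = start + data_length;

	printf("sync from byte %llu to byte %llu\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}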
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index fbd59ef7d8be..70ce7fd7111d 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,7 +14,6 @@
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
-#define FDBD_USE_BUFFERED_IO 0x04
struct fd_dev {
u32 fbd_flags;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 85564998500a..a1bcd927a9e6 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2031,7 +2031,7 @@ static int __core_scsi3_write_aptpl_to_file(
if (IS_ERR(file) || !file || !file->f_dentry) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
+ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
}
iov[0].iov_base = &buf[0];
@@ -3818,7 +3818,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -3828,7 +3828,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
*/
if (!cmd->se_sess) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (cmd->data_length < 24) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b05fdc0c05d3..634d0f31a28c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -315,7 +315,7 @@ void transport_register_session(
}
EXPORT_SYMBOL(transport_register_session);
-static void target_release_session(struct kref *kref)
+void target_release_session(struct kref *kref)
{
struct se_session *se_sess = container_of(kref,
struct se_session, sess_kref);
@@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session);
void target_put_session(struct se_session *se_sess)
{
+ struct se_portal_group *tpg = se_sess->se_tpg;
+
+ if (tpg->se_tpg_tfo->put_session != NULL) {
+ tpg->se_tpg_tfo->put_session(se_sess);
+ return;
+ }
kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
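target_release_session() loses its static qualifier above, presumably so a fabric module that installs .put_session can still hand the final kref release to the core. One plausible shape for such a callback, sketched under the assumption that the prototype is visible to fabric code; nothing in this hunk mandates this exact form:

/* Hypothetical fabric-side callback; assumes <target/target_core_base.h>
 * and a visible prototype for target_release_session(). */
static void my_fabric_put_session(struct se_session *se_sess)
{
	/* fabric-specific teardown or deferral could happen here */
	kref_put(&se_sess->sess_kref, target_release_session);
}

A fabric that needs no special handling simply leaves .put_session NULL, and the core falls back to the kref_put() in target_put_session() as before.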
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index f03fb9730f5b..5b65f33939a8 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -230,6 +230,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+ if (cmd->aborted)
+ return ~0;
return fc_seq_exch(cmd->seq)->rxid;
}
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index cb99da920068..87901fa74dd7 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -58,7 +58,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
struct ft_tport *tport;
int i;
- tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+ tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+ lockdep_is_held(&ft_lport_lock));
if (tport && tport->tpg)
return tport;
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 35819e312624..6cc4358f68c1 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1033,7 +1033,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
- tty_lock(tty);
+ tty_lock();
tmp.line = tty->index;
tmp.port = state->port;
tmp.flags = state->tport.flags;
@@ -1042,7 +1042,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
tmp.close_delay = state->tport.close_delay;
tmp.closing_wait = state->tport.closing_wait;
tmp.custom_divisor = state->custom_divisor;
- tty_unlock(tty);
+ tty_unlock();
if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
return -EFAULT;
return 0;
@@ -1059,12 +1059,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
return -EFAULT;
- tty_lock(tty);
+ tty_lock();
change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
new_serial.custom_divisor != state->custom_divisor;
if (new_serial.irq || new_serial.port != state->port ||
new_serial.xmit_fifo_size != state->xmit_fifo_size) {
- tty_unlock(tty);
+ tty_unlock();
return -EINVAL;
}
@@ -1074,7 +1074,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
(new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
((new_serial.flags & ~ASYNC_USR_MASK) !=
(port->flags & ~ASYNC_USR_MASK))) {
- tty_unlock(tty);
+ tty_unlock();
return -EPERM;
}
port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1084,7 +1084,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
}
if (new_serial.baud_base < 9600) {
- tty_unlock(tty);
+ tty_unlock();
return -EINVAL;
}
@@ -1116,7 +1116,7 @@ check_and_exit:
}
} else
retval = startup(tty, state);
- tty_unlock(tty);
+ tty_unlock();
return retval;
}
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 6984e1a2686a..e61cabdd69df 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1599,7 +1599,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
* If the port is the middle of closing, bail out now
*/
if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
- wait_event_interruptible_tty(tty, info->port.close_wait,
+ wait_event_interruptible_tty(info->port.close_wait,
!(info->port.flags & ASYNC_CLOSING));
return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
}
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index ced26c8ccd57..0d2ea0c224c3 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -401,7 +401,7 @@ out:
}
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
{
u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
hvc_opal_privs[index] = &hvc_opal_boot_priv;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index d3d91dae065c..944eaeb8e0cf 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -214,24 +214,24 @@ static int xen_hvm_console_init(void)
/* already configured */
if (info->intf != NULL)
return 0;
-
+ /*
+ * If the toolstack (or the hypervisor) hasn't set these values, the
+ * default value is 0. Even though mfn = 0 and evtchn = 0 are
+ * theoretically correct values, in practice they never are and they
+ * mean that a legacy toolstack hasn't initialized the pv console correctly.
+ */
r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
- if (r < 0) {
- kfree(info);
- return -ENODEV;
- }
+ if (r < 0 || v == 0)
+ goto err;
info->evtchn = v;
- hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
- if (r < 0) {
- kfree(info);
- return -ENODEV;
- }
+ v = 0;
+ r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
+ if (r < 0 || v == 0)
+ goto err;
mfn = v;
info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
- if (info->intf == NULL) {
- kfree(info);
- return -ENODEV;
- }
+ if (info->intf == NULL)
+ goto err;
info->vtermno = HVC_COOKIE;
spin_lock(&xencons_lock);
@@ -239,6 +239,9 @@ static int xen_hvm_console_init(void)
spin_unlock(&xencons_lock);
return 0;
+err:
+ kfree(info);
+ return -ENODEV;
}
static int xen_pv_console_init(void)
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 656ad93bbc96..5c6c31459a2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1065,8 +1065,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
TRACE_L("read()");
- /* FIXME: should use a private lock */
- tty_lock(tty);
+ tty_lock();
pClient = findClient(pInfo, task_pid(current));
if (pClient) {
@@ -1078,7 +1077,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
goto unlock;
}
/* block until there is a message: */
- wait_event_interruptible_tty(tty, pInfo->read_wait,
+ wait_event_interruptible_tty(pInfo->read_wait,
(pMsg = remove_msg(pInfo, pClient)));
}
@@ -1108,7 +1107,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
}
ret = -EPERM;
unlock:
- tty_unlock(tty);
+ tty_unlock();
return ret;
}
@@ -1157,7 +1156,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
pHeader->locks = 0;
pHeader->owner = NULL;
- tty_lock(tty);
+ tty_lock();
pClient = findClient(pInfo, task_pid(current));
if (pClient) {
@@ -1176,7 +1175,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
add_tx_queue(pInfo, pHeader);
trigger_transmit(pInfo);
- tty_unlock(tty);
+ tty_unlock();
return 0;
}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 59af3945ea85..5505ffc91da4 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -47,7 +47,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
tty->packet = 0;
- /* Review - krefs on tty_link ?? */
if (!tty->link)
return;
tty->link->packet = 0;
@@ -63,9 +62,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&devpts_mutex);
}
#endif
- tty_unlock(tty);
+ tty_unlock();
tty_vhangup(tty->link);
- tty_lock(tty);
+ tty_lock();
}
}
@@ -623,29 +622,26 @@ static int ptmx_open(struct inode *inode, struct file *filp)
return retval;
/* find a device that is not in use. */
- mutex_lock(&devpts_mutex);
+ tty_lock();
index = devpts_new_index(inode);
+ tty_unlock();
if (index < 0) {
retval = index;
goto err_file;
}
- mutex_unlock(&devpts_mutex);
-
mutex_lock(&tty_mutex);
mutex_lock(&devpts_mutex);
tty = tty_init_dev(ptm_driver, index);
+ mutex_unlock(&devpts_mutex);
+ tty_lock();
+ mutex_unlock(&tty_mutex);
if (IS_ERR(tty)) {
retval = PTR_ERR(tty);
goto out;
}
- /* The tty returned here is locked so we can safely
- drop the mutex */
- mutex_unlock(&devpts_mutex);
- mutex_unlock(&tty_mutex);
-
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
tty_add_file(tty, filp);
@@ -658,17 +654,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
goto err_release;
- tty_unlock(tty);
+ tty_unlock();
return 0;
err_release:
- tty_unlock(tty);
+ tty_unlock();
tty_release(inode, filp);
return retval;
out:
- mutex_unlock(&tty_mutex);
devpts_kill_index(inode, index);
+ tty_unlock();
err_file:
- mutex_unlock(&devpts_mutex);
tty_free_file(filp);
return retval;
}
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 47d061b9ad4d..6e1958a325bd 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
/**
* serial8250_register_8250_port - register a serial port
- * @port: serial port template
+ * @up: serial port template
*
* Configure the serial port specified by the request. If the
* port exists and is in use, it is hung up and unregistered
diff --git a/drivers/tty/serial/8250/8250_mca.c b/drivers/tty/serial/8250/8250_mca.c
deleted file mode 100644
index d20abf04541e..000000000000
--- a/drivers/tty/serial/8250/8250_mca.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2005 Russell King.
- * Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mca.h>
-#include <linux/serial_8250.h>
-
-/*
- * FIXME: Should we be doing AUTO_IRQ here?
- */
-#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
-#define MCA_FLAGS UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ
-#else
-#define MCA_FLAGS UPF_BOOT_AUTOCONF | UPF_SKIP_TEST
-#endif
-
-#define PORT(_base,_irq) \
- { \
- .iobase = _base, \
- .irq = _irq, \
- .uartclk = 1843200, \
- .iotype = UPIO_PORT, \
- .flags = MCA_FLAGS, \
- }
-
-static struct plat_serial8250_port mca_data[] = {
- PORT(0x3220, 3),
- PORT(0x3228, 3),
- PORT(0x4220, 3),
- PORT(0x4228, 3),
- PORT(0x5220, 3),
- PORT(0x5228, 3),
- { },
-};
-
-static struct platform_device mca_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_MCA,
- .dev = {
- .platform_data = mca_data,
- },
-};
-
-static int __init mca_init(void)
-{
- if (!MCA_bus)
- return -ENODEV;
- return platform_device_register(&mca_device);
-}
-
-module_init(mca_init);
-
-MODULE_AUTHOR("Russell King");
-MODULE_DESCRIPTION("8250 serial probe module for MCA ports");
-MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 8bc7ecbf6bea..a27dd0569bd7 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -241,15 +241,6 @@ config SERIAL_8250_RSA
help
::: To be written :::
-config SERIAL_8250_MCA
- tristate "Support 8250-type ports on MCA buses"
- depends on SERIAL_8250 != n && MCA
- help
- Say Y here if you have a MCA serial ports.
-
- To compile this driver as a module, choose M here: the module
- will be called 8250_mca.
-
config SERIAL_8250_ACORN
tristate "Acorn expansion card serial port support"
depends on ARCH_ACORN && SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 3f35eacdf673..d7533c7d2c1a 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
-obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4ad721fb8405..c17923ec6e95 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -133,6 +133,10 @@ struct pl011_dmatx_data {
struct uart_amba_port {
struct uart_port port;
struct clk *clk;
+ /* Two optional pin states - default & sleep */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_sleep;
const struct vendor_data *vendor;
unsigned int dmacr; /* dma control reg */
unsigned int im; /* interrupt mask */
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port)
unsigned int cr;
int retval;
+ /* Optionally enable pins to be muxed in and configured */
+ if (!IS_ERR(uap->pins_default)) {
+ retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+ if (retval)
+ dev_err(port->dev,
+ "could not set default pins\n");
+ }
+
retval = clk_prepare(uap->clk);
if (retval)
goto out;
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int cr;
+ int retval;
/*
* disable all interrupts
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port)
*/
clk_disable(uap->clk);
clk_unprepare(uap->clk);
+ /* Optionally let pins go into sleep states */
+ if (!IS_ERR(uap->pins_sleep)) {
+ retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
+ if (retval)
+ dev_err(port->dev,
+ "could not set pins to sleep state\n");
+ }
+
if (uap->port.dev->platform_data) {
struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (!uap)
return -ENODEV;
+ /* Allow pins to be muxed in and configured */
+ if (!IS_ERR(uap->pins_default)) {
+ ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+ if (ret)
+ dev_err(uap->port.dev,
+ "could not set default pins\n");
+ }
+
ret = clk_prepare(uap->clk);
if (ret)
return ret;
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
- struct pinctrl *pinctrl;
void __iomem *base;
int i, ret;
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
goto free;
}
- pinctrl = devm_pinctrl_get_select_default(&dev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
+ uap->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(uap->pinctrl)) {
+ ret = PTR_ERR(uap->pinctrl);
goto unmap;
}
+ uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(uap->pins_default))
+ dev_err(&dev->dev, "could not get default pinstate\n");
+
+ uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(uap->pins_sleep))
+ dev_dbg(&dev->dev, "could not get sleep pinstate\n");
uap->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk)) {
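The amba-pl011 changes above trade the one-shot devm_pinctrl_get_select_default() for explicit state lookups so the driver can switch pins between default and sleep states at runtime. A condensed sketch of that pattern outside the pl011 specifics; the my_* names are hypothetical and error handling is abbreviated:

/* Sketch: optional default/sleep pin states, looked up once and selected at
 * startup/shutdown. All my_* names are hypothetical. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

struct my_port {
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_sleep;
};

static int my_probe(struct device *dev, struct my_port *p)
{
	p->pinctrl = devm_pinctrl_get(dev);
	if (IS_ERR(p->pinctrl))
		return PTR_ERR(p->pinctrl);

	/* Both states are optional: keep the ERR_PTR and test it at use time. */
	p->pins_default = pinctrl_lookup_state(p->pinctrl, PINCTRL_STATE_DEFAULT);
	p->pins_sleep = pinctrl_lookup_state(p->pinctrl, PINCTRL_STATE_SLEEP);
	return 0;
}

static void my_startup(struct device *dev, struct my_port *p)
{
	if (!IS_ERR(p->pins_default) &&
	    pinctrl_select_state(p->pinctrl, p->pins_default))
		dev_err(dev, "could not set default pins\n");
}

static void my_shutdown(struct device *dev, struct my_port *p)
{
	if (!IS_ERR(p->pins_sleep) &&
	    pinctrl_select_state(p->pinctrl, p->pins_sleep))
		dev_err(dev, "could not set pins to sleep state\n");
}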
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 7264d4d26717..80b6b1b1f725 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3976,7 +3976,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
- wait_event_interruptible_tty(tty, info->close_wait,
+ wait_event_interruptible_tty(info->close_wait,
!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
if (info->flags & ASYNC_HUP_NOTIFY)
@@ -4052,9 +4052,9 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
printk("block_til_ready blocking: ttyS%d, count = %d\n",
info->line, info->count);
#endif
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&info->open_wait, &wait);
@@ -4115,7 +4115,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
- wait_event_interruptible_tty(tty, info->close_wait,
+ wait_event_interruptible_tty(info->close_wait,
!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
return ((info->flags & ASYNC_HUP_NOTIFY) ?
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index ec206732f68c..4ef747307ecb 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -205,7 +205,8 @@ struct imx_port {
unsigned int irda_inv_rx:1;
unsigned int irda_inv_tx:1;
unsigned short trcv_delay; /* transceiver delay */
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
struct imx_uart_data *devdata;
};
@@ -673,7 +674,7 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
* RFDIV is set such way to satisfy requested uartclk value
*/
val = TXTL << 10 | RXTL;
- ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
+ ufcr_rfdiv = (clk_get_rate(sport->clk_per) + sport->port.uartclk / 2)
/ sport->port.uartclk;
if(!ufcr_rfdiv)
@@ -1286,7 +1287,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
else
ucfr_rfdiv = 6 - ucfr_rfdiv;
- uartclk = clk_get_rate(sport->clk);
+ uartclk = clk_get_rate(sport->clk_per);
uartclk /= ucfr_rfdiv;
{ /*
@@ -1511,14 +1512,22 @@ static int serial_imx_probe(struct platform_device *pdev)
goto unmap;
}
- sport->clk = clk_get(&pdev->dev, "uart");
- if (IS_ERR(sport->clk)) {
- ret = PTR_ERR(sport->clk);
+ sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(sport->clk_ipg)) {
+ ret = PTR_ERR(sport->clk_ipg);
goto unmap;
}
- clk_prepare_enable(sport->clk);
- sport->port.uartclk = clk_get_rate(sport->clk);
+ sport->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(sport->clk_per)) {
+ ret = PTR_ERR(sport->clk_per);
+ goto unmap;
+ }
+
+ clk_prepare_enable(sport->clk_per);
+ clk_prepare_enable(sport->clk_ipg);
+
+ sport->port.uartclk = clk_get_rate(sport->clk_per);
imx_ports[sport->port.line] = sport;
@@ -1539,8 +1548,8 @@ deinit:
if (pdata && pdata->exit)
pdata->exit(pdev);
clkput:
- clk_disable_unprepare(sport->clk);
- clk_put(sport->clk);
+ clk_disable_unprepare(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
unmap:
iounmap(sport->port.membase);
free:
@@ -1558,11 +1567,10 @@ static int serial_imx_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- if (sport) {
- uart_remove_one_port(&imx_reg, &sport->port);
- clk_disable_unprepare(sport->clk);
- clk_put(sport->clk);
- }
+ uart_remove_one_port(&imx_reg, &sport->port);
+
+ clk_disable_unprepare(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
if (pdata && pdata->exit)
pdata->exit(pdev);
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 96c1cacc7360..02da071fe1e7 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -31,16 +31,19 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <lantiq_soc.h>
#define PORT_LTQ_ASC 111
#define MAXPORTS 2
#define UART_DUMMY_UER_RX 1
-#define DRVNAME "ltq_asc"
+#define DRVNAME "lantiq,asc"
#ifdef __BIG_ENDIAN
#define LTQ_ASC_TBUF (0x0020 + 3)
#define LTQ_ASC_RBUF (0x0024 + 3)
@@ -114,6 +117,9 @@ static DEFINE_SPINLOCK(ltq_asc_lock);
struct ltq_uart_port {
struct uart_port port;
+ /* clock used to derive divider */
+ struct clk *fpiclk;
+ /* clock gating of the ASC core */
struct clk *clk;
unsigned int tx_irq;
unsigned int rx_irq;
@@ -316,7 +322,9 @@ lqasc_startup(struct uart_port *port)
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
int retval;
- port->uartclk = clk_get_rate(ltq_port->clk);
+ if (ltq_port->clk)
+ clk_enable(ltq_port->clk);
+ port->uartclk = clk_get_rate(ltq_port->fpiclk);
ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
port->membase + LTQ_ASC_CLC);
@@ -382,6 +390,8 @@ lqasc_shutdown(struct uart_port *port)
port->membase + LTQ_ASC_RXFCON);
ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
port->membase + LTQ_ASC_TXFCON);
+ if (ltq_port->clk)
+ clk_disable(ltq_port->clk);
}
static void
@@ -630,7 +640,7 @@ lqasc_console_setup(struct console *co, char *options)
port = &ltq_port->port;
- port->uartclk = clk_get_rate(ltq_port->clk);
+ port->uartclk = clk_get_rate(ltq_port->fpiclk);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -668,37 +678,32 @@ static struct uart_driver lqasc_reg = {
static int __init
lqasc_probe(struct platform_device *pdev)
{
+ struct device_node *node = pdev->dev.of_node;
struct ltq_uart_port *ltq_port;
struct uart_port *port;
- struct resource *mmres, *irqres;
- int tx_irq, rx_irq, err_irq;
- struct clk *clk;
+ struct resource *mmres, irqres[3];
+ int line = 0;
int ret;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mmres || !irqres)
+ ret = of_irq_to_resource_table(node, irqres, 3);
+ if (!mmres || (ret != 3)) {
+ dev_err(&pdev->dev,
+ "failed to get memory/irq for serial port\n");
return -ENODEV;
+ }
- if (pdev->id >= MAXPORTS)
- return -EBUSY;
+ /* check if this is the console port */
+ if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC))
+ line = 1;
- if (lqasc_port[pdev->id] != NULL)
+ if (lqasc_port[line]) {
+ dev_err(&pdev->dev, "port %d already allocated\n", line);
return -EBUSY;
-
- clk = clk_get(&pdev->dev, "fpi");
- if (IS_ERR(clk)) {
- pr_err("failed to get fpi clk\n");
- return -ENOENT;
}
- tx_irq = platform_get_irq_byname(pdev, "tx");
- rx_irq = platform_get_irq_byname(pdev, "rx");
- err_irq = platform_get_irq_byname(pdev, "err");
- if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0))
- return -ENODEV;
-
- ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
+ ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
+ GFP_KERNEL);
if (!ltq_port)
return -ENOMEM;
@@ -709,19 +714,26 @@ lqasc_probe(struct platform_device *pdev)
port->ops = &lqasc_pops;
port->fifosize = 16;
port->type = PORT_LTQ_ASC,
- port->line = pdev->id;
+ port->line = line;
port->dev = &pdev->dev;
-
- port->irq = tx_irq; /* unused, just to be backward-compatibe */
+ /* unused, just to be backward-compatible */
+ port->irq = irqres[0].start;
port->mapbase = mmres->start;
- ltq_port->clk = clk;
+ ltq_port->fpiclk = clk_get_fpi();
+ if (IS_ERR(ltq_port->fpiclk)) {
+ pr_err("failed to get fpi clk\n");
+ return -ENOENT;
+ }
- ltq_port->tx_irq = tx_irq;
- ltq_port->rx_irq = rx_irq;
- ltq_port->err_irq = err_irq;
+ /* not all asc ports have clock gates, let's ignore the return code */
+ ltq_port->clk = clk_get(&pdev->dev, NULL);
- lqasc_port[pdev->id] = ltq_port;
+ ltq_port->tx_irq = irqres[0].start;
+ ltq_port->rx_irq = irqres[1].start;
+ ltq_port->err_irq = irqres[2].start;
+
+ lqasc_port[line] = ltq_port;
platform_set_drvdata(pdev, ltq_port);
ret = uart_add_one_port(&lqasc_reg, port);
@@ -729,10 +741,17 @@ lqasc_probe(struct platform_device *pdev)
return ret;
}
+static const struct of_device_id ltq_asc_match[] = {
+ { .compatible = DRVNAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_asc_match);
+
static struct platform_driver lqasc_driver = {
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
+ .of_match_table = ltq_asc_match,
},
};
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 0be8a2f00d0b..f76b1688c5c8 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 34bd345da775..6ae2a58d62f2 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&up->port.lock, flags);
}
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL)
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/*
* Wait for transmitter & holding register to empty
*/
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 3158e17b665c..1bd9163bc118 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1052,9 +1052,17 @@ static int sci_request_irq(struct sci_port *port)
if (SCIx_IRQ_IS_MUXED(port)) {
i = SCIx_MUX_IRQ;
irq = up->irq;
- } else
+ } else {
irq = port->cfg->irqs[i];
+ /*
+ * Certain port types won't support all of the
+ * available interrupt sources.
+ */
+ if (unlikely(!irq))
+ continue;
+ }
+
desc = sci_irq_desc + i;
port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
dev_name(up->dev), desc->desc);
@@ -1094,6 +1102,15 @@ static void sci_free_irq(struct sci_port *port)
* IRQ first.
*/
for (i = 0; i < SCIx_NR_IRQS; i++) {
+ unsigned int irq = port->cfg->irqs[i];
+
+ /*
+ * Certain port types won't support all of the available
+ * interrupt sources.
+ */
+ if (unlikely(!irq))
+ continue;
+
free_irq(port->cfg->irqs[i], port);
kfree(port->irqstr[i]);
@@ -1564,10 +1581,32 @@ static void sci_enable_ms(struct uart_port *port)
static void sci_break_ctl(struct uart_port *port, int break_state)
{
- /*
- * Not supported by hardware. Most parts couple break and rx
- * interrupts together, with break detection always enabled.
- */
+ struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
+ unsigned short scscr, scsptr;
+
+ /* check whether the port has SCSPTR */
+ if (!reg->size) {
+ /*
+ * Not supported by hardware. Most parts couple break and rx
+ * interrupts together, with break detection always enabled.
+ */
+ return;
+ }
+
+ scsptr = serial_port_in(port, SCSPTR);
+ scscr = serial_port_in(port, SCSCR);
+
+ if (break_state == -1) {
+ scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
+ scscr &= ~SCSCR_TE;
+ } else {
+ scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO;
+ scscr |= SCSCR_TE;
+ }
+
+ serial_port_out(port, SCSPTR, scsptr);
+ serial_port_out(port, SCSCR, scscr);
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2140,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev,
return 0;
}
+static void sci_cleanup_single(struct sci_port *port)
+{
+ sci_free_gpios(port);
+
+ clk_put(port->iclk);
+ clk_put(port->fclk);
+
+ pm_runtime_disable(port->port.dev);
+}
+
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
static void serial_console_putchar(struct uart_port *port, int ch)
{
@@ -2321,14 +2370,10 @@ static int sci_remove(struct platform_device *dev)
cpufreq_unregister_notifier(&port->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
- sci_free_gpios(port);
-
uart_remove_one_port(&sci_uart_driver, &port->port);
- clk_put(port->iclk);
- clk_put(port->fclk);
+ sci_cleanup_single(port);
- pm_runtime_disable(&dev->dev);
return 0;
}
@@ -2346,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev,
index+1, SCI_NPORTS);
dev_notice(&dev->dev, "Consider bumping "
"CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
- return 0;
+ return -EINVAL;
}
ret = sci_init_single(dev, sciport, index, p);
if (ret)
return ret;
- return uart_add_one_port(&sci_uart_driver, &sciport->port);
+ ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+ if (ret) {
+ sci_cleanup_single(sciport);
+ return ret;
+ }
+
+ return 0;
}
static int __devinit sci_probe(struct platform_device *dev)
@@ -2374,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev)
ret = sci_probe_single(dev, dev->id, p, sp);
if (ret)
- goto err_unreg;
+ return ret;
sp->freq_transition.notifier_call = sci_notifier;
ret = cpufreq_register_notifier(&sp->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
- if (unlikely(ret < 0))
- goto err_unreg;
+ if (unlikely(ret < 0)) {
+ sci_cleanup_single(sp);
+ return ret;
+ }
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
#endif
return 0;
-
-err_unreg:
- sci_remove(dev);
- return ret;
}
static int sci_suspend(struct device *dev)
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 4001eee6c08d..92c00b24d0df 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -57,6 +57,7 @@
#include <linux/ioport.h>
#include <linux/irqflags.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 5ed0daae6564..593d40ad0a6b 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -3338,9 +3338,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
printk("%s(%d):block_til_ready blocking on %s count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 45b43f11ca39..aa1debf97cc7 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -3336,9 +3336,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 4a1e4f07765b..a3dddc12d2fe 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3357,9 +3357,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
printk("%s(%d):%s block_til_ready() count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9e930c009bf2..b425c79675ad 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -185,7 +185,6 @@ void free_tty_struct(struct tty_struct *tty)
put_device(tty->dev);
kfree(tty->write_buf);
tty_buffer_free_all(tty);
- tty->magic = 0xDEADDEAD;
kfree(tty);
}
@@ -574,7 +573,7 @@ void __tty_hangup(struct tty_struct *tty)
}
spin_unlock(&redirect_lock);
- tty_lock(tty);
+ tty_lock();
/* some functions below drop BTM, so we need this bit */
set_bit(TTY_HUPPING, &tty->flags);
@@ -667,7 +666,7 @@ void __tty_hangup(struct tty_struct *tty)
clear_bit(TTY_HUPPING, &tty->flags);
tty_ldisc_enable(tty);
- tty_unlock(tty);
+ tty_unlock();
if (f)
fput(f);
@@ -1104,12 +1103,12 @@ void tty_write_message(struct tty_struct *tty, char *msg)
{
if (tty) {
mutex_lock(&tty->atomic_write_lock);
- tty_lock(tty);
+ tty_lock();
if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
- tty_unlock(tty);
+ tty_unlock();
tty->ops->write(tty, msg, strlen(msg));
} else
- tty_unlock(tty);
+ tty_unlock();
tty_write_unlock(tty);
}
return;
@@ -1404,7 +1403,6 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
}
initialize_tty_struct(tty, driver, idx);
- tty_lock(tty);
retval = tty_driver_install_tty(driver, tty);
if (retval < 0)
goto err_deinit_tty;
@@ -1417,11 +1415,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
retval = tty_ldisc_setup(tty, tty->link);
if (retval)
goto err_release_tty;
- /* Return the tty locked so that it cannot vanish under the caller */
return tty;
err_deinit_tty:
- tty_unlock(tty);
deinitialize_tty_struct(tty);
free_tty_struct(tty);
err_module_put:
@@ -1430,7 +1426,6 @@ err_module_put:
/* call the tty release_tty routine to clean out this slot */
err_release_tty:
- tty_unlock(tty);
printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, "
"clearing slot %d\n", idx);
release_tty(tty, idx);
@@ -1633,7 +1628,7 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty_paranoia_check(tty, inode, __func__))
return 0;
- tty_lock(tty);
+ tty_lock();
check_tty_count(tty, __func__);
__tty_fasync(-1, filp, 0);
@@ -1642,11 +1637,10 @@ int tty_release(struct inode *inode, struct file *filp)
pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER);
devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
- /* Review: parallel close */
o_tty = tty->link;
if (tty_release_checks(tty, o_tty, idx)) {
- tty_unlock(tty);
+ tty_unlock();
return 0;
}
@@ -1658,7 +1652,7 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty->ops->close)
tty->ops->close(tty, filp);
- tty_unlock(tty);
+ tty_unlock();
/*
* Sanity check: if tty->count is going to zero, there shouldn't be
* any waiters on tty->read_wait or tty->write_wait. We test the
@@ -1681,7 +1675,7 @@ int tty_release(struct inode *inode, struct file *filp)
opens on /dev/tty */
mutex_lock(&tty_mutex);
- tty_lock_pair(tty, o_tty);
+ tty_lock();
tty_closing = tty->count <= 1;
o_tty_closing = o_tty &&
(o_tty->count <= (pty_master ? 1 : 0));
@@ -1712,7 +1706,7 @@ int tty_release(struct inode *inode, struct file *filp)
printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
__func__, tty_name(tty, buf));
- tty_unlock_pair(tty, o_tty);
+ tty_unlock();
mutex_unlock(&tty_mutex);
schedule();
}
@@ -1775,7 +1769,7 @@ int tty_release(struct inode *inode, struct file *filp)
/* check whether both sides are closing ... */
if (!tty_closing || (o_tty && !o_tty_closing)) {
- tty_unlock_pair(tty, o_tty);
+ tty_unlock();
return 0;
}
@@ -1788,16 +1782,14 @@ int tty_release(struct inode *inode, struct file *filp)
tty_ldisc_release(tty, o_tty);
/*
* The release_tty function takes care of the details of clearing
- * the slots and preserving the termios structure. The tty_unlock_pair
- * should be safe as we keep a kref while the tty is locked (so the
- * unlock never unlocks a freed tty).
+ * the slots and preserving the termios structure.
*/
release_tty(tty, idx);
- tty_unlock_pair(tty, o_tty);
/* Make this pty number available for reallocation */
if (devpts)
devpts_kill_index(inode, idx);
+ tty_unlock();
return 0;
}
@@ -1901,9 +1893,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
* Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
* tty->count should protect the rest.
* ->siglock protects ->signal/->sighand
- *
- * Note: the tty_unlock/lock cases without a ref are only safe due to
- * tty_mutex
*/
static int tty_open(struct inode *inode, struct file *filp)
@@ -1927,7 +1916,8 @@ retry_open:
retval = 0;
mutex_lock(&tty_mutex);
- /* This is protected by the tty_mutex */
+ tty_lock();
+
tty = tty_open_current_tty(device, filp);
if (IS_ERR(tty)) {
retval = PTR_ERR(tty);
@@ -1948,19 +1938,17 @@ retry_open:
}
if (tty) {
- tty_lock(tty);
retval = tty_reopen(tty);
- if (retval < 0) {
- tty_unlock(tty);
+ if (retval)
tty = ERR_PTR(retval);
- }
- } else /* Returns with the tty_lock held for now */
+ } else
tty = tty_init_dev(driver, index);
mutex_unlock(&tty_mutex);
if (driver)
tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
+ tty_unlock();
retval = PTR_ERR(tty);
goto err_file;
}
@@ -1989,7 +1977,7 @@ retry_open:
printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
retval, tty->name);
#endif
- tty_unlock(tty); /* need to call tty_release without BTM */
+ tty_unlock(); /* need to call tty_release without BTM */
tty_release(inode, filp);
if (retval != -ERESTARTSYS)
return retval;
@@ -2001,15 +1989,17 @@ retry_open:
/*
* Need to reset f_op in case a hangup happened.
*/
+ tty_lock();
if (filp->f_op == &hung_up_tty_fops)
filp->f_op = &tty_fops;
+ tty_unlock();
goto retry_open;
}
- tty_unlock(tty);
+ tty_unlock();
mutex_lock(&tty_mutex);
- tty_lock(tty);
+ tty_lock();
spin_lock_irq(&current->sighand->siglock);
if (!noctty &&
current->signal->leader &&
@@ -2017,10 +2007,11 @@ retry_open:
tty->session == NULL)
__proc_set_tty(current, tty);
spin_unlock_irq(&current->sighand->siglock);
- tty_unlock(tty);
+ tty_unlock();
mutex_unlock(&tty_mutex);
return 0;
err_unlock:
+ tty_unlock();
mutex_unlock(&tty_mutex);
/* after locks to avoid deadlock */
if (!IS_ERR_OR_NULL(driver))
@@ -2103,13 +2094,10 @@ out:
static int tty_fasync(int fd, struct file *filp, int on)
{
- struct tty_struct *tty = file_tty(filp);
int retval;
-
- tty_lock(tty);
+ tty_lock();
retval = __tty_fasync(fd, filp, on);
- tty_unlock(tty);
-
+ tty_unlock();
return retval;
}
@@ -2946,7 +2934,6 @@ void initialize_tty_struct(struct tty_struct *tty,
tty->pgrp = NULL;
tty->overrun_time = jiffies;
tty_buffer_init(tty);
- mutex_init(&tty->legacy_mutex);
mutex_init(&tty->termios_mutex);
mutex_init(&tty->ldisc_mutex);
init_waitqueue_head(&tty->write_wait);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 173a9000a6cb..9911eb6b34cd 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -568,7 +568,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
- tty_lock(tty);
+ tty_lock();
/*
* We need to look at the tty locking here for pty/tty pairs
* when both sides try to change in parallel.
@@ -582,12 +582,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
*/
if (tty->ldisc->ops->num == ldisc) {
- tty_unlock(tty);
+ tty_unlock();
tty_ldisc_put(new_ldisc);
return 0;
}
- tty_unlock(tty);
+ tty_unlock();
/*
* Problem: What do we do if this blocks ?
* We could deadlock here
@@ -595,7 +595,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
tty_wait_until_sent(tty, 0);
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/*
@@ -605,10 +605,10 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
mutex_unlock(&tty->ldisc_mutex);
- tty_unlock(tty);
+ tty_unlock();
wait_event(tty_ldisc_wait,
test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
}
@@ -623,7 +623,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
o_ldisc = tty->ldisc;
- tty_unlock(tty);
+ tty_unlock();
/*
* Make sure we don't change while someone holds a
* reference to the line discipline. The TTY_LDISC bit
@@ -650,7 +650,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
retval = tty_ldisc_wait_idle(tty, 5 * HZ);
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/* handle wait idle failure locked */
@@ -665,7 +665,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
clear_bit(TTY_LDISC_CHANGING, &tty->flags);
mutex_unlock(&tty->ldisc_mutex);
tty_ldisc_put(new_ldisc);
- tty_unlock(tty);
+ tty_unlock();
return -EIO;
}
@@ -708,7 +708,7 @@ enable:
if (o_work)
schedule_work(&o_tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
- tty_unlock(tty);
+ tty_unlock();
return retval;
}
@@ -816,11 +816,11 @@ void tty_ldisc_hangup(struct tty_struct *tty)
* need to wait for another function taking the BTM
*/
clear_bit(TTY_LDISC, &tty->flags);
- tty_unlock(tty);
+ tty_unlock();
cancel_work_sync(&tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
retry:
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/* At this point we have a closed ldisc and we want to
@@ -831,7 +831,7 @@ retry:
if (atomic_read(&tty->ldisc->users) != 1) {
char cur_n[TASK_COMM_LEN], tty_n[64];
long timeout = 3 * HZ;
- tty_unlock(tty);
+ tty_unlock();
while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
timeout = MAX_SCHEDULE_TIMEOUT;
@@ -912,10 +912,10 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* race with the set_ldisc code path.
*/
- tty_unlock(tty);
+ tty_unlock();
tty_ldisc_halt(tty);
tty_ldisc_flush_works(tty);
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/*
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 69adc80c98cd..9ff986c32a21 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -4,59 +4,29 @@
#include <linux/semaphore.h>
#include <linux/sched.h>
-/* Legacy tty mutex glue */
+/*
+ * The 'big tty mutex'
+ *
+ * This mutex is taken and released by tty_lock() and tty_unlock(),
+ * replacing the older big kernel lock.
+ * It can no longer be taken recursively, and does not get
+ * released implicitly while sleeping.
+ *
+ * Don't use in new code.
+ */
+static DEFINE_MUTEX(big_tty_mutex);
/*
* Getting the big tty mutex.
*/
-
-void __lockfunc tty_lock(struct tty_struct *tty)
+void __lockfunc tty_lock(void)
{
- if (tty->magic != TTY_MAGIC) {
- printk(KERN_ERR "L Bad %p\n", tty);
- WARN_ON(1);
- return;
- }
- tty_kref_get(tty);
- mutex_lock(&tty->legacy_mutex);
+ mutex_lock(&big_tty_mutex);
}
EXPORT_SYMBOL(tty_lock);
-void __lockfunc tty_unlock(struct tty_struct *tty)
+void __lockfunc tty_unlock(void)
{
- if (tty->magic != TTY_MAGIC) {
- printk(KERN_ERR "U Bad %p\n", tty);
- WARN_ON(1);
- return;
- }
- mutex_unlock(&tty->legacy_mutex);
- tty_kref_put(tty);
+ mutex_unlock(&big_tty_mutex);
}
EXPORT_SYMBOL(tty_unlock);
-
-/*
- * Getting the big tty mutex for a pair of ttys with lock ordering
- * On a non pty/tty pair tty2 can be NULL which is just fine.
- */
-void __lockfunc tty_lock_pair(struct tty_struct *tty,
- struct tty_struct *tty2)
-{
- if (tty < tty2) {
- tty_lock(tty);
- tty_lock(tty2);
- } else {
- if (tty2 && tty2 != tty)
- tty_lock(tty2);
- tty_lock(tty);
- }
-}
-EXPORT_SYMBOL(tty_lock_pair);
-
-void __lockfunc tty_unlock_pair(struct tty_struct *tty,
- struct tty_struct *tty2)
-{
- tty_unlock(tty);
- if (tty2 && tty2 != tty)
- tty_unlock(tty2);
-}
-EXPORT_SYMBOL(tty_unlock_pair);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index d9cca95a5452..bf6e238146ae 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -230,7 +230,7 @@ int tty_port_block_til_ready(struct tty_port *port,
/* block if port is in the process of being closed */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
- wait_event_interruptible_tty(tty, port->close_wait,
+ wait_event_interruptible_tty(port->close_wait,
!(port->flags & ASYNC_CLOSING));
if (port->flags & ASYNC_HUP_NOTIFY)
return -EAGAIN;
@@ -296,9 +296,9 @@ int tty_port_block_til_ready(struct tty_port *port,
retval = -ERESTARTSYS;
break;
}
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
finish_wait(&port->open_wait, &wait);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 3b0c4e32ed7b..48cc6f25cfd3 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -53,17 +53,13 @@ extern void ctrl_alt_del(void);
#define KBD_DEFMODE ((1 << VC_REPEAT) | (1 << VC_META))
-/*
- * Some laptops take the 789uiojklm,. keys as number pad when NumLock is on.
- * This seems a good reason to start with NumLock off. On HIL keyboards
- * of PARISC machines however there is no NumLock key and everyone expects the
- * keypad to be used for numbers.
- */
-
-#if defined(CONFIG_PARISC) && (defined(CONFIG_KEYBOARD_HIL) || defined(CONFIG_KEYBOARD_HIL_OLD))
-#define KBD_DEFLEDS (1 << VC_NUMLOCK)
+#if defined(CONFIG_X86) || defined(CONFIG_PARISC)
+#include <asm/kbdleds.h>
#else
-#define KBD_DEFLEDS 0
+static inline int kbd_defleds(void)
+{
+ return 0;
+}
#endif
#define KBD_DEFLOCK 0
@@ -1524,8 +1520,8 @@ int __init kbd_init(void)
int error;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
- kbd_table[i].ledflagstate = KBD_DEFLEDS;
- kbd_table[i].default_ledflagstate = KBD_DEFLEDS;
+ kbd_table[i].ledflagstate = kbd_defleds();
+ kbd_table[i].default_ledflagstate = kbd_defleds();
kbd_table[i].ledmode = LED_SHOW_FLAGS;
kbd_table[i].lockstate = KBD_DEFLOCK;
kbd_table[i].slockstate = 0;
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index c691eea51537..f5ed3d75fa5a 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
obj-$(CONFIG_USB_SERIAL) += serial/
obj-$(CONFIG_USB) += misc/
-obj-$(CONFIG_USB) += phy/
+obj-$(CONFIG_USB_COMMON) += phy/
obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/
obj-$(CONFIG_USB_ATM) += atm/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f2a120eea9d4..36a2a0b7b82c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
usb_autopm_put_interface(acm->control);
+ /*
+ * Unthrottle device in case the TTY was closed while throttled.
+ */
+ spin_lock_irq(&acm->read_lock);
+ acm->throttled = 0;
+ acm->throttle_req = 0;
+ spin_unlock_irq(&acm->read_lock);
+
if (acm_submit_read_urbs(acm, GFP_KERNEL))
goto error_submit_read_urbs;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index ea8b304f0e85..ee469274a3fe 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
},
+ {
+ /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = HUAWEI_VENDOR_ID,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */
+ },
{ }
};
@@ -491,6 +500,8 @@ retry:
goto retry;
}
if (!desc->reslength) { /* zero length read */
+ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+ clear_bit(WDM_READ, &desc->flags);
spin_unlock_irq(&desc->iuspin);
goto retry;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 57ed9e400c06..622b4a48e732 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev)
pci_save_state(pci_dev);
- /*
- * Some systems crash if an EHCI controller is in D3 during
- * a sleep transition. We have to leave such controllers in D0.
- */
- if (hcd->broken_pci_sleep) {
- dev_dbg(dev, "Staying in PCI D0\n");
- return retval;
- }
-
/* If the root hub is dead rather than suspended, disallow remote
* wakeup. usb_hc_died() should ensure that both hosts are marked as
* dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 04fb834c3fa1..8fb484984c86 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2324,12 +2324,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm);
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * Port warm reset is required to recover
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
{
return hub_is_superspeed(hub->hdev) &&
- (portstatus & USB_PORT_STAT_LINK_STATE) ==
- USB_SS_PORT_LS_SS_INACTIVE;
+ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_SS_INACTIVE) ||
+ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_COMP_MOD)) ;
}
static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2365,7 +2369,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
*
* See https://bugzilla.kernel.org/show_bug.cgi?id=41752
*/
- if (hub_port_inactive(hub, portstatus)) {
+ if (hub_port_warm_reset_required(hub, portstatus)) {
int ret;
if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -3379,7 +3383,7 @@ int usb_disable_lpm(struct usb_device *udev)
return 0;
udev->lpm_disable_count++;
- if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0))
+ if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
return 0;
/* If LPM is enabled, attempt to disable it. */
@@ -4408,9 +4412,7 @@ static void hub_events(void)
/* Warm reset a USB3 protocol port if it's in
* SS.Inactive state.
*/
- if (hub_is_superspeed(hub->hdev) &&
- (portstatus & USB_PORT_STAT_LINK_STATE)
- == USB_SS_PORT_LS_SS_INACTIVE) {
+ if (hub_port_warm_reset_required(hub, portstatus)) {
dev_dbg(hub_dev, "warm reset port %d\n", i);
hub_port_reset(hub, i, NULL,
HUB_BH_RESET_TIME, true);
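
/*
 * Illustrative sketch (not part of the patch above): the new predicate
 * amounts to masking the link-state field of wPortStatus and comparing it
 * with the SS.Inactive and Compliance Mode encodings.  The constants are
 * assumed to follow the usual ch11.h values; standalone illustration only.
 */
#include <stdbool.h>
#include <stdint.h>

#define USB_PORT_STAT_LINK_STATE	0x01e0	/* bits 5..8 of wPortStatus */
#define USB_SS_PORT_LS_SS_INACTIVE	0x00c0	/* link state 6 << 5 */
#define USB_SS_PORT_LS_COMP_MOD		0x0140	/* link state 10 << 5 */

static bool warm_reset_required(uint16_t portstatus)
{
	uint16_t link_state = portstatus & USB_PORT_STAT_LINK_STATE;

	return link_state == USB_SS_PORT_LS_SS_INACTIVE ||
	       link_state == USB_SS_PORT_LS_COMP_MOD;
}
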
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b548cf1dbc62..bdd1c6749d88 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1838,7 +1838,6 @@ free_interfaces:
intfc = cp->intf_cache[i];
intf->altsetting = intfc->altsetting;
intf->num_altsetting = intfc->num_altsetting;
- intf->intf_assoc = find_iad(dev, cp, i);
kref_get(&intfc->ref);
alt = usb_altnum_to_altsetting(intf, 0);
@@ -1851,6 +1850,8 @@ free_interfaces:
if (!alt)
alt = &intf->altsetting[0];
+ intf->intf_assoc =
+ find_iad(dev, cp, alt->desc.bInterfaceNumber);
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
intf->dev.parent = &dev->dev;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3df1a1973b05..ec70df7aba17 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1091,7 +1091,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
/* wait until it is processed */
dwc3_stop_active_transfer(dwc, dep->number);
- goto out0;
+ goto out1;
}
dev_err(dwc->dev, "request %p was not queued to %s\n",
request, ep->name);
@@ -1099,6 +1099,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
goto out0;
}
+out1:
/* giveback the request */
dwc3_gadget_giveback(dep, req, -ECONNRESET);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index e23bf7984aaf..9a9bced813ed 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
spin_lock_irqsave(&ep->udc->lock, flags);
- if (ep->ep.desc) {
- spin_unlock_irqrestore(&ep->udc->lock, flags);
- DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
- return -EBUSY;
- }
-
ep->ep.desc = desc;
ep->ep.maxpacket = maxpacket;
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 51881f3bd07a..b09452d6f33a 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct qe_ep, ep);
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] ||
+ if (!_ep || !desc || _ep->name == ep_name[0] ||
(desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_qe_udc.h b/drivers/usb/gadget/fsl_qe_udc.h
index 4c07ca9cebf3..7026919fc901 100644
--- a/drivers/usb/gadget/fsl_qe_udc.h
+++ b/drivers/usb/gadget/fsl_qe_udc.h
@@ -153,10 +153,10 @@ struct usb_ep_para{
#define USB_BUSMODE_DTB 0x02
/* Endpoint basic handle */
-#define ep_index(EP) ((EP)->desc->bEndpointAddress & 0xF)
+#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress & 0xF)
#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
#define ep_is_in(EP) ((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
- USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+ USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
& USB_DIR_IN) == USB_DIR_IN)
/* ep0 transfer state */
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 28316858208b..bc6f9bb9994a 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct fsl_ep, ep);
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| (desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
@@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
/* for ep0: the desc defined here;
* for other eps, gadget layer called ep_enable with defined desc
*/
- udc_controller->eps[0].desc = &fsl_ep0_desc;
+ udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
/* setup the udc->eps[] for non-control endpoints and link
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 5cd7b7e7ddb4..f61a967f7082 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
/*
* ### internal used help routines.
*/
-#define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF)
+#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF)
#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
#define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
- USB_DIR_IN ):((EP)->desc->bEndpointAddress \
+ USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
& USB_DIR_IN)==USB_DIR_IN)
#define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \
&udc->eps[pipe])
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index b241e6c6a7f2..3d28fb976c78 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
dev = ep->dev;
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 262acfd53e32..2ab0388d93eb 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -61,6 +61,7 @@
#include <mach/irqs.h>
#include <mach/board.h>
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index dbcd1329495e..117a4bba1b8c 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct mv_ep, ep);
udc = ep->udc;
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 7ba32469c5bd..a460e8c204f4 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -153,7 +153,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
u16 maxp;
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
|| ep->maxpacket < usb_endpoint_maxp(desc)) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index d7c8cb3bf759..f7ff9e8e746a 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
struct pxa25x_udc *dev;
ep = container_of (_ep, struct pxa25x_ep, ep);
- if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name
+ if (!_ep || !desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
|| ep->fifo_size < usb_endpoint_maxp (desc)) {
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 36c6836eeb0f..236b271871a0 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
u32 ecr = 0;
hsep = our_ep(_ep);
- if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name
+ if (!_ep || !desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| hsep->bEndpointAddress != desc->bEndpointAddress
|| ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 3de71d37d75e..f2e51f50e528 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
ep = to_s3c2410_ep(_ep);
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 15a42c8c1943..5b3f5fffea92 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -914,7 +914,7 @@ static int gs_put_char(struct tty_struct *tty, unsigned char ch)
unsigned long flags;
int status;
- pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
+ pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n",
port->port_num, tty, ch, __builtin_return_address(0));
spin_lock_irqsave(&port->port_lock, flags);
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index 0cdf89d32a15..104ae9c81251 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -543,7 +543,7 @@ done:
return ret;
}
-/* called with queue->irqlock held.. */
+/* called with &queue_irqlock held.. */
static struct uvc_buffer *
uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
{
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 54d7ca559cb2..2ca9386d655b 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -296,7 +296,7 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
return -EINVAL;
- return v4l2_event_subscribe(&handle->vfh, arg, 2);
+ return v4l2_event_subscribe(&handle->vfh, arg, 2, NULL);
}
case VIDIOC_UNSUBSCRIBE_EVENT:
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 74a14f6ed34c..83e58df29fe3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -153,7 +153,7 @@ config USB_EHCI_HCD_OMAP
bool "EHCI support for OMAP3 and later chips"
depends on USB_EHCI_HCD && ARCH_OMAP
default y
- --- help ---
+ ---help---
Enables support for the on-chip EHCI controller on
OMAP3 and later chips.
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b100f5f9f4b6..800be38c78b4 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd)
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
+#endif
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index a797d51ecbe8..c778ffe4e4e5 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -32,7 +32,7 @@
#define ULPI_VIEWPORT_OFFSET 0x170
struct ehci_mxc_priv {
- struct clk *usbclk, *ahbclk, *phy1clk;
+ struct clk *usbclk, *ahbclk, *phyclk;
struct usb_hcd *hcd;
};
@@ -166,31 +166,26 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
}
/* enable clocks */
- priv->usbclk = clk_get(dev, "usb");
+ priv->usbclk = clk_get(dev, "ipg");
if (IS_ERR(priv->usbclk)) {
ret = PTR_ERR(priv->usbclk);
goto err_clk;
}
- clk_enable(priv->usbclk);
+ clk_prepare_enable(priv->usbclk);
- if (!cpu_is_mx35() && !cpu_is_mx25()) {
- priv->ahbclk = clk_get(dev, "usb_ahb");
- if (IS_ERR(priv->ahbclk)) {
- ret = PTR_ERR(priv->ahbclk);
- goto err_clk_ahb;
- }
- clk_enable(priv->ahbclk);
+ priv->ahbclk = clk_get(dev, "ahb");
+ if (IS_ERR(priv->ahbclk)) {
+ ret = PTR_ERR(priv->ahbclk);
+ goto err_clk_ahb;
}
+ clk_prepare_enable(priv->ahbclk);
/* "dr" device has its own clock on i.MX51 */
- if (cpu_is_mx51() && (pdev->id == 0)) {
- priv->phy1clk = clk_get(dev, "usb_phy1");
- if (IS_ERR(priv->phy1clk)) {
- ret = PTR_ERR(priv->phy1clk);
- goto err_clk_phy;
- }
- clk_enable(priv->phy1clk);
- }
+ priv->phyclk = clk_get(dev, "phy");
+ if (IS_ERR(priv->phyclk))
+ priv->phyclk = NULL;
+ if (priv->phyclk)
+ clk_prepare_enable(priv->phyclk);
/* call platform specific init function */
@@ -265,17 +260,15 @@ err_add:
if (pdata && pdata->exit)
pdata->exit(pdev);
err_init:
- if (priv->phy1clk) {
- clk_disable(priv->phy1clk);
- clk_put(priv->phy1clk);
- }
-err_clk_phy:
- if (priv->ahbclk) {
- clk_disable(priv->ahbclk);
- clk_put(priv->ahbclk);
+ if (priv->phyclk) {
+ clk_disable_unprepare(priv->phyclk);
+ clk_put(priv->phyclk);
}
+
+ clk_disable_unprepare(priv->ahbclk);
+ clk_put(priv->ahbclk);
err_clk_ahb:
- clk_disable(priv->usbclk);
+ clk_disable_unprepare(priv->usbclk);
clk_put(priv->usbclk);
err_clk:
iounmap(hcd->regs);
@@ -307,15 +300,14 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
platform_set_drvdata(pdev, NULL);
- clk_disable(priv->usbclk);
+ clk_disable_unprepare(priv->usbclk);
clk_put(priv->usbclk);
- if (priv->ahbclk) {
- clk_disable(priv->ahbclk);
- clk_put(priv->ahbclk);
- }
- if (priv->phy1clk) {
- clk_disable(priv->phy1clk);
- clk_put(priv->phy1clk);
+ clk_disable_unprepare(priv->ahbclk);
+ clk_put(priv->ahbclk);
+
+ if (priv->phyclk) {
+ clk_disable_unprepare(priv->phyclk);
+ clk_put(priv->phyclk);
}
kfree(priv);
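
/*
 * Illustrative sketch (not the driver's actual probe) of the clock handling
 * this hunk converts to: with the common clock framework,
 * clk_prepare_enable()/clk_disable_unprepare() replace bare
 * clk_enable()/clk_disable(), and a clock a platform may not provide (the
 * PHY clock here) is treated as absent when clk_get() fails.  example_probe
 * and the local names are illustrative, not the ehci-mxc ones.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *ipg, *phy;
	int ret;

	ipg = clk_get(&pdev->dev, "ipg");
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	ret = clk_prepare_enable(ipg);		/* prepare + enable in one call */
	if (ret) {
		clk_put(ipg);
		return ret;
	}

	phy = clk_get(&pdev->dev, "phy");	/* optional: absence is not fatal */
	if (IS_ERR(phy))
		phy = NULL;
	if (phy)
		clk_prepare_enable(phy);

	return 0;
}
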
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a44294d13494..c30435499a02 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/gpio.h>
+#include <linux/clk.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -55,6 +56,15 @@
#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
+/* Errata i693 */
+static struct clk *utmi_p1_fck;
+static struct clk *utmi_p2_fck;
+static struct clk *xclk60mhsp1_ck;
+static struct clk *xclk60mhsp2_ck;
+static struct clk *usbhost_p1_fck;
+static struct clk *usbhost_p2_fck;
+static struct clk *init_60m_fclk;
+
/*-------------------------------------------------------------------------*/
static const struct hc_driver ehci_omap_hc_driver;
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
return __raw_readl(base + reg);
}
+/* Erratum i693 workaround sequence */
+static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
+{
+ int ret = 0;
+
+ /* Switch to the internal 60 MHz clock */
+ ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
+ if (ret != 0)
+ ehci_err(ehci, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+
+ ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
+ if (ret != 0)
+ ehci_err(ehci, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+
+ clk_enable(usbhost_p1_fck);
+ clk_enable(usbhost_p2_fck);
+
+ /* Wait 1ms and switch back to the external clock */
+ mdelay(1);
+ ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
+ if (ret != 0)
+ ehci_err(ehci, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+
+ ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
+ if (ret != 0)
+ ehci_err(ehci, "xclk60mhsp2_ck set parent"
+ "failed error:%d\n", ret);
+
+ clk_disable(usbhost_p1_fck);
+ clk_disable(usbhost_p2_fck);
+}
+
static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
{
struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
}
}
+static int omap_ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 __iomem *status_reg = &ehci->regs->port_status[
+ (wIndex & 0xff) - 1];
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+ omap_ehci_erratum_i693(ehci);
+
+ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /* Handle the hub control events here */
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return retval;
+}
+
static void disable_put_regulator(
struct ehci_hcd_omap_platform_data *pdata)
{
@@ -192,14 +281,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
}
+ /* Hold PHYs in reset while initializing EHCI controller */
if (pdata->phy_reset) {
if (gpio_is_valid(pdata->reset_gpio_port[0]))
- gpio_request_one(pdata->reset_gpio_port[0],
- GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+ gpio_set_value_cansleep(pdata->reset_gpio_port[0], 0);
if (gpio_is_valid(pdata->reset_gpio_port[1]))
- gpio_request_one(pdata->reset_gpio_port[1],
- GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+ gpio_set_value_cansleep(pdata->reset_gpio_port[1], 0);
/* Hold the PHY in RESET for enough time till DIR is high */
udelay(10);
@@ -241,6 +329,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
ehci_reset(omap_ehci);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret) {
+ dev_err(dev, "failed to add hcd with err %d\n", ret);
+ goto err_add_hcd;
+ }
if (pdata->phy_reset) {
/* Hold the PHY in RESET for enough time till
@@ -255,17 +348,79 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
gpio_set_value_cansleep(pdata->reset_gpio_port[1], 1);
}
- ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (ret) {
- dev_err(dev, "failed to add hcd with err %d\n", ret);
+ /* root ports should always stay powered */
+ ehci_port_power(omap_ehci, 1);
+
+ /* get clocks */
+ utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(utmi_p1_fck)) {
+ ret = PTR_ERR(utmi_p1_fck);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
goto err_add_hcd;
}
- /* root ports should always stay powered */
- ehci_port_power(omap_ehci, 1);
+ xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+ if (IS_ERR(xclk60mhsp1_ck)) {
+ ret = PTR_ERR(xclk60mhsp1_ck);
+ dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+ goto err_utmi_p1_fck;
+ }
+
+ utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(utmi_p2_fck)) {
+ ret = PTR_ERR(utmi_p2_fck);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_xclk60mhsp1_ck;
+ }
+
+ xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+ if (IS_ERR(xclk60mhsp2_ck)) {
+ ret = PTR_ERR(xclk60mhsp2_ck);
+ dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+ goto err_utmi_p2_fck;
+ }
+
+ usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+ if (IS_ERR(usbhost_p1_fck)) {
+ ret = PTR_ERR(usbhost_p1_fck);
+ dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+ goto err_xclk60mhsp2_ck;
+ }
+
+ usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+ if (IS_ERR(usbhost_p2_fck)) {
+ ret = PTR_ERR(usbhost_p2_fck);
+ dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+ goto err_usbhost_p1_fck;
+ }
+
+ init_60m_fclk = clk_get(dev, "init_60m_fclk");
+ if (IS_ERR(init_60m_fclk)) {
+ ret = PTR_ERR(init_60m_fclk);
+ dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+ goto err_usbhost_p2_fck;
+ }
return 0;
+err_usbhost_p2_fck:
+ clk_put(usbhost_p2_fck);
+
+err_usbhost_p1_fck:
+ clk_put(usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+ clk_put(xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+ clk_put(utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+ clk_put(xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+ clk_put(utmi_p1_fck);
+
err_add_hcd:
disable_put_regulator(pdata);
pm_runtime_put_sync(dev);
@@ -294,6 +449,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
disable_put_regulator(dev->platform_data);
iounmap(hcd->regs);
usb_put_hcd(hcd);
+
+ clk_put(utmi_p1_fck);
+ clk_put(utmi_p2_fck);
+ clk_put(xclk60mhsp1_ck);
+ clk_put(xclk60mhsp2_ck);
+ clk_put(usbhost_p1_fck);
+ clk_put(usbhost_p2_fck);
+ clk_put(init_60m_fclk);
+
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -364,7 +528,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
+ .hub_control = omap_ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
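
/*
 * Illustrative sketch (hypothetical helper, not the patch code) of the
 * reparenting sequence omap_ehci_erratum_i693() performs: feed the UTMI
 * functional clock from the internal 60 MHz reference, keep the port clock
 * running briefly, then switch back to the external clock.  Error handling
 * and the second port are omitted; clk_enable() without clk_prepare()
 * mirrors the legacy OMAP clock API used by the patch.
 */
#include <linux/clk.h>
#include <linux/delay.h>

static void example_i693_sequence(struct clk *utmi_fck, struct clk *init_60m,
				  struct clk *ext_60m, struct clk *usbhost_fck)
{
	clk_set_parent(utmi_fck, init_60m);	/* switch to internal 60 MHz */
	clk_enable(usbhost_fck);

	mdelay(1);				/* give the link time to settle */

	clk_set_parent(utmi_fck, ext_60m);	/* back to the external clock */
	clk_disable(usbhost_fck);
}
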
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c6a5a3b4ea7..82de1073aa52 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
+#include <linux/clk.h>
#include <plat/ehci-orion.h>
#define rdl(off) __raw_readl(hcd->regs + (off))
@@ -198,6 +199,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
+ struct clk *clk;
void __iomem *regs;
int irq, err;
@@ -238,6 +240,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
goto err2;
}
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
hcd = usb_create_hcd(&ehci_orion_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
@@ -301,12 +311,18 @@ err1:
static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct clk *clk;
usb_remove_hcd(hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
return 0;
}
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bc94d7bf072d..123481793a47 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
hcd->has_tt = 1;
tdi_reset(ehci);
}
- if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
- /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
- if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
- ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
- hcd->broken_pci_sleep = 1;
- device_set_wakeup_capable(&pdev->dev, false);
- }
- }
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index ca819cdd0c5e..e7cb3925abf8 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- if (pdev->dev.platform_data != NULL)
- pdata = pdev->dev.platform_data;
+ pdata = pdev->dev.platform_data;
/* initialize hcd */
hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4a44bf833611..68548236ec42 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -722,8 +722,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
}
- tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
- TEGRA_USB_PHY_MODE_HOST);
+ tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs,
+ pdata->phy_config,
+ TEGRA_USB_PHY_MODE_HOST);
if (IS_ERR(tegra->phy)) {
dev_err(&pdev->dev, "Failed to open USB phy\n");
err = -ENXIO;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 9c2cc4633894..e9713d589e30 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
*
* Properly shutdown the hcd, call driver's shutdown routine.
*/
-static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
+static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
-
- return 0;
}
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 836772dfabd3..2f3619eefefa 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
}
/* Carry out the final steps of resuming the controller device */
-static void ohci_finish_controller_resume(struct usb_hcd *hcd)
+static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int port;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 2732ef660c5c..7b01094d7993 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -462,6 +462,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
}
}
+/* Updates Link Status for SuperSpeed port */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+ u32 pls = status_reg & PORT_PLS_MASK;
+
+ /* resume state is an xHCI internal state.
+ * Do not report it to usb core.
+ */
+ if (pls == XDEV_RESUME)
+ return;
+
+ /* When the CAS bit is set, a warm reset
+ * should be performed on the port.
+ */
+ if (status_reg & PORT_CAS) {
+ /* The CAS bit can be set while the port is
+ * in any link state.
+ * Only roothubs have CAS bit, so we
+ * pretend to be in compliance mode
+ * unless we're already in compliance
+ * or the inactive state.
+ */
+ if (pls != USB_SS_PORT_LS_COMP_MOD &&
+ pls != USB_SS_PORT_LS_SS_INACTIVE) {
+ pls = USB_SS_PORT_LS_COMP_MOD;
+ }
+ /* Return also connection bit -
+ * hub state machine resets port
+ * when this bit is set.
+ */
+ pls |= USB_PORT_STAT_CONNECTION;
+ }
+ /* update status field */
+ *status |= pls;
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -606,13 +642,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
else
status |= USB_PORT_STAT_POWER;
}
- /* Port Link State */
+ /* Update Port Link State for SuperSpeed ports */
if (hcd->speed == HCD_USB3) {
- /* resume state is a xHCI internal state.
- * Do not report it to usb core.
- */
- if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
- status |= (temp & PORT_PLS_MASK);
+ xhci_hub_report_link_state(&status, temp);
}
if (bus_state->port_c_suspend & (1 << wIndex))
status |= 1 << USB_PORT_FEAT_C_SUSPEND;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ec4338eec826..77689bd64cac 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
{
- struct list_head *tt;
struct list_head *tt_list_head;
- struct list_head *tt_next;
- struct xhci_tt_bw_info *tt_info;
+ struct xhci_tt_bw_info *tt_info, *next;
+ bool slot_found = false;
/* If the device never made it past the Set Address stage,
* it may not have the real_port set correctly.
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
}
tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
- if (list_empty(tt_list_head))
- return;
-
- list_for_each(tt, tt_list_head) {
- tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
- if (tt_info->slot_id == slot_id)
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* Multi-TT hubs will have more than one entry */
+ if (tt_info->slot_id == slot_id) {
+ slot_found = true;
+ list_del(&tt_info->tt_list);
+ kfree(tt_info);
+ } else if (slot_found) {
break;
+ }
}
- /* Cautionary measure in case the hub was disconnected before we
- * stored the TT information.
- */
- if (tt_info->slot_id != slot_id)
- return;
-
- tt_next = tt->next;
- tt_info = list_entry(tt, struct xhci_tt_bw_info,
- tt_list);
- /* Multi-TT hubs will have more than one entry */
- do {
- list_del(tt);
- kfree(tt_info);
- tt = tt_next;
- if (list_empty(tt_list_head))
- break;
- tt_next = tt->next;
- tt_info = list_entry(tt, struct xhci_tt_bw_info,
- tt_list);
- } while (tt_info->slot_id == slot_id);
}
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
struct dev_info *dev_info, *next;
- struct list_head *tt_list_head;
- struct list_head *tt;
- struct list_head *endpoints;
- struct list_head *ep, *q;
- struct xhci_tt_bw_info *tt_info;
- struct xhci_interval_bw_table *bwt;
- struct xhci_virt_ep *virt_ep;
-
unsigned long flags;
int size;
- int i;
+ int i, j, num_ports;
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
spin_unlock_irqrestore(&xhci->lock, flags);
- bwt = &xhci->rh_bw->bw_table;
- for (i = 0; i < XHCI_MAX_INTERVAL; i++) {
- endpoints = &bwt->interval_bw[i].endpoints;
- list_for_each_safe(ep, q, endpoints) {
- virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list);
- list_del(&virt_ep->bw_endpoint_list);
- kfree(virt_ep);
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+ struct list_head *ep = &bwt->interval_bw[j].endpoints;
+ while (!list_empty(ep))
+ list_del_init(ep->next);
}
}
- tt_list_head = &xhci->rh_bw->tts;
- list_for_each_safe(tt, q, tt_list_head) {
- tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
- list_del(tt);
- kfree(tt_info);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_tt_bw_info *tt, *n;
+ list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+ list_del(&tt->tt_list);
+ kfree(tt);
+ }
}
xhci->num_usb2_ports = 0;
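
/*
 * Illustrative sketch of the idiom the TT-info rework above adopts: when
 * entries may be removed while walking a list, list_for_each_entry_safe()
 * keeps a pointer to the next node so list_del()/kfree() on the current
 * entry is safe.  struct tt_entry and free_for_slot() are illustrative
 * names, not xHCI structures.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct tt_entry {
	int slot_id;
	struct list_head list;
};

static void free_for_slot(struct list_head *head, int slot_id)
{
	struct tt_entry *e, *next;

	list_for_each_entry_safe(e, next, head, list) {
		if (e->slot_id != slot_id)
			continue;
		list_del(&e->list);	/* safe: 'next' was saved beforehand */
		kfree(e);
	}
}
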
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 23b4aefd1036..8275645889da 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -885,6 +885,17 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
num_trbs_free_temp = ep_ring->num_trbs_free;
dequeue_temp = ep_ring->dequeue;
+ /* If we get two back-to-back stalls, and the first stalled transfer
+ * ends just before a link TRB, the dequeue pointer will be left on
+ * the link TRB by the code in the while loop. So we have to update
+ * the dequeue pointer one segment further, or we'll jump off
+ * the segment into la-la-land.
+ */
+ if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index afdc73ee84a6..a979cd0dbe0f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
command = xhci_readl(xhci, &xhci->op_regs->command);
command |= CMD_CSS;
xhci_writel(xhci, command, &xhci->op_regs->command);
- if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
- xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+ if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC save state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command |= CMD_CRS;
xhci_writel(xhci, command, &xhci->op_regs->command);
if (handshake(xhci, &xhci->op_regs->status,
- STS_RESTORE, 0, 10*100)) {
- xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+ STS_RESTORE, 0, 10 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
default:
dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
__func__);
- return -EINVAL;
+ return USB3_LPM_DISABLED;
}
if (sel <= max_sel_pel && pel <= max_sel_pel)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index de3d6e3e57be..55c0785810c9 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -341,7 +341,11 @@ struct xhci_op_regs {
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be performed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS (1 << 24)
/* wake on connect (enable) */
#define PORT_WKCONN_E (1 << 25)
/* wake on disconnect (enable) */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 768b4b55c816..9d63ba4d10d6 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -34,6 +34,7 @@
#include <linux/dma-mapping.h>
#include <mach/cputype.h>
+#include <mach/hardware.h>
#include <asm/mach-types.h>
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 046c84433cad..371baa0ee509 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -15,7 +15,7 @@
*/
/* Integrated highspeed/otg PHY */
-#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_CTL_PADDR 0x01c40034
#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */
#define USBPHY_PHYCLKGD BIT(8)
#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */
@@ -27,7 +27,7 @@
#define USBPHY_OTGPDWN BIT(1)
#define USBPHY_PHYPDWN BIT(0)
-#define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48)
+#define DM355_DEEPSLEEP_PADDR 0x01c40048
#define DRVVBUS_FORCE BIT(2)
#define DRVVBUS_OVERRIDE BIT(1)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f42c29b11f71..95918dacc99a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
}
musb_ep->desc = NULL;
+ musb_ep->end_point.desc = NULL;
/* abort all pending DMA and requests */
nuke(musb_ep, -ESHUTDOWN);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ef8d744800ac..e090c799d87b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -375,11 +375,21 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
*/
if (list_empty(&qh->hep->urb_list)) {
struct list_head *head;
+ struct dma_controller *dma = musb->dma_controller;
- if (is_in)
+ if (is_in) {
ep->rx_reinit = 1;
- else
+ if (ep->rx_channel) {
+ dma->channel_release(ep->rx_channel);
+ ep->rx_channel = NULL;
+ }
+ } else {
ep->tx_reinit = 1;
+ if (ep->tx_channel) {
+ dma->channel_release(ep->tx_channel);
+ ep->tx_channel = NULL;
+ }
+ }
/* Clobber old pointers to this qh */
musb_ep_set_qh(ep, is_in, NULL);
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index d2a9a8e691b9..0eabb049b6a9 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -305,9 +305,8 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
regulator_enable(twl->usb3v3);
twl->asleep = 1;
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
- 0x10);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
status = USB_EVENT_ID;
otg->default_a = true;
twl->phy.state = OTG_STATE_A_IDLE;
@@ -316,12 +315,10 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
atomic_notifier_call_chain(&twl->phy.notifier, status,
otg->gadget);
} else {
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
- 0x10);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
- 0x1);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
}
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
+ twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
return IRQ_HANDLED;
}
@@ -343,7 +340,7 @@ static int twl6030_enable_irq(struct usb_phy *x)
{
struct twl6030_usb *twl = phy_to_twl(x);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 3cfabcba7447..e7cf84f0751a 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -2,11 +2,11 @@
# Physical Layer USB driver configuration
#
comment "USB Physical Layer drivers"
- depends on USB
+ depends on USB || USB_GADGET
config USB_ISP1301
tristate "NXP ISP1301 USB transceiver support"
- depends on USB
+ depends on USB || USB_GADGET
depends on I2C
help
Say Y here to add support for the NXP ISP1301 USB transceiver driver.
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 1b1926200ba7..1e71079ce33b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
+ { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
@@ -92,6 +93,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -133,7 +135,13 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+ { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
+ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
@@ -145,7 +153,11 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
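
/*
 * Illustrative sketch of where new VID/PID pairs such as the ones above end
 * up: a zero-terminated usb_device_id table exported with
 * MODULE_DEVICE_TABLE() so userspace hotplug can autoload and bind the
 * driver.  example_ids is an illustrative table, not the cp210x one.
 */
#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE(0x10C4, 0x80C4) },	/* Optris infrared thermometer */
	{ USB_DEVICE(0x166A, 0x0201) },	/* Clipsal automation controller */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);
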
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c084ea34e26..bc912e5a3beb 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f3c7c78ede33..5661c7e2d415 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -784,6 +784,7 @@
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
+#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
/*
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 105a6d898ca4..9b026bf7afef 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
-/* we want to look at all devices, as the vendor/product id can change
- * depending on the command line argument */
-static const struct usb_device_id generic_serial_ids[] = {
- {.driver_info = 42},
- {}
-};
-
/* All of the device info needed for the Generic Serial Converter */
struct usb_serial_driver usb_serial_generic_device = {
.driver = {
@@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug)
USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
/* register our generic driver with ourselves */
- retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids);
+ retval = usb_serial_register_drivers(serial_drivers,
+ "usbserial_generic", generic_device_ids);
#endif
return retval;
}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index d0ec1aa52719..a71fa0aa0406 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
WDR_TIMEOUT);
- if (rc < 0)
- dev_err(&serial->dev->dev,
- "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+ kfree(buf);
+
dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
- kfree(buf);
- return rc;
+ if (rc < 0) {
+ dev_err(&serial->dev->dev,
+ "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+ return rc;
+ }
+ return 0;
} /* mct_u232_set_modem_ctrl */
static int mct_u232_get_modem_stat(struct usb_serial *serial,
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 81423f7361db..d47eb06fe463 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -222,14 +222,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
metro_priv->throttled = 0;
spin_unlock_irqrestore(&metro_priv->lock, flags);
- /*
- * Force low_latency on so that our tty_push actually forces the data
- * through, otherwise it is scheduled, and with high data rates (like
- * with OHCI) data can get lost.
- */
- if (tty)
- tty->low_latency = 1;
-
/* Clear the urb pipe. */
usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 29160f8b5101..57eca2448424 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -190,7 +190,7 @@
static int device_type;
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1aae9028cd0b..417ab1b0aa30 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -47,6 +47,7 @@
/* Function prototypes */
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id);
+static void option_release(struct usb_serial *serial);
static int option_send_setup(struct usb_serial_port *port);
static void option_instat_callback(struct urb *urb);
@@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_E14AC 0x14AC
#define HUAWEI_PRODUCT_K3806 0x14AE
#define HUAWEI_PRODUCT_K4605 0x14C6
+#define HUAWEI_PRODUCT_K5005 0x14C8
#define HUAWEI_PRODUCT_K3770 0x14C9
#define HUAWEI_PRODUCT_K3771 0x14CA
#define HUAWEI_PRODUCT_K4510 0x14CB
@@ -234,6 +236,7 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_G1 0xA001
#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
+#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
@@ -425,7 +428,7 @@ static void option_instat_callback(struct urb *urb);
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
-/* YUGA products www.yuga-info.com*/
+/* YUGA products www.yuga-info.com gavin.kx@qq.com */
#define YUGA_VENDOR_ID 0x257A
#define YUGA_PRODUCT_CEM600 0x1601
#define YUGA_PRODUCT_CEM610 0x1602
@@ -442,6 +445,8 @@ static void option_instat_callback(struct urb *urb);
#define YUGA_PRODUCT_CEU516 0x160C
#define YUGA_PRODUCT_CEU528 0x160D
#define YUGA_PRODUCT_CEU526 0x160F
+#define YUGA_PRODUCT_CEU881 0x161F
+#define YUGA_PRODUCT_CEU882 0x162F
#define YUGA_PRODUCT_CWM600 0x2601
#define YUGA_PRODUCT_CWM610 0x2602
@@ -457,23 +462,26 @@ static void option_instat_callback(struct urb *urb);
#define YUGA_PRODUCT_CWU518 0x260B
#define YUGA_PRODUCT_CWU516 0x260C
#define YUGA_PRODUCT_CWU528 0x260D
+#define YUGA_PRODUCT_CWU581 0x260E
#define YUGA_PRODUCT_CWU526 0x260F
-
-#define YUGA_PRODUCT_CLM600 0x2601
-#define YUGA_PRODUCT_CLM610 0x2602
-#define YUGA_PRODUCT_CLM500 0x2603
-#define YUGA_PRODUCT_CLM510 0x2604
-#define YUGA_PRODUCT_CLM800 0x2605
-#define YUGA_PRODUCT_CLM900 0x2606
-
-#define YUGA_PRODUCT_CLU718 0x2607
-#define YUGA_PRODUCT_CLU716 0x2608
-#define YUGA_PRODUCT_CLU728 0x2609
-#define YUGA_PRODUCT_CLU726 0x260A
-#define YUGA_PRODUCT_CLU518 0x260B
-#define YUGA_PRODUCT_CLU516 0x260C
-#define YUGA_PRODUCT_CLU528 0x260D
-#define YUGA_PRODUCT_CLU526 0x260F
+#define YUGA_PRODUCT_CWU582 0x261F
+#define YUGA_PRODUCT_CWU583 0x262F
+
+#define YUGA_PRODUCT_CLM600 0x3601
+#define YUGA_PRODUCT_CLM610 0x3602
+#define YUGA_PRODUCT_CLM500 0x3603
+#define YUGA_PRODUCT_CLM510 0x3604
+#define YUGA_PRODUCT_CLM800 0x3605
+#define YUGA_PRODUCT_CLM900 0x3606
+
+#define YUGA_PRODUCT_CLU718 0x3607
+#define YUGA_PRODUCT_CLU716 0x3608
+#define YUGA_PRODUCT_CLU728 0x3609
+#define YUGA_PRODUCT_CLU726 0x360A
+#define YUGA_PRODUCT_CLU518 0x360B
+#define YUGA_PRODUCT_CLU516 0x360C
+#define YUGA_PRODUCT_CLU528 0x360D
+#define YUGA_PRODUCT_CLU526 0x360F
/* Viettel products */
#define VIETTEL_VENDOR_ID 0x2262
@@ -489,6 +497,19 @@ static void option_instat_callback(struct urb *urb);
/* MediaTek products */
#define MEDIATEK_VENDOR_ID 0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM 0x7101
+#define MEDIATEK_PRODUCT_7208_2COM 0x7102
+#define MEDIATEK_PRODUCT_FP_1COM 0x0003
+#define MEDIATEK_PRODUCT_FP_2COM 0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
+
+/* Cellient products */
+#define CELLIENT_VENDOR_ID 0x2692
+#define CELLIENT_PRODUCT_MEN200 0x9005
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
@@ -542,6 +563,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
.reserved = BIT(1),
};
+static const struct option_blacklist_info net_intf2_blacklist = {
+ .reserved = BIT(2),
+};
+
static const struct option_blacklist_info net_intf3_blacklist = {
.reserved = BIT(3),
};
@@ -666,6 +691,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -722,6 +752,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1080,6 +1112,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1209,6 +1243,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
{ USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
@@ -1216,6 +1255,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1245,7 +1296,7 @@ static struct usb_serial_driver option_1port_device = {
.ioctl = usb_wwan_ioctl,
.attach = usb_wwan_startup,
.disconnect = usb_wwan_disconnect,
- .release = usb_wwan_release,
+ .release = option_release,
.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
@@ -1259,35 +1310,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
static bool debug;
-/* per port private data */
-
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 4096
-
-struct option_port_private {
- /* Input endpoints and buffer for this port */
- struct urb *in_urbs[N_IN_URB];
- u8 *in_buffer[N_IN_URB];
- /* Output endpoints and buffer for this port */
- struct urb *out_urbs[N_OUT_URB];
- u8 *out_buffer[N_OUT_URB];
- unsigned long out_busy; /* Bit vector of URBs in use */
- int opened;
- struct usb_anchor delayed;
-
- /* Settings for the port */
- int rts_state; /* Handshaking pins (outputs) */
- int dtr_state;
- int cts_state; /* Handshaking pins (inputs) */
- int dsr_state;
- int dcd_state;
- int ri_state;
-
- unsigned long tx_start_time[N_OUT_URB];
-};
-
module_usb_serial_driver(serial_drivers, option_ids);
static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
@@ -1356,12 +1378,22 @@ static int option_probe(struct usb_serial *serial,
return 0;
}
+static void option_release(struct usb_serial *serial)
+{
+ struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+ usb_wwan_release(serial);
+
+ kfree(priv);
+}
+
static void option_instat_callback(struct urb *urb)
{
int err;
int status = urb->status;
struct usb_serial_port *port = urb->context;
- struct option_port_private *portdata = usb_get_serial_port_data(port);
+ struct usb_wwan_port_private *portdata =
+ usb_get_serial_port_data(port);
dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
@@ -1421,7 +1453,7 @@ static int option_send_setup(struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
struct usb_wwan_intf_private *intfdata =
(struct usb_wwan_intf_private *) serial->private;
- struct option_port_private *portdata;
+ struct usb_wwan_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 0d5fe59ebb9e..996015c5f1ac 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
{USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
+ {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+ {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
{USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ba54a0a8235c..d423d36acc04 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ /* AT&T Direct IP LTE modems */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6a1b609a0d94..27483f91a4a3 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -659,12 +659,14 @@ exit:
static struct usb_serial_driver *search_serial_device(
struct usb_interface *iface)
{
- const struct usb_device_id *id;
+ const struct usb_device_id *id = NULL;
struct usb_serial_driver *drv;
+ struct usb_driver *driver = to_usb_driver(iface->dev.driver);
/* Check if the usb id matches a known device */
list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
- id = get_iface_id(drv, iface);
+ if (drv->usb_driver == driver)
+ id = get_iface_id(drv, iface);
if (id)
return drv;
}
@@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface,
if (retval) {
dbg("sub driver rejected device");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return retval;
}
@@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface,
*/
if (num_bulk_in == 0 || num_bulk_out == 0) {
dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return -ENODEV;
}
@@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface,
if (num_ports == 0) {
dev_err(&interface->dev,
"Generic device with no bulk out, not allowed.\n");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return -EIO;
}
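
The usb-serial.c hunks above replace kfree(serial) with usb_serial_put(serial) on the probe error paths: struct usb_serial is reference counted and owns state that only its release callback knows how to undo, so freeing it directly would skip that teardown. A minimal userspace analogy of the same idea; the names here (struct fake_serial, fake_serial_put) are purely illustrative and not part of any kernel API.

#include <stdio.h>
#include <stdlib.h>

struct fake_serial {
	int refcount;
	char *port_buf;		/* stands in for state only the destructor can free */
};

static struct fake_serial *fake_serial_create(void)
{
	struct fake_serial *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->refcount = 1;
	s->port_buf = malloc(64);
	return s;
}

static void fake_serial_put(struct fake_serial *s)
{
	if (--s->refcount == 0) {
		free(s->port_buf);	/* destructor-only knowledge */
		free(s);
		printf("released via destructor\n");
	}
}

int main(void)
{
	struct fake_serial *s = fake_serial_create();

	if (!s)
		return 1;
	/* probe failed: drop the reference instead of free(s), so the
	 * destructor can release everything the object acquired */
	fake_serial_put(s);
	return 0;
}
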
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index a324a5d21e99..11418da9bc09 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -202,6 +202,12 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_NO_READ_CAPACITY_16)
sdev->no_read_capacity_16 = 1;
+ /*
+ * Many devices do not respond properly to READ_CAPACITY_16.
+ * Tell the SCSI layer to try READ_CAPACITY_10 first.
+ */
+ sdev->try_rc_10_first = 1;
+
/* assume SPC3 or later devices support sense size > 18 */
if (sdev->scsi_level > SCSI_SPC_2)
us->fflags |= US_FL_SANE_SENSE;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 8f3cbb8dc81b..1719886bb9be 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -110,7 +110,7 @@ UNUSUAL_DEV( 0x040d, 0x6205, 0x0003, 0x0003,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Deduced by Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
+/* Deduced by Jonathan Woithe <jwoithe@just42.net>
* Entry needed for flags: US_FL_FIX_INQUIRY because initial inquiry message
* always fails and confuses drive.
*/
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 94dbd25caa30..112156f68afb 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -191,7 +191,9 @@ static int vhost_worker(void *data)
struct vhost_dev *dev = data;
struct vhost_work *work = NULL;
unsigned uninitialized_var(seq);
+ mm_segment_t oldfs = get_fs();
+ set_fs(USER_DS);
use_mm(dev->mm);
for (;;) {
@@ -229,6 +231,7 @@ static int vhost_worker(void *data)
}
unuse_mm(dev->mm);
+ set_fs(oldfs);
return 0;
}
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index a290be51a1f4..0217f7415ef5 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2210,7 +2210,7 @@ config FB_XILINX
config FB_COBALT
tristate "Cobalt server LCD frame buffer support"
- depends on FB && MIPS_COBALT
+ depends on FB && (MIPS_COBALT || MIPS_SEAD3)
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2382,6 +2382,39 @@ config FB_BROADSHEET
and could also have been called by other names when coupled with
a bridge adapter.
+config FB_AUO_K190X
+ tristate "AUO-K190X EPD controller support"
+ depends on FB
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select FB_DEFERRED_IO
+ help
+ Provides support for epaper controllers from the K190X series
+ of AUO. These controllers can be used to drive epaper displays
+ from Sipix.
+
+ This option enables the common support, shared by the individual
+ controller drivers. You will also have to enable the driver
+ for the controller type used in your device.
+
+config FB_AUO_K1900
+ tristate "AUO-K1900 EPD controller support"
+ depends on FB && FB_AUO_K190X
+ help
+ This driver implements support for the AUO K1900 epd-controller.
+ This controller can drive Sipix epaper displays but can only do
+ serial updates, reducing the number of possible frames per second.
+
+config FB_AUO_K1901
+ tristate "AUO-K1901 EPD controller support"
+ depends on FB && FB_AUO_K190X
+ help
+ This driver implements support for the AUO K1901 epd-controller.
+ This controller can drive Sipix epaper displays and supports
+ concurrent updates, making higher frames per second possible.
+
config FB_JZ4740
tristate "JZ4740 LCD framebuffer support"
depends on FB && MACH_JZ4740
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9356add945b3..ee8dafb69e36 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -118,6 +118,9 @@ obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
obj-$(CONFIG_FB_MAXINE) += maxinefb.o
obj-$(CONFIG_FB_METRONOME) += metronomefb.o
obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o
+obj-$(CONFIG_FB_AUO_K190X) += auo_k190x.o
+obj-$(CONFIG_FB_AUO_K1900) += auo_k1900fb.o
+obj-$(CONFIG_FB_AUO_K1901) += auo_k1901fb.o
obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
obj-$(CONFIG_FB_SH7760) += sh7760fb.o
obj-$(CONFIG_FB_IMX) += imxfb.o
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index ffbce4525468..fe3b6ec87122 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -536,7 +536,7 @@ static int __devinit au1100fb_drv_probe(struct platform_device *dev)
for (page = (unsigned long)fbdev->fb_mem;
page < PAGE_ALIGN((unsigned long)fbdev->fb_mem + fbdev->fb_len);
page += PAGE_SIZE) {
-#if CONFIG_DMA_NONCOHERENT
+#ifdef CONFIG_DMA_NONCOHERENT
SetPageReserved(virt_to_page(CAC_ADDR((void *)page)));
#else
SetPageReserved(virt_to_page(page));
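
The au1100fb.c fix swaps "#if CONFIG_DMA_NONCOHERENT" for "#ifdef": a disabled Kconfig option is simply not defined, so "#if" evaluates an undefined identifier (treated as 0, and flagged by -Wundef), whereas "#ifdef" tests exactly what was meant. A small standalone sketch of the difference, with the macro deliberately left undefined:

#include <stdio.h>

/* CONFIG_DMA_NONCOHERENT is intentionally not defined in this sketch,
 * mimicking a kernel configuration where the option is disabled. */

int main(void)
{
#if CONFIG_DMA_NONCOHERENT	/* undefined identifier: evaluates as 0, -Wundef warns */
	printf("#if branch taken\n");
#else
	printf("#if branch skipped: the undefined macro evaluated to 0\n");
#endif

#ifdef CONFIG_DMA_NONCOHERENT	/* asks only whether the symbol is defined */
	printf("#ifdef branch taken\n");
#else
	printf("#ifdef branch skipped: symbol not defined\n");
#endif
	return 0;
}
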
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c
new file mode 100644
index 000000000000..c36cf961dcb2
--- /dev/null
+++ b/drivers/video/auo_k1900fb.c
@@ -0,0 +1,198 @@
+/*
+ * auo_k1900fb.c -- FB driver for AUO-K1900 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1900 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2 4 step gray (2bit) - FIXME: add strange refresh
+ * mode3 2 step gray (1bit) - FIXME: add strange refresh
+ * mode4 handwriting mode (strange behaviour)
+ * mode5 automatic selection of update mode
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1900 specific commands
+ */
+
+#define AUOK1900_CMD_PARTIALDISP 0x1001
+#define AUOK1900_CMD_ROTATION 0x1006
+#define AUOK1900_CMD_LUT_STOP 0x1009
+
+#define AUOK1900_INIT_TEMP_AVERAGE (1 << 13)
+#define AUOK1900_INIT_ROTATE(_x) ((_x & 0x3) << 10)
+#define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2)
+
+static void auok1900_init(struct auok190xfb_par *par)
+{
+ struct auok190x_board *board = par->board;
+ u16 init_param = 0;
+
+ init_param |= AUOK1900_INIT_TEMP_AVERAGE;
+ init_param |= AUOK1900_INIT_ROTATE(par->rotation);
+ init_param |= AUOK190X_INIT_INVERSE_WHITE;
+ init_param |= AUOK190X_INIT_FORMAT0;
+ init_param |= AUOK1900_INIT_RESOLUTION(par->resolution);
+ init_param |= AUOK190X_INIT_SHIFT_RIGHT;
+
+ auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+ /* let the controller finish */
+ board->wait_for_rdy(par);
+}
+
+static void auok1900_update_region(struct auok190xfb_par *par, int mode,
+ u16 y1, u16 y2)
+{
+ struct device *dev = par->info->device;
+ unsigned char *buf = (unsigned char *)par->info->screen_base;
+ int xres = par->info->var.xres;
+ u16 args[4];
+
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&(par->io_lock));
+
+ /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
+ y1 &= 0xfffe;
+ y2 &= 0xfffe;
+
+ dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+ 1, y1+1, xres, y2-y1, mode);
+
+ /* FIXME: handle different partial update modes */
+ args[0] = mode | 1;
+ args[1] = y1 + 1;
+ args[2] = xres;
+ args[3] = y2 - y1;
+ buf += y1 * xres;
+ auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args,
+ ((y2 - y1) * xres)/2, (u16 *) buf);
+ auok190x_send_command(par, AUOK190X_CMD_DATA_STOP);
+
+ par->update_cnt++;
+
+ mutex_unlock(&(par->io_lock));
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par,
+ u16 y1, u16 y2)
+{
+ int mode;
+
+ if (par->update_mode < 0) {
+ mode = AUOK190X_UPDATE_MODE(1);
+ par->last_mode = -1;
+ } else {
+ mode = AUOK190X_UPDATE_MODE(par->update_mode);
+ par->last_mode = par->update_mode;
+ }
+
+ if (par->flash)
+ mode |= AUOK190X_UPDATE_NONFLASH;
+
+ auok1900_update_region(par, mode, y1, y2);
+}
+
+static void auok1900fb_dpy_update(struct auok190xfb_par *par)
+{
+ int mode;
+
+ if (par->update_mode < 0) {
+ mode = AUOK190X_UPDATE_MODE(0);
+ par->last_mode = -1;
+ } else {
+ mode = AUOK190X_UPDATE_MODE(par->update_mode);
+ par->last_mode = par->update_mode;
+ }
+
+ if (par->flash)
+ mode |= AUOK190X_UPDATE_NONFLASH;
+
+ auok1900_update_region(par, mode, 0, par->info->var.yres);
+ par->update_cnt = 0;
+}
+
+static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
+{
+ return (par->update_cnt > 10);
+}
+
+static int __devinit auok1900fb_probe(struct platform_device *pdev)
+{
+ struct auok190x_init_data init;
+ struct auok190x_board *board;
+
+ /* pick up board specific routines */
+ board = pdev->dev.platform_data;
+ if (!board)
+ return -EINVAL;
+
+ /* fill temporary init struct for common init */
+ init.id = "auo_k1900fb";
+ init.board = board;
+ init.update_partial = auok1900fb_dpy_update_pages;
+ init.update_all = auok1900fb_dpy_update;
+ init.need_refresh = auok1900fb_need_refresh;
+ init.init = auok1900_init;
+
+ return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1900fb_remove(struct platform_device *pdev)
+{
+ return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1900fb_driver = {
+ .probe = auok1900fb_probe,
+ .remove = __devexit_p(auok1900fb_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "auo_k1900fb",
+ .pm = &auok190x_pm,
+ },
+};
+module_platform_driver(auok1900fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
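
auok1900_update_region() above clamps y1/y2 to even rows and hands AUOK1900_CMD_PARTIALDISP four arguments plus ((y2 - y1) * xres) / 2 sixteen-bit words of pixel data (the 8bpp region viewed as 16-bit words). A small userspace sketch of that argument math, assuming a hypothetical 800-pixel-wide panel and an arbitrary dirty region:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int xres = 800;		/* assumed panel width for this sketch */
	uint16_t y1 = 37, y2 = 211;	/* hypothetical dirty rows */
	uint16_t args[4];
	int words;

	/* the driver drops the lowest bit so both rows are even */
	y1 &= 0xfffe;
	y2 &= 0xfffe;

	args[0] = 1;			/* the update mode bits get OR'ed in here */
	args[1] = y1 + 1;		/* start row, 1-based */
	args[2] = xres;			/* update width: always the full line */
	args[3] = y2 - y1;		/* update height */

	/* the 8bpp framebuffer region is passed as 16-bit words */
	words = ((y2 - y1) * xres) / 2;

	printf("args = {%u, %u, %u, %u}, %d words of pixel data\n",
	       args[0], args[1], args[2], args[3], words);
	return 0;
}
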
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c
new file mode 100644
index 000000000000..1c054c18616e
--- /dev/null
+++ b/drivers/video/auo_k1901fb.c
@@ -0,0 +1,251 @@
+/*
+ * auo_k1901fb.c -- FB driver for AUO-K1901 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1901 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2+3 4 step gray (2bit)
+ * mode4+5 2 step gray (1bit)
+ * - mode4 is described as "without LUT"
+ * mode7 automatic selection of update mode
+ *
+ * The most interesting difference from the K1900 is the ability to do screen
+ * updates in an asynchronous fashion. Where the K1900 needs to wait for the
+ * current update to complete, the K1901 can already accept further updates.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1901 specific commands
+ */
+
+#define AUOK1901_CMD_LUT_INTERFACE 0x0005
+#define AUOK1901_CMD_DMA_START 0x1001
+#define AUOK1901_CMD_CURSOR_START 0x1007
+#define AUOK1901_CMD_CURSOR_STOP AUOK190X_CMD_DATA_STOP
+#define AUOK1901_CMD_DDMA_START 0x1009
+
+#define AUOK1901_INIT_GATE_PULSE_LOW (0 << 14)
+#define AUOK1901_INIT_GATE_PULSE_HIGH (1 << 14)
+#define AUOK1901_INIT_SINGLE_GATE (0 << 13)
+#define AUOK1901_INIT_DOUBLE_GATE (1 << 13)
+
+/* Bits to pixels
+ * Mode 15-12 11-8 7-4 3-0
+ * format2 2 T 1 T
+ * format3 1 T 2 T
+ * format4 T 2 T 1
+ * format5 T 1 T 2
+ *
+ * halftone modes:
+ * format6 2 2 1 1
+ * format7 1 1 2 2
+ */
+#define AUOK1901_INIT_FORMAT2 (1 << 7)
+#define AUOK1901_INIT_FORMAT3 ((1 << 7) | (1 << 6))
+#define AUOK1901_INIT_FORMAT4 (1 << 8)
+#define AUOK1901_INIT_FORMAT5 ((1 << 8) | (1 << 6))
+#define AUOK1901_INIT_FORMAT6 ((1 << 8) | (1 << 7))
+#define AUOK1901_INIT_FORMAT7 ((1 << 8) | (1 << 7) | (1 << 6))
+
+/* res[4] to bit 10
+ * res[3-0] to bits 5-2
+ */
+#define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \
+ | ((_res & 0xf) << 2))
+
+/*
+ * portrait / landscape orientation in AUOK1901_CMD_DMA_START
+ */
+#define AUOK1901_DMA_ROTATE90(_rot) ((_rot & 1) << 13)
+
+/*
+ * equivalent to 1 << 11; the ~ is needed to get the same rotation behaviour as the K1900
+ */
+#define AUOK1901_DDMA_ROTATE180(_rot) ((~_rot & 2) << 10)
+
+static void auok1901_init(struct auok190xfb_par *par)
+{
+ struct auok190x_board *board = par->board;
+ u16 init_param = 0;
+
+ init_param |= AUOK190X_INIT_INVERSE_WHITE;
+ init_param |= AUOK190X_INIT_FORMAT0;
+ init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
+ init_param |= AUOK190X_INIT_SHIFT_LEFT;
+
+ auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+ /* let the controller finish */
+ board->wait_for_rdy(par);
+}
+
+static void auok1901_update_region(struct auok190xfb_par *par, int mode,
+ u16 y1, u16 y2)
+{
+ struct device *dev = par->info->device;
+ unsigned char *buf = (unsigned char *)par->info->screen_base;
+ int xres = par->info->var.xres;
+ u16 args[5];
+
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&(par->io_lock));
+
+ /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
+ y1 &= 0xfffe;
+ y2 &= 0xfffe;
+
+ dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+ 1, y1+1, xres, y2-y1, mode);
+
+ /* K1901: first transfer the region data */
+ args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
+ args[1] = y1 + 1;
+ args[2] = xres;
+ args[3] = y2 - y1;
+ buf += y1 * xres;
+ auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
+ args, ((y2 - y1) * xres)/2,
+ (u16 *) buf);
+ auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);
+
+ /* K1901: second, tell the controller to update the region with the mode */
+ args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
+ args[1] = 1;
+ args[2] = y1 + 1;
+ args[3] = xres;
+ args[4] = y2 - y1;
+ auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);
+
+ par->update_cnt++;
+
+ mutex_unlock(&(par->io_lock));
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
+ u16 y1, u16 y2)
+{
+ int mode;
+
+ if (par->update_mode < 0) {
+ mode = AUOK190X_UPDATE_MODE(1);
+ par->last_mode = -1;
+ } else {
+ mode = AUOK190X_UPDATE_MODE(par->update_mode);
+ par->last_mode = par->update_mode;
+ }
+
+ if (par->flash)
+ mode |= AUOK190X_UPDATE_NONFLASH;
+
+ auok1901_update_region(par, mode, y1, y2);
+}
+
+static void auok1901fb_dpy_update(struct auok190xfb_par *par)
+{
+ int mode;
+
+ /* When doing full updates, wait for the controller to be ready
+ * This will hopefully catch some hangs of the K1901
+ */
+ par->board->wait_for_rdy(par);
+
+ if (par->update_mode < 0) {
+ mode = AUOK190X_UPDATE_MODE(0);
+ par->last_mode = -1;
+ } else {
+ mode = AUOK190X_UPDATE_MODE(par->update_mode);
+ par->last_mode = par->update_mode;
+ }
+
+ if (par->flash)
+ mode |= AUOK190X_UPDATE_NONFLASH;
+
+ auok1901_update_region(par, mode, 0, par->info->var.yres);
+ par->update_cnt = 0;
+}
+
+static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
+{
+ return (par->update_cnt > 10);
+}
+
+static int __devinit auok1901fb_probe(struct platform_device *pdev)
+{
+ struct auok190x_init_data init;
+ struct auok190x_board *board;
+
+ /* pick up board specific routines */
+ board = pdev->dev.platform_data;
+ if (!board)
+ return -EINVAL;
+
+ /* fill temporary init struct for common init */
+ init.id = "auo_k1901fb";
+ init.board = board;
+ init.update_partial = auok1901fb_dpy_update_pages;
+ init.update_all = auok1901fb_dpy_update;
+ init.need_refresh = auok1901fb_need_refresh;
+ init.init = auok1901_init;
+
+ return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1901fb_remove(struct platform_device *pdev)
+{
+ return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1901fb_driver = {
+ .probe = auok1901fb_probe,
+ .remove = __devexit_p(auok1901fb_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "auo_k1901fb",
+ .pm = &auok190x_pm,
+ },
+};
+module_platform_driver(auok1901fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
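
The K1901 path splits the rotation between its two commands: AUOK1901_DMA_ROTATE90() places bit 0 of the rotation at bit 13 of the DMA argument, while AUOK1901_DDMA_ROTATE180() inverts bit 1 and places it at bit 11 of the display-DMA argument. A quick sketch printing the resulting bits for the four rotation values, with the macros copied verbatim from the hunk above:

#include <stdio.h>
#include <stdint.h>

#define AUOK1901_DMA_ROTATE90(_rot)	((_rot & 1) << 13)
#define AUOK1901_DDMA_ROTATE180(_rot)	((~_rot & 2) << 10)

int main(void)
{
	int rot;

	for (rot = 0; rot < 4; rot++)
		printf("rotation %d: DMA bits 0x%04x, DDMA bits 0x%04x\n",
		       rot,
		       (uint16_t)AUOK1901_DMA_ROTATE90(rot),
		       (uint16_t)AUOK1901_DDMA_ROTATE180(rot));
	return 0;
}
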
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
new file mode 100644
index 000000000000..77da6a2f43dc
--- /dev/null
+++ b/drivers/video/auo_k190x.c
@@ -0,0 +1,1046 @@
+/*
+ * Common code for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+struct panel_info {
+ int w;
+ int h;
+};
+
+/* table of panel specific parameters to be indexed into by the board drivers */
+static struct panel_info panel_table[] = {
+ /* standard 6" */
+ [AUOK190X_RESOLUTION_800_600] = {
+ .w = 800,
+ .h = 600,
+ },
+ /* standard 9" */
+ [AUOK190X_RESOLUTION_1024_768] = {
+ .w = 1024,
+ .h = 768,
+ },
+};
+
+/*
+ * private I80 interface to the board driver
+ */
+
+static void auok190x_issue_data(struct auok190xfb_par *par, u16 data)
+{
+ par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+ par->board->set_hdb(par, data);
+ par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+}
+
+static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data)
+{
+ par->board->set_ctl(par, AUOK190X_I80_DC, 0);
+ auok190x_issue_data(par, data);
+ par->board->set_ctl(par, AUOK190X_I80_DC, 1);
+}
+
+static int auok190x_issue_pixels(struct auok190xfb_par *par, int size,
+ u16 *data)
+{
+ struct device *dev = par->info->device;
+ int i;
+ u16 tmp;
+
+ if (size & 3) {
+ dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n",
+ size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < (size >> 1); i++) {
+ par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+
+ /* simple reduction of 8bit staticgray to 4bit gray
+ * combines 4 * 4bit pixel values into a 16bit value
+ */
+ tmp = (data[2*i] & 0xF0) >> 4;
+ tmp |= (data[2*i] & 0xF000) >> 8;
+ tmp |= (data[2*i+1] & 0xF0) << 4;
+ tmp |= (data[2*i+1] & 0xF000);
+
+ par->board->set_hdb(par, tmp);
+ par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+ }
+
+ return 0;
+}
+
+static u16 auok190x_read_data(struct auok190xfb_par *par)
+{
+ u16 data;
+
+ par->board->set_ctl(par, AUOK190X_I80_OE, 0);
+ data = par->board->get_hdb(par);
+ par->board->set_ctl(par, AUOK190X_I80_OE, 1);
+
+ return data;
+}
+
+/*
+ * Command interface for the controller drivers
+ */
+
+void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data)
+{
+ par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+ auok190x_issue_cmd(par, data);
+ par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command_nowait);
+
+void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv)
+{
+ int i;
+
+ par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+ auok190x_issue_cmd(par, cmd);
+
+ for (i = 0; i < argc; i++)
+ auok190x_issue_data(par, argv[i]);
+ par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait);
+
+int auok190x_send_command(struct auok190xfb_par *par, u16 data)
+{
+ int ret;
+
+ ret = par->board->wait_for_rdy(par);
+ if (ret)
+ return ret;
+
+ auok190x_send_command_nowait(par, data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command);
+
+int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv)
+{
+ int ret;
+
+ ret = par->board->wait_for_rdy(par);
+ if (ret)
+ return ret;
+
+ auok190x_send_cmdargs_nowait(par, cmd, argc, argv);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs);
+
+int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv)
+{
+ int i, ret;
+
+ ret = par->board->wait_for_rdy(par);
+ if (ret)
+ return ret;
+
+ par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+ auok190x_issue_cmd(par, cmd);
+
+ for (i = 0; i < argc; i++)
+ argv[i] = auok190x_read_data(par);
+ par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_read_cmdargs);
+
+void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv, int size, u16 *data)
+{
+ int i;
+
+ par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+
+ auok190x_issue_cmd(par, cmd);
+
+ for (i = 0; i < argc; i++)
+ auok190x_issue_data(par, argv[i]);
+
+ auok190x_issue_pixels(par, size, data);
+
+ par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait);
+
+int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv, int size, u16 *data)
+{
+ int ret;
+
+ ret = par->board->wait_for_rdy(par);
+ if (ret)
+ return ret;
+
+ auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels);
+
+/*
+ * fbdefio callbacks - common on both controllers.
+ */
+
+static void auok190xfb_dpy_first_io(struct fb_info *info)
+{
+ /* tell runtime-pm that we wish to use the device in a short time */
+ pm_runtime_get(info->device);
+}
+
+/* this is called back from the deferred io workqueue */
+static void auok190xfb_dpy_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct auok190xfb_par *par = info->par;
+ u16 yres = info->var.yres;
+ u16 xres = info->var.xres;
+ u16 y1 = 0, h = 0;
+ int prev_index = -1;
+ struct page *cur;
+ int h_inc;
+ int threshold;
+
+ if (!list_empty(pagelist))
+ /* the device resume should have been requested through first_io;
+ * if it has not finished yet, wait for it here.
+ */
+ pm_runtime_barrier(info->device);
+ else
+ /* We reached this via the fsync or some other way.
+ * In either case the first_io function did not run,
+ * so we runtime_resume the device here synchronously.
+ */
+ pm_runtime_get_sync(info->device);
+
+ /* Do a full screen update every n updates to prevent
+ * excessive darkening of the Sipix display.
+ * If we do this, there is no need to walk the pages.
+ */
+ if (par->need_refresh(par)) {
+ par->update_all(par);
+ goto out;
+ }
+
+ /* height increment is fixed per page */
+ h_inc = DIV_ROUND_UP(PAGE_SIZE, xres);
+
+ /* calculate number of pages from pixel height */
+ threshold = par->consecutive_threshold / h_inc;
+ if (threshold < 1)
+ threshold = 1;
+
+ /* walk the written page list and swizzle the data */
+ list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+ if (prev_index < 0) {
+ /* just starting so assign first page */
+ y1 = (cur->index << PAGE_SHIFT) / xres;
+ h = h_inc;
+ } else if ((cur->index - prev_index) <= threshold) {
+ /* page is within our threshold for single updates */
+ h += h_inc * (cur->index - prev_index);
+ } else {
+ /* page not consecutive, issue previous update first */
+ par->update_partial(par, y1, y1 + h);
+
+ /* start over with our non consecutive page */
+ y1 = (cur->index << PAGE_SHIFT) / xres;
+ h = h_inc;
+ }
+ prev_index = cur->index;
+ }
+
+ /* if we still have any pages to update we do so now */
+ if (h >= yres)
+ /* it's a full screen update, just do it */
+ par->update_all(par);
+ else
+ par->update_partial(par, y1, min((u16) (y1 + h), yres));
+
+out:
+ pm_runtime_mark_last_busy(info->device);
+ pm_runtime_put_autosuspend(info->device);
+}
+
+/*
+ * framebuffer operations
+ */
+
+/*
+ * this is the slow path from userspace. they can seek and write to
+ * the fb. it's inefficient to do anything less than a full screen draw
+ */
+static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct auok190xfb_par *par = info->par;
+ unsigned long p = *ppos;
+ void *dst;
+ int err = 0;
+ unsigned long total_size;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return -EPERM;
+
+ total_size = info->fix.smem_len;
+
+ if (p > total_size)
+ return -EFBIG;
+
+ if (count > total_size) {
+ err = -EFBIG;
+ count = total_size;
+ }
+
+ if (count + p > total_size) {
+ if (!err)
+ err = -ENOSPC;
+
+ count = total_size - p;
+ }
+
+ dst = (void *)(info->screen_base + p);
+
+ if (copy_from_user(dst, buf, count))
+ err = -EFAULT;
+
+ if (!err)
+ *ppos += count;
+
+ par->update_all(par);
+
+ return (err) ? err : count;
+}
+
+static void auok190xfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct auok190xfb_par *par = info->par;
+
+ sys_fillrect(info, rect);
+
+ par->update_all(par);
+}
+
+static void auok190xfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct auok190xfb_par *par = info->par;
+
+ sys_copyarea(info, area);
+
+ par->update_all(par);
+}
+
+static void auok190xfb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct auok190xfb_par *par = info->par;
+
+ sys_imageblit(info, image);
+
+ par->update_all(par);
+}
+
+static int auok190xfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if (info->var.xres != var->xres || info->var.yres != var->yres ||
+ info->var.xres_virtual != var->xres_virtual ||
+ info->var.yres_virtual != var->yres_virtual) {
+ pr_info("%s: Resolution not supported: X%u x Y%u\n",
+ __func__, var->xres, var->yres);
+ return -EINVAL;
+ }
+
+ /*
+ * Memory limit
+ */
+
+ if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
+ pr_info("%s: Memory Limit requested yres_virtual = %u\n",
+ __func__, var->yres_virtual);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct fb_ops auok190xfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = auok190xfb_write,
+ .fb_fillrect = auok190xfb_fillrect,
+ .fb_copyarea = auok190xfb_copyarea,
+ .fb_imageblit = auok190xfb_imageblit,
+ .fb_check_var = auok190xfb_check_var,
+};
+
+/*
+ * Controller-functions common to both K1900 and K1901
+ */
+
+static int auok190x_read_temperature(struct auok190xfb_par *par)
+{
+ struct device *dev = par->info->device;
+ u16 data[4];
+ int temp;
+
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&(par->io_lock));
+
+ auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+ mutex_unlock(&(par->io_lock));
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ /* sanitize and split off the half-degrees for now */
+ temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1);
+
+ /* handle positive and negative temperatures */
+ if (temp >= 201)
+ return (255 - temp + 1) * (-1);
+ else
+ return temp;
+}
+
+static void auok190x_identify(struct auok190xfb_par *par)
+{
+ struct device *dev = par->info->device;
+ u16 data[4];
+
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&(par->io_lock));
+
+ auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+ mutex_unlock(&(par->io_lock));
+
+ par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK;
+
+ par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]);
+ par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]);
+ par->panel_model = AUOK190X_VERSION_MODEL(data[2]);
+
+ par->tcon_version = AUOK190X_VERSION_TCON(data[3]);
+ par->lut_version = AUOK190X_VERSION_LUT(data[3]);
+
+ dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x",
+ par->panel_size_int, par->panel_size_float, par->panel_model,
+ par->epd_type, par->tcon_version, par->lut_version);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+/*
+ * Sysfs functions
+ */
+
+static ssize_t update_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+
+ return sprintf(buf, "%d\n", par->update_mode);
+}
+
+static ssize_t update_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+ int mode, ret;
+
+ ret = kstrtoint(buf, 10, &mode);
+ if (ret)
+ return ret;
+
+ par->update_mode = mode;
+
+ /* if we enter a better mode, do a full update */
+ if (par->last_mode > 1 && mode < par->last_mode)
+ par->update_all(par);
+
+ return count;
+}
+
+static ssize_t flash_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+
+ return sprintf(buf, "%d\n", par->flash);
+}
+
+static ssize_t flash_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+ int flash, ret;
+
+ ret = kstrtoint(buf, 10, &flash);
+ if (ret)
+ return ret;
+
+ if (flash > 0)
+ par->flash = 1;
+ else
+ par->flash = 0;
+
+ return count;
+}
+
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+ int temp;
+
+ temp = auok190x_read_temperature(par);
+ return sprintf(buf, "%d\n", temp);
+}
+
+static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store);
+static DEVICE_ATTR(flash, 0644, flash_show, flash_store);
+static DEVICE_ATTR(temp, 0644, temp_show, NULL);
+
+static struct attribute *auok190x_attributes[] = {
+ &dev_attr_update_mode.attr,
+ &dev_attr_flash.attr,
+ &dev_attr_temp.attr,
+ NULL
+};
+
+static const struct attribute_group auok190x_attr_group = {
+ .attrs = auok190x_attributes,
+};
+
+static int auok190x_power(struct auok190xfb_par *par, bool on)
+{
+ struct auok190x_board *board = par->board;
+ int ret;
+
+ if (on) {
+ /* We should keep the power up for at least 80ms before setting
+ * RST_N and SLP_N high (TCON spec 20100803_v35 p59)
+ */
+ ret = regulator_enable(par->regulator);
+ if (ret)
+ return ret;
+
+ msleep(200);
+ gpio_set_value(board->gpio_nrst, 1);
+ gpio_set_value(board->gpio_nsleep, 1);
+ msleep(200);
+ } else {
+ regulator_disable(par->regulator);
+ gpio_set_value(board->gpio_nrst, 0);
+ gpio_set_value(board->gpio_nsleep, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Recovery - powercycle the controller
+ */
+
+static void auok190x_recover(struct auok190xfb_par *par)
+{
+ auok190x_power(par, 0);
+ msleep(100);
+ auok190x_power(par, 1);
+
+ par->init(par);
+
+ /* wait for init to complete */
+ par->board->wait_for_rdy(par);
+}
+
+/*
+ * Power-management
+ */
+
+#ifdef CONFIG_PM
+static int auok190x_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+ u16 standby_param;
+
+ /* take and keep the lock until we are resumed, as the controller
+ * will never reach the non-busy state when in standby mode
+ */
+ mutex_lock(&(par->io_lock));
+
+ if (par->standby) {
+ dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n");
+ mutex_unlock(&(par->io_lock));
+ return 0;
+ }
+
+ /* according to runtime_pm.txt, runtime_suspend only means that the
+ * device will not process data and will not communicate with the CPU.
+ * As we hold the lock, this stays true even without standby.
+ */
+ if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+ dev_dbg(dev, "runtime suspend without standby\n");
+ goto finish;
+ } else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) {
+ /* some TCON versions expect a parameter (0) with STANDBY, but
+ * exactly which TCON versions do still has to be determined.
+ */
+ dev_dbg(dev, "runtime suspend with additional empty param\n");
+ standby_param = 0;
+ auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1,
+ &standby_param);
+ } else {
+ dev_dbg(dev, "runtime suspend without param\n");
+ auok190x_send_command(par, AUOK190X_CMD_STANDBY);
+ }
+
+ msleep(64);
+
+finish:
+ par->standby = 1;
+
+ return 0;
+}
+
+static int auok190x_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+
+ if (!par->standby) {
+ dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n");
+ return 0;
+ }
+
+ if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+ dev_dbg(dev, "runtime resume without standby\n");
+ } else {
+ /* when in standby, controller is always busy
+ * and only accepts the wakeup command
+ */
+ dev_dbg(dev, "runtime resume from standby\n");
+ auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP);
+
+ msleep(160);
+
+ /* wait for the controller to be ready and release the lock */
+ board->wait_for_rdy(par);
+ }
+
+ par->standby = 0;
+
+ mutex_unlock(&(par->io_lock));
+
+ return 0;
+}
+
+static int auok190x_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+ int ret;
+
+ dev_dbg(dev, "suspend\n");
+ if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+ /* suspend via powering off the ic */
+ dev_dbg(dev, "suspend with broken standby\n");
+
+ auok190x_power(par, 0);
+ } else {
+ dev_dbg(dev, "suspend using sleep\n");
+
+ /* the sleep state can only be entered from the standby state.
+ * pm_runtime_get_noresume gets called before the suspend call.
+ * So the device's usage count is >0, but it is not necessarily
+ * active.
+ */
+ if (!pm_runtime_status_suspended(dev)) {
+ ret = auok190x_runtime_suspend(dev);
+ if (ret < 0) {
+ dev_err(dev, "auok190x_runtime_suspend failed with %d\n",
+ ret);
+ return ret;
+ }
+ par->manual_standby = 1;
+ }
+
+ gpio_direction_output(board->gpio_nsleep, 0);
+ }
+
+ msleep(100);
+
+ return 0;
+}
+
+static int auok190x_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+
+ dev_dbg(dev, "resume\n");
+ if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+ dev_dbg(dev, "resume with broken standby\n");
+
+ auok190x_power(par, 1);
+
+ par->init(par);
+ } else {
+ dev_dbg(dev, "resume from sleep\n");
+
+ /* device should be in runtime suspend when we were suspended
+ * and pm_runtime_put_sync gets called after this function.
+ * So there is no need to touch the standby mode here at all.
+ */
+ gpio_direction_output(board->gpio_nsleep, 1);
+ msleep(100);
+
+ /* an additional init call seems to be necessary after sleep */
+ auok190x_runtime_resume(dev);
+ par->init(par);
+
+ /* if we were runtime-suspended before, suspend again */
+ if (!par->manual_standby)
+ auok190x_runtime_suspend(dev);
+ else
+ par->manual_standby = 0;
+ }
+
+ return 0;
+}
+#endif
+
+const struct dev_pm_ops auok190x_pm = {
+ SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
+};
+EXPORT_SYMBOL_GPL(auok190x_pm);
+
+/*
+ * Common probe and remove code
+ */
+
+int __devinit auok190x_common_probe(struct platform_device *pdev,
+ struct auok190x_init_data *init)
+{
+ struct auok190x_board *board = init->board;
+ struct auok190xfb_par *par;
+ struct fb_info *info;
+ struct panel_info *panel;
+ int videomemorysize, ret;
+ unsigned char *videomemory;
+
+ /* check board contents */
+ if (!board->init || !board->cleanup || !board->wait_for_rdy
+ || !board->set_ctl || !board->set_hdb || !board->get_hdb
+ || !board->setup_irq)
+ return -EINVAL;
+
+ info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev);
+ if (!info)
+ return -ENOMEM;
+
+ par = info->par;
+ par->info = info;
+ par->board = board;
+ par->recover = auok190x_recover;
+ par->update_partial = init->update_partial;
+ par->update_all = init->update_all;
+ par->need_refresh = init->need_refresh;
+ par->init = init->init;
+
+ /* init update modes */
+ par->update_cnt = 0;
+ par->update_mode = -1;
+ par->last_mode = -1;
+ par->flash = 0;
+
+ par->regulator = regulator_get(info->device, "vdd");
+ if (IS_ERR(par->regulator)) {
+ ret = PTR_ERR(par->regulator);
+ dev_err(info->device, "Failed to get regulator: %d\n", ret);
+ goto err_reg;
+ }
+
+ ret = board->init(par);
+ if (ret) {
+ dev_err(info->device, "board init failed, %d\n", ret);
+ goto err_board;
+ }
+
+ ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep");
+ if (ret) {
+ dev_err(info->device, "could not request sleep gpio, %d\n",
+ ret);
+ goto err_gpio1;
+ }
+
+ ret = gpio_direction_output(board->gpio_nsleep, 0);
+ if (ret) {
+ dev_err(info->device, "could not set sleep gpio, %d\n", ret);
+ goto err_gpio2;
+ }
+
+ ret = gpio_request(board->gpio_nrst, "AUOK190x reset");
+ if (ret) {
+ dev_err(info->device, "could not request reset gpio, %d\n",
+ ret);
+ goto err_gpio2;
+ }
+
+ ret = gpio_direction_output(board->gpio_nrst, 0);
+ if (ret) {
+ dev_err(info->device, "could not set reset gpio, %d\n", ret);
+ goto err_gpio3;
+ }
+
+ ret = auok190x_power(par, 1);
+ if (ret) {
+ dev_err(info->device, "could not power on the device, %d\n",
+ ret);
+ goto err_gpio3;
+ }
+
+ mutex_init(&par->io_lock);
+
+ init_waitqueue_head(&par->waitq);
+
+ ret = par->board->setup_irq(par->info);
+ if (ret) {
+ dev_err(info->device, "could not setup ready-irq, %d\n", ret);
+ goto err_irq;
+ }
+
+ /* wait for init to complete */
+ par->board->wait_for_rdy(par);
+
+ /*
+ * From here on the controller can talk to us
+ */
+
+ /* initialise fix, var, resolution and rotation */
+
+ strlcpy(info->fix.id, init->id, 16);
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
+ info->fix.xpanstep = 0;
+ info->fix.ypanstep = 0;
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+
+ info->var.bits_per_pixel = 8;
+ info->var.grayscale = 1;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+
+ panel = &panel_table[board->resolution];
+
+ /* if 90 degree rotation, switch width and height */
+ if (board->rotation & 1) {
+ info->var.xres = panel->h;
+ info->var.yres = panel->w;
+ info->var.xres_virtual = panel->h;
+ info->var.yres_virtual = panel->w;
+ info->fix.line_length = panel->h;
+ } else {
+ info->var.xres = panel->w;
+ info->var.yres = panel->h;
+ info->var.xres_virtual = panel->w;
+ info->var.yres_virtual = panel->h;
+ info->fix.line_length = panel->w;
+ }
+
+ par->resolution = board->resolution;
+ par->rotation = board->rotation;
+
+ /* videomemory handling */
+
+ videomemorysize = roundup((panel->w * panel->h), PAGE_SIZE);
+ videomemory = vmalloc(videomemorysize);
+ if (!videomemory) {
+ ret = -ENOMEM;
+ goto err_irq;
+ }
+
+ memset(videomemory, 0, videomemorysize);
+ info->screen_base = (char *)videomemory;
+ info->fix.smem_len = videomemorysize;
+
+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+ info->fbops = &auok190xfb_ops;
+
+ /* deferred io init */
+
+ info->fbdefio = devm_kzalloc(info->device,
+ sizeof(struct fb_deferred_io),
+ GFP_KERNEL);
+ if (!info->fbdefio) {
+ dev_err(info->device, "Failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto err_defio;
+ }
+
+ dev_dbg(info->device, "targetting %d frames per second\n", board->fps);
+ info->fbdefio->delay = HZ / board->fps;
+ info->fbdefio->first_io = auok190xfb_dpy_first_io;
+ info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io;
+ fb_deferred_io_init(info);
+
+ /* color map */
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret < 0) {
+ dev_err(info->device, "Failed to allocate colormap\n");
+ goto err_cmap;
+ }
+
+ /* controller init */
+
+ par->consecutive_threshold = 100;
+ par->init(par);
+ auok190x_identify(par);
+
+ platform_set_drvdata(pdev, info);
+
+ ret = register_framebuffer(info);
+ if (ret < 0)
+ goto err_regfb;
+
+ ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group);
+ if (ret)
+ goto err_sysfs;
+
+ dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n",
+ info->node, info->var.xres, info->var.yres,
+ videomemorysize >> 10);
+
+ /* increase autosuspend_delay when we use alternative methods
+ * for runtime_pm
+ */
+ par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN)
+ ? 1000 : 200;
+
+ pm_runtime_set_active(info->device);
+ pm_runtime_enable(info->device);
+ pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay);
+ pm_runtime_use_autosuspend(info->device);
+
+ return 0;
+
+err_sysfs:
+ unregister_framebuffer(info);
+err_regfb:
+ fb_dealloc_cmap(&info->cmap);
+err_cmap:
+ fb_deferred_io_cleanup(info);
+ kfree(info->fbdefio);
+err_defio:
+ vfree((void *)info->screen_base);
+err_irq:
+ auok190x_power(par, 0);
+err_gpio3:
+ gpio_free(board->gpio_nrst);
+err_gpio2:
+ gpio_free(board->gpio_nsleep);
+err_gpio1:
+ board->cleanup(par);
+err_board:
+ regulator_put(par->regulator);
+err_reg:
+ framebuffer_release(info);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_probe);
+
+int __devexit auok190x_common_remove(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+
+ pm_runtime_disable(info->device);
+
+ sysfs_remove_group(&info->device->kobj, &auok190x_attr_group);
+
+ unregister_framebuffer(info);
+
+ fb_dealloc_cmap(&info->cmap);
+
+ fb_deferred_io_cleanup(info);
+ kfree(info->fbdefio);
+
+ vfree((void *)info->screen_base);
+
+ auok190x_power(par, 0);
+
+ gpio_free(board->gpio_nrst);
+ gpio_free(board->gpio_nsleep);
+
+ board->cleanup(par);
+
+ regulator_put(par->regulator);
+
+ framebuffer_release(info);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_remove);
+
+MODULE_DESCRIPTION("Common code for AUO-K190X controllers");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
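
auok190x_issue_pixels() reduces the 8-bit grayscale framebuffer to the controller's 4-bit format by keeping only the upper nibble of each pixel and packing four pixels into one 16-bit bus word. A standalone sketch of that reduction, assuming little-endian framebuffer memory (the sample pixel values are made up):

#include <stdio.h>
#include <stdint.h>

/* mirrors the reduction in auok190x_issue_pixels(): four 8-bit gray pixels
 * are cut down to their upper nibbles and packed into one 16-bit word */
static uint16_t pack4(const uint16_t *data, int i)
{
	uint16_t tmp;

	tmp  = (data[2 * i] & 0x00F0) >> 4;
	tmp |= (data[2 * i] & 0xF000) >> 8;
	tmp |= (data[2 * i + 1] & 0x00F0) << 4;
	tmp |= (data[2 * i + 1] & 0xF000);

	return tmp;
}

int main(void)
{
	/* pixels 0x12, 0x34, 0x56, 0x78 as they sit in little-endian memory:
	 * two 16-bit words holding two bytes each */
	uint16_t words[2] = { 0x3412, 0x7856 };

	/* upper nibbles 1, 3, 5, 7 end up packed low to high: 0x7531 */
	printf("packed word: 0x%04x\n", pack4(words, 0));
	return 0;
}
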
diff --git a/drivers/video/auo_k190x.h b/drivers/video/auo_k190x.h
new file mode 100644
index 000000000000..e35af1f51b28
--- /dev/null
+++ b/drivers/video/auo_k190x.h
@@ -0,0 +1,129 @@
+/*
+ * Private common definitions for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * I80 interface specific defines
+ */
+
+#define AUOK190X_I80_CS 0x01
+#define AUOK190X_I80_DC 0x02
+#define AUOK190X_I80_WR 0x03
+#define AUOK190X_I80_OE 0x04
+
+/*
+ * AUOK190x commands, common to both controllers
+ */
+
+#define AUOK190X_CMD_INIT 0x0000
+#define AUOK190X_CMD_STANDBY 0x0001
+#define AUOK190X_CMD_WAKEUP 0x0002
+#define AUOK190X_CMD_TCON_RESET 0x0003
+#define AUOK190X_CMD_DATA_STOP 0x1002
+#define AUOK190X_CMD_LUT_START 0x1003
+#define AUOK190X_CMD_DISP_REFRESH 0x1004
+#define AUOK190X_CMD_DISP_RESET 0x1005
+#define AUOK190X_CMD_PRE_DISPLAY_START 0x100D
+#define AUOK190X_CMD_PRE_DISPLAY_STOP 0x100F
+#define AUOK190X_CMD_FLASH_W 0x2000
+#define AUOK190X_CMD_FLASH_E 0x2001
+#define AUOK190X_CMD_FLASH_STS 0x2002
+#define AUOK190X_CMD_FRAMERATE 0x3000
+#define AUOK190X_CMD_READ_VERSION 0x4000
+#define AUOK190X_CMD_READ_STATUS 0x4001
+#define AUOK190X_CMD_READ_LUT 0x4003
+#define AUOK190X_CMD_DRIVERTIMING 0x5000
+#define AUOK190X_CMD_LBALANCE 0x5001
+#define AUOK190X_CMD_AGINGMODE 0x6000
+#define AUOK190X_CMD_AGINGEXIT 0x6001
+
+/*
+ * Common settings for AUOK190X_CMD_INIT
+ */
+
+#define AUOK190X_INIT_DATA_FILTER (0 << 12)
+#define AUOK190X_INIT_DATA_BYPASS (1 << 12)
+#define AUOK190X_INIT_INVERSE_WHITE (0 << 9)
+#define AUOK190X_INIT_INVERSE_BLACK (1 << 9)
+#define AUOK190X_INIT_SCAN_DOWN (0 << 1)
+#define AUOK190X_INIT_SCAN_UP (1 << 1)
+#define AUOK190X_INIT_SHIFT_LEFT (0 << 0)
+#define AUOK190X_INIT_SHIFT_RIGHT (1 << 0)
+
+/* Common bits to pixels
+ * Mode 15-12 11-8 7-4 3-0
+ * format0 4 3 2 1
+ * format1 3 4 1 2
+ */
+
+#define AUOK190X_INIT_FORMAT0 0
+#define AUOK190X_INIT_FORMAT1 (1 << 6)
+
+/*
+ * settings for AUOK190X_CMD_RESET
+ */
+
+#define AUOK190X_RESET_TCON (0 << 0)
+#define AUOK190X_RESET_NORMAL (1 << 0)
+#define AUOK190X_RESET_PON (1 << 1)
+
+/*
+ * AUOK190X_CMD_VERSION
+ */
+
+#define AUOK190X_VERSION_TEMP_MASK (0x1ff)
+#define AUOK190X_VERSION_EPD_MASK (0xff)
+#define AUOK190X_VERSION_SIZE_INT(_val) ((_val & 0xfc00) >> 10)
+#define AUOK190X_VERSION_SIZE_FLOAT(_val) ((_val & 0x3c0) >> 6)
+#define AUOK190X_VERSION_MODEL(_val) (_val & 0x3f)
+#define AUOK190X_VERSION_LUT(_val) (_val & 0xff)
+#define AUOK190X_VERSION_TCON(_val) ((_val & 0xff00) >> 8)
+
+/*
+ * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901
+ */
+
+#define AUOK190X_UPDATE_MODE(_res) ((_res & 0x7) << 12)
+#define AUOK190X_UPDATE_NONFLASH (1 << 15)
+
+/*
+ * track panel specific parameters for common init
+ */
+
+struct auok190x_init_data {
+ char *id;
+ struct auok190x_board *board;
+
+ void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
+ void (*update_all)(struct auok190xfb_par *par);
+ bool (*need_refresh)(struct auok190xfb_par *par);
+ void (*init)(struct auok190xfb_par *par);
+};
+
+
+extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data);
+extern int auok190x_send_command(struct auok190xfb_par *par, u16 data);
+extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv);
+extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv);
+extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par,
+ u16 cmd, int argc, u16 *argv,
+ int size, u16 *data);
+extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv, int size,
+ u16 *data);
+extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv);
+
+extern int auok190x_common_probe(struct platform_device *pdev,
+ struct auok190x_init_data *init);
+extern int auok190x_common_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops auok190x_pm;
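
The AUOK190X_VERSION_* masks above slice the words returned by AUOK190X_CMD_READ_VERSION, and auok190x_read_temperature() in auo_k190x.c folds the 9-bit temperature field into a signed value. A small decoder using the same masks; the sample register words below are purely illustrative, not values read from real hardware.

#include <stdio.h>
#include <stdint.h>

#define AUOK190X_VERSION_TEMP_MASK		(0x1ff)
#define AUOK190X_VERSION_SIZE_INT(_val)		((_val & 0xfc00) >> 10)
#define AUOK190X_VERSION_SIZE_FLOAT(_val)	((_val & 0x3c0) >> 6)
#define AUOK190X_VERSION_MODEL(_val)		(_val & 0x3f)
#define AUOK190X_VERSION_LUT(_val)		(_val & 0xff)
#define AUOK190X_VERSION_TCON(_val)		((_val & 0xff00) >> 8)

/* mirrors the sign handling in auok190x_read_temperature() */
static int decode_temp(uint16_t raw)
{
	int temp = (raw & AUOK190X_VERSION_TEMP_MASK) >> 1;

	return (temp >= 201) ? -(255 - temp + 1) : temp;
}

int main(void)
{
	/* hypothetical READ_VERSION reply: 23 degrees, 6.0" panel,
	 * model 0x13, TCON rev 0x02, LUT rev 0x31 */
	uint16_t data[4] = { 23 << 1, 0x0000, (6 << 10) | 0x13, 0x0231 };

	printf("temperature: %d C\n", decode_temp(data[0]));
	printf("panel: %d.%din, model 0x%x\n",
	       AUOK190X_VERSION_SIZE_INT(data[2]),
	       AUOK190X_VERSION_SIZE_FLOAT(data[2]),
	       (unsigned)AUOK190X_VERSION_MODEL(data[2]));
	printf("TCON rev 0x%x, LUT rev 0x%x\n",
	       (unsigned)AUOK190X_VERSION_TCON(data[3]),
	       (unsigned)AUOK190X_VERSION_LUT(data[3]));
	return 0;
}
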
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index af16884491ed..2979292650d6 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -88,7 +88,7 @@ config LCD_PLATFORM
config LCD_TOSA
tristate "Sharp SL-6000 LCD Driver"
- depends on SPI && MACH_TOSA
+ depends on I2C && SPI && MACH_TOSA
help
If you have a Sharp SL-6000 Zaurus, say Y to enable a driver
for its LCD.
@@ -184,6 +184,18 @@ config BACKLIGHT_GENERIC
known as the Corgi backlight driver. If you have a Sharp Zaurus
SL-C7xx, SL-Cxx00 or SL-6000x say y.
+config BACKLIGHT_LM3533
+ tristate "Backlight Driver for LM3533"
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on MFD_LM3533
+ help
+ Say Y to enable the backlight driver for National Semiconductor / TI
+ LM3533 Lighting Power chips.
+
+ The backlights can be controlled directly, through PWM input, or by
+ the ambient-light-sensor interface. The chip supports 256 brightness
+ levels.
+
config BACKLIGHT_LOCOMO
tristate "Sharp LOCOMO LCD/Backlight Driver"
depends on SHARP_LOCOMO
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 36855ae887d6..a2ac9cfbaf6b 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4911ea7989c8..df5db99af23d 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -160,7 +160,7 @@ static ssize_t adp5520_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -214,7 +214,7 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
struct adp5520_bl *data = dev_get_drvdata(dev);
int ret;
- ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret < 0)
return ret;
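
These backlight hunks (and the adp8860/adp8870 ones below) replace the deprecated strict_strtoul() with kstrtoul(), which only accepts a well-formed number with, to my understanding, at most a trailing newline. A rough userspace approximation of that contract, under that stated assumption; approx_kstrtoul is an illustrative helper, not a kernel function.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int approx_kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, base);
	if (errno)
		return -ERANGE;		/* overflow */
	if (end == s)
		return -EINVAL;		/* no digits at all */
	if (*end == '\n')		/* tolerate a single trailing newline */
		end++;
	if (*end != '\0')
		return -EINVAL;		/* trailing garbage rejected */
	*res = val;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("\"42\\n\"  -> %d\n", approx_kstrtoul("42\n", 10, &v));
	printf("\"42abc\" -> %d\n", approx_kstrtoul("42abc", 10, &v));
	return 0;
}
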
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 550dbf0bb896..77d1fdba597f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -222,7 +222,8 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
struct led_info *cur_led;
int ret, i;
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds,
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&client->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -236,7 +237,7 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
if (ret) {
dev_err(&client->dev, "failed to write\n");
- goto err_free;
+ return ret;
}
for (i = 0; i < pdata->num_leds; ++i) {
@@ -291,9 +292,6 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
cancel_work_sync(&led[i].work);
}
- err_free:
- kfree(led);
-
return ret;
}
@@ -309,7 +307,6 @@ static int __devexit adp8860_led_remove(struct i2c_client *client)
cancel_work_sync(&data->led[i].work);
}
- kfree(data->led);
return 0;
}
#else
@@ -451,7 +448,7 @@ static ssize_t adp8860_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -501,7 +498,7 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adp8860_bl *data = dev_get_drvdata(dev);
- int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret)
return ret;
@@ -608,7 +605,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
uint8_t reg_val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -675,13 +672,13 @@ static int __devinit adp8860_probe(struct i2c_client *client,
return -EINVAL;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
if (ret < 0)
- goto out2;
+ return ret;
switch (ADP8860_MANID(reg_val)) {
case ADP8863_MANUFID:
@@ -694,8 +691,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
break;
default:
dev_err(&client->dev, "failed to probe\n");
- ret = -ENODEV;
- goto out2;
+ return -ENODEV;
}
/* It's confirmed that the DEVID field is actually a REVID */
@@ -717,8 +713,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
&client->dev, data, &adp8860_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out2;
+ return PTR_ERR(bl);
}
bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
@@ -756,8 +751,6 @@ out:
&adp8860_bl_attr_group);
out1:
backlight_device_unregister(bl);
-out2:
- kfree(data);
return ret;
}
@@ -776,7 +769,6 @@ static int __devexit adp8860_remove(struct i2c_client *client)
&adp8860_bl_attr_group);
backlight_device_unregister(data->bl);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 9be58c6f18f1..edf7f91c8e61 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -244,8 +244,8 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
struct led_info *cur_led;
int ret, i;
-
- led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&client->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -253,17 +253,17 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
if (ret)
- goto err_free;
+ return ret;
ret = adp8870_write(client, ADP8870_ISCT1,
(pdata->led_on_time & 0x3) << 6);
if (ret)
- goto err_free;
+ return ret;
ret = adp8870_write(client, ADP8870_ISCF,
FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
if (ret)
- goto err_free;
+ return ret;
for (i = 0; i < pdata->num_leds; ++i) {
cur_led = &pdata->leds[i];
@@ -317,9 +317,6 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
cancel_work_sync(&led[i].work);
}
- err_free:
- kfree(led);
-
return ret;
}
@@ -335,7 +332,6 @@ static int __devexit adp8870_led_remove(struct i2c_client *client)
cancel_work_sync(&data->led[i].work);
}
- kfree(data->led);
return 0;
}
#else
@@ -572,7 +568,7 @@ static ssize_t adp8870_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -652,7 +648,7 @@ static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adp8870_bl *data = dev_get_drvdata(dev);
- int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret)
return ret;
@@ -794,7 +790,7 @@ static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
uint8_t reg_val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -874,7 +870,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
return -ENODEV;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -894,8 +890,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
&client->dev, data, &adp8870_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out2;
+ return PTR_ERR(bl);
}
data->bl = bl;
@@ -930,8 +925,6 @@ out:
&adp8870_bl_attr_group);
out1:
backlight_device_unregister(bl);
-out2:
- kfree(data);
return ret;
}
@@ -950,7 +943,6 @@ static int __devexit adp8870_remove(struct i2c_client *client)
&adp8870_bl_attr_group);
backlight_device_unregister(data->bl);
- kfree(data);
return 0;
}
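
strict_strtoul() is the deprecated interface; kstrtoul() takes the same (buf, base, &val) arguments but rejects trailing junk and overflow, which is why the sysfs store callbacks above can swap it in directly. A hedged sketch of such a store handler (attribute name and the hardware step are placeholders):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t level_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        unsigned long val;
        int ret;

        /* Fails cleanly on trailing garbage or overflow. */
        ret = kstrtoul(buf, 10, &val);
        if (ret)
                return ret;

        /* ... apply 'val' to the hardware here ... */

        return count;
}
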
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 7bdadc790117..3729238e7096 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -482,7 +482,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ams369fg06), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -492,7 +492,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -501,15 +501,13 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
- goto out_free_lcd;
+ return -EFAULT;
}
ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
&ams369fg06_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -547,8 +545,6 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
out_lcd_unregister:
lcd_device_unregister(ld);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -559,7 +555,6 @@ static int __devexit ams369fg06_remove(struct spi_device *spi)
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- kfree(lcd);
return 0;
}
@@ -619,7 +614,6 @@ static void ams369fg06_shutdown(struct spi_device *spi)
static struct spi_driver ams369fg06_driver = {
.driver = {
.name = "ams369fg06",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ams369fg06_probe,
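
Dropping `.bus = &spi_bus_type` is safe because spi_register_driver() assigns the bus itself; the explicit initialiser was redundant. A minimal registration sketch (driver and function names are illustrative):

#include <linux/module.h>
#include <linux/spi/spi.h>

static int mylcd_probe(struct spi_device *spi)
{
        return 0;
}

static int mylcd_remove(struct spi_device *spi)
{
        return 0;
}

static struct spi_driver mylcd_driver = {
        .driver = {
                .name   = "mylcd",
                .owner  = THIS_MODULE,
                /* no .bus: spi_register_driver() fills it in */
        },
        .probe  = mylcd_probe,
        .remove = mylcd_remove,
};
module_spi_driver(mylcd_driver);

MODULE_LICENSE("GPL");
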
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index a523b255e124..9dc73ac3709a 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -16,6 +16,8 @@
* get at the firmware code in order to figure out what it's actually doing.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -25,6 +27,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/atomic.h>
+#include <linux/apple_bl.h>
static struct backlight_device *apple_backlight_device;
@@ -39,8 +42,6 @@ struct hw_data {
static const struct hw_data *hw_data;
-#define DRIVER "apple_backlight: "
-
/* Module parameters. */
static int debug;
module_param_named(debug, debug, int, 0644);
@@ -60,8 +61,7 @@ static int intel_chipset_send_intensity(struct backlight_device *bd)
int intensity = bd->props.brightness;
if (debug)
- printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
- intensity);
+ pr_debug("setting brightness to %d\n", intensity);
intel_chipset_set_brightness(intensity);
return 0;
@@ -76,8 +76,7 @@ static int intel_chipset_get_intensity(struct backlight_device *bd)
intensity = inb(0xb3) >> 4;
if (debug)
- printk(KERN_DEBUG DRIVER "read brightness of %d\n",
- intensity);
+ pr_debug("read brightness of %d\n", intensity);
return intensity;
}
@@ -107,8 +106,7 @@ static int nvidia_chipset_send_intensity(struct backlight_device *bd)
int intensity = bd->props.brightness;
if (debug)
- printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
- intensity);
+ pr_debug("setting brightness to %d\n", intensity);
nvidia_chipset_set_brightness(intensity);
return 0;
@@ -123,8 +121,7 @@ static int nvidia_chipset_get_intensity(struct backlight_device *bd)
intensity = inb(0x52f) >> 4;
if (debug)
- printk(KERN_DEBUG DRIVER "read brightness of %d\n",
- intensity);
+ pr_debug("read brightness of %d\n", intensity);
return intensity;
}
@@ -149,7 +146,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
host = pci_get_bus_and_slot(0, 0);
if (!host) {
- printk(KERN_ERR DRIVER "unable to find PCI host\n");
+ pr_err("unable to find PCI host\n");
return -ENODEV;
}
@@ -161,7 +158,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
pci_dev_put(host);
if (!hw_data) {
- printk(KERN_ERR DRIVER "unknown hardware\n");
+ pr_err("unknown hardware\n");
return -ENODEV;
}
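
Defining pr_fmt() before the first include lets every pr_err()/pr_debug() in the file pick up the module name automatically, which is what makes the hand-rolled DRIVER prefix above removable. A small stand-alone sketch, assuming a module named example:

/* Must come before any include that pulls in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

static int __init example_init(void)
{
        pr_info("loaded\n");            /* logged as "example: loaded" */
        return 0;
}

static void __exit example_exit(void)
{
        pr_info("unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
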
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bf5b1ece7160..297db2fa91f5 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -5,6 +5,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -123,7 +125,7 @@ static ssize_t backlight_store_power(struct device *dev,
rc = -ENXIO;
mutex_lock(&bd->ops_lock);
if (bd->ops) {
- pr_debug("backlight: set power to %lu\n", power);
+ pr_debug("set power to %lu\n", power);
if (bd->props.power != power) {
bd->props.power = power;
backlight_update_status(bd);
@@ -161,8 +163,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
if (brightness > bd->props.max_brightness)
rc = -EINVAL;
else {
- pr_debug("backlight: set brightness to %lu\n",
- brightness);
+ pr_debug("set brightness to %lu\n", brightness);
bd->props.brightness = brightness;
backlight_update_status(bd);
rc = count;
@@ -378,8 +379,8 @@ static int __init backlight_class_init(void)
{
backlight_class = class_create(THIS_MODULE, "backlight");
if (IS_ERR(backlight_class)) {
- printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
- PTR_ERR(backlight_class));
+ pr_warn("Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(backlight_class));
return PTR_ERR(backlight_class);
}
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 6dab13fe562e..23d732677ba1 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -544,7 +544,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
return -EINVAL;
}
- lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL);
if (!lcd) {
dev_err(&spi->dev, "failed to allocate memory\n");
return -ENOMEM;
@@ -554,10 +554,9 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
lcd, &corgi_lcd_ops);
- if (IS_ERR(lcd->lcd_dev)) {
- ret = PTR_ERR(lcd->lcd_dev);
- goto err_free_lcd;
- }
+ if (IS_ERR(lcd->lcd_dev))
+ return PTR_ERR(lcd->lcd_dev);
+
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
@@ -591,8 +590,6 @@ err_unregister_bl:
backlight_device_unregister(lcd->bl_dev);
err_unregister_lcd:
lcd_device_unregister(lcd->lcd_dev);
-err_free_lcd:
- kfree(lcd);
return ret;
}
@@ -613,7 +610,6 @@ static int __devexit corgi_lcd_remove(struct spi_device *spi)
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
- kfree(lcd);
return 0;
}
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 22489eb5f3e0..37bae801e23b 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -27,6 +27,8 @@
* Alan Hourihane <alanh-at-tungstengraphics-dot-com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -180,14 +182,13 @@ static int cr_backlight_probe(struct platform_device *pdev)
lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
CRVML_DEVICE_LPC, NULL);
if (!lpc_dev) {
- printk("INTEL CARILLO RANCH LPC not found.\n");
+ pr_err("INTEL CARILLO RANCH LPC not found.\n");
return -ENODEV;
}
pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en);
if (!(dev_en & CRVML_GPIOEN_BIT)) {
- printk(KERN_ERR
- "Carillo Ranch GPIO device was not enabled.\n");
+ pr_err("Carillo Ranch GPIO device was not enabled.\n");
pci_dev_put(lpc_dev);
return -ENODEV;
}
@@ -270,7 +271,7 @@ static int __init cr_backlight_init(void)
return PTR_ERR(crp);
}
- printk("Carillo Ranch Backlight Driver Initialized.\n");
+ pr_info("Carillo Ranch Backlight Driver Initialized.\n");
return 0;
}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 30e19681a30b..573c7ece0fde 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
DA9034_WLED_ISET(pdata->output_current));
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_brightness;
bl = backlight_device_register(pdev->name, data->da903x_dev, data,
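
The added memset() matters because struct backlight_properties lives on the stack here; without it, fields the driver never sets (power, fb_blank, state) would hold whatever the stack happened to contain. A sketch of the zeroed-then-register sequence (device name and limits are illustrative):

#include <linux/backlight.h>
#include <linux/string.h>

static struct backlight_device *example_register_bl(struct device *parent,
                                                    void *drvdata,
                                                    const struct backlight_ops *ops)
{
        struct backlight_properties props;

        memset(&props, 0, sizeof(props));       /* don't leak stack garbage */
        props.type = BACKLIGHT_RAW;
        props.max_brightness = 100;

        return backlight_device_register("example-bl", parent, drvdata,
                                         ops, &props);
}
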
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 9ce6170c1860..8c660fcd250d 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -106,7 +108,7 @@ static int genericbl_probe(struct platform_device *pdev)
generic_backlight_device = bd;
- printk("Generic Backlight Driver Initialized.\n");
+ pr_info("Generic Backlight Driver Initialized.\n");
return 0;
}
@@ -120,7 +122,7 @@ static int genericbl_remove(struct platform_device *pdev)
backlight_device_unregister(bd);
- printk("Generic Backlight Driver Unloaded\n");
+ pr_info("Generic Backlight Driver Unloaded\n");
return 0;
}
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 5118a9f029ab..9327cd1b3143 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -220,7 +220,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
/* allocate and initialise our state */
- ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL);
+ ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
if (ili == NULL) {
dev_err(dev, "no memory for device\n");
return -ENOMEM;
@@ -240,8 +240,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
if (IS_ERR(lcd)) {
dev_err(dev, "failed to register lcd device\n");
- ret = PTR_ERR(lcd);
- goto err_free;
+ return PTR_ERR(lcd);
}
ili->lcd = lcd;
@@ -259,20 +258,16 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
err_unregister:
lcd_device_unregister(lcd);
- err_free:
- kfree(ili);
-
return ret;
}
EXPORT_SYMBOL_GPL(ili9320_probe_spi);
-int __devexit ili9320_remove(struct ili9320 *ili)
+int ili9320_remove(struct ili9320 *ili)
{
ili9320_power(ili, FB_BLANK_POWERDOWN);
lcd_device_unregister(ili->lcd);
- kfree(ili);
return 0;
}
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f8af5d786ab..16f593b64427 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/fb.h>
@@ -38,7 +40,7 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
ret = jornada_ssp_byte(GETBRIGHTNESS);
if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) {
- printk(KERN_ERR "bl : get brightness timeout\n");
+ pr_err("get brightness timeout\n");
jornada_ssp_end();
return -ETIMEDOUT;
} else /* exchange txdummy for value */
@@ -59,7 +61,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
ret = jornada_ssp_byte(BRIGHTNESSOFF);
if (ret != TXDUMMY) {
- printk(KERN_INFO "bl : brightness off timeout\n");
+ pr_info("brightness off timeout\n");
/* turn off backlight */
PPSR &= ~PPC_LDD1;
PPDR |= PPC_LDD1;
@@ -70,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
/* send command to our mcu */
if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
- printk(KERN_INFO "bl : failed to set brightness\n");
+ pr_info("failed to set brightness\n");
ret = -ETIMEDOUT;
goto out;
}
@@ -81,7 +83,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
but due to physical layout it is equal to 0, so we simply
invert the value (MAX VALUE - NEW VALUE). */
if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
- printk(KERN_ERR "bl : set brightness failed\n");
+ pr_err("set brightness failed\n");
ret = -ETIMEDOUT;
}
@@ -113,7 +115,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
- printk(KERN_ERR "bl : failed to register device, err=%x\n", ret);
+ pr_err("failed to register device, err=%x\n", ret);
return ret;
}
@@ -125,7 +127,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
jornada_bl_update_status(bd);
platform_set_drvdata(pdev, bd);
- printk(KERN_INFO "HP Jornada 700 series backlight driver\n");
+ pr_info("HP Jornada 700 series backlight driver\n");
return 0;
}
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index 22d231a17e3c..635b30523fd5 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/kernel.h>
@@ -44,7 +46,7 @@ static int jornada_lcd_get_contrast(struct lcd_device *dev)
jornada_ssp_start();
if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
- printk(KERN_ERR "lcd: get contrast failed\n");
+ pr_err("get contrast failed\n");
jornada_ssp_end();
return -ETIMEDOUT;
} else {
@@ -65,7 +67,7 @@ static int jornada_lcd_set_contrast(struct lcd_device *dev, int value)
/* push the new value */
if (jornada_ssp_byte(value) != TXDUMMY) {
- printk(KERN_ERR "lcd : set contrast failed\n");
+ pr_err("set contrast failed\n");
jornada_ssp_end();
return -ETIMEDOUT;
}
@@ -103,7 +105,7 @@ static int jornada_lcd_probe(struct platform_device *pdev)
if (IS_ERR(lcd_device)) {
ret = PTR_ERR(lcd_device);
- printk(KERN_ERR "lcd : failed to register device\n");
+ pr_err("failed to register device\n");
return ret;
}
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 6022b67285ec..40f606a86093 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -159,7 +161,8 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return -EINVAL;
}
- priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
+ GFP_KERNEL);
if (priv == NULL) {
dev_err(&spi->dev, "No memory for this device.\n");
@@ -177,7 +180,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
- goto err;
+ return ret;
}
ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
@@ -185,7 +188,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
- goto err2;
+ goto err;
}
priv->io_reg = regulator_get(&spi->dev, "vdd");
@@ -193,7 +196,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->io_reg);
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
- goto err3;
+ goto err2;
}
priv->core_reg = regulator_get(&spi->dev, "vcore");
@@ -201,14 +204,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
- goto err4;
+ goto err3;
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
if (IS_ERR(priv->ld)) {
ret = PTR_ERR(priv->ld);
- goto err5;
+ goto err4;
}
/* Init the LCD */
@@ -220,16 +223,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return 0;
-err5:
- regulator_put(priv->core_reg);
err4:
- regulator_put(priv->io_reg);
+ regulator_put(priv->core_reg);
err3:
- gpio_free(pdata->data_enable_gpio);
+ regulator_put(priv->io_reg);
err2:
- gpio_free(pdata->reset_gpio);
+ gpio_free(pdata->data_enable_gpio);
err:
- kfree(priv);
+ gpio_free(pdata->reset_gpio);
return ret;
}
@@ -250,8 +251,6 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
regulator_put(priv->io_reg);
regulator_put(priv->core_reg);
- kfree(priv);
-
return 0;
}
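
With the leading kzalloc() gone, every label in l4f00242t03_probe() shifts down one step; the property the renaming preserves is that each label releases exactly the resources acquired before the failing call, in reverse order. A generic sketch of that unwind shape (resource names are placeholders):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>

struct example_res {
        struct regulator *vdd;
        int reset_gpio;
};

static int example_acquire(struct device *dev, struct example_res *res)
{
        int ret;

        ret = gpio_request_one(res->reset_gpio, GPIOF_OUT_INIT_LOW, "reset");
        if (ret)
                return ret;                     /* nothing to undo yet */

        res->vdd = regulator_get(dev, "vdd");
        if (IS_ERR(res->vdd)) {
                ret = PTR_ERR(res->vdd);
                goto err_gpio;                  /* undo only the gpio */
        }

        ret = regulator_enable(res->vdd);
        if (ret)
                goto err_reg;                   /* regulator first, gpio last */

        return 0;

err_reg:
        regulator_put(res->vdd);
err_gpio:
        gpio_free(res->reset_gpio);
        return ret;
}
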
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 79c1b0d609a8..a5d0d024bb92 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -5,6 +5,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -32,6 +34,8 @@ static int fb_notifier_callback(struct notifier_block *self,
case FB_EVENT_BLANK:
case FB_EVENT_MODE_CHANGE:
case FB_EVENT_MODE_CHANGE_ALL:
+ case FB_EARLY_EVENT_BLANK:
+ case FB_R_EARLY_EVENT_BLANK:
break;
default:
return 0;
@@ -46,6 +50,14 @@ static int fb_notifier_callback(struct notifier_block *self,
if (event == FB_EVENT_BLANK) {
if (ld->ops->set_power)
ld->ops->set_power(ld, *(int *)evdata->data);
+ } else if (event == FB_EARLY_EVENT_BLANK) {
+ if (ld->ops->early_set_power)
+ ld->ops->early_set_power(ld,
+ *(int *)evdata->data);
+ } else if (event == FB_R_EARLY_EVENT_BLANK) {
+ if (ld->ops->r_early_set_power)
+ ld->ops->r_early_set_power(ld,
+ *(int *)evdata->data);
} else {
if (ld->ops->set_mode)
ld->ops->set_mode(ld, evdata->data);
@@ -106,7 +118,7 @@ static ssize_t lcd_store_power(struct device *dev,
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
- pr_debug("lcd: set power to %lu\n", power);
+ pr_debug("set power to %lu\n", power);
ld->ops->set_power(ld, power);
rc = count;
}
@@ -142,7 +154,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
- pr_debug("lcd: set contrast to %lu\n", contrast);
+ pr_debug("set contrast to %lu\n", contrast);
ld->ops->set_contrast(ld, contrast);
rc = count;
}
@@ -253,8 +265,8 @@ static int __init lcd_class_init(void)
{
lcd_class = class_create(THIS_MODULE, "lcd");
if (IS_ERR(lcd_class)) {
- printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
- PTR_ERR(lcd_class));
+ pr_warn("Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(lcd_class));
return PTR_ERR(lcd_class);
}
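
FB_EARLY_EVENT_BLANK is delivered before the framebuffer actually blanks, and FB_R_EARLY_EVENT_BLANK if that blank then fails, so a panel driver can cut or restore power ahead of the console. A hedged sketch of an lcd_ops using the matching early_set_power/r_early_set_power hooks added to struct lcd_ops alongside this change (the panel helper is hypothetical):

#include <linux/fb.h>
#include <linux/lcd.h>

/* Hypothetical panel helper -- stands in for real hardware accessors. */
static void panel_power(int fb_blank) { }

static int example_set_power(struct lcd_device *ld, int power)
{
        panel_power(power);             /* runs on FB_EVENT_BLANK */
        return 0;
}

static int example_early_set_power(struct lcd_device *ld, int power)
{
        if (power != FB_BLANK_UNBLANK)
                panel_power(power);     /* power down before fbcon blanks */
        return 0;
}

static int example_r_early_set_power(struct lcd_device *ld, int power)
{
        panel_power(power);             /* restore if the early blank failed */
        return 0;
}

static struct lcd_ops example_lcd_ops = {
        .set_power         = example_set_power,
        .early_set_power   = example_early_set_power,
        .r_early_set_power = example_r_early_set_power,
};
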
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index efd352be21ae..58f517fb7d40 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -707,7 +707,7 @@ static int ld9040_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -717,7 +717,7 @@ static int ld9040_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -726,7 +726,7 @@ static int ld9040_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- goto out_free_lcd;
+ return -EFAULT;
}
mutex_init(&lcd->lock);
@@ -734,13 +734,13 @@ static int ld9040_probe(struct spi_device *spi)
ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- goto out_free_lcd;
+ return ret;
}
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto out_free_lcd;
+ goto out_free_regulator;
}
lcd->ld = ld;
@@ -782,10 +782,9 @@ static int ld9040_probe(struct spi_device *spi)
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
-out_free_lcd:
+out_free_regulator:
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
return ret;
}
@@ -797,7 +796,6 @@ static int __devexit ld9040_remove(struct spi_device *spi)
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
return 0;
}
@@ -846,7 +844,6 @@ static void ld9040_shutdown(struct spi_device *spi)
static struct spi_driver ld9040_driver = {
.driver = {
.name = "ld9040",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ld9040_probe,
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
new file mode 100644
index 000000000000..bebeb63607db
--- /dev/null
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -0,0 +1,423 @@
+/*
+ * lm3533-bl.c -- LM3533 Backlight driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_HVCTRLBANK_COUNT 2
+#define LM3533_BL_MAX_BRIGHTNESS 255
+
+#define LM3533_REG_CTRLBANK_AB_BCONF 0x1a
+
+
+struct lm3533_bl {
+ struct lm3533 *lm3533;
+ struct lm3533_ctrlbank cb;
+ struct backlight_device *bd;
+ int id;
+};
+
+
+static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
+{
+ return bl->id;
+}
+
+static int lm3533_bl_update_status(struct backlight_device *bd)
+{
+ struct lm3533_bl *bl = bl_get_data(bd);
+ int brightness = bd->props.brightness;
+
+ if (bd->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+ if (bd->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+}
+
+static int lm3533_bl_get_brightness(struct backlight_device *bd)
+{
+ struct lm3533_bl *bl = bl_get_data(bd);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static const struct backlight_ops lm3533_bl_ops = {
+ .get_brightness = lm3533_bl_get_brightness,
+ .update_status = lm3533_bl_update_status,
+};
+
+static ssize_t show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ unsigned channel = lm3533_bl_get_ctrlbank_id(bl);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t show_als_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+ u8 val;
+ u8 mask;
+ bool enable;
+ int ret;
+
+ ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+ if (ret)
+ return ret;
+
+ mask = 1 << (2 * ctrlbank);
+ enable = val & mask;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+ int enable;
+ u8 val;
+ u8 mask;
+ int ret;
+
+ if (kstrtoint(buf, 0, &enable))
+ return -EINVAL;
+
+ mask = 1 << (2 * ctrlbank);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+ mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ u8 mask;
+ int linear;
+ int ret;
+
+ ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+ if (ret)
+ return ret;
+
+ mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+ if (val & mask)
+ linear = 1;
+ else
+ linear = 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ unsigned long linear;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &linear))
+ return -EINVAL;
+
+ mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+ if (linear)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+ mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ ret = lm3533_ctrlbank_set_pwm(&bl->cb, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static LM3533_ATTR_RO(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+
+static struct attribute *lm3533_bl_attributes[] = {
+ &dev_attr_als_channel.attr,
+ &dev_attr_als_en.attr,
+ &dev_attr_id.attr,
+ &dev_attr_linear.attr,
+ &dev_attr_pwm.attr,
+ NULL,
+};
+
+static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_als_channel.attr ||
+ attr == &dev_attr_als_en.attr) {
+ if (!bl->lm3533->have_als)
+ mode = 0;
+ }
+
+ return mode;
+};
+
+static struct attribute_group lm3533_bl_attribute_group = {
+ .is_visible = lm3533_bl_attr_is_visible,
+ .attrs = lm3533_bl_attributes
+};
+
+static int __devinit lm3533_bl_setup(struct lm3533_bl *bl,
+ struct lm3533_bl_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current);
+ if (ret)
+ return ret;
+
+ return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_bl_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_bl_platform_data *pdata;
+ struct lm3533_bl *bl;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdev->id < 0 || pdev->id >= LM3533_HVCTRLBANK_COUNT) {
+ dev_err(&pdev->dev, "illegal backlight id %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+ if (!bl) {
+ dev_err(&pdev->dev,
+ "failed to allocate memory for backlight\n");
+ return -ENOMEM;
+ }
+
+ bl->lm3533 = lm3533;
+ bl->id = pdev->id;
+
+ bl->cb.lm3533 = lm3533;
+ bl->cb.id = lm3533_bl_get_ctrlbank_id(bl);
+ bl->cb.dev = NULL; /* until registered */
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
+ props.brightness = pdata->default_brightness;
+ bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
+ &lm3533_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ dev_err(&pdev->dev, "failed to register backlight device\n");
+ ret = PTR_ERR(bd);
+ goto err_free;
+ }
+
+ bl->bd = bd;
+ bl->cb.dev = &bl->bd->dev;
+
+ platform_set_drvdata(pdev, bl);
+
+ ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ backlight_update_status(bd);
+
+ ret = lm3533_bl_setup(bl, pdata);
+ if (ret)
+ goto err_sysfs_remove;
+
+ ret = lm3533_ctrlbank_enable(&bl->cb);
+ if (ret)
+ goto err_sysfs_remove;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+err_unregister:
+ backlight_device_unregister(bd);
+err_free:
+ kfree(bl);
+
+ return ret;
+}
+
+static int __devexit lm3533_bl_remove(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bd = bl->bd;
+
+ dev_dbg(&bd->dev, "%s\n", __func__);
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.brightness = 0;
+
+ lm3533_ctrlbank_disable(&bl->cb);
+ sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+ backlight_device_unregister(bd);
+ kfree(bl);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lm3533_bl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ return lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static int lm3533_bl_resume(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ return lm3533_ctrlbank_enable(&bl->cb);
+}
+#else
+#define lm3533_bl_suspend NULL
+#define lm3533_bl_resume NULL
+#endif
+
+static void lm3533_bl_shutdown(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static struct platform_driver lm3533_bl_driver = {
+ .driver = {
+ .name = "lm3533-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm3533_bl_probe,
+ .remove = __devexit_p(lm3533_bl_remove),
+ .shutdown = lm3533_bl_shutdown,
+ .suspend = lm3533_bl_suspend,
+ .resume = lm3533_bl_resume,
+};
+module_platform_driver(lm3533_bl_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Backlight driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-backlight");
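
Rather than registering its ALS attributes conditionally, the new driver hides them through the group's .is_visible hook: sysfs_create_group() consults the hook once per attribute and omits any for which it returns 0. A reduced sketch of that mechanism (the have_feature flag is illustrative):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct example_data {
        bool have_feature;
};

static ssize_t feature_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "1\n");
}
static DEVICE_ATTR(feature, 0444, feature_show, NULL);

static umode_t example_attr_is_visible(struct kobject *kobj,
                                       struct attribute *attr, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct example_data *data = dev_get_drvdata(dev);

        /* Hide the attribute entirely when the hardware lacks the feature. */
        if (attr == &dev_attr_feature.attr && !data->have_feature)
                return 0;

        return attr->mode;
}

static struct attribute *example_attrs[] = {
        &dev_attr_feature.attr,
        NULL,
};

static struct attribute_group example_attr_group = {
        .is_visible = example_attr_is_visible,
        .attrs      = example_attrs,
};
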
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 4161f9e3982a..a9f2c36966f1 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -168,7 +168,8 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
goto err;
}
- st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
+ st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
+ GFP_KERNEL);
if (st == NULL) {
dev_err(&spi->dev, "No memory for device state\n");
ret = -ENOMEM;
@@ -178,7 +179,7 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto err2;
+ goto err;
}
st->spi = spi;
@@ -193,8 +194,6 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
return 0;
-err2:
- kfree(st);
err:
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
@@ -212,8 +211,6 @@ static int __devexit lms283gf05_remove(struct spi_device *spi)
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
- kfree(st);
-
return 0;
}
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 333949ff3265..6c0f1ac0d32a 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -232,23 +232,20 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
struct lcd_device *ld;
int ret;
- lcd = kzalloc(sizeof(struct ltv350qv), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ltv350qv), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
lcd->spi = spi;
lcd->power = FB_BLANK_POWERDOWN;
- lcd->buffer = kzalloc(8, GFP_KERNEL);
- if (!lcd->buffer) {
- ret = -ENOMEM;
- goto out_free_lcd;
- }
+ lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL);
+ if (!lcd->buffer)
+ return -ENOMEM;
ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_buffer;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
+
lcd->ld = ld;
ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
@@ -261,10 +258,6 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(ld);
-out_free_buffer:
- kfree(lcd->buffer);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -274,8 +267,6 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
- kfree(lcd->buffer);
- kfree(lcd);
return 0;
}
@@ -310,7 +301,6 @@ static void ltv350qv_shutdown(struct spi_device *spi)
static struct spi_driver ltv350qv_driver = {
.driver = {
.name = "ltv350qv",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 0175bfb08a1c..bfdc5fbeaa11 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -168,7 +170,7 @@ static int omapbl_probe(struct platform_device *pdev)
dev->props.brightness = pdata->default_intensity;
omapbl_update_status(dev);
- printk(KERN_INFO "OMAP LCD backlight initialised\n");
+ pr_info("OMAP LCD backlight initialised\n");
return 0;
}
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index c65853cb9740..c092159f4383 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -111,6 +111,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
if (!pcf_bl)
return -ENOMEM;
+ memset(&bl_props, 0, sizeof(bl_props));
bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = 0x3f;
bl_props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 6af183d6465e..69b35f02929e 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -68,13 +70,13 @@ static int progearbl_probe(struct platform_device *pdev)
pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
if (!pmu_dev) {
- printk("ALI M7101 PMU not found.\n");
+ pr_err("ALI M7101 PMU not found.\n");
return -ENODEV;
}
sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
if (!sb_dev) {
- printk("ALI 1533 SB not found.\n");
+ pr_err("ALI 1533 SB not found.\n");
ret = -ENODEV;
goto put_pmu;
}
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index e264f55b2574..6437ae474cf2 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -741,7 +741,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -751,7 +751,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -760,14 +760,12 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- goto out_free_lcd;
+ return -EFAULT;
}
ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -824,8 +822,6 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
out_lcd_unregister:
lcd_device_unregister(ld);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -838,7 +834,6 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &dev_attr_gamma_mode);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- kfree(lcd);
return 0;
}
@@ -899,7 +894,6 @@ static void s6e63m0_shutdown(struct spi_device *spi)
static struct spi_driver s6e63m0_driver = {
.driver = {
.name = "s6e63m0",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = s6e63m0_probe,
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 2368b8e5f89e..02444d042cd5 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -349,7 +349,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
if (err)
return err;
- lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -357,11 +357,9 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
- lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
- if (lcd->buf == NULL) {
- kfree(lcd);
+ lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
+ if (lcd->buf == NULL)
return -ENOMEM;
- }
m = &lcd->msg;
x = &lcd->xfer;
@@ -383,15 +381,13 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
break;
default:
dev_err(&spi->dev, "Unsupported model");
- goto out_free;
+ return -EINVAL;
}
lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
lcd, &tdo24m_ops);
- if (IS_ERR(lcd->lcd_dev)) {
- err = PTR_ERR(lcd->lcd_dev);
- goto out_free;
- }
+ if (IS_ERR(lcd->lcd_dev))
+ return PTR_ERR(lcd->lcd_dev);
dev_set_drvdata(&spi->dev, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
@@ -402,9 +398,6 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(lcd->lcd_dev);
-out_free:
- kfree(lcd->buf);
- kfree(lcd);
return err;
}
@@ -414,8 +407,6 @@ static int __devexit tdo24m_remove(struct spi_device *spi)
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
- kfree(lcd->buf);
- kfree(lcd);
return 0;
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2b241abced43..0d54e607e82d 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -82,8 +82,11 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct backlight_properties props;
- struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
+ struct tosa_bl_data *data;
int ret = 0;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct tosa_bl_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -92,7 +95,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
- goto err_gpio_bl;
+ return ret;
}
ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
if (ret)
@@ -122,8 +125,6 @@ err_reg:
data->bl = NULL;
err_gpio_dir:
gpio_free(TOSA_GPIO_BL_C20MA);
-err_gpio_bl:
- kfree(data);
return ret;
}
@@ -136,8 +137,6 @@ static int __devexit tosa_bl_remove(struct i2c_client *client)
gpio_free(TOSA_GPIO_BL_C20MA);
- kfree(data);
-
return 0;
}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 2231aec23918..47823b8efff0 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -174,7 +174,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
int ret;
struct tosa_lcd_data *data;
- data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL);
+ data = devm_kzalloc(&spi->dev, sizeof(struct tosa_lcd_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -187,7 +188,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0)
- goto err_spi;
+ return ret;
data->spi = spi;
dev_set_drvdata(&spi->dev, data);
@@ -224,8 +225,6 @@ err_gpio_dir:
gpio_free(TOSA_GPIO_TG_ON);
err_gpio_tg:
dev_set_drvdata(&spi->dev, NULL);
-err_spi:
- kfree(data);
return ret;
}
@@ -242,7 +241,6 @@ static int __devexit tosa_lcd_remove(struct spi_device *spi)
gpio_free(TOSA_GPIO_TG_ON);
dev_set_drvdata(&spi->dev, NULL);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 5d365deb5f82..9e5517a3a52b 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -194,6 +194,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
data->current_brightness = 0;
data->isink_reg = isink_reg;
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_isel;
bl = backlight_device_register("wm831x", &pdev->dev, data,
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 1a268a294478..9bdd4b0c18c8 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off,
static int
adv7393_write_proc(struct file *file, const char __user * buffer,
- unsigned long count, void *data)
+ size_t count, void *data)
{
struct adv7393fb_device *fbdev = data;
- char line[8];
unsigned int val;
int ret;
- ret = copy_from_user(line, buffer, count);
+ ret = kstrtouint_from_user(buffer, count, 0, &val);
if (ret)
return -EFAULT;
- val = simple_strtoul(line, NULL, 0);
adv7393_write(fbdev->client, val >> 8, val & 0xff);
return count;
@@ -414,14 +412,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
ret = -EBUSY;
- goto out_8;
+ goto free_fbdev;
}
}
if (peripheral_request_list(ppi_pins, DRIVER_NAME)) {
dev_err(&client->dev, "requesting PPI peripheral failed\n");
ret = -EFAULT;
- goto out_8;
+ goto free_gpio;
}
fbdev->fb_mem =
@@ -432,7 +430,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n",
(u32) fbdev->fb_len);
ret = -ENOMEM;
- goto out_7;
+ goto free_ppi_pins;
}
fbdev->info.screen_base = (void *)fbdev->fb_mem;
@@ -464,27 +462,27 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
if (!fbdev->info.pseudo_palette) {
dev_err(&client->dev, "failed to allocate pseudo_palette\n");
ret = -ENOMEM;
- goto out_6;
+ goto free_fb_mem;
}
if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
dev_err(&client->dev, "failed to allocate colormap (%d entries)\n",
BFIN_LCD_NBR_PALETTE_ENTRIES);
ret = -EFAULT;
- goto out_5;
+ goto free_palette;
}
if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) {
dev_err(&client->dev, "unable to request PPI DMA\n");
ret = -EFAULT;
- goto out_4;
+ goto free_cmap;
}
if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0,
"PPI ERROR", fbdev) < 0) {
dev_err(&client->dev, "unable to request PPI ERROR IRQ\n");
ret = -EFAULT;
- goto out_3;
+ goto free_ch_ppi;
}
fbdev->open = 0;
@@ -494,14 +492,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev, "i2c attach: init error\n");
- goto out_1;
+ goto free_irq_ppi;
}
if (register_framebuffer(&fbdev->info) < 0) {
dev_err(&client->dev, "unable to register framebuffer\n");
ret = -EFAULT;
- goto out_1;
+ goto free_irq_ppi;
}
dev_info(&client->dev, "fb%d: %s frame buffer device\n",
@@ -512,7 +510,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
if (!entry) {
dev_err(&client->dev, "unable to create /proc entry\n");
ret = -EFAULT;
- goto out_0;
+ goto free_fb;
}
entry->read_proc = adv7393_read_proc;
@@ -521,22 +519,25 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
return 0;
- out_0:
+free_fb:
unregister_framebuffer(&fbdev->info);
- out_1:
+free_irq_ppi:
free_irq(IRQ_PPI_ERROR, fbdev);
- out_3:
+free_ch_ppi:
free_dma(CH_PPI);
- out_4:
- dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
- fbdev->dma_handle);
- out_5:
+free_cmap:
fb_dealloc_cmap(&fbdev->info.cmap);
- out_6:
+free_palette:
kfree(fbdev->info.pseudo_palette);
- out_7:
+free_fb_mem:
+ dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
+ fbdev->dma_handle);
+free_ppi_pins:
peripheral_free_list(ppi_pins);
- out_8:
+free_gpio:
+ if (ANOMALY_05000400)
+ gpio_free(P_IDENT(P_PPI0_FS3));
+free_fbdev:
kfree(fbdev);
return ret;
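
In the write_proc change above, kstrtouint_from_user() replaces the copy_from_user()-into-char line[8]-then-simple_strtoul() sequence, which could overrun the fixed buffer for long writes. A sketch of a write handler built on it (the device write is a placeholder):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static int example_write_proc(struct file *file, const char __user *buffer,
                              size_t count, void *data)
{
        unsigned int val;
        int ret;

        /* Copies at most 'count' bytes from userspace and parses base 0. */
        ret = kstrtouint_from_user(buffer, count, 0, &val);
        if (ret)
                return ret;

        /* ... program 'val' into the device here ... */

        return count;
}
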
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index 377dde3d5bfc..c95b417d0d41 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev)
static struct platform_driver broadsheetfb_driver = {
.probe = broadsheetfb_probe,
- .remove = broadsheetfb_remove,
+ .remove = __devexit_p(broadsheetfb_remove),
.driver = {
.owner = THIS_MODULE,
.name = "broadsheetfb",
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index f56699d8122a..eae46f6457e2 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -1,7 +1,8 @@
/*
- * Cobalt server LCD frame buffer driver.
+ * Cobalt/SEAD3 LCD frame buffer driver.
*
* Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -62,6 +63,7 @@
#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
+#ifdef CONFIG_MIPS_COBALT
static inline void lcd_write_control(struct fb_info *info, u8 control)
{
writel((u32)control << 24, info->screen_base);
@@ -81,6 +83,47 @@ static inline u8 lcd_read_data(struct fb_info *info)
{
return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
}
+#else
+
+#define LCD_CTL 0x00
+#define LCD_DATA 0x08
+#define CPLD_STATUS 0x10
+#define CPLD_DATA 0x18
+
+static inline void cpld_wait(struct fb_info *info)
+{
+ do {
+ } while (readl(info->screen_base + CPLD_STATUS) & 1);
+}
+
+static inline void lcd_write_control(struct fb_info *info, u8 control)
+{
+ cpld_wait(info);
+ writel(control, info->screen_base + LCD_CTL);
+}
+
+static inline u8 lcd_read_control(struct fb_info *info)
+{
+ cpld_wait(info);
+ readl(info->screen_base + LCD_CTL);
+ cpld_wait(info);
+ return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+
+static inline void lcd_write_data(struct fb_info *info, u8 data)
+{
+ cpld_wait(info);
+ writel(data, info->screen_base + LCD_DATA);
+}
+
+static inline u8 lcd_read_data(struct fb_info *info)
+{
+ cpld_wait(info);
+ readl(info->screen_base + LCD_DATA);
+ cpld_wait(info);
+ return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+#endif
static int lcd_busy_wait(struct fb_info *info)
{
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index c2d11fef114b..e2c96d01d8f5 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -224,5 +224,19 @@ config FONT_10x18
big letters. It fits between the sun 12x22 and the normal 8x16 font.
If other fonts are too big or too small for you, say Y, otherwise say N.
+config FONT_AUTOSELECT
+ def_bool y
+ depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
+ depends on !FONT_8x8
+ depends on !FONT_6x11
+ depends on !FONT_7x14
+ depends on !FONT_PEARL_8x8
+ depends on !FONT_ACORN_8x8
+ depends on !FONT_MINI_4x6
+ depends on !FONT_SUN8x16
+ depends on !FONT_SUN12x22
+ depends on !FONT_10x18
+ select FONT_8x16
+
endmenu
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 784139aed079..b4a632ada401 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -18,6 +18,8 @@
static bool request_mem_succeeded = false;
+static struct pci_dev *default_vga;
+
static struct fb_var_screeninfo efifb_defined __devinitdata = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
@@ -298,35 +300,72 @@ static struct fb_ops efifb_ops = {
.fb_imageblit = cfb_imageblit,
};
+struct pci_dev *vga_default_device(void)
+{
+ return default_vga;
+}
+
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+ default_vga = pdev;
+}
+
static int __init efifb_setup(char *options)
{
char *this_opt;
int i;
+ struct pci_dev *dev = NULL;
+
+ if (options && *options) {
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ if (!*this_opt) continue;
+
+ for (i = 0; i < M_UNKNOWN; i++) {
+ if (!strcmp(this_opt, dmi_list[i].optname) &&
+ dmi_list[i].base != 0) {
+ screen_info.lfb_base = dmi_list[i].base;
+ screen_info.lfb_linelength = dmi_list[i].stride;
+ screen_info.lfb_width = dmi_list[i].width;
+ screen_info.lfb_height = dmi_list[i].height;
+ }
+ }
+ if (!strncmp(this_opt, "base:", 5))
+ screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
+ else if (!strncmp(this_opt, "stride:", 7))
+ screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
+ else if (!strncmp(this_opt, "height:", 7))
+ screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
+ else if (!strncmp(this_opt, "width:", 6))
+ screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+ }
+ }
- if (!options || !*options)
- return 0;
+ for_each_pci_dev(dev) {
+ int i;
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt) continue;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
- for (i = 0; i < M_UNKNOWN; i++) {
- if (!strcmp(this_opt, dmi_list[i].optname) &&
- dmi_list[i].base != 0) {
- screen_info.lfb_base = dmi_list[i].base;
- screen_info.lfb_linelength = dmi_list[i].stride;
- screen_info.lfb_width = dmi_list[i].width;
- screen_info.lfb_height = dmi_list[i].height;
- }
+ for (i=0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+
+ if (!(pci_resource_flags(dev, i) & IORESOURCE_MEM))
+ continue;
+
+ start = pci_resource_start(dev, i);
+ end = pci_resource_end(dev, i);
+
+ if (!start || !end)
+ continue;
+
+ if (screen_info.lfb_base >= start &&
+ (screen_info.lfb_base + screen_info.lfb_size) < end)
+ default_vga = dev;
}
- if (!strncmp(this_opt, "base:", 5))
- screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
- else if (!strncmp(this_opt, "stride:", 7))
- screen_info.lfb_linelength = simple_strtoul(this_opt+7, NULL, 0) * 4;
- else if (!strncmp(this_opt, "height:", 7))
- screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
- else if (!strncmp(this_opt, "width:", 6))
- screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
}
+
return 0;
}
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index f8babbeee275..345d96230978 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -507,16 +507,16 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
err = fb_alloc_cmap(&info->cmap, 256, 0);
if (err)
- goto failed;
+ goto failed_cmap;
err = ep93xxfb_alloc_videomem(info);
if (err)
- goto failed;
+ goto failed_videomem;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -ENXIO;
- goto failed;
+ goto failed_resource;
}
/*
@@ -532,7 +532,7 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
fbi->mmio_base = ioremap(res->start, resource_size(res));
if (!fbi->mmio_base) {
err = -ENXIO;
- goto failed;
+ goto failed_resource;
}
strcpy(info->fix.id, pdev->name);
@@ -553,24 +553,24 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
if (err == 0) {
dev_err(info->dev, "No suitable video mode found\n");
err = -EINVAL;
- goto failed;
+ goto failed_mode;
}
if (mach_info->setup) {
err = mach_info->setup(pdev);
if (err)
- return err;
+ goto failed_mode;
}
err = ep93xxfb_check_var(&info->var, info);
if (err)
- goto failed;
+ goto failed_check;
fbi->clk = clk_get(info->dev, NULL);
if (IS_ERR(fbi->clk)) {
err = PTR_ERR(fbi->clk);
fbi->clk = NULL;
- goto failed;
+ goto failed_check;
}
ep93xxfb_set_par(info);
@@ -585,15 +585,17 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
return 0;
failed:
- if (fbi->clk)
- clk_put(fbi->clk);
- if (fbi->mmio_base)
- iounmap(fbi->mmio_base);
- ep93xxfb_dealloc_videomem(info);
- if (&info->cmap)
- fb_dealloc_cmap(&info->cmap);
+ clk_put(fbi->clk);
+failed_check:
if (fbi->mach_info->teardown)
fbi->mach_info->teardown(pdev);
+failed_mode:
+ iounmap(fbi->mmio_base);
+failed_resource:
+ ep93xxfb_dealloc_videomem(info);
+failed_videomem:
+ fb_dealloc_cmap(&info->cmap);
+failed_cmap:
kfree(info);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index 2a4481cf260c..a36b2d28280e 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -21,14 +21,14 @@
#include <video/exynos_dp.h>
-#include <plat/cpu.h>
-
#include "exynos_dp_core.h"
static int exynos_dp_init_dp(struct exynos_dp_device *dp)
{
exynos_dp_reset(dp);
+ exynos_dp_swreset(dp);
+
/* SW defined function Normal operation */
exynos_dp_enable_sw_function(dp);
@@ -478,7 +478,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
int lane_count;
u8 buf[5];
- u8 *adjust_request;
+ u8 adjust_request[2];
u8 voltage_swing;
u8 pre_emphasis;
u8 training_lane;
@@ -493,8 +493,8 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
/* set training pattern 2 for EQ */
exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
- adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
- - DPCD_ADDR_LANE0_1_STATUS);
+ adjust_request[0] = link_status[4];
+ adjust_request[1] = link_status[5];
exynos_dp_get_adjust_train(dp, adjust_request);
@@ -566,7 +566,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
u8 buf[5];
u32 reg;
- u8 *adjust_request;
+ u8 adjust_request[2];
udelay(400);
@@ -575,8 +575,8 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
lane_count = dp->link_train.lane_count;
if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
- adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
- - DPCD_ADDR_LANE0_1_STATUS);
+ adjust_request[0] = link_status[4];
+ adjust_request[1] = link_status[5];
if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) {
/* training pattern set to Normal */
@@ -770,7 +770,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
return -ETIMEDOUT;
}
- mdelay(100);
+ udelay(1);
}
/* Set to use the register calculated M/N video */
@@ -804,7 +804,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
return -ETIMEDOUT;
}
- mdelay(100);
+ mdelay(1);
}
if (retval != 0)
@@ -860,7 +860,8 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
return -EINVAL;
}
- dp = kzalloc(sizeof(struct exynos_dp_device), GFP_KERNEL);
+ dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+ GFP_KERNEL);
if (!dp) {
dev_err(&pdev->dev, "no memory for device data\n");
return -ENOMEM;
@@ -871,8 +872,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
dp->clock = clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
dev_err(&pdev->dev, "failed to get clock\n");
- ret = PTR_ERR(dp->clock);
- goto err_dp;
+ return PTR_ERR(dp->clock);
}
clk_enable(dp->clock);
@@ -884,35 +884,25 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
goto err_clock;
}
- res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request registers region\n");
- ret = -EINVAL;
- goto err_clock;
- }
-
- dp->res = res;
-
- dp->reg_base = ioremap(res->start, resource_size(res));
+ dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
if (!dp->reg_base) {
dev_err(&pdev->dev, "failed to ioremap\n");
ret = -ENOMEM;
- goto err_req_region;
+ goto err_clock;
}
dp->irq = platform_get_irq(pdev, 0);
if (!dp->irq) {
dev_err(&pdev->dev, "failed to get irq\n");
ret = -ENODEV;
- goto err_ioremap;
+ goto err_clock;
}
- ret = request_irq(dp->irq, exynos_dp_irq_handler, 0,
- "exynos-dp", dp);
+ ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
+ "exynos-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_ioremap;
+ goto err_clock;
}
dp->video_info = pdata->video_info;
@@ -924,7 +914,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
ret = exynos_dp_detect_hpd(dp);
if (ret) {
dev_err(&pdev->dev, "unable to detect hpd\n");
- goto err_irq;
+ goto err_clock;
}
exynos_dp_handle_edid(dp);
@@ -933,7 +923,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
dp->video_info->link_rate);
if (ret) {
dev_err(&pdev->dev, "unable to do link train\n");
- goto err_irq;
+ goto err_clock;
}
exynos_dp_enable_scramble(dp, 1);
@@ -947,23 +937,15 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
ret = exynos_dp_config_video(dp, dp->video_info);
if (ret) {
dev_err(&pdev->dev, "unable to config video\n");
- goto err_irq;
+ goto err_clock;
}
platform_set_drvdata(pdev, dp);
return 0;
-err_irq:
- free_irq(dp->irq, dp);
-err_ioremap:
- iounmap(dp->reg_base);
-err_req_region:
- release_mem_region(res->start, resource_size(res));
err_clock:
clk_put(dp->clock);
-err_dp:
- kfree(dp);
return ret;
}
@@ -976,16 +958,9 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev)
if (pdata && pdata->phy_exit)
pdata->phy_exit();
- free_irq(dp->irq, dp);
- iounmap(dp->reg_base);
-
clk_disable(dp->clock);
clk_put(dp->clock);
- release_mem_region(dp->res->start, resource_size(dp->res));
-
- kfree(dp);
-
return 0;
}
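
The probe() rework above replaces manually managed resources (kzalloc, request_mem_region, ioremap, request_irq and their matching error labels) with device-managed devm_* allocations, so every failure path after the clock lookup can simply jump to err_clock and the remove() path shrinks accordingly. A minimal sketch of that managed-resource pattern, assuming a hypothetical "foo" driver rather than the exynos_dp code itself:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>

struct foo_priv {
	void __iomem *base;
};

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	/* hypothetical handler; a real driver would inspect its hardware here */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	struct resource *res;
	int irq, ret;

	/* freed automatically when the device is unbound; no kfree() on error paths */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* requests the region and ioremaps it; both are released automatically */
	priv->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!priv->base)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* the IRQ is released by the driver core after remove(), in reverse order */
	ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}
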
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 90ceaca0fa24..1e0f998e0c9f 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -26,7 +26,6 @@ struct link_train {
struct exynos_dp_device {
struct device *dev;
- struct resource *res;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -39,8 +38,10 @@ struct exynos_dp_device {
void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
void exynos_dp_stop_video(struct exynos_dp_device *dp);
void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
void exynos_dp_reset(struct exynos_dp_device *dp);
+void exynos_dp_swreset(struct exynos_dp_device *dp);
void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 6548afa0e3d2..6ce76d56c3a1 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -16,8 +16,6 @@
#include <video/exynos_dp.h>
-#include <plat/cpu.h>
-
#include "exynos_dp_core.h"
#include "exynos_dp_reg.h"
@@ -65,6 +63,28 @@ void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
}
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = TX_TERMINAL_CTRL_50_OHM;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
+
+ reg = SEL_24M | TX_DVDD_BIT_1_0625V;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
+
+ reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
+
+ reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
+ TX_CUR1_2X | TX_CUR_8_MA;
+ writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
+
+ reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
+ CH1_AMP_400_MV | CH0_AMP_400_MV;
+ writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
+}
+
void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
{
/* Set interrupt pin assertion polarity as high */
@@ -89,8 +109,6 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
{
u32 reg;
- writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
-
exynos_dp_stop_video(dp);
exynos_dp_enable_video_mute(dp, 0);
@@ -131,9 +149,15 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
+ exynos_dp_init_analog_param(dp);
exynos_dp_init_interrupt(dp);
}
+void exynos_dp_swreset(struct exynos_dp_device *dp)
+{
+ writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
+}
+
void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
{
u32 reg;
@@ -271,6 +295,7 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
{
u32 reg;
+ int timeout_loop = 0;
exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
@@ -282,9 +307,19 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
/* Power up PLL */
- if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED)
+ if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
exynos_dp_set_pll_power_down(dp, 0);
+ while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ timeout_loop++;
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "failed to get pll lock status\n");
+ return;
+ }
+ usleep_range(10, 20);
+ }
+ }
+
/* Enable Serdes FIFO function and Link symbol clock domain module */
reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h
index 42f608e2a43e..125b27cd57ae 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/video/exynos/exynos_dp_reg.h
@@ -24,6 +24,12 @@
#define EXYNOS_DP_LANE_MAP 0x35C
+#define EXYNOS_DP_ANALOG_CTL_1 0x370
+#define EXYNOS_DP_ANALOG_CTL_2 0x374
+#define EXYNOS_DP_ANALOG_CTL_3 0x378
+#define EXYNOS_DP_PLL_FILTER_CTL_1 0x37C
+#define EXYNOS_DP_TX_AMP_TUNING_CTL 0x380
+
#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390
#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4
@@ -166,6 +172,29 @@
#define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0)
#define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0)
+/* EXYNOS_DP_ANALOG_CTL_1 */
+#define TX_TERMINAL_CTRL_50_OHM (0x1 << 4)
+
+/* EXYNOS_DP_ANALOG_CTL_2 */
+#define SEL_24M (0x1 << 3)
+#define TX_DVDD_BIT_1_0625V (0x4 << 0)
+
+/* EXYNOS_DP_ANALOG_CTL_3 */
+#define DRIVE_DVDD_BIT_1_0625V (0x4 << 5)
+#define VCO_BIT_600_MICRO (0x5 << 0)
+
+/* EXYNOS_DP_PLL_FILTER_CTL_1 */
+#define PD_RING_OSC (0x1 << 6)
+#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4)
+#define TX_CUR1_2X (0x1 << 2)
+#define TX_CUR_8_MA (0x2 << 0)
+
+/* EXYNOS_DP_TX_AMP_TUNING_CTL */
+#define CH3_AMP_400_MV (0x0 << 24)
+#define CH2_AMP_400_MV (0x0 << 16)
+#define CH1_AMP_400_MV (0x0 << 8)
+#define CH0_AMP_400_MV (0x0 << 0)
+
/* EXYNOS_DP_AUX_HW_RETRY_CTL */
#define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8)
#define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3)
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 557091dc0e97..6c1f5c314a42 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -58,7 +58,7 @@ static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device
}
static struct regulator_bulk_data supplies[] = {
- { .supply = "vdd10", },
+ { .supply = "vdd11", },
{ .supply = "vdd18", },
};
@@ -102,6 +102,8 @@ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
/* set display timing. */
exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
+ exynos_mipi_dsi_init_interrupt(dsim);
+
/*
* data from Display controller(FIMD) is transferred in video mode
* but in case of command mode, all settings are updated to registers.
@@ -413,27 +415,30 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
goto err_platform_get_irq;
}
+ init_completion(&dsim_wr_comp);
+ init_completion(&dsim_rd_comp);
+ platform_set_drvdata(pdev, dsim);
+
ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
- IRQF_SHARED, pdev->name, dsim);
+ IRQF_SHARED, dev_name(&pdev->dev), dsim);
if (ret != 0) {
dev_err(&pdev->dev, "failed to request dsim irq\n");
ret = -EINVAL;
goto err_bind;
}
- init_completion(&dsim_wr_comp);
- init_completion(&dsim_rd_comp);
-
- /* enable interrupt */
+ /* enable interrupts */
exynos_mipi_dsi_init_interrupt(dsim);
/* initialize mipi-dsi client(lcd panel). */
if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
- /* in case that mipi got enabled at bootloader. */
- if (dsim_pd->enabled)
- goto out;
+ /* in case mipi-dsi has been enabled by bootloader */
+ if (dsim_pd->enabled) {
+ exynos_mipi_regulator_enable(dsim);
+ goto done;
+ }
/* lcd panel power on. */
if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
@@ -453,12 +458,11 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
dsim->suspended = false;
-out:
+done:
platform_set_drvdata(pdev, dsim);
- dev_dbg(&pdev->dev, "mipi-dsi driver(%s mode) has been probed.\n",
- (dsim_config->e_interface == DSIM_COMMAND) ?
- "CPU" : "RGB");
+ dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__,
+ dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
return 0;
@@ -515,10 +519,10 @@ static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
- pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int exynos_mipi_dsi_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -544,8 +548,9 @@ static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
return 0;
}
-static int exynos_mipi_dsi_resume(struct platform_device *pdev)
+static int exynos_mipi_dsi_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -577,19 +582,19 @@ static int exynos_mipi_dsi_resume(struct platform_device *pdev)
return 0;
}
-#else
-#define exynos_mipi_dsi_suspend NULL
-#define exynos_mipi_dsi_resume NULL
#endif
+static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
+};
+
static struct platform_driver exynos_mipi_dsi_driver = {
.probe = exynos_mipi_dsi_probe,
.remove = __devexit_p(exynos_mipi_dsi_remove),
- .suspend = exynos_mipi_dsi_suspend,
- .resume = exynos_mipi_dsi_resume,
.driver = {
.name = "exynos-mipi-dsim",
.owner = THIS_MODULE,
+ .pm = &exynos_mipi_dsi_pm_ops,
},
};
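
Dropping the legacy .suspend/.resume callbacks from struct platform_driver in favour of a dev_pm_ops table is the conversion shown above: the callbacks take a struct device, and SET_SYSTEM_SLEEP_PM_OPS wires them up only when CONFIG_PM_SLEEP is enabled. A minimal sketch of the pattern with hypothetical "foo" names (not the mipi-dsi code itself):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* re-initialize the hardware here */
	return 0;
}
#endif

/* expands to the system-sleep callbacks only when CONFIG_PM_SLEEP is set */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};
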
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 14909c1d3832..47b533a183be 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -76,33 +76,25 @@ static unsigned int dpll_table[15] = {
irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
{
- unsigned int intsrc = 0;
- unsigned int intmsk = 0;
- struct mipi_dsim_device *dsim = NULL;
-
- dsim = dev_id;
- if (!dsim) {
- dev_dbg(dsim->dev, KERN_ERR "%s:error: wrong parameter\n",
- __func__);
- return IRQ_HANDLED;
+ struct mipi_dsim_device *dsim = dev_id;
+ unsigned int intsrc, intmsk;
+
+ if (dsim == NULL) {
+ dev_err(dsim->dev, "%s: wrong parameter\n", __func__);
+ return IRQ_NONE;
}
intsrc = exynos_mipi_dsi_read_interrupt(dsim);
intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
+ intmsk = ~intmsk & intsrc;
- intmsk = ~(intmsk) & intsrc;
-
- switch (intmsk) {
- case INTMSK_RX_DONE:
+ if (intsrc & INTMSK_RX_DONE) {
complete(&dsim_rd_comp);
dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
- break;
- case INTMSK_FIFO_EMPTY:
+ }
+ if (intsrc & INTMSK_FIFO_EMPTY) {
complete(&dsim_wr_comp);
dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
- break;
- default:
- break;
}
exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
@@ -738,11 +730,11 @@ int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
if (dsim_config->auto_vertical_cnt == 0) {
exynos_mipi_dsi_set_main_disp_vporch(dsim,
dsim_config->cmd_allow,
- timing->upper_margin,
- timing->lower_margin);
+ timing->lower_margin,
+ timing->upper_margin);
exynos_mipi_dsi_set_main_disp_hporch(dsim,
- timing->left_margin,
- timing->right_margin);
+ timing->right_margin,
+ timing->left_margin);
exynos_mipi_dsi_set_main_disp_sync_area(dsim,
timing->vsync_len,
timing->hsync_len);
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/exynos/s6e8ax0.c
index 4aa9ac6218bf..05d080b63bc0 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/exynos/s6e8ax0.c
@@ -293,9 +293,20 @@ static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd)
0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
};
+ static const unsigned char data_to_send_panel_reverse[] = {
+ 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
+ 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
+ 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
+ 0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
+ };
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
+ if (lcd->dsim_dev->panel_reverse)
+ ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+ data_to_send_panel_reverse,
+ ARRAY_SIZE(data_to_send_panel_reverse));
+ else
+ ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+ data_to_send, ARRAY_SIZE(data_to_send));
}
static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index c27e153d8882..1ddeb11659d4 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -23,7 +23,7 @@
#include <linux/rmap.h>
#include <linux/pagemap.h>
-struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
+static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
void *screen_base = (void __force *) info->screen_base;
struct page *page;
@@ -107,6 +107,10 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
/* protect against the workqueue changing the page list */
mutex_lock(&fbdefio->lock);
+ /* first write in this cycle, notify the driver */
+ if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
+ fbdefio->first_io(info);
+
/*
* We want the page to remain locked from ->page_mkwrite until
* the PTE is marked dirty to avoid page_mkclean() being called
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index c6ce416ab587..0dff12a1daef 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1046,20 +1046,29 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
int
fb_blank(struct fb_info *info, int blank)
{
- int ret = -EINVAL;
+ struct fb_event event;
+ int ret = -EINVAL, early_ret;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
+ event.info = info;
+ event.data = &blank;
+
+ early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
+
if (info->fbops->fb_blank)
ret = info->fbops->fb_blank(blank, info);
- if (!ret) {
- struct fb_event event;
-
- event.info = info;
- event.data = &blank;
+ if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+ else {
+ /*
+ * if fb_blank fails then revert the effects of
+ * the early blank event.
+ */
+ if (!early_ret)
+ fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
}
return ret;
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 67afa9c2289d..a55e3669d135 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(framebuffer_alloc);
*/
void framebuffer_release(struct fb_info *info)
{
+ if (!info)
+ return;
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 6af3f16754f0..458c00664ade 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -834,7 +834,6 @@ static void update_lcdc(struct fb_info *info)
diu_ops.set_pixel_clock(var->pixclock);
out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */
- out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */
out_be32(&hw->plut, 0x01F5F666);
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index f135dbead07d..caad3689b4e6 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -131,7 +131,9 @@ struct imxfb_rgb {
struct imxfb_info {
struct platform_device *pdev;
void __iomem *regs;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
/*
* These are the addresses we mapped
@@ -340,7 +342,7 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel);
- lcd_clk = clk_get_rate(fbi->clk);
+ lcd_clk = clk_get_rate(fbi->clk_per);
tmp = var->pixclock * (unsigned long long)lcd_clk;
@@ -455,11 +457,17 @@ static int imxfb_bl_update_status(struct backlight_device *bl)
fbi->pwmr = (fbi->pwmr & ~0xFF) | brightness;
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- clk_enable(fbi->clk);
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+ clk_prepare_enable(fbi->clk_ipg);
+ clk_prepare_enable(fbi->clk_ahb);
+ clk_prepare_enable(fbi->clk_per);
+ }
writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- clk_disable(fbi->clk);
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+ clk_disable_unprepare(fbi->clk_per);
+ clk_disable_unprepare(fbi->clk_ahb);
+ clk_disable_unprepare(fbi->clk_ipg);
+ }
return 0;
}
@@ -522,7 +530,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
*/
writel(RMCR_LCDC_EN_MX1, fbi->regs + LCDC_RMCR);
- clk_enable(fbi->clk);
+ clk_prepare_enable(fbi->clk_ipg);
+ clk_prepare_enable(fbi->clk_ahb);
+ clk_prepare_enable(fbi->clk_per);
if (fbi->backlight_power)
fbi->backlight_power(1);
@@ -539,7 +549,9 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
if (fbi->lcd_power)
fbi->lcd_power(0);
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk_per);
+ clk_disable_unprepare(fbi->clk_ipg);
+ clk_disable_unprepare(fbi->clk_ahb);
writel(0, fbi->regs + LCDC_RMCR);
}
@@ -770,10 +782,21 @@ static int __init imxfb_probe(struct platform_device *pdev)
goto failed_req;
}
- fbi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fbi->clk)) {
- ret = PTR_ERR(fbi->clk);
- dev_err(&pdev->dev, "unable to get clock: %d\n", ret);
+ fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fbi->clk_ipg)) {
+ ret = PTR_ERR(fbi->clk_ipg);
+ goto failed_getclock;
+ }
+
+ fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(fbi->clk_ahb)) {
+ ret = PTR_ERR(fbi->clk_ahb);
+ goto failed_getclock;
+ }
+
+ fbi->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(fbi->clk_per)) {
+ ret = PTR_ERR(fbi->clk_per);
goto failed_getclock;
}
@@ -858,7 +881,6 @@ failed_platform_init:
failed_map:
iounmap(fbi->regs);
failed_ioremap:
- clk_put(fbi->clk);
failed_getclock:
release_mem_region(res->start, resource_size(res));
failed_req:
@@ -895,8 +917,6 @@ static int __devexit imxfb_remove(struct platform_device *pdev)
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
- clk_disable(fbi->clk);
- clk_put(fbi->clk);
platform_set_drvdata(pdev, NULL);
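
The imxfb changes above are the standard conversion to the common clock framework: the single clk is split into named "ipg", "ahb" and "per" clocks looked up with devm_clk_get(), and every clk_enable()/clk_disable() pair becomes clk_prepare_enable()/clk_disable_unprepare(). A small sketch of that enable sequence, using hypothetical "foo" names rather than the imxfb code:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_enable_clocks(struct device *dev)
{
	struct clk *clk_ipg, *clk_per;
	int ret;

	/* devm_clk_get(): the clk references are dropped automatically on unbind */
	clk_ipg = devm_clk_get(dev, "ipg");
	if (IS_ERR(clk_ipg))
		return PTR_ERR(clk_ipg);

	clk_per = devm_clk_get(dev, "per");
	if (IS_ERR(clk_per))
		return PTR_ERR(clk_per);

	/* prepare (may sleep) and enable in one call, as the common clock framework expects */
	ret = clk_prepare_enable(clk_ipg);
	if (ret)
		return ret;

	ret = clk_prepare_enable(clk_per);
	if (ret) {
		clk_disable_unprepare(clk_ipg);
		return ret;
	}

	return 0;
}
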
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 02fd2263610c..bdcbfbae2777 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -680,6 +680,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
+ dinfo->fb.size);
if (!dinfo->aperture.virtual) {
ERR_MSG("Cannot remap FB region.\n");
+ agp_backend_release(bridge);
cleanup(dinfo);
return -ENODEV;
}
@@ -689,6 +690,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
INTEL_REG_SIZE);
if (!dinfo->mmio_base) {
ERR_MSG("Cannot remap MMIO region.\n");
+ agp_backend_release(bridge);
cleanup(dinfo);
return -ENODEV;
}
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 31b8f67477b7..217678e0b983 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1243,6 +1243,7 @@ static int maven_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING))
goto ERROR0;
if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c
index 273769bb8deb..c87e17afb3e2 100644
--- a/drivers/video/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/mb862xx/mb862xx-i2c.c
@@ -68,7 +68,7 @@ static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last)
return 1;
}
-void mb862xx_i2c_stop(struct i2c_adapter *adap)
+static void mb862xx_i2c_stop(struct i2c_adapter *adap)
{
struct mb862xxfb_par *par = adap->algo_data;
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 11a7a333701d..00ce1f34b496 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -579,7 +579,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
-irqreturn_t mb862xx_intr(int irq, void *dev_id)
+static irqreturn_t mb862xx_intr(int irq, void *dev_id)
{
struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
unsigned long reg_ist, mask;
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 55bf6196b7a0..85e4f44bfa61 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -950,7 +950,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
res_size(mfbi->fb_req));
- if (!mfbi->reg_virt_addr) {
+ if (!mfbi->fb_virt_addr) {
dev_err(&dev->dev, "failed to ioremap frame buffer\n");
ret = -EINVAL;
goto err4;
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
static struct platform_driver mbxfb_driver = {
.probe = mbxfb_probe,
- .remove = mbxfb_remove,
+ .remove = __devexit_p(mbxfb_remove),
.suspend = mbxfb_suspend,
.resume = mbxfb_resume,
.driver = {
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 6c6bc578d0fc..abbe691047bd 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -889,6 +889,18 @@ static int __devexit mxsfb_remove(struct platform_device *pdev)
return 0;
}
+static void mxsfb_shutdown(struct platform_device *pdev)
+{
+ struct fb_info *fb_info = platform_get_drvdata(pdev);
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+ /*
+ * Force stop the LCD controller as keeping it running during reboot
+ * might interfere with the BootROM's boot mode pads sampling.
+ */
+ writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
+}
+
static struct platform_device_id mxsfb_devtype[] = {
{
.name = "imx23-fb",
@@ -905,6 +917,7 @@ MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
static struct platform_driver mxsfb_driver = {
.probe = mxsfb_probe,
.remove = __devexit_p(mxsfb_remove),
+ .shutdown = mxsfb_shutdown,
.id_table = mxsfb_devtype,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 1e7536d9a8fc..b48f95f0dfe2 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -39,14 +39,6 @@ config FB_OMAP_LCD_MIPID
the Mobile Industry Processor Interface DBI-C/DCS
specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
-config FB_OMAP_BOOTLOADER_INIT
- bool "Check bootloader initialization"
- depends on FB_OMAP
- help
- Say Y here if you want to enable checking if the bootloader has
- already initialized the display controller. In this case the
- driver will skip the initialization.
-
config FB_OMAP_CONSISTENT_DMA_SIZE
int "Consistent DMA memory size (MB)"
depends on FB_OMAP
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index d26f37ac69d8..ad741c3d1ae1 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -532,6 +532,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
/*------- Backlight control --------*/
+ memset(&props, 0, sizeof(props));
props.fb_blank = FB_BLANK_UNBLANK;
props.power = FB_BLANK_UNBLANK;
props.type = BACKLIGHT_RAW;
@@ -738,12 +739,6 @@ static void acx_panel_set_timings(struct omap_dss_device *dssdev,
}
}
-static void acx_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static int acx_panel_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -761,7 +756,6 @@ static struct omap_dss_driver acx_panel_driver = {
.resume = acx_panel_resume,
.set_timings = acx_panel_set_timings,
- .get_timings = acx_panel_get_timings,
.check_timings = acx_panel_check_timings,
.get_recommended_bpp = acx_get_recommended_bpp,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 30fe4dfeb227..e42f9dc22123 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -386,6 +386,106 @@ static struct panel_config generic_dpi_panels[] = {
.name = "innolux_at080tn52",
},
+
+ /* Mitsubishi AA084SB01 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 600,
+ .pixel_clock = 40000,
+
+ .hsw = 1,
+ .hfp = 254,
+ .hbp = 1,
+
+ .vsw = 1,
+ .vfp = 26,
+ .vbp = 1,
+ },
+ .config = OMAP_DSS_LCD_TFT,
+ .name = "mitsubishi_aa084sb01",
+ },
+ /* EDT ET0500G0DH6 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+ .pixel_clock = 33260,
+
+ .hsw = 128,
+ .hfp = 216,
+ .hbp = 40,
+
+ .vsw = 2,
+ .vfp = 35,
+ .vbp = 10,
+ },
+ .config = OMAP_DSS_LCD_TFT,
+ .name = "edt_et0500g0dh6",
+ },
+
+ /* Prime-View PD050VL1 */
+ {
+ {
+ .x_res = 640,
+ .y_res = 480,
+
+ .pixel_clock = 25000,
+
+ .hsw = 96,
+ .hfp = 18,
+ .hbp = 46,
+
+ .vsw = 2,
+ .vfp = 10,
+ .vbp = 33,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+ .name = "primeview_pd050vl1",
+ },
+
+ /* Prime-View PM070WL4 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 32000,
+
+ .hsw = 128,
+ .hfp = 42,
+ .hbp = 86,
+
+ .vsw = 2,
+ .vfp = 10,
+ .vbp = 33,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+ .name = "primeview_pm070wl4",
+ },
+
+ /* Prime-View PD104SLF */
+ {
+ {
+ .x_res = 800,
+ .y_res = 600,
+
+ .pixel_clock = 40000,
+
+ .hsw = 128,
+ .hfp = 42,
+ .hbp = 86,
+
+ .vsw = 4,
+ .vfp = 1,
+ .vbp = 23,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+ .name = "primeview_pd104slf",
+ },
};
struct panel_drv_data {
@@ -549,12 +649,6 @@ static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
dpi_set_timings(dssdev, timings);
}
-static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -571,7 +665,6 @@ static struct omap_dss_driver dpi_driver = {
.resume = generic_dpi_panel_resume,
.set_timings = generic_dpi_panel_set_timings,
- .get_timings = generic_dpi_panel_get_timings,
.check_timings = generic_dpi_panel_check_timings,
.driver = {
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index dc9408dc93d1..4a34cdc1371b 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -610,12 +610,6 @@ static int n8x0_panel_resume(struct omap_dss_device *dssdev)
return 0;
}
-static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -678,8 +672,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
.get_resolution = n8x0_panel_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
- .get_timings = n8x0_panel_get_timings,
-
.driver = {
.name = "n8x0_panel",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index b2dd88b48420..901576eb5a84 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -30,7 +30,6 @@
#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
#include <linux/mutex.h>
#include <video/omapdss.h>
@@ -55,73 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
static int taal_panel_reset(struct omap_dss_device *dssdev);
-struct panel_regulator {
- struct regulator *regulator;
- const char *name;
- int min_uV;
- int max_uV;
-};
-
-static void free_regulators(struct panel_regulator *regulators, int n)
-{
- int i;
-
- for (i = 0; i < n; i++) {
- /* disable/put in reverse order */
- regulator_disable(regulators[n - i - 1].regulator);
- regulator_put(regulators[n - i - 1].regulator);
- }
-}
-
-static int init_regulators(struct omap_dss_device *dssdev,
- struct panel_regulator *regulators, int n)
-{
- int r, i, v;
-
- for (i = 0; i < n; i++) {
- struct regulator *reg;
-
- reg = regulator_get(&dssdev->dev, regulators[i].name);
- if (IS_ERR(reg)) {
- dev_err(&dssdev->dev, "failed to get regulator %s\n",
- regulators[i].name);
- r = PTR_ERR(reg);
- goto err;
- }
-
- /* FIXME: better handling of fixed vs. variable regulators */
- v = regulator_get_voltage(reg);
- if (v < regulators[i].min_uV || v > regulators[i].max_uV) {
- r = regulator_set_voltage(reg, regulators[i].min_uV,
- regulators[i].max_uV);
- if (r) {
- dev_err(&dssdev->dev,
- "failed to set regulator %s voltage\n",
- regulators[i].name);
- regulator_put(reg);
- goto err;
- }
- }
-
- r = regulator_enable(reg);
- if (r) {
- dev_err(&dssdev->dev, "failed to enable regulator %s\n",
- regulators[i].name);
- regulator_put(reg);
- goto err;
- }
-
- regulators[i].regulator = reg;
- }
-
- return 0;
-
-err:
- free_regulators(regulators, i);
-
- return r;
-}
-
/**
* struct panel_config - panel configuration
* @name: panel name
@@ -150,8 +82,6 @@ struct panel_config {
unsigned int low;
} reset_sequence;
- struct panel_regulator *regulators;
- int num_regulators;
};
enum {
@@ -577,12 +507,6 @@ static const struct backlight_ops taal_bl_ops = {
.update_status = taal_bl_update_status,
};
-static void taal_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static void taal_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -602,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
{
struct omap_dss_device *dssdev = to_dss_device(dev);
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- u8 errors;
+ u8 errors = 0;
int r;
mutex_lock(&td->lock);
@@ -977,11 +901,6 @@ static int taal_probe(struct omap_dss_device *dssdev)
atomic_set(&td->do_update, 0);
- r = init_regulators(dssdev, panel_config->regulators,
- panel_config->num_regulators);
- if (r)
- goto err_reg;
-
td->workqueue = create_singlethread_workqueue("taal_esd");
if (td->workqueue == NULL) {
dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -1087,8 +1006,6 @@ err_bl:
err_rst_gpio:
destroy_workqueue(td->workqueue);
err_wq:
- free_regulators(panel_config->regulators, panel_config->num_regulators);
-err_reg:
kfree(td);
err:
return r;
@@ -1125,9 +1042,6 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
/* reset, to be sure that the panel is in a valid state */
taal_hw_reset(dssdev);
- free_regulators(td->panel_config->regulators,
- td->panel_config->num_regulators);
-
if (gpio_is_valid(panel_data->reset_gpio))
gpio_free(panel_data->reset_gpio);
@@ -1909,8 +1823,6 @@ static struct omap_dss_driver taal_driver = {
.run_test = taal_run_test,
.memory_read = taal_memory_read,
- .get_timings = taal_get_timings,
-
.driver = {
.name = "taal",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 52637fa8fda8..bff306e041ca 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -47,13 +47,9 @@ struct panel_drv_data {
struct mutex lock;
int pd_gpio;
-};
-static inline struct tfp410_platform_data
-*get_pdata(const struct omap_dss_device *dssdev)
-{
- return dssdev->data;
-}
+ struct i2c_adapter *i2c_adapter;
+};
static int tfp410_power_on(struct omap_dss_device *dssdev)
{
@@ -68,7 +64,7 @@ static int tfp410_power_on(struct omap_dss_device *dssdev)
goto err0;
if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value(ddata->pd_gpio, 1);
+ gpio_set_value_cansleep(ddata->pd_gpio, 1);
return 0;
err0:
@@ -83,18 +79,18 @@ static void tfp410_power_off(struct omap_dss_device *dssdev)
return;
if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value(ddata->pd_gpio, 0);
+ gpio_set_value_cansleep(ddata->pd_gpio, 0);
omapdss_dpi_display_disable(dssdev);
}
static int tfp410_probe(struct omap_dss_device *dssdev)
{
- struct tfp410_platform_data *pdata = get_pdata(dssdev);
struct panel_drv_data *ddata;
int r;
+ int i2c_bus_num;
- ddata = kzalloc(sizeof(*ddata), GFP_KERNEL);
+ ddata = devm_kzalloc(&dssdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -104,10 +100,15 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
ddata->dssdev = dssdev;
mutex_init(&ddata->lock);
- if (pdata)
+ if (dssdev->data) {
+ struct tfp410_platform_data *pdata = dssdev->data;
+
ddata->pd_gpio = pdata->power_down_gpio;
- else
+ i2c_bus_num = pdata->i2c_bus_num;
+ } else {
ddata->pd_gpio = -1;
+ i2c_bus_num = -1;
+ }
if (gpio_is_valid(ddata->pd_gpio)) {
r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW,
@@ -115,13 +116,31 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
if (r) {
dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
ddata->pd_gpio);
- ddata->pd_gpio = -1;
+ return r;
}
}
+ if (i2c_bus_num != -1) {
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_get_adapter(i2c_bus_num);
+ if (!adapter) {
+ dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
+ i2c_bus_num);
+ r = -EINVAL;
+ goto err_i2c;
+ }
+
+ ddata->i2c_adapter = adapter;
+ }
+
dev_set_drvdata(&dssdev->dev, ddata);
return 0;
+err_i2c:
+ if (gpio_is_valid(ddata->pd_gpio))
+ gpio_free(ddata->pd_gpio);
+ return r;
}
static void __exit tfp410_remove(struct omap_dss_device *dssdev)
@@ -130,14 +149,15 @@ static void __exit tfp410_remove(struct omap_dss_device *dssdev)
mutex_lock(&ddata->lock);
+ if (ddata->i2c_adapter)
+ i2c_put_adapter(ddata->i2c_adapter);
+
if (gpio_is_valid(ddata->pd_gpio))
gpio_free(ddata->pd_gpio);
dev_set_drvdata(&dssdev->dev, NULL);
mutex_unlock(&ddata->lock);
-
- kfree(ddata);
}
static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -269,27 +289,17 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
u8 *edid, int len)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
- struct tfp410_platform_data *pdata = get_pdata(dssdev);
- struct i2c_adapter *adapter;
int r, l, bytes_read;
mutex_lock(&ddata->lock);
- if (pdata->i2c_bus_num == 0) {
+ if (!ddata->i2c_adapter) {
r = -ENODEV;
goto err;
}
- adapter = i2c_get_adapter(pdata->i2c_bus_num);
- if (!adapter) {
- dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
- pdata->i2c_bus_num);
- r = -EINVAL;
- goto err;
- }
-
l = min(EDID_LENGTH, len);
- r = tfp410_ddc_read(adapter, edid, l, 0);
+ r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0);
if (r)
goto err;
@@ -299,7 +309,7 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
if (len > EDID_LENGTH && edid[0x7e] > 0) {
l = min(EDID_LENGTH, len - EDID_LENGTH);
- r = tfp410_ddc_read(adapter, edid + EDID_LENGTH,
+ r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
l, EDID_LENGTH);
if (r)
goto err;
@@ -319,21 +329,15 @@ err:
static bool tfp410_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
- struct tfp410_platform_data *pdata = get_pdata(dssdev);
- struct i2c_adapter *adapter;
unsigned char out;
int r;
mutex_lock(&ddata->lock);
- if (pdata->i2c_bus_num == 0)
- goto out;
-
- adapter = i2c_get_adapter(pdata->i2c_bus_num);
- if (!adapter)
+ if (!ddata->i2c_adapter)
goto out;
- r = tfp410_ddc_read(adapter, &out, 1, 0);
+ r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0);
mutex_unlock(&ddata->lock);
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 32f3fcd7f0f0..4b6448b3c31f 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -272,13 +272,16 @@ static const struct omap_video_timings tpo_td043_timings = {
static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
{
int nreset_gpio = tpo_td043->nreset_gpio;
+ int r;
if (tpo_td043->powered_on)
return 0;
- regulator_enable(tpo_td043->vcc_reg);
+ r = regulator_enable(tpo_td043->vcc_reg);
+ if (r != 0)
+ return r;
- /* wait for regulator to stabilize */
+ /* wait for panel to stabilize */
msleep(160);
if (gpio_is_valid(nreset_gpio))
@@ -470,6 +473,18 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev)
gpio_free(nreset_gpio);
}
+static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ dpi_set_timings(dssdev, timings);
+}
+
+static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ return dpi_check_timings(dssdev, timings);
+}
+
static struct omap_dss_driver tpo_td043_driver = {
.probe = tpo_td043_probe,
.remove = tpo_td043_remove,
@@ -481,6 +496,9 @@ static struct omap_dss_driver tpo_td043_driver = {
.set_mirror = tpo_td043_set_hmirror,
.get_mirror = tpo_td043_get_hmirror,
+ .set_timings = tpo_td043_set_timings,
+ .check_timings = tpo_td043_check_timings,
+
.driver = {
.name = "tpo_td043mtea1_panel",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 7be7c06a249e..43324e5ed25f 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -68,6 +68,10 @@ config OMAP4_DSS_HDMI
HDMI Interface. This adds the High Definition Multimedia Interface.
See http://www.hdmi.org/ for HDMI specification.
+config OMAP4_DSS_HDMI_AUDIO
+ bool
+ depends on OMAP4_DSS_HDMI
+
config OMAP2_DSS_SDI
bool "SDI support"
depends on ARCH_OMAP3
@@ -90,15 +94,6 @@ config OMAP2_DSS_DSI
See http://www.mipi.org/ for DSI specifications.
-config OMAP2_DSS_FAKE_VSYNC
- bool "Fake VSYNC irq from manual update displays"
- default n
- help
- If this is selected, DSI will generate a fake DISPC VSYNC interrupt
- when DSI has sent a frame. This is only needed with DSI or RFBI
- displays using manual mode, and you want VSYNC to, for example,
- time animation.
-
config OMAP2_DSS_MIN_FCK_PER_PCK
int "Minimum FCK/PCK ratio (for scaling)"
range 0 32
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index b10b3bc1931e..ab22cc224f3e 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -99,6 +99,11 @@ struct mgr_priv_data {
/* If true, a display is enabled using this manager */
bool enabled;
+
+ bool extra_info_dirty;
+ bool shadow_extra_info_dirty;
+
+ struct omap_video_timings timings;
};
static struct {
@@ -176,7 +181,7 @@ static bool mgr_manual_update(struct omap_overlay_manager *mgr)
}
static int dss_check_settings_low(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev, bool applying)
+ bool applying)
{
struct omap_overlay_info *oi;
struct omap_overlay_manager_info *mi;
@@ -187,6 +192,9 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
mp = get_mgr_priv(mgr);
+ if (!mp->enabled)
+ return 0;
+
if (applying && mp->user_info_dirty)
mi = &mp->user_info;
else
@@ -206,26 +214,24 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
ois[ovl->id] = oi;
}
- return dss_mgr_check(mgr, dssdev, mi, ois);
+ return dss_mgr_check(mgr, mi, &mp->timings, ois);
}
/*
* check manager and overlay settings using overlay_info from data->info
*/
-static int dss_check_settings(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev)
+static int dss_check_settings(struct omap_overlay_manager *mgr)
{
- return dss_check_settings_low(mgr, dssdev, false);
+ return dss_check_settings_low(mgr, false);
}
/*
* check manager and overlay settings using overlay_info from ovl->info if
* dirty and from data->info otherwise
*/
-static int dss_check_settings_apply(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev)
+static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
{
- return dss_check_settings_low(mgr, dssdev, true);
+ return dss_check_settings_low(mgr, true);
}
static bool need_isr(void)
@@ -261,6 +267,20 @@ static bool need_isr(void)
if (mp->shadow_info_dirty)
return true;
+ /*
+ * NOTE: we don't check extra_info flags for disabled
+ * managers; once a manager is enabled, the extra_info
+ * related manager changes will be taken in by the HW.
+ */
+
+ /* to write new values to registers */
+ if (mp->extra_info_dirty)
+ return true;
+
+ /* to set GO bit */
+ if (mp->shadow_extra_info_dirty)
+ return true;
+
list_for_each_entry(ovl, &mgr->overlays, list) {
struct ovl_priv_data *op;
@@ -305,7 +325,7 @@ static bool need_go(struct omap_overlay_manager *mgr)
mp = get_mgr_priv(mgr);
- if (mp->shadow_info_dirty)
+ if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
return true;
list_for_each_entry(ovl, &mgr->overlays, list) {
@@ -320,20 +340,16 @@ static bool need_go(struct omap_overlay_manager *mgr)
/* returns true if an extra_info field is currently being updated */
static bool extra_info_update_ongoing(void)
{
- const int num_ovls = omap_dss_get_num_overlays();
- struct ovl_priv_data *op;
- struct omap_overlay *ovl;
- struct mgr_priv_data *mp;
+ const int num_mgrs = dss_feat_get_num_mgrs();
int i;
- for (i = 0; i < num_ovls; ++i) {
- ovl = omap_dss_get_overlay(i);
- op = get_ovl_priv(ovl);
-
- if (!ovl->manager)
- continue;
+ for (i = 0; i < num_mgrs; ++i) {
+ struct omap_overlay_manager *mgr;
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
- mp = get_mgr_priv(ovl->manager);
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
if (!mp->enabled)
continue;
@@ -341,8 +357,15 @@ static bool extra_info_update_ongoing(void)
if (!mp->updating)
continue;
- if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+ if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
return true;
+
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+
+ if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+ return true;
+ }
}
return false;
@@ -525,11 +548,13 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
oi = &op->info;
+ mp = get_mgr_priv(ovl->manager);
+
replication = dss_use_replication(ovl->manager->device, oi->color_mode);
ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
- r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
+ r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings);
if (r) {
/*
* We can't do much here, as this function can be called from
@@ -543,8 +568,6 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
return;
}
- mp = get_mgr_priv(ovl->manager);
-
op->info_dirty = false;
if (mp->updating)
op->shadow_info_dirty = true;
@@ -601,6 +624,22 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
}
}
+static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ DSSDBGF("%d", mgr->id);
+
+ if (!mp->extra_info_dirty)
+ return;
+
+ dispc_mgr_set_timings(mgr->id, &mp->timings);
+
+ mp->extra_info_dirty = false;
+ if (mp->updating)
+ mp->shadow_extra_info_dirty = true;
+}
+
static void dss_write_regs_common(void)
{
const int num_mgrs = omap_dss_get_num_overlay_managers();
@@ -646,7 +685,7 @@ static void dss_write_regs(void)
if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
continue;
- r = dss_check_settings(mgr, mgr->device);
+ r = dss_check_settings(mgr);
if (r) {
DSSERR("cannot write registers for manager %s: "
"illegal configuration\n", mgr->name);
@@ -654,6 +693,7 @@ static void dss_write_regs(void)
}
dss_mgr_write_regs(mgr);
+ dss_mgr_write_regs_extra(mgr);
}
}
@@ -693,6 +733,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
mp = get_mgr_priv(mgr);
mp->shadow_info_dirty = false;
+ mp->shadow_extra_info_dirty = false;
list_for_each_entry(ovl, &mgr->overlays, list) {
op = get_ovl_priv(ovl);
@@ -711,7 +752,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
WARN_ON(mp->updating);
- r = dss_check_settings(mgr, mgr->device);
+ r = dss_check_settings(mgr);
if (r) {
DSSERR("cannot start manual update: illegal configuration\n");
spin_unlock_irqrestore(&data_lock, flags);
@@ -719,6 +760,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
}
dss_mgr_write_regs(mgr);
+ dss_mgr_write_regs_extra(mgr);
dss_write_regs_common();
@@ -857,7 +899,7 @@ int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
spin_lock_irqsave(&data_lock, flags);
- r = dss_check_settings_apply(mgr, mgr->device);
+ r = dss_check_settings_apply(mgr);
if (r) {
spin_unlock_irqrestore(&data_lock, flags);
DSSERR("failed to apply settings: illegal configuration.\n");
@@ -918,16 +960,13 @@ static void dss_ovl_setup_fifo(struct omap_overlay *ovl,
bool use_fifo_merge)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
- struct omap_dss_device *dssdev;
u32 fifo_low, fifo_high;
if (!op->enabled && !op->enabling)
return;
- dssdev = ovl->manager->device;
-
dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
- use_fifo_merge);
+ use_fifo_merge, ovl_manual_update(ovl));
dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
}
@@ -1050,7 +1089,7 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
mp->enabled = true;
- r = dss_check_settings(mgr, mgr->device);
+ r = dss_check_settings(mgr);
if (r) {
DSSERR("failed to enable manager %d: check_settings failed\n",
mgr->id);
@@ -1225,6 +1264,35 @@ err:
return r;
}
+static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ mp->timings = *timings;
+ mp->extra_info_dirty = true;
+}
+
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings)
+{
+ unsigned long flags;
+
+ mutex_lock(&apply_lock);
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ dss_apply_mgr_timings(mgr, timings);
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ wait_pending_extra_info_updates();
+
+ mutex_unlock(&apply_lock);
+}
int dss_ovl_set_info(struct omap_overlay *ovl,
struct omap_overlay_info *info)
@@ -1393,7 +1461,7 @@ int dss_ovl_enable(struct omap_overlay *ovl)
op->enabling = true;
- r = dss_check_settings(ovl->manager, ovl->manager->device);
+ r = dss_check_settings(ovl->manager);
if (r) {
DSSERR("failed to enable overlay %d: check_settings failed\n",
ovl->id);
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index e8a120771ac6..58bd9c27369d 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
#include <video/omapdss.h>
@@ -43,6 +44,8 @@ static struct {
struct regulator *vdds_dsi_reg;
struct regulator *vdds_sdi_reg;
+
+ const char *default_display_name;
} core;
static char *def_disp_name;
@@ -54,9 +57,6 @@ bool dss_debug;
module_param_named(debug, dss_debug, bool, 0644);
#endif
-static int omap_dss_register_device(struct omap_dss_device *);
-static void omap_dss_unregister_device(struct omap_dss_device *);
-
/* REGULATORS */
struct regulator *dss_get_vdds_dsi(void)
@@ -87,6 +87,51 @@ struct regulator *dss_get_vdds_sdi(void)
return reg;
}
+int dss_get_ctx_loss_count(struct device *dev)
+{
+ struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+ int cnt;
+
+ if (!board_data->get_context_loss_count)
+ return -ENOENT;
+
+ cnt = board_data->get_context_loss_count(dev);
+
+ WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+ return cnt;
+}
+
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
+{
+ struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+ if (!board_data->dsi_enable_pads)
+ return -ENOENT;
+
+ return board_data->dsi_enable_pads(dsi_id, lane_mask);
+}
+
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
+{
+ struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+ if (!board_data->dsi_enable_pads)
+ return;
+
+ return board_data->dsi_disable_pads(dsi_id, lane_mask);
+}
+
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+
+ if (pdata->set_min_bus_tput)
+ return pdata->set_min_bus_tput(dev, tput);
+ else
+ return 0;
+}
+
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
static int dss_debug_show(struct seq_file *s, void *unused)
{
@@ -121,34 +166,6 @@ static int dss_initialize_debugfs(void)
debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
&dss_debug_dump_clocks, &dss_debug_fops);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
- &dispc_dump_irqs, &dss_debug_fops);
-#endif
-
-#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
- dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
-#endif
-
- debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
- &dss_dump_regs, &dss_debug_fops);
- debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
- &dispc_dump_regs, &dss_debug_fops);
-#ifdef CONFIG_OMAP2_DSS_RFBI
- debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
- &rfbi_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
- &venc_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
- debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
- &hdmi_dump_regs, &dss_debug_fops);
-#endif
return 0;
}
@@ -157,6 +174,19 @@ static void dss_uninitialize_debugfs(void)
if (dss_debugfs_dir)
debugfs_remove_recursive(dss_debugfs_dir);
}
+
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+ struct dentry *d;
+
+ d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
+ write, &dss_debug_fops);
+
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+
+ return 0;
+}
#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
static inline int dss_initialize_debugfs(void)
{
@@ -165,14 +195,39 @@ static inline int dss_initialize_debugfs(void)
static inline void dss_uninitialize_debugfs(void)
{
}
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+ return 0;
+}
#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
/* PLATFORM DEVICE */
-static int omap_dss_probe(struct platform_device *pdev)
+static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+{
+ DSSDBG("pm notif %lu\n", v);
+
+ switch (v) {
+ case PM_SUSPEND_PREPARE:
+ DSSDBG("suspending displays\n");
+ return dss_suspend_all_devices();
+
+ case PM_POST_SUSPEND:
+ DSSDBG("resuming displays\n");
+ return dss_resume_all_devices();
+
+ default:
+ return 0;
+ }
+}
+
+static struct notifier_block omap_dss_pm_notif_block = {
+ .notifier_call = omap_dss_pm_notif,
+};
+
+static int __init omap_dss_probe(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int r;
- int i;
core.pdev = pdev;
@@ -187,28 +242,15 @@ static int omap_dss_probe(struct platform_device *pdev)
if (r)
goto err_debugfs;
- for (i = 0; i < pdata->num_devices; ++i) {
- struct omap_dss_device *dssdev = pdata->devices[i];
-
- r = omap_dss_register_device(dssdev);
- if (r) {
- DSSERR("device %d %s register failed %d\n", i,
- dssdev->name ?: "unnamed", r);
+ if (def_disp_name)
+ core.default_display_name = def_disp_name;
+ else if (pdata->default_device)
+ core.default_display_name = pdata->default_device->name;
- while (--i >= 0)
- omap_dss_unregister_device(pdata->devices[i]);
-
- goto err_register;
- }
-
- if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
- pdata->default_device = dssdev;
- }
+ register_pm_notifier(&omap_dss_pm_notif_block);
return 0;
-err_register:
- dss_uninitialize_debugfs();
err_debugfs:
return r;
@@ -216,17 +258,13 @@ err_debugfs:
static int omap_dss_remove(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- int i;
+ unregister_pm_notifier(&omap_dss_pm_notif_block);
dss_uninitialize_debugfs();
dss_uninit_overlays(pdev);
dss_uninit_overlay_managers(pdev);
- for (i = 0; i < pdata->num_devices; ++i)
- omap_dss_unregister_device(pdata->devices[i]);
-
return 0;
}
@@ -236,26 +274,9 @@ static void omap_dss_shutdown(struct platform_device *pdev)
dss_disable_all_devices();
}
-static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
-{
- DSSDBG("suspend %d\n", state.event);
-
- return dss_suspend_all_devices();
-}
-
-static int omap_dss_resume(struct platform_device *pdev)
-{
- DSSDBG("resume\n");
-
- return dss_resume_all_devices();
-}
-
static struct platform_driver omap_dss_driver = {
- .probe = omap_dss_probe,
.remove = omap_dss_remove,
.shutdown = omap_dss_shutdown,
- .suspend = omap_dss_suspend,
- .resume = omap_dss_resume,
.driver = {
.name = "omapdss",
.owner = THIS_MODULE,
@@ -326,7 +347,6 @@ static int dss_driver_probe(struct device *dev)
int r;
struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
bool force;
DSSDBG("driver_probe: dev %s/%s, drv %s\n",
@@ -335,7 +355,8 @@ static int dss_driver_probe(struct device *dev)
dss_init_device(core.pdev, dssdev);
- force = pdata->default_device == dssdev;
+ force = core.default_display_name &&
+ strcmp(core.default_display_name, dssdev->name) == 0;
dss_recheck_connections(dssdev, force);
r = dssdrv->probe(dssdev);
@@ -381,6 +402,8 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
if (dssdriver->get_recommended_bpp == NULL)
dssdriver->get_recommended_bpp =
omapdss_default_get_recommended_bpp;
+ if (dssdriver->get_timings == NULL)
+ dssdriver->get_timings = omapdss_default_get_timings;
return driver_register(&dssdriver->driver);
}
@@ -427,27 +450,38 @@ static void omap_dss_dev_release(struct device *dev)
reset_device(dev, 0);
}
-static int omap_dss_register_device(struct omap_dss_device *dssdev)
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+ struct device *parent, int disp_num)
{
- static int dev_num;
-
WARN_ON(!dssdev->driver_name);
reset_device(&dssdev->dev, 1);
dssdev->dev.bus = &dss_bus_type;
- dssdev->dev.parent = &dss_bus;
+ dssdev->dev.parent = parent;
dssdev->dev.release = omap_dss_dev_release;
- dev_set_name(&dssdev->dev, "display%d", dev_num++);
+ dev_set_name(&dssdev->dev, "display%d", disp_num);
return device_register(&dssdev->dev);
}
-static void omap_dss_unregister_device(struct omap_dss_device *dssdev)
+void omap_dss_unregister_device(struct omap_dss_device *dssdev)
{
device_unregister(&dssdev->dev);
}
+static int dss_unregister_dss_dev(struct device *dev, void *data)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ omap_dss_unregister_device(dssdev);
+ return 0;
+}
+
+void omap_dss_unregister_child_devices(struct device *parent)
+{
+ device_for_each_child(parent, NULL, dss_unregister_dss_dev);
+}
+
/* BUS */
-static int omap_dss_bus_register(void)
+static int __init omap_dss_bus_register(void)
{
int r;
@@ -469,12 +503,56 @@ static int omap_dss_bus_register(void)
}
/* INIT */
+static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+ dpi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+ sdi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ rfbi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ venc_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ hdmi_init_platform_driver,
+#endif
+};
+
+static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+ dpi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+ sdi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ rfbi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ venc_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ hdmi_uninit_platform_driver,
+#endif
+};
+
+static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)];
static int __init omap_dss_register_drivers(void)
{
int r;
+ int i;
- r = platform_driver_register(&omap_dss_driver);
+ r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
if (r)
return r;
@@ -490,40 +568,18 @@ static int __init omap_dss_register_drivers(void)
goto err_dispc;
}
- r = rfbi_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize rfbi platform driver\n");
- goto err_rfbi;
- }
-
- r = venc_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize venc platform driver\n");
- goto err_venc;
- }
-
- r = dsi_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize DSI platform driver\n");
- goto err_dsi;
- }
-
- r = hdmi_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize hdmi\n");
- goto err_hdmi;
+ /*
+	 * It's ok if registering an output driver fails. This happens, for
+	 * example, when there is no output device (e.g. SDI on OMAP4).
+ */
+ for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
+ r = dss_output_drv_reg_funcs[i]();
+ if (r == 0)
+ dss_output_drv_loaded[i] = true;
}
return 0;
-err_hdmi:
- dsi_uninit_platform_driver();
-err_dsi:
- venc_uninit_platform_driver();
-err_venc:
- rfbi_uninit_platform_driver();
-err_rfbi:
- dispc_uninit_platform_driver();
err_dispc:
dss_uninit_platform_driver();
err_dss:
@@ -534,10 +590,13 @@ err_dss:
static void __exit omap_dss_unregister_drivers(void)
{
- hdmi_uninit_platform_driver();
- dsi_uninit_platform_driver();
- venc_uninit_platform_driver();
- rfbi_uninit_platform_driver();
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) {
+ if (dss_output_drv_loaded[i])
+ dss_output_drv_unreg_funcs[i]();
+ }
+
dispc_uninit_platform_driver();
dss_uninit_platform_driver();
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index ee30937482e1..397d4eee11bb 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -131,23 +131,6 @@ static inline u32 dispc_read_reg(const u16 idx)
return __raw_readl(dispc.base + idx);
}
-static int dispc_get_ctx_loss_count(void)
-{
- struct device *dev = &dispc.pdev->dev;
- struct omap_display_platform_data *pdata = dev->platform_data;
- struct omap_dss_board_info *board_data = pdata->board_data;
- int cnt;
-
- if (!board_data->get_context_loss_count)
- return -ENOENT;
-
- cnt = board_data->get_context_loss_count(dev);
-
- WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
-
- return cnt;
-}
-
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
@@ -251,7 +234,7 @@ static void dispc_save_context(void)
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
- dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+ dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
dispc.ctx_valid = true;
DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -266,7 +249,7 @@ static void dispc_restore_context(void)
if (!dispc.ctx_valid)
return;
- ctx = dispc_get_ctx_loss_count();
+ ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
return;
@@ -401,7 +384,7 @@ void dispc_runtime_put(void)
DSSDBG("dispc_runtime_put\n");
r = pm_runtime_put_sync(&dispc.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
@@ -413,14 +396,6 @@ static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
return false;
}
-static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
-{
- struct omap_overlay_manager *mgr =
- omap_dss_get_overlay_manager(channel);
-
- return mgr ? mgr->device : NULL;
-}
-
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
{
switch (channel) {
@@ -432,6 +407,7 @@ u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
default:
BUG();
+ return 0;
}
}
@@ -446,6 +422,7 @@ u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
return 0;
default:
BUG();
+ return 0;
}
}
@@ -764,7 +741,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
case OMAP_DSS_COLOR_XRGB16_1555:
m = 0xf; break;
default:
- BUG(); break;
+ BUG(); return;
}
} else {
switch (color_mode) {
@@ -801,13 +778,25 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
case OMAP_DSS_COLOR_XRGB16_1555:
m = 0xf; break;
default:
- BUG(); break;
+ BUG(); return;
}
}
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
+static void dispc_ovl_configure_burst_type(enum omap_plane plane,
+ enum omap_dss_rotation_type rotation_type)
+{
+ if (dss_has_feature(FEAT_BURST_2D) == 0)
+ return;
+
+ if (rotation_type == OMAP_DSS_ROT_TILER)
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
+ else
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
+}
+
void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
{
int shift;
@@ -845,6 +834,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
break;
default:
BUG();
+ return;
}
val = FLD_MOD(val, chan, shift, shift);
@@ -872,6 +862,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
break;
default:
BUG();
+ return 0;
}
val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -983,20 +974,13 @@ static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable)
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
}
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
+static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
+ u16 height)
{
u32 val;
- BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
- val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- dispc_write_reg(DISPC_SIZE_MGR(channel), val);
-}
-void dispc_set_digit_size(u16 width, u16 height)
-{
- u32 val;
- BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
+ dispc_write_reg(DISPC_SIZE_MGR(channel), val);
}
static void dispc_read_plane_fifo_sizes(void)
@@ -1063,7 +1047,8 @@ void dispc_enable_fifomerge(bool enable)
}
void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
- u32 *fifo_low, u32 *fifo_high, bool use_fifomerge)
+ u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+ bool manual_update)
{
/*
* All sizes are in bytes. Both the buffer and burst are made of
@@ -1091,7 +1076,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
* combined fifo size
*/
- if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+ if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
*fifo_low = ovl_fifo_size - burst_size * 2;
*fifo_high = total_fifo_size - burst_size;
} else {
@@ -1185,6 +1170,94 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane,
dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
}
+static void dispc_ovl_set_accu_uv(enum omap_plane plane,
+ u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
+ bool ilace, enum omap_color_mode color_mode, u8 rotation)
+{
+ int h_accu2_0, h_accu2_1;
+ int v_accu2_0, v_accu2_1;
+ int chroma_hinc, chroma_vinc;
+ int idx;
+
+ struct accu {
+ s8 h0_m, h0_n;
+ s8 h1_m, h1_n;
+ s8 v0_m, v0_n;
+ s8 v1_m, v1_n;
+ };
+
+ const struct accu *accu_table;
+ const struct accu *accu_val;
+
+ static const struct accu accu_nv12[4] = {
+ { 0, 1, 0, 1 , -1, 2, 0, 1 },
+ { 1, 2, -3, 4 , 0, 1, 0, 1 },
+ { -1, 1, 0, 1 , -1, 2, 0, 1 },
+ { -1, 2, -1, 2 , -1, 1, 0, 1 },
+ };
+
+ static const struct accu accu_nv12_ilace[4] = {
+ { 0, 1, 0, 1 , -3, 4, -1, 4 },
+ { -1, 4, -3, 4 , 0, 1, 0, 1 },
+ { -1, 1, 0, 1 , -1, 4, -3, 4 },
+ { -3, 4, -3, 4 , -1, 1, 0, 1 },
+ };
+
+ static const struct accu accu_yuv[4] = {
+ { 0, 1, 0, 1, 0, 1, 0, 1 },
+ { 0, 1, 0, 1, 0, 1, 0, 1 },
+ { -1, 1, 0, 1, 0, 1, 0, 1 },
+ { 0, 1, 0, 1, -1, 1, 0, 1 },
+ };
+
+ switch (rotation) {
+ case OMAP_DSS_ROT_0:
+ idx = 0;
+ break;
+ case OMAP_DSS_ROT_90:
+ idx = 1;
+ break;
+ case OMAP_DSS_ROT_180:
+ idx = 2;
+ break;
+ case OMAP_DSS_ROT_270:
+ idx = 3;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_NV12:
+ if (ilace)
+ accu_table = accu_nv12_ilace;
+ else
+ accu_table = accu_nv12;
+ break;
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ accu_table = accu_yuv;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ accu_val = &accu_table[idx];
+
+ chroma_hinc = 1024 * orig_width / out_width;
+ chroma_vinc = 1024 * orig_height / out_height;
+
+ h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024;
+ h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024;
+ v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
+ v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
+
+ dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
+ dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
+}
+
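The chroma accumulator values programmed above are plain integer arithmetic on the scaling ratios. A minimal standalone sketch, using hypothetical frame sizes that are not taken from this patch, shows how the NV12 rotation-0 table entry turns into ACCU2 register values:

/* Standalone sketch of the chroma accumulator math in
 * dispc_ovl_set_accu_uv(). The frame sizes are hypothetical; only the
 * arithmetic mirrors the function above.
 * Build with: cc -o accu accu.c && ./accu
 */
#include <stdio.h>

int main(void)
{
        /* hypothetical NV12 downscale, rotation 0 */
        int orig_width = 1920, orig_height = 1080;
        int out_width = 1280, out_height = 720;

        /* accu_nv12[0]: h0 = 0/1, h1 = 0/1, v0 = -1/2, v1 = 0/1 */
        int h0_m = 0, h0_n = 1, h1_m = 0, h1_n = 1;
        int v0_m = -1, v0_n = 2, v1_m = 0, v1_n = 1;

        int chroma_hinc = 1024 * orig_width / out_width;        /* 1536 */
        int chroma_vinc = 1024 * orig_height / out_height;      /* 1536 */

        int h_accu2_0 = (h0_m * chroma_hinc / h0_n) % 1024;
        int h_accu2_1 = (h1_m * chroma_hinc / h1_n) % 1024;
        int v_accu2_0 = (v0_m * chroma_vinc / v0_n) % 1024;
        int v_accu2_1 = (v1_m * chroma_vinc / v1_n) % 1024;

        printf("chroma_hinc %d chroma_vinc %d\n", chroma_hinc, chroma_vinc);
        printf("ACCU2_0 h %d v %d, ACCU2_1 h %d v %d\n",
               h_accu2_0, v_accu2_0, h_accu2_1, v_accu2_1);
        return 0;
}

Presumably the -1/2 vertical term compensates for NV12's vertically subsampled chroma siting, while the YUV2/UYVY table is mostly zero because those formats subsample chroma only horizontally.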
static void dispc_ovl_set_scaling_common(enum omap_plane plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
@@ -1258,6 +1331,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
return;
}
+
+ dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
+ out_height, ilace, color_mode, rotation);
+
switch (color_mode) {
case OMAP_DSS_COLOR_NV12:
/* UV is subsampled by 2 vertically*/
@@ -1280,6 +1357,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
break;
default:
BUG();
+ return;
}
if (out_width != orig_width)
@@ -1297,9 +1375,6 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
/* set V scaling */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
-
- dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
- dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
}
static void dispc_ovl_set_scaling(enum omap_plane plane,
@@ -1410,6 +1485,7 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode)
return 32;
default:
BUG();
+ return 0;
}
}
@@ -1423,6 +1499,7 @@ static s32 pixinc(int pixels, u8 ps)
return 1 - (-pixels + 1) * ps;
else
BUG();
+ return 0;
}
static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
@@ -1431,7 +1508,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
enum omap_color_mode color_mode, bool fieldmode,
unsigned int field_offset,
unsigned *offset0, unsigned *offset1,
- s32 *row_inc, s32 *pix_inc)
+ s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
{
u8 ps;
@@ -1477,10 +1554,10 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
else
*offset0 = 0;
- *row_inc = pixinc(1 + (screen_width - width) +
- (fieldmode ? screen_width : 0),
- ps);
- *pix_inc = pixinc(1, ps);
+ *row_inc = pixinc(1 +
+ (y_predecim * screen_width - x_predecim * width) +
+ (fieldmode ? screen_width : 0), ps);
+ *pix_inc = pixinc(x_predecim, ps);
break;
case OMAP_DSS_ROT_0 + 4:
@@ -1498,14 +1575,15 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
*offset0 = field_offset * screen_width * ps;
else
*offset0 = 0;
- *row_inc = pixinc(1 - (screen_width + width) -
- (fieldmode ? screen_width : 0),
- ps);
- *pix_inc = pixinc(1, ps);
+ *row_inc = pixinc(1 -
+ (y_predecim * screen_width + x_predecim * width) -
+ (fieldmode ? screen_width : 0), ps);
+ *pix_inc = pixinc(x_predecim, ps);
break;
default:
BUG();
+ return;
}
}
@@ -1515,7 +1593,7 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
enum omap_color_mode color_mode, bool fieldmode,
unsigned int field_offset,
unsigned *offset0, unsigned *offset1,
- s32 *row_inc, s32 *pix_inc)
+ s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
{
u8 ps;
u16 fbw, fbh;
@@ -1557,10 +1635,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 + field_offset * screen_width * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(1 + (screen_width - fbw) +
- (fieldmode ? screen_width : 0),
- ps);
- *pix_inc = pixinc(1, ps);
+ *row_inc = pixinc(1 +
+ (y_predecim * screen_width - fbw * x_predecim) +
+ (fieldmode ? screen_width : 0), ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(x_predecim, ps);
break;
case OMAP_DSS_ROT_90:
*offset1 = screen_width * (fbh - 1) * ps;
@@ -1568,9 +1650,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 + field_offset * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(screen_width * (fbh - 1) + 1 +
- (fieldmode ? 1 : 0), ps);
- *pix_inc = pixinc(-screen_width, ps);
+ *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) +
+ y_predecim + (fieldmode ? 1 : 0), ps);
+ *pix_inc = pixinc(-x_predecim * screen_width, ps);
break;
case OMAP_DSS_ROT_180:
*offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
@@ -1579,10 +1661,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
else
*offset0 = *offset1;
*row_inc = pixinc(-1 -
- (screen_width - fbw) -
- (fieldmode ? screen_width : 0),
- ps);
- *pix_inc = pixinc(-1, ps);
+ (y_predecim * screen_width - fbw * x_predecim) -
+ (fieldmode ? screen_width : 0), ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(-x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(-x_predecim, ps);
break;
case OMAP_DSS_ROT_270:
*offset1 = (fbw - 1) * ps;
@@ -1590,9 +1675,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 - field_offset * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
- (fieldmode ? 1 : 0), ps);
- *pix_inc = pixinc(screen_width, ps);
+ *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) -
+ y_predecim - (fieldmode ? 1 : 0), ps);
+ *pix_inc = pixinc(x_predecim * screen_width, ps);
break;
/* mirroring */
@@ -1602,10 +1687,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 + field_offset * screen_width * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(screen_width * 2 - 1 +
+ *row_inc = pixinc(y_predecim * screen_width * 2 - 1 +
(fieldmode ? screen_width : 0),
ps);
- *pix_inc = pixinc(-1, ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(-x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(-x_predecim, ps);
break;
case OMAP_DSS_ROT_90 + 4:
@@ -1614,10 +1703,10 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 + field_offset * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
- (fieldmode ? 1 : 0),
+ *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) +
+ y_predecim + (fieldmode ? 1 : 0),
ps);
- *pix_inc = pixinc(screen_width, ps);
+ *pix_inc = pixinc(x_predecim * screen_width, ps);
break;
case OMAP_DSS_ROT_180 + 4:
@@ -1626,10 +1715,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 - field_offset * screen_width * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(1 - screen_width * 2 -
+ *row_inc = pixinc(1 - y_predecim * screen_width * 2 -
(fieldmode ? screen_width : 0),
ps);
- *pix_inc = pixinc(1, ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(x_predecim, ps);
break;
case OMAP_DSS_ROT_270 + 4:
@@ -1638,34 +1731,130 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 - field_offset * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(screen_width * (fbh - 1) - 1 -
- (fieldmode ? 1 : 0),
+ *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
+ y_predecim - (fieldmode ? 1 : 0),
ps);
- *pix_inc = pixinc(-screen_width, ps);
+ *pix_inc = pixinc(-x_predecim * screen_width, ps);
break;
default:
BUG();
+ return;
}
}
-static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
+static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
+ enum omap_color_mode color_mode, bool fieldmode,
+ unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+ s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
+{
+ u8 ps;
+
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_CLUT1:
+ case OMAP_DSS_COLOR_CLUT2:
+ case OMAP_DSS_COLOR_CLUT4:
+ case OMAP_DSS_COLOR_CLUT8:
+ BUG();
+ return;
+ default:
+ ps = color_mode_to_bpp(color_mode) / 8;
+ break;
+ }
+
+ DSSDBG("scrw %d, width %d\n", screen_width, width);
+
+ /*
+ * field 0 = even field = bottom field
+ * field 1 = odd field = top field
+ */
+ *offset1 = 0;
+ if (field_offset)
+ *offset0 = *offset1 + field_offset * screen_width * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
+ (fieldmode ? screen_width : 0), ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(x_predecim, ps);
+}
+
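The row and pixel increments above, a simpler relative of the DMA and VRFB variants earlier in this file, boil down to a couple of lines of integer math once predecimation is factored in. A standalone sketch with hypothetical buffer dimensions, with the pixinc() helper copied in shape from the driver:

/* Standalone sketch of the increments computed by
 * calc_tiler_rotation_offset() above. Buffer sizes, bytes-per-pixel and
 * the predecimation factors are hypothetical examples.
 */
#include <stdio.h>

/* same shape as the driver's pixinc() helper */
static int pixinc(int pixels, int ps)
{
        if (pixels == 1)
                return 1;
        else if (pixels > 1)
                return 1 + (pixels - 1) * ps;
        else if (pixels < 0)
                return 1 - (-pixels + 1) * ps;
        return 0;       /* pixels == 0 is a BUG() in the driver */
}

int main(void)
{
        int screen_width = 1920, width = 1280;  /* pixels */
        int ps = 4;                             /* bytes per pixel, e.g. xRGB32 */
        int x_predecim = 2, y_predecim = 2;     /* fetch every 2nd pixel/line */
        int fieldmode = 0;

        int row_inc = pixinc(1 + (y_predecim * screen_width -
                                  width * x_predecim) +
                             (fieldmode ? screen_width : 0), ps);
        int pix_inc = pixinc(x_predecim, ps);

        printf("row_inc %d, pix_inc %d (DISPC increment encoding)\n",
               row_inc, pix_inc);
        return 0;
}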
+/*
+ * This function is used to avoid synclosts in OMAP3, because of some
+ * undocumented horizontal position and timing related limitations.
+ */
+static int check_horiz_timing_omap3(enum omap_channel channel,
+ const struct omap_video_timings *t, u16 pos_x,
+ u16 width, u16 height, u16 out_width, u16 out_height)
+{
+ int DS = DIV_ROUND_UP(height, out_height);
+ unsigned long nonactive, lclk, pclk;
+ static const u8 limits[3] = { 8, 10, 20 };
+ u64 val, blank;
+ int i;
+
+ nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+ pclk = dispc_mgr_pclk_rate(channel);
+ if (dispc_mgr_is_lcd(channel))
+ lclk = dispc_mgr_lclk_rate(channel);
+ else
+ lclk = dispc_fclk_rate();
+
+ i = 0;
+ if (out_height < height)
+ i++;
+ if (out_width < width)
+ i++;
+ blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+ DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
+ if (blank <= limits[i])
+ return -EINVAL;
+
+ /*
+ * Pixel data should be prepared before visible display point starts.
+	 * So, at least DS - 2 lines must have already been fetched by DISPC
+	 * during the nonactive - pos_x period.
+ */
+ val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
+ DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
+ val, max(0, DS - 2) * width);
+ if (val < max(0, DS - 2) * width)
+ return -EINVAL;
+
+ /*
+	 * All lines need to be refilled during the nonactive period, since
+	 * only one line can be loaded during the active period. So, at least
+	 * DS - 1 lines should be loaded during the nonactive period.
+ */
+ val = div_u64((u64)nonactive * lclk, pclk);
+ DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n",
+ val, max(0, DS - 1) * width);
+ if (val < max(0, DS - 1) * width)
+ return -EINVAL;
+
+ return 0;
+}
+
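To make the three checks above concrete, a minimal standalone sketch with made-up timings and clock rates (none of them come from this patch) evaluates the same expressions and reports the budgets involved:

/* Standalone illustration of the OMAP3 horizontal-timing checks in
 * check_horiz_timing_omap3() above. All numbers are hypothetical;
 * only the formulas follow the function.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical LCD timings and a 2x vertical downscale */
        unsigned x_res = 800, hfp = 40, hsw = 4, hbp = 40;
        unsigned pos_x = 0, width = 800, height = 960;
        unsigned out_width = 800, out_height = 480;
        unsigned long lclk = 80000000, pclk = 40000000; /* made-up rates */
        static const unsigned limits[3] = { 8, 10, 20 };

        unsigned DS = (height + out_height - 1) / out_height;          /* 2 */
        unsigned nonactive = x_res + hfp + hsw + hbp - out_width;      /* 84 */
        unsigned i = (out_height < height) + (out_width < width);      /* 1 */

        uint64_t blank = (uint64_t)(hbp + hsw + hfp) * lclk / pclk;    /* 168 */
        uint64_t fetch = (uint64_t)(nonactive - pos_x) * lclk / pclk;  /* 168 */
        uint64_t refill = (uint64_t)nonactive * lclk / pclk;           /* 168 */

        printf("blank %llu (limit %u)\n", (unsigned long long)blank, limits[i]);
        printf("prefetch budget %llu, need %u\n",
               (unsigned long long)fetch, (DS >= 2 ? DS - 2 : 0) * width);
        printf("refill budget %llu, need %u\n",
               (unsigned long long)refill, (DS >= 1 ? DS - 1 : 0) * width);
        return 0;
}

With these particular numbers the last check fails (168 < 800), which is exactly the kind of case the predecimation loop in dispc_ovl_calc_scaling() retries with a larger decimation factor.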
+static unsigned long calc_core_clk_five_taps(enum omap_channel channel,
+ const struct omap_video_timings *mgr_timings, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
- u32 fclk = 0;
+ u32 core_clk = 0;
u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
if (height <= out_height && width <= out_width)
return (unsigned long) pclk;
if (height > out_height) {
- struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
- unsigned int ppl = dssdev->panel.timings.x_res;
+ unsigned int ppl = mgr_timings->x_res;
tmp = pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
- fclk = tmp;
+ core_clk = tmp;
if (height > 2 * out_height) {
if (ppl == out_width)
@@ -1673,23 +1862,23 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
tmp = pclk * (height - 2 * out_height) * out_width;
do_div(tmp, 2 * out_height * (ppl - out_width));
- fclk = max(fclk, (u32) tmp);
+ core_clk = max_t(u32, core_clk, tmp);
}
}
if (width > out_width) {
tmp = pclk * width;
do_div(tmp, out_width);
- fclk = max(fclk, (u32) tmp);
+ core_clk = max_t(u32, core_clk, tmp);
if (color_mode == OMAP_DSS_COLOR_RGB24U)
- fclk <<= 1;
+ core_clk <<= 1;
}
- return fclk;
+ return core_clk;
}
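For a sense of scale, a minimal standalone sketch of the first five-tap bound computed above, pclk * height * out_width / (2 * out_height * ppl), with a hypothetical 1080-to-720-line downscale:

/* Standalone sketch of the main five-tap core clock bound in
 * calc_core_clk_five_taps() above. The pixel clock and sizes are
 * hypothetical examples.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t pclk = 74250000;       /* 74.25 MHz, e.g. a 720p pixel clock */
        unsigned height = 1080, out_height = 720;
        unsigned out_width = 1280, ppl = 1280;  /* manager pixels per line */

        uint64_t core_clk = pclk * height * out_width /
                            (2ULL * out_height * ppl);

        printf("required core clk >= %llu Hz\n",
               (unsigned long long)core_clk);   /* ~55.7 MHz */
        return 0;
}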
-static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+static unsigned long calc_core_clk(enum omap_channel channel, u16 width,
u16 height, u16 out_width, u16 out_height)
{
unsigned int hf, vf;
@@ -1730,15 +1919,20 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
}
static int dispc_ovl_calc_scaling(enum omap_plane plane,
- enum omap_channel channel, u16 width, u16 height,
- u16 out_width, u16 out_height,
- enum omap_color_mode color_mode, bool *five_taps)
+ enum omap_channel channel,
+ const struct omap_video_timings *mgr_timings,
+ u16 width, u16 height, u16 out_width, u16 out_height,
+ enum omap_color_mode color_mode, bool *five_taps,
+ int *x_predecim, int *y_predecim, u16 pos_x)
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
- unsigned long fclk = 0;
+ const int max_decim_limit = 16;
+ unsigned long core_clk = 0;
+ int decim_x, decim_y, error, min_factor;
+ u16 in_width, in_height, in_width_max = 0;
if (width == out_width && height == out_height)
return 0;
@@ -1746,64 +1940,154 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
return -EINVAL;
- if (out_width < width / maxdownscale ||
- out_width > width * 8)
+ *x_predecim = max_decim_limit;
+ *y_predecim = max_decim_limit;
+
+ if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
+ color_mode == OMAP_DSS_COLOR_CLUT2 ||
+ color_mode == OMAP_DSS_COLOR_CLUT4 ||
+ color_mode == OMAP_DSS_COLOR_CLUT8) {
+ *x_predecim = 1;
+ *y_predecim = 1;
+ *five_taps = false;
+ return 0;
+ }
+
+ decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
+ decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
+
+ min_factor = min(decim_x, decim_y);
+
+ if (decim_x > *x_predecim || out_width > width * 8)
return -EINVAL;
- if (out_height < height / maxdownscale ||
- out_height > height * 8)
+ if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
if (cpu_is_omap24xx()) {
- if (width > maxsinglelinewidth)
- DSSERR("Cannot scale max input width exceeded");
*five_taps = false;
- fclk = calc_fclk(channel, width, height, out_width,
- out_height);
+
+ do {
+ in_height = DIV_ROUND_UP(height, decim_y);
+ in_width = DIV_ROUND_UP(width, decim_x);
+ core_clk = calc_core_clk(channel, in_width, in_height,
+ out_width, out_height);
+ error = (in_width > maxsinglelinewidth || !core_clk ||
+ core_clk > dispc_core_clk_rate());
+ if (error) {
+ if (decim_x == decim_y) {
+ decim_x = min_factor;
+ decim_y++;
+ } else {
+ swap(decim_x, decim_y);
+ if (decim_x < decim_y)
+ decim_x++;
+ }
+ }
+ } while (decim_x <= *x_predecim && decim_y <= *y_predecim &&
+ error);
+
+ if (in_width > maxsinglelinewidth) {
+ DSSERR("Cannot scale max input width exceeded");
+ return -EINVAL;
+ }
} else if (cpu_is_omap34xx()) {
- if (width > (maxsinglelinewidth * 2)) {
+
+ do {
+ in_height = DIV_ROUND_UP(height, decim_y);
+ in_width = DIV_ROUND_UP(width, decim_x);
+ core_clk = calc_core_clk_five_taps(channel, mgr_timings,
+ in_width, in_height, out_width, out_height,
+ color_mode);
+
+ error = check_horiz_timing_omap3(channel, mgr_timings,
+ pos_x, in_width, in_height, out_width,
+ out_height);
+
+ if (in_width > maxsinglelinewidth)
+ if (in_height > out_height &&
+ in_height < out_height * 2)
+ *five_taps = false;
+ if (!*five_taps)
+ core_clk = calc_core_clk(channel, in_width,
+ in_height, out_width, out_height);
+ error = (error || in_width > maxsinglelinewidth * 2 ||
+ (in_width > maxsinglelinewidth && *five_taps) ||
+ !core_clk || core_clk > dispc_core_clk_rate());
+ if (error) {
+ if (decim_x == decim_y) {
+ decim_x = min_factor;
+ decim_y++;
+ } else {
+ swap(decim_x, decim_y);
+ if (decim_x < decim_y)
+ decim_x++;
+ }
+ }
+ } while (decim_x <= *x_predecim && decim_y <= *y_predecim
+ && error);
+
+ if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width,
+ height, out_width, out_height)){
+ DSSERR("horizontal timing too tight\n");
+ return -EINVAL;
+ }
+
+ if (in_width > (maxsinglelinewidth * 2)) {
DSSERR("Cannot setup scaling");
DSSERR("width exceeds maximum width possible");
return -EINVAL;
}
- fclk = calc_fclk_five_taps(channel, width, height, out_width,
- out_height, color_mode);
- if (width > maxsinglelinewidth) {
- if (height > out_height && height < out_height * 2)
- *five_taps = false;
- else {
- DSSERR("cannot setup scaling with five taps");
- return -EINVAL;
- }
+
+ if (in_width > maxsinglelinewidth && *five_taps) {
+ DSSERR("cannot setup scaling with five taps");
+ return -EINVAL;
}
- if (!*five_taps)
- fclk = calc_fclk(channel, width, height, out_width,
- out_height);
} else {
- if (width > maxsinglelinewidth) {
+ int decim_x_min = decim_x;
+ in_height = DIV_ROUND_UP(height, decim_y);
+ in_width_max = dispc_core_clk_rate() /
+ DIV_ROUND_UP(dispc_mgr_pclk_rate(channel),
+ out_width);
+ decim_x = DIV_ROUND_UP(width, in_width_max);
+
+ decim_x = decim_x > decim_x_min ? decim_x : decim_x_min;
+ if (decim_x > *x_predecim)
+ return -EINVAL;
+
+ do {
+ in_width = DIV_ROUND_UP(width, decim_x);
+ } while (decim_x <= *x_predecim &&
+ in_width > maxsinglelinewidth && decim_x++);
+
+ if (in_width > maxsinglelinewidth) {
DSSERR("Cannot scale width exceeds max line width");
return -EINVAL;
}
- fclk = calc_fclk(channel, width, height, out_width,
- out_height);
+
+ core_clk = calc_core_clk(channel, in_width, in_height,
+ out_width, out_height);
}
- DSSDBG("required fclk rate = %lu Hz\n", fclk);
- DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
+ DSSDBG("required core clk rate = %lu Hz\n", core_clk);
+ DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate());
- if (!fclk || fclk > dispc_fclk_rate()) {
+ if (!core_clk || core_clk > dispc_core_clk_rate()) {
DSSERR("failed to set up scaling, "
- "required fclk rate = %lu Hz, "
- "current fclk rate = %lu Hz\n",
- fclk, dispc_fclk_rate());
+ "required core clk rate = %lu Hz, "
+ "current core clk rate = %lu Hz\n",
+ core_clk, dispc_core_clk_rate());
return -EINVAL;
}
+ *x_predecim = decim_x;
+ *y_predecim = decim_y;
return 0;
}
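The retry logic above walks the (decim_x, decim_y) space in a fixed order rather than trying every combination; a standalone sketch that simply assumes every attempt fails prints that order:

/* Standalone sketch of the predecimation search order used by
 * dispc_ovl_calc_scaling() above. Every attempt is assumed to fail so
 * that the sequence of candidate factors is visible.
 */
#include <stdio.h>

int main(void)
{
        int decim_x = 1, decim_y = 1, min_factor = 1;
        int step;

        for (step = 0; step < 6; step++) {
                printf("try decim_x %d decim_y %d\n", decim_x, decim_y);
                if (decim_x == decim_y) {
                        decim_x = min_factor;
                        decim_y++;
                } else {
                        int tmp = decim_x;      /* swap(decim_x, decim_y) */
                        decim_x = decim_y;
                        decim_y = tmp;
                        if (decim_x < decim_y)
                                decim_x++;
                }
        }
        return 0;
}

With min_factor == 1 this prints (1,1), (1,2), (2,1), (2,2), (1,3), (3,1): both orientations are tried while the total decimation grows roughly as slowly as possible.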
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, bool replication)
+ bool ilace, bool replication,
+ const struct omap_video_timings *mgr_timings)
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
bool five_taps = true;
@@ -1814,8 +2098,11 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
s32 pix_inc;
u16 frame_height = oi->height;
unsigned int field_offset = 0;
- u16 outw, outh;
+ u16 in_height = oi->height;
+ u16 in_width = oi->width;
+ u16 out_width, out_height;
enum omap_channel channel;
+ int x_predecim = 1, y_predecim = 1;
channel = dispc_ovl_get_channel_out(plane);
@@ -1829,32 +2116,35 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
if (oi->paddr == 0)
return -EINVAL;
- outw = oi->out_width == 0 ? oi->width : oi->out_width;
- outh = oi->out_height == 0 ? oi->height : oi->out_height;
+ out_width = oi->out_width == 0 ? oi->width : oi->out_width;
+ out_height = oi->out_height == 0 ? oi->height : oi->out_height;
- if (ilace && oi->height == outh)
+ if (ilace && oi->height == out_height)
fieldmode = 1;
if (ilace) {
if (fieldmode)
- oi->height /= 2;
+ in_height /= 2;
oi->pos_y /= 2;
- outh /= 2;
+ out_height /= 2;
DSSDBG("adjusting for ilace: height %d, pos_y %d, "
"out_height %d\n",
- oi->height, oi->pos_y, outh);
+ in_height, oi->pos_y, out_height);
}
if (!dss_feat_color_mode_supported(plane, oi->color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
- outw, outh, oi->color_mode,
- &five_taps);
+ r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width,
+ in_height, out_width, out_height, oi->color_mode,
+ &five_taps, &x_predecim, &y_predecim, oi->pos_x);
if (r)
return r;
+ in_width = DIV_ROUND_UP(in_width, x_predecim);
+ in_height = DIV_ROUND_UP(in_height, y_predecim);
+
if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
oi->color_mode == OMAP_DSS_COLOR_UYVY ||
oi->color_mode == OMAP_DSS_COLOR_NV12)
@@ -1868,32 +2158,46 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
* so the integer part must be added to the base address of the
* bottom field.
*/
- if (!oi->height || oi->height == outh)
+ if (!in_height || in_height == out_height)
field_offset = 0;
else
- field_offset = oi->height / outh / 2;
+ field_offset = in_height / out_height / 2;
}
/* Fields are independent but interleaved in memory. */
if (fieldmode)
field_offset = 1;
- if (oi->rotation_type == OMAP_DSS_ROT_DMA)
+ offset0 = 0;
+ offset1 = 0;
+ row_inc = 0;
+ pix_inc = 0;
+
+ if (oi->rotation_type == OMAP_DSS_ROT_TILER)
+ calc_tiler_rotation_offset(oi->screen_width, in_width,
+ oi->color_mode, fieldmode, field_offset,
+ &offset0, &offset1, &row_inc, &pix_inc,
+ x_predecim, y_predecim);
+ else if (oi->rotation_type == OMAP_DSS_ROT_DMA)
calc_dma_rotation_offset(oi->rotation, oi->mirror,
- oi->screen_width, oi->width, frame_height,
+ oi->screen_width, in_width, frame_height,
oi->color_mode, fieldmode, field_offset,
- &offset0, &offset1, &row_inc, &pix_inc);
+ &offset0, &offset1, &row_inc, &pix_inc,
+ x_predecim, y_predecim);
else
calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
- oi->screen_width, oi->width, frame_height,
+ oi->screen_width, in_width, frame_height,
oi->color_mode, fieldmode, field_offset,
- &offset0, &offset1, &row_inc, &pix_inc);
+ &offset0, &offset1, &row_inc, &pix_inc,
+ x_predecim, y_predecim);
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
dispc_ovl_set_color_mode(plane, oi->color_mode);
+ dispc_ovl_configure_burst_type(plane, oi->rotation_type);
+
dispc_ovl_set_ba0(plane, oi->paddr + offset0);
dispc_ovl_set_ba1(plane, oi->paddr + offset1);
@@ -1906,19 +2210,18 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
dispc_ovl_set_row_inc(plane, row_inc);
dispc_ovl_set_pix_inc(plane, pix_inc);
- DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
- oi->height, outw, outh);
+ DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width,
+ in_height, out_width, out_height);
dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
- dispc_ovl_set_pic_size(plane, oi->width, oi->height);
+ dispc_ovl_set_pic_size(plane, in_width, in_height);
if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
- dispc_ovl_set_scaling(plane, oi->width, oi->height,
- outw, outh,
- ilace, five_taps, fieldmode,
+ dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
+ out_height, ilace, five_taps, fieldmode,
oi->color_mode, oi->rotation);
- dispc_ovl_set_vid_size(plane, outw, outh);
+ dispc_ovl_set_vid_size(plane, out_width, out_height);
dispc_ovl_set_vid_color_conv(plane, cconv);
}
@@ -2087,8 +2390,10 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
return !!REG_GET(DISPC_CONTROL, 1, 1);
else if (channel == OMAP_DSS_CHANNEL_LCD2)
return !!REG_GET(DISPC_CONTROL2, 0, 0);
- else
+ else {
BUG();
+ return false;
+ }
}
void dispc_mgr_enable(enum omap_channel channel, bool enable)
@@ -2285,6 +2590,12 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
}
+static bool _dispc_mgr_size_ok(u16 width, u16 height)
+{
+ return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
+ height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
+}
+
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
int vsw, int vfp, int vbp)
{
@@ -2309,11 +2620,20 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
return true;
}
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+ const struct omap_video_timings *timings)
{
- return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw,
- timings->vfp, timings->vbp);
+ bool timings_ok;
+
+ timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
+
+ if (dispc_mgr_is_lcd(channel))
+ timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw,
+ timings->hfp, timings->hbp,
+ timings->vsw, timings->vfp,
+ timings->vbp);
+
+ return timings_ok;
}
static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
@@ -2340,37 +2660,45 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
}
/* change name to mode? */
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
struct omap_video_timings *timings)
{
unsigned xtot, ytot;
unsigned long ht, vt;
+ struct omap_video_timings t = *timings;
+
+ DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
- if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw,
- timings->vfp, timings->vbp))
+ if (!dispc_mgr_timings_ok(channel, &t)) {
BUG();
+ return;
+ }
+
+ if (dispc_mgr_is_lcd(channel)) {
+ _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
+ t.vfp, t.vbp);
+
+ xtot = t.x_res + t.hfp + t.hsw + t.hbp;
+ ytot = t.y_res + t.vfp + t.vsw + t.vbp;
- _dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp,
- timings->hbp, timings->vsw, timings->vfp,
- timings->vbp);
+ ht = (timings->pixel_clock * 1000) / xtot;
+ vt = (timings->pixel_clock * 1000) / xtot / ytot;
- dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res);
+ DSSDBG("pck %u\n", timings->pixel_clock);
+ DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+ t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
- xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
- ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
+ DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+ } else {
+ enum dss_hdmi_venc_clk_source_select source;
- ht = (timings->pixel_clock * 1000) / xtot;
- vt = (timings->pixel_clock * 1000) / xtot / ytot;
+ source = dss_get_hdmi_venc_clk_source();
- DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
- timings->y_res);
- DSSDBG("pck %u\n", timings->pixel_clock);
- DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
- timings->hsw, timings->hfp, timings->hbp,
- timings->vsw, timings->vfp, timings->vbp);
+ if (source == DSS_VENC_TV_CLK)
+ t.y_res /= 2;
+ }
- DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+ dispc_mgr_set_size(channel, t.x_res, t.y_res);
}
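The hsync/vsync figures in the debug output above follow directly from the pixel clock and the horizontal/vertical totals; a standalone sketch with a hypothetical 800x480 panel timing (not one taken from this patch):

/* Standalone sketch of the hsync/vsync rates logged by
 * dispc_mgr_set_timings() above. The panel timings are hypothetical.
 */
#include <stdio.h>

int main(void)
{
        unsigned x_res = 800, hfp = 40, hsw = 4, hbp = 40;
        unsigned y_res = 480, vfp = 8, vsw = 2, vbp = 8;
        unsigned pixel_clock = 33000;   /* kHz, as in omap_video_timings */

        unsigned xtot = x_res + hfp + hsw + hbp;        /* 884 */
        unsigned ytot = y_res + vfp + vsw + vbp;        /* 498 */

        unsigned long ht = (unsigned long)pixel_clock * 1000 / xtot;
        unsigned long vt = (unsigned long)pixel_clock * 1000 / xtot / ytot;

        printf("hsync %lu Hz, vsync %lu Hz\n", ht, vt); /* ~37 kHz, ~74 Hz */
        return 0;
}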
static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
@@ -2411,6 +2739,7 @@ unsigned long dispc_fclk_rate(void)
break;
default:
BUG();
+ return 0;
}
return r;
@@ -2441,6 +2770,7 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
break;
default:
BUG();
+ return 0;
}
return r / lcd;
@@ -2462,20 +2792,35 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
return r / pcd;
} else {
- struct omap_dss_device *dssdev =
- dispc_mgr_get_device(channel);
+ enum dss_hdmi_venc_clk_source_select source;
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_VENC:
+ source = dss_get_hdmi_venc_clk_source();
+
+ switch (source) {
+ case DSS_VENC_TV_CLK:
return venc_get_pixel_clock();
- case OMAP_DISPLAY_TYPE_HDMI:
+ case DSS_HDMI_M_PCLK:
return hdmi_get_pixel_clock();
default:
BUG();
+ return 0;
}
}
}
+unsigned long dispc_core_clk_rate(void)
+{
+ int lcd;
+ unsigned long fclk = dispc_fclk_rate();
+
+ if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ lcd = REG_GET(DISPC_DIVISOR, 23, 16);
+ else
+ lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
+
+ return fclk / lcd;
+}
+
void dispc_dump_clocks(struct seq_file *s)
{
int lcd, pcd;
@@ -2588,7 +2933,7 @@ void dispc_dump_irqs(struct seq_file *s)
}
#endif
-void dispc_dump_regs(struct seq_file *s)
+static void dispc_dump_regs(struct seq_file *s)
{
int i, j;
const char *mgr_names[] = {
@@ -3247,27 +3592,6 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
return 0;
}
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-void dispc_fake_vsync_irq(void)
-{
- u32 irqstatus = DISPC_IRQ_VSYNC;
- int i;
-
- WARN_ON(!in_interrupt());
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- struct omap_dispc_isr_data *isr_data;
- isr_data = &dispc.registered_isr[i];
-
- if (!isr_data->isr)
- continue;
-
- if (isr_data->mask & irqstatus)
- isr_data->isr(isr_data->arg, irqstatus);
- }
-}
-#endif
-
static void _omap_dispc_initialize_irq(void)
{
unsigned long flags;
@@ -3330,7 +3654,7 @@ static void _omap_dispc_initial_config(void)
}
/* DISPC HW IP initialisation */
-static int omap_dispchw_probe(struct platform_device *pdev)
+static int __init omap_dispchw_probe(struct platform_device *pdev)
{
u32 rev;
int r = 0;
@@ -3399,6 +3723,11 @@ static int omap_dispchw_probe(struct platform_device *pdev)
dispc_runtime_put();
+ dss_debugfs_create_file("dispc", dispc_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
+#endif
return 0;
err_runtime_get:
@@ -3407,7 +3736,7 @@ err_runtime_get:
return r;
}
-static int omap_dispchw_remove(struct platform_device *pdev)
+static int __exit omap_dispchw_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
@@ -3419,19 +3748,12 @@ static int omap_dispchw_remove(struct platform_device *pdev)
static int dispc_runtime_suspend(struct device *dev)
{
dispc_save_context();
- dss_runtime_put();
return 0;
}
static int dispc_runtime_resume(struct device *dev)
{
- int r;
-
- r = dss_runtime_get();
- if (r < 0)
- return r;
-
dispc_restore_context();
return 0;
@@ -3443,8 +3765,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
};
static struct platform_driver omap_dispchw_driver = {
- .probe = omap_dispchw_probe,
- .remove = omap_dispchw_remove,
+ .remove = __exit_p(omap_dispchw_remove),
.driver = {
.name = "omapdss_dispc",
.owner = THIS_MODULE,
@@ -3452,12 +3773,12 @@ static struct platform_driver omap_dispchw_driver = {
},
};
-int dispc_init_platform_driver(void)
+int __init dispc_init_platform_driver(void)
{
- return platform_driver_register(&omap_dispchw_driver);
+ return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe);
}
-void dispc_uninit_platform_driver(void)
+void __exit dispc_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_dispchw_driver);
+ platform_driver_unregister(&omap_dispchw_driver);
}
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 5836bd1650f9..f278080e1063 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -120,6 +120,7 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
return 0x03AC;
default:
BUG();
+ return 0;
}
}
@@ -134,6 +135,7 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
return 0x03B0;
default:
BUG();
+ return 0;
}
}
@@ -144,10 +146,12 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel)
return 0x0064;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0400;
default:
BUG();
+ return 0;
}
}
@@ -158,10 +162,12 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel)
return 0x0068;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0404;
default:
BUG();
+ return 0;
}
}
@@ -172,10 +178,12 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
return 0x006C;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0408;
default:
BUG();
+ return 0;
}
}
@@ -186,10 +194,12 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel)
return 0x0070;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x040C;
default:
BUG();
+ return 0;
}
}
@@ -205,6 +215,7 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
return 0x03CC;
default:
BUG();
+ return 0;
}
}
@@ -215,10 +226,12 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
return 0x01D4;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C0;
default:
BUG();
+ return 0;
}
}
@@ -229,10 +242,12 @@ static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
return 0x01D8;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C4;
default:
BUG();
+ return 0;
}
}
@@ -243,10 +258,12 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
return 0x01DC;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C8;
default:
BUG();
+ return 0;
}
}
@@ -257,10 +274,12 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
return 0x0220;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03BC;
default:
BUG();
+ return 0;
}
}
@@ -271,10 +290,12 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
return 0x0224;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03B8;
default:
BUG();
+ return 0;
}
}
@@ -285,10 +306,12 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
return 0x0228;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03B4;
default:
BUG();
+ return 0;
}
}
@@ -306,6 +329,7 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
return 0x0300;
default:
BUG();
+ return 0;
}
}
@@ -321,6 +345,7 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
return 0x0008;
default:
BUG();
+ return 0;
}
}
@@ -335,6 +360,7 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
return 0x000C;
default:
BUG();
+ return 0;
}
}
@@ -343,6 +369,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0544;
case OMAP_DSS_VIDEO2:
@@ -351,6 +378,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
return 0x0310;
default:
BUG();
+ return 0;
}
}
@@ -359,6 +387,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0548;
case OMAP_DSS_VIDEO2:
@@ -367,6 +396,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
return 0x0314;
default:
BUG();
+ return 0;
}
}
@@ -381,6 +411,7 @@ static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
return 0x009C;
default:
BUG();
+ return 0;
}
}
@@ -395,6 +426,7 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
return 0x00A8;
default:
BUG();
+ return 0;
}
}
@@ -410,6 +442,7 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
return 0x0070;
default:
BUG();
+ return 0;
}
}
@@ -418,6 +451,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0568;
case OMAP_DSS_VIDEO2:
@@ -426,6 +460,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
return 0x032C;
default:
BUG();
+ return 0;
}
}
@@ -441,6 +476,7 @@ static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
return 0x008C;
default:
BUG();
+ return 0;
}
}
@@ -456,6 +492,7 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
return 0x0088;
default:
BUG();
+ return 0;
}
}
@@ -471,6 +508,7 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
return 0x00A4;
default:
BUG();
+ return 0;
}
}
@@ -486,6 +524,7 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
return 0x0098;
default:
BUG();
+ return 0;
}
}
@@ -498,8 +537,10 @@ static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
BUG();
+ return 0;
default:
BUG();
+ return 0;
}
}
@@ -512,8 +553,10 @@ static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
BUG();
+ return 0;
default:
BUG();
+ return 0;
}
}
@@ -522,6 +565,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0024;
@@ -529,6 +573,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
return 0x0090;
default:
BUG();
+ return 0;
}
}
@@ -537,6 +582,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0580;
case OMAP_DSS_VIDEO2:
@@ -545,6 +591,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
return 0x0424;
default:
BUG();
+ return 0;
}
}
@@ -553,6 +600,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0028;
@@ -560,6 +608,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
return 0x0094;
default:
BUG();
+ return 0;
}
}
@@ -569,6 +618,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x002C;
@@ -576,6 +626,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
return 0x0000;
default:
BUG();
+ return 0;
}
}
@@ -584,6 +635,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0584;
case OMAP_DSS_VIDEO2:
@@ -592,6 +644,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
return 0x0428;
default:
BUG();
+ return 0;
}
}
@@ -600,6 +653,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0030;
@@ -607,6 +661,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
return 0x0004;
default:
BUG();
+ return 0;
}
}
@@ -615,6 +670,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0588;
case OMAP_DSS_VIDEO2:
@@ -623,6 +679,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
return 0x042C;
default:
BUG();
+ return 0;
}
}
@@ -632,6 +689,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0034 + i * 0x8;
@@ -639,6 +697,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
return 0x0010 + i * 0x8;
default:
BUG();
+ return 0;
}
}
@@ -648,6 +707,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x058C + i * 0x8;
case OMAP_DSS_VIDEO2:
@@ -656,6 +716,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
return 0x0430 + i * 0x8;
default:
BUG();
+ return 0;
}
}
@@ -665,6 +726,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0038 + i * 0x8;
@@ -672,6 +734,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
return 0x0014 + i * 0x8;
default:
BUG();
+ return 0;
}
}
@@ -681,6 +744,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0590 + i * 8;
case OMAP_DSS_VIDEO2:
@@ -689,6 +753,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
return 0x0434 + i * 0x8;
default:
BUG();
+ return 0;
}
}
@@ -698,12 +763,14 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
return 0x0074 + i * 0x4;
default:
BUG();
+ return 0;
}
}
@@ -713,6 +780,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0124 + i * 0x4;
case OMAP_DSS_VIDEO2:
@@ -721,6 +789,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
return 0x0050 + i * 0x4;
default:
BUG();
+ return 0;
}
}
@@ -730,6 +799,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x05CC + i * 0x4;
case OMAP_DSS_VIDEO2:
@@ -738,6 +808,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
return 0x0470 + i * 0x4;
default:
BUG();
+ return 0;
}
}
@@ -754,6 +825,7 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
return 0x00A0;
default:
BUG();
+ return 0;
}
}
#endif
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 4424c198dbcd..249010630370 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -304,10 +304,18 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
return 24;
default:
BUG();
+ return 0;
}
}
EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+EXPORT_SYMBOL(omapdss_default_get_timings);
+
/* Checks if replication logic should be used. Only use for active matrix,
* when overlay is in RGB12U or RGB16 mode, and LCD interface is
* 18bpp or 24bpp */
@@ -340,6 +348,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
break;
default:
BUG();
+ return false;
}
return bpp > 16;
@@ -352,46 +361,6 @@ void dss_init_device(struct platform_device *pdev,
int i;
int r;
- switch (dssdev->type) {
-#ifdef CONFIG_OMAP2_DSS_DPI
- case OMAP_DISPLAY_TYPE_DPI:
- r = dpi_init_display(dssdev);
- break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
- case OMAP_DISPLAY_TYPE_DBI:
- r = rfbi_init_display(dssdev);
- break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- case OMAP_DISPLAY_TYPE_VENC:
- r = venc_init_display(dssdev);
- break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
- case OMAP_DISPLAY_TYPE_SDI:
- r = sdi_init_display(dssdev);
- break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- case OMAP_DISPLAY_TYPE_DSI:
- r = dsi_init_display(dssdev);
- break;
-#endif
- case OMAP_DISPLAY_TYPE_HDMI:
- r = hdmi_init_display(dssdev);
- break;
- default:
- DSSERR("Support for display '%s' not compiled in.\n",
- dssdev->name);
- return;
- }
-
- if (r) {
- DSSERR("failed to init display %s\n", dssdev->name);
- return;
- }
-
/* create device sysfs files */
i = 0;
while ((attr = display_sysfs_attrs[i++]) != NULL) {
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index faaf305fda27..8c2056c9537b 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -156,7 +156,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
t->pixel_clock = pck;
}
- dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+ dss_mgr_set_timings(dssdev->manager, t);
return 0;
}
@@ -202,10 +202,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
goto err_reg_enable;
}
- r = dss_runtime_get();
- if (r)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r)
goto err_get_dispc;
@@ -244,8 +240,6 @@ err_dsi_pll_init:
err_get_dsi:
dispc_runtime_put();
err_get_dispc:
- dss_runtime_put();
-err_get_dss:
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
err_reg_enable:
@@ -266,7 +260,6 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
}
dispc_runtime_put();
- dss_runtime_put();
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -283,21 +276,15 @@ void dpi_set_timings(struct omap_dss_device *dssdev,
DSSDBG("dpi_set_timings\n");
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- r = dss_runtime_get();
- if (r)
- return;
-
r = dispc_runtime_get();
- if (r) {
- dss_runtime_put();
+ if (r)
return;
- }
dpi_set_mode(dssdev);
- dispc_mgr_go(dssdev->manager->id);
dispc_runtime_put();
- dss_runtime_put();
+ } else {
+ dss_mgr_set_timings(dssdev->manager, timings);
}
}
EXPORT_SYMBOL(dpi_set_timings);
@@ -312,7 +299,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
unsigned long pck;
struct dispc_clock_info dispc_cinfo;
- if (!dispc_lcd_timings_ok(timings))
+ if (dss_mgr_check_timings(dssdev->manager, timings))
return -EINVAL;
if (timings->pixel_clock == 0)
@@ -352,7 +339,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(dpi_check_timings);
-int dpi_init_display(struct omap_dss_device *dssdev)
+static int __init dpi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -378,12 +365,58 @@ int dpi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-int dpi_init(void)
+static void __init dpi_probe_pdata(struct platform_device *pdev)
{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int i, r;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
+ continue;
+
+ r = dpi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
+static int __init omap_dpi_probe(struct platform_device *pdev)
+{
+ dpi_probe_pdata(pdev);
+
+ return 0;
+}
+
+static int __exit omap_dpi_remove(struct platform_device *pdev)
+{
+ omap_dss_unregister_child_devices(&pdev->dev);
+
return 0;
}
-void dpi_exit(void)
+static struct platform_driver omap_dpi_driver = {
+ .remove = __exit_p(omap_dpi_remove),
+ .driver = {
+ .name = "omapdss_dpi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init dpi_init_platform_driver(void)
{
+ return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
}
+void __exit dpi_uninit_platform_driver(void)
+{
+ platform_driver_unregister(&omap_dpi_driver);
+}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 210a3c4f6150..14ce8cc079e3 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -256,14 +256,13 @@ struct dsi_data {
struct platform_device *pdev;
void __iomem *base;
+ int module_id;
+
int irq;
struct clk *dss_clk;
struct clk *sys_clk;
- int (*enable_pads)(int dsi_id, unsigned lane_mask);
- void (*disable_pads)(int dsi_id, unsigned lane_mask);
-
struct dsi_clock_info current_cinfo;
bool vdds_dsi_enabled;
@@ -361,11 +360,6 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
return dsi_pdev_map[module];
}
-static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
-{
- return dsidev->id;
-}
-
static inline void dsi_write_reg(struct platform_device *dsidev,
const struct dsi_reg idx, u32 val)
{
@@ -452,6 +446,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
return 16;
default:
BUG();
+ return 0;
}
}
@@ -1080,7 +1075,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
DSSDBG("dsi_runtime_put\n");
r = pm_runtime_put_sync(&dsi->pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
@@ -1184,10 +1179,9 @@ static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
{
unsigned long r;
- int dsi_module = dsi_get_dsidev_id(dsidev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
+ if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
r = clk_get_rate(dsi->dss_clk);
} else {
@@ -1279,10 +1273,9 @@ static int dsi_pll_power(struct platform_device *dsidev,
}
/* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
+static int dsi_calc_clock_rates(struct platform_device *dsidev,
struct dsi_clock_info *cinfo)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
@@ -1297,21 +1290,8 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
if (cinfo->regm_dsi > dsi->regm_dsi_max)
return -EINVAL;
- if (cinfo->use_sys_clk) {
- cinfo->clkin = clk_get_rate(dsi->sys_clk);
- /* XXX it is unclear if highfreq should be used
- * with DSS_SYS_CLK source also */
- cinfo->highfreq = 0;
- } else {
- cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
-
- if (cinfo->clkin < 32000000)
- cinfo->highfreq = 0;
- else
- cinfo->highfreq = 1;
- }
-
- cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
+ cinfo->clkin = clk_get_rate(dsi->sys_clk);
+ cinfo->fint = cinfo->clkin / cinfo->regn;
if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
return -EINVAL;
@@ -1378,27 +1358,21 @@ retry:
memset(&cur, 0, sizeof(cur));
cur.clkin = dss_sys_clk;
- cur.use_sys_clk = 1;
- cur.highfreq = 0;
- /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
- /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
+ /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
- if (cur.highfreq == 0)
- cur.fint = cur.clkin / cur.regn;
- else
- cur.fint = cur.clkin / (2 * cur.regn);
+ cur.fint = cur.clkin / cur.regn;
if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
continue;
- /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
+ /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
unsigned long a, b;
a = 2 * cur.regm * (cur.clkin/1000);
- b = cur.regn * (cur.highfreq + 1);
+ b = cur.regn;
cur.clkin4ddr = a / b * 1000;
if (cur.clkin4ddr > 1800 * 1000 * 1000)
@@ -1486,9 +1460,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
DSSDBGF();
- dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
- dsi->current_cinfo.highfreq = cinfo->highfreq;
-
+ dsi->current_cinfo.clkin = cinfo->clkin;
dsi->current_cinfo.fint = cinfo->fint;
dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
@@ -1503,17 +1475,13 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
DSSDBG("DSI Fint %ld\n", cinfo->fint);
- DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
- cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
- cinfo->clkin,
- cinfo->highfreq);
+ DSSDBG("clkin rate %ld\n", cinfo->clkin);
/* DSIPHY == CLKIN4DDR */
- DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
+ DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
cinfo->regm,
cinfo->regn,
cinfo->clkin,
- cinfo->highfreq + 1,
cinfo->clkin4ddr);
DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
@@ -1568,10 +1536,6 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
- l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
- 11, 11); /* DSI_PLL_CLKSEL */
- l = FLD_MOD(l, cinfo->highfreq,
- 12, 12); /* DSI_PLL_HIGHFREQ */
l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
@@ -1716,7 +1680,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dsi_clock_info *cinfo = &dsi->current_cinfo;
enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
- int dsi_module = dsi_get_dsidev_id(dsidev);
+ int dsi_module = dsi->module_id;
dispc_clk_src = dss_get_dispc_clk_source();
dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1726,8 +1690,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
- seq_printf(s, "dsi pll source = %s\n",
- cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
+ seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin);
seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
@@ -1789,7 +1752,6 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
struct dsi_irq_stats stats;
- int dsi_module = dsi_get_dsidev_id(dsidev);
spin_lock_irqsave(&dsi->irq_stats_lock, flags);
@@ -1806,7 +1768,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
#define PIS(x) \
seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
- seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
+ seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
PIS(VC1);
PIS(VC2);
@@ -1886,22 +1848,6 @@ static void dsi2_dump_irqs(struct seq_file *s)
dsi_dump_dsidev_irqs(dsidev, s);
}
-
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
- const struct file_operations *debug_fops)
-{
- struct platform_device *dsidev;
-
- dsidev = dsi_get_dsidev_from_id(0);
- if (dsidev)
- debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
- &dsi1_dump_irqs, debug_fops);
-
- dsidev = dsi_get_dsidev_from_id(1);
- if (dsidev)
- debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
- &dsi2_dump_irqs, debug_fops);
-}
#endif
static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
@@ -2002,21 +1948,6 @@ static void dsi2_dump_regs(struct seq_file *s)
dsi_dump_dsidev_regs(dsidev, s);
}
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
- const struct file_operations *debug_fops)
-{
- struct platform_device *dsidev;
-
- dsidev = dsi_get_dsidev_from_id(0);
- if (dsidev)
- debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
- &dsi1_dump_regs, debug_fops);
-
- dsidev = dsi_get_dsidev_from_id(1);
- if (dsidev)
- debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
- &dsi2_dump_regs, debug_fops);
-}
enum dsi_cio_power_state {
DSI_COMPLEXIO_POWER_OFF = 0x0,
DSI_COMPLEXIO_POWER_ON = 0x1,
@@ -2073,6 +2004,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
return 1365 * 3; /* 1365x24 bits */
default:
BUG();
+ return 0;
}
}
@@ -2337,7 +2269,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
DSSDBGF();
- r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+ r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
if (r)
return r;
@@ -2447,7 +2379,7 @@ err_cio_pwr:
dsi_cio_disable_lane_override(dsidev);
err_scp_clk_dom:
dsi_disable_scp_clk(dsidev);
- dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+ dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
return r;
}
@@ -2461,7 +2393,7 @@ static void dsi_cio_uninit(struct omap_dss_device *dssdev)
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
dsi_disable_scp_clk(dsidev);
- dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+ dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
}
static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2485,6 +2417,7 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
BUG();
+ return;
}
v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2517,6 +2450,7 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
BUG();
+ return;
}
v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2658,6 +2592,7 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
return dsi_sync_vc_l4(dsidev, channel);
default:
BUG();
+ return -EINVAL;
}
}
@@ -3226,6 +3161,7 @@ static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
data = reqdata[0] | (reqdata[1] << 8);
} else {
BUG();
+ return -EINVAL;
}
r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
@@ -3340,7 +3276,6 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
goto err;
}
- BUG();
err:
DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
@@ -3735,6 +3670,186 @@ static void dsi_config_blanking_modes(struct omap_dss_device *dssdev)
dsi_write_reg(dsidev, DSI_CTRL, r);
}
+/*
+ * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
+ * results in maximum transition time for data and clock lanes to enter and
+ * exit HS mode. Hence, this is the scenario where the least amount of command
+ * mode data can be interleaved. We program the minimum number of TXBYTECLKHS
+ * clock cycles that can be used to interleave command mode data in HS so that
+ * all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
+ int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
+{
+ int transition;
+
+ /*
+ * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
+	 * time of data lanes only; if it isn't set, we need to consider HS
+ * transition time of both data and clock lanes. HS transition time
+ * of Scenario 3 is considered.
+ */
+ if (ddr_alwon) {
+ transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
+ } else {
+ int trans1, trans2;
+ trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
+ trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
+ enter_hs + 1;
+ transition = max(trans1, trans2);
+ }
+
+ return blank > transition ? blank - transition : 0;
+}
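+
+/*
+ * Worked example with illustrative values (not taken from any particular
+ * panel): for an HS blanking period of 100 TXBYTECLKHS, DDR_CLK_ALWAYS_ON
+ * set, ENTER_HS_MODE_LATENCY = 4 and EXIT_HS_MODE_LATENCY = 6, the
+ * worst-case transition takes 4 + 6 + max(4, 2) + 1 = 15 cycles, leaving
+ * 100 - 15 = 85 TXBYTECLKHS cycles for interleaved command mode data.
+ */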
+
+/*
+ * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
+ * results in maximum transition time for data lanes to enter and exit LP mode.
+ * Hence, this is the scenario where the least amount of command mode data can
+ * be interleaved. We program the minimum number of bytes that can be
+ * interleaved in LP so that all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
+ int lp_clk_div, int tdsi_fclk)
+{
+ int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */
+ int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */
+ int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */
+ int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
+ int lp_inter; /* cmd mode data that can be interleaved, in bytes */
+
+ /* maximum LP transition time according to Scenario 1 */
+ trans_lp = exit_hs + max(enter_hs, 2) + 1;
+
+ /* CLKIN4DDR = 16 * TXBYTECLKHS */
+ tlp_avail = thsbyte_clk * (blank - trans_lp);
+
+ ttxclkesc = tdsi_fclk * lp_clk_div;
+
+ lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
+ 26) / 16;
+
+ return max(lp_inter, 0);
+}
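+
+/*
+ * Worked example with illustrative values: blank = 200 TXBYTECLKHS,
+ * enter_hs = 4, exit_hs = 6, lp_clk_div = 10 and tdsi_fclk = 4 give
+ * trans_lp = 11, tlp_avail = 16 * 189 = 3024 and ttxclkesc = 40, so with
+ * integer division lp_inter = ((3024 - 128 - 20) / 40 - 26) / 16 = 2,
+ * i.e. 2 bytes of command mode data can be interleaved in LP.
+ */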
+
+static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int blanking_mode;
+ int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
+ int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
+ int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
+ int tclk_trail, ths_exit, exiths_clk;
+ bool ddr_alwon;
+ struct omap_video_timings *timings = &dssdev->panel.timings;
+ int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+ int ndl = dsi->num_lanes_used - 1;
+ int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
+ int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
+ int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
+ int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
+ int bl_interleave_hs = 0, bl_interleave_lp = 0;
+ u32 r;
+
+ r = dsi_read_reg(dsidev, DSI_CTRL);
+ blanking_mode = FLD_GET(r, 20, 20);
+ hfp_blanking_mode = FLD_GET(r, 21, 21);
+ hbp_blanking_mode = FLD_GET(r, 22, 22);
+ hsa_blanking_mode = FLD_GET(r, 23, 23);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+ hbp = FLD_GET(r, 11, 0);
+ hfp = FLD_GET(r, 23, 12);
+ hsa = FLD_GET(r, 31, 24);
+
+ r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+ ddr_clk_post = FLD_GET(r, 7, 0);
+ ddr_clk_pre = FLD_GET(r, 15, 8);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
+ exit_hs_mode_lat = FLD_GET(r, 15, 0);
+ enter_hs_mode_lat = FLD_GET(r, 31, 16);
+
+ r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
+ lp_clk_div = FLD_GET(r, 12, 0);
+ ddr_alwon = FLD_GET(r, 13, 13);
+
+ r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+ ths_exit = FLD_GET(r, 7, 0);
+
+ r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+ tclk_trail = FLD_GET(r, 15, 8);
+
+ exiths_clk = ths_exit + tclk_trail;
+
+ width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
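+	/*
+	 * bllp covers the blanking regions plus the time needed to transfer
+	 * one line of pixel data: width_bytes plus 6 extra bytes (presumably
+	 * the long packet header and checksum), divided over the data lanes.
+	 */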
+ bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
+
+ if (!hsa_blanking_mode) {
+ hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ exiths_clk, ddr_clk_pre, ddr_clk_post);
+ hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ lp_clk_div, dsi_fclk_hsdiv);
+ }
+
+ if (!hfp_blanking_mode) {
+ hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ exiths_clk, ddr_clk_pre, ddr_clk_post);
+ hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ lp_clk_div, dsi_fclk_hsdiv);
+ }
+
+ if (!hbp_blanking_mode) {
+ hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+ hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ lp_clk_div, dsi_fclk_hsdiv);
+ }
+
+ if (!blanking_mode) {
+ bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+ bl_interleave_lp = dsi_compute_interleave_lp(bllp,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ lp_clk_div, dsi_fclk_hsdiv);
+ }
+
+ DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+ hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
+ bl_interleave_hs);
+
+ DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+ hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
+ bl_interleave_lp);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
+ r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
+ r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
+ r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
+ dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
+ r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
+ r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
+ r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
+ dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
+	r = FLD_MOD(r, bl_interleave_hs, 31, 16);
+	r = FLD_MOD(r, bl_interleave_lp, 15, 0);
+ dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
+}
+
static int dsi_proto_config(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3769,6 +3884,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
break;
default:
BUG();
+ return -EINVAL;
}
r = dsi_read_reg(dsidev, DSI_CTRL);
@@ -3793,6 +3909,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_config_vp_sync_events(dssdev);
dsi_config_blanking_modes(dssdev);
+ dsi_config_cmd_mode_interleaving(dssdev);
}
dsi_vc_initial_config(dsidev, 0);
@@ -4008,6 +4125,7 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
break;
default:
BUG();
+ return -EINVAL;
};
dsi_if_enable(dsidev, false);
@@ -4192,10 +4310,6 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
__cancel_delayed_work(&dsi->framedone_timeout_work);
dsi_handle_framedone(dsidev, 0);
-
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
- dispc_fake_vsync_irq();
-#endif
}
int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
@@ -4259,13 +4373,12 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
dispc_mgr_enable_stallmode(dssdev->manager->id, true);
dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
- dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings);
+ dss_mgr_set_timings(dssdev->manager, &timings);
} else {
dispc_mgr_enable_stallmode(dssdev->manager->id, false);
dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
- dispc_mgr_set_lcd_timings(dssdev->manager->id,
- &dssdev->panel.timings);
+ dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
}
dispc_mgr_set_lcd_display_type(dssdev->manager->id,
@@ -4294,13 +4407,11 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
struct dsi_clock_info cinfo;
int r;
- /* we always use DSS_CLK_SYSCK as input clock */
- cinfo.use_sys_clk = true;
cinfo.regn = dssdev->clocks.dsi.regn;
cinfo.regm = dssdev->clocks.dsi.regm;
cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
- r = dsi_calc_clock_rates(dssdev, &cinfo);
+ r = dsi_calc_clock_rates(dsidev, &cinfo);
if (r) {
DSSERR("Failed to calc dsi clocks\n");
return r;
@@ -4345,7 +4456,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- int dsi_module = dsi_get_dsidev_id(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
r = dsi_pll_init(dsidev, true, true);
@@ -4357,7 +4468,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
goto err1;
dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
- dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
+ dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
dss_select_lcd_clk_source(dssdev->manager->id,
dssdev->clocks.dispc.channel.lcd_clk_src);
@@ -4396,7 +4507,7 @@ err3:
dsi_cio_uninit(dssdev);
err2:
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
- dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
err1:
@@ -4410,7 +4521,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int dsi_module = dsi_get_dsidev_id(dsidev);
if (enter_ulps && !dsi->ulps_enabled)
dsi_enter_ulps(dsidev);
@@ -4423,7 +4533,7 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dsi_vc_enable(dsidev, 3, 0);
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
- dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
dsi_cio_uninit(dssdev);
dsi_pll_uninit(dsidev, disconnect_lanes);
@@ -4527,7 +4637,7 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
}
EXPORT_SYMBOL(omapdss_dsi_enable_te);
-int dsi_init_display(struct omap_dss_device *dssdev)
+static int __init dsi_init_display(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4680,13 +4790,39 @@ static void dsi_put_clocks(struct platform_device *dsidev)
clk_put(dsi->sys_clk);
}
+static void __init dsi_probe_pdata(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct omap_dss_board_info *pdata = dsidev->dev.platform_data;
+ int i, r;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
+ continue;
+
+ if (dssdev->phy.dsi.module != dsi->module_id)
+ continue;
+
+ r = dsi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &dsidev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
/* DSI1 HW IP initialisation */
-static int omap_dsihw_probe(struct platform_device *dsidev)
+static int __init omap_dsihw_probe(struct platform_device *dsidev)
{
- struct omap_display_platform_data *dss_plat_data;
- struct omap_dss_board_info *board_info;
u32 rev;
- int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
+ int r, i;
struct resource *dsi_mem;
struct dsi_data *dsi;
@@ -4694,15 +4830,11 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
if (!dsi)
return -ENOMEM;
+ dsi->module_id = dsidev->id;
dsi->pdev = dsidev;
- dsi_pdev_map[dsi_module] = dsidev;
+ dsi_pdev_map[dsi->module_id] = dsidev;
dev_set_drvdata(&dsidev->dev, dsi);
- dss_plat_data = dsidev->dev.platform_data;
- board_info = dss_plat_data->board_data;
- dsi->enable_pads = board_info->dsi_enable_pads;
- dsi->disable_pads = board_info->dsi_disable_pads;
-
spin_lock_init(&dsi->irq_lock);
spin_lock_init(&dsi->errors_lock);
dsi->errors = 0;
@@ -4780,8 +4912,21 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
else
dsi->num_lanes_supported = 3;
+ dsi_probe_pdata(dsidev);
+
dsi_runtime_put(dsidev);
+ if (dsi->module_id == 0)
+ dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
+ else if (dsi->module_id == 1)
+ dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ if (dsi->module_id == 0)
+ dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
+ else if (dsi->module_id == 1)
+ dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
+#endif
return 0;
err_runtime_get:
@@ -4790,12 +4935,14 @@ err_runtime_get:
return r;
}
-static int omap_dsihw_remove(struct platform_device *dsidev)
+static int __exit omap_dsihw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
WARN_ON(dsi->scp_clk_refcount > 0);
+ omap_dss_unregister_child_devices(&dsidev->dev);
+
pm_runtime_disable(&dsidev->dev);
dsi_put_clocks(dsidev);
@@ -4816,7 +4963,6 @@ static int omap_dsihw_remove(struct platform_device *dsidev)
static int dsi_runtime_suspend(struct device *dev)
{
dispc_runtime_put();
- dss_runtime_put();
return 0;
}
@@ -4825,20 +4971,11 @@ static int dsi_runtime_resume(struct device *dev)
{
int r;
- r = dss_runtime_get();
- if (r)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r)
- goto err_get_dispc;
+ return r;
return 0;
-
-err_get_dispc:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static const struct dev_pm_ops dsi_pm_ops = {
@@ -4847,8 +4984,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
};
static struct platform_driver omap_dsihw_driver = {
- .probe = omap_dsihw_probe,
- .remove = omap_dsihw_remove,
+ .remove = __exit_p(omap_dsihw_remove),
.driver = {
.name = "omapdss_dsi",
.owner = THIS_MODULE,
@@ -4856,12 +4992,12 @@ static struct platform_driver omap_dsihw_driver = {
},
};
-int dsi_init_platform_driver(void)
+int __init dsi_init_platform_driver(void)
{
- return platform_driver_register(&omap_dsihw_driver);
+ return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
}
-void dsi_uninit_platform_driver(void)
+void __exit dsi_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_dsihw_driver);
+ platform_driver_unregister(&omap_dsihw_driver);
}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index bd2d5e159463..d2b57197b292 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -62,6 +62,9 @@ struct dss_reg {
#define REG_FLD_MOD(idx, val, start, end) \
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
+static int dss_runtime_get(void);
+static void dss_runtime_put(void);
+
static struct {
struct platform_device *pdev;
void __iomem *base;
@@ -277,7 +280,7 @@ void dss_dump_clocks(struct seq_file *s)
dss_runtime_put();
}
-void dss_dump_regs(struct seq_file *s)
+static void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
@@ -322,6 +325,7 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
break;
default:
BUG();
+ return;
}
dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
@@ -335,7 +339,7 @@ void dss_select_dsi_clk_source(int dsi_module,
enum omap_dss_clk_source clk_src)
{
struct platform_device *dsidev;
- int b;
+ int b, pos;
switch (clk_src) {
case OMAP_DSS_CLK_SRC_FCK:
@@ -355,9 +359,11 @@ void dss_select_dsi_clk_source(int dsi_module,
break;
default:
BUG();
+ return;
}
- REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
+ pos = dsi_module == 0 ? 1 : 10;
+ REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
dss.dsi_clk_source[dsi_module] = clk_src;
}
@@ -389,6 +395,7 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
break;
default:
BUG();
+ return;
}
pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
@@ -706,7 +713,7 @@ static void dss_put_clocks(void)
clk_put(dss.dss_clk);
}
-int dss_runtime_get(void)
+static int dss_runtime_get(void)
{
int r;
@@ -717,14 +724,14 @@ int dss_runtime_get(void)
return r < 0 ? r : 0;
}
-void dss_runtime_put(void)
+static void dss_runtime_put(void)
{
int r;
DSSDBG("dss_runtime_put\n");
r = pm_runtime_put_sync(&dss.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
}
/* DEBUGFS */
@@ -740,7 +747,7 @@ void dss_debug_dump_clocks(struct seq_file *s)
#endif
/* DSS HW IP initialisation */
-static int omap_dsshw_probe(struct platform_device *pdev)
+static int __init omap_dsshw_probe(struct platform_device *pdev)
{
struct resource *dss_mem;
u32 rev;
@@ -785,40 +792,24 @@ static int omap_dsshw_probe(struct platform_device *pdev)
dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
- r = dpi_init();
- if (r) {
- DSSERR("Failed to initialize DPI\n");
- goto err_dpi;
- }
-
- r = sdi_init();
- if (r) {
- DSSERR("Failed to initialize SDI\n");
- goto err_sdi;
- }
-
rev = dss_read_reg(DSS_REVISION);
printk(KERN_INFO "OMAP DSS rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
dss_runtime_put();
+ dss_debugfs_create_file("dss", dss_dump_regs);
+
return 0;
-err_sdi:
- dpi_exit();
-err_dpi:
- dss_runtime_put();
+
err_runtime_get:
pm_runtime_disable(&pdev->dev);
dss_put_clocks();
return r;
}
-static int omap_dsshw_remove(struct platform_device *pdev)
+static int __exit omap_dsshw_remove(struct platform_device *pdev)
{
- dpi_exit();
- sdi_exit();
-
pm_runtime_disable(&pdev->dev);
dss_put_clocks();
@@ -829,11 +820,24 @@ static int omap_dsshw_remove(struct platform_device *pdev)
static int dss_runtime_suspend(struct device *dev)
{
dss_save_context();
+ dss_set_min_bus_tput(dev, 0);
return 0;
}
static int dss_runtime_resume(struct device *dev)
{
+ int r;
+ /*
+ * Set an arbitrarily high tput request to ensure OPP100.
+ * What we should really do is to make a request to stay in OPP100,
+ * without any tput requirements, but that is not currently possible
+ * via the PM layer.
+ */
+
+ r = dss_set_min_bus_tput(dev, 1000000000);
+ if (r)
+ return r;
+
dss_restore_context();
return 0;
}
@@ -844,8 +848,7 @@ static const struct dev_pm_ops dss_pm_ops = {
};
static struct platform_driver omap_dsshw_driver = {
- .probe = omap_dsshw_probe,
- .remove = omap_dsshw_remove,
+ .remove = __exit_p(omap_dsshw_remove),
.driver = {
.name = "omapdss_dss",
.owner = THIS_MODULE,
@@ -853,12 +856,12 @@ static struct platform_driver omap_dsshw_driver = {
},
};
-int dss_init_platform_driver(void)
+int __init dss_init_platform_driver(void)
{
- return platform_driver_register(&omap_dsshw_driver);
+ return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe);
}
void dss_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_dsshw_driver);
+ platform_driver_unregister(&omap_dsshw_driver);
}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index d4b3dff2ead3..dd1092ceaeef 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -150,9 +150,6 @@ struct dsi_clock_info {
u16 regm_dsi; /* OMAP3: REGM4
* OMAP4: REGM5 */
u16 lp_clk_div;
-
- u8 highfreq;
- bool use_sys_clk;
};
struct seq_file;
@@ -162,6 +159,16 @@ struct platform_device;
struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
+int dss_get_ctx_loss_count(struct device *dev);
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+ struct device *parent, int disp_num);
+void omap_dss_unregister_device(struct omap_dss_device *dssdev);
+void omap_dss_unregister_child_devices(struct device *parent);
/* apply */
void dss_apply_init(void);
@@ -179,6 +186,9 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
int dss_mgr_set_device(struct omap_overlay_manager *mgr,
struct omap_dss_device *dssdev);
int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings);
+const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
bool dss_ovl_is_enabled(struct omap_overlay *ovl);
int dss_ovl_enable(struct omap_overlay *ovl);
@@ -208,9 +218,11 @@ int dss_init_overlay_managers(struct platform_device *pdev);
void dss_uninit_overlay_managers(struct platform_device *pdev);
int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
const struct omap_overlay_manager_info *info);
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings);
int dss_mgr_check(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev,
struct omap_overlay_manager_info *info,
+ const struct omap_video_timings *mgr_timings,
struct omap_overlay_info **overlay_infos);
/* overlay */
@@ -220,22 +232,18 @@ void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
int dss_ovl_simple_check(struct omap_overlay *ovl,
const struct omap_overlay_info *info);
-int dss_ovl_check(struct omap_overlay *ovl,
- struct omap_overlay_info *info, struct omap_dss_device *dssdev);
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+ const struct omap_video_timings *mgr_timings);
/* DSS */
-int dss_init_platform_driver(void);
+int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
-int dss_runtime_get(void);
-void dss_runtime_put(void);
-
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
-void dss_dump_regs(struct seq_file *s);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
void dss_debug_dump_clocks(struct seq_file *s);
#endif
@@ -265,19 +273,8 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
struct dispc_clock_info *dispc_cinfo);
/* SDI */
-#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init(void);
-void sdi_exit(void);
-int sdi_init_display(struct omap_dss_device *display);
-#else
-static inline int sdi_init(void)
-{
- return 0;
-}
-static inline void sdi_exit(void)
-{
-}
-#endif
+int sdi_init_platform_driver(void) __init;
+void sdi_uninit_platform_driver(void) __exit;
/* DSI */
#ifdef CONFIG_OMAP2_DSS_DSI
@@ -285,19 +282,14 @@ static inline void sdi_exit(void)
struct dentry;
struct file_operations;
-int dsi_init_platform_driver(void);
-void dsi_uninit_platform_driver(void);
+int dsi_init_platform_driver(void) __init;
+void dsi_uninit_platform_driver(void) __exit;
int dsi_runtime_get(struct platform_device *dsidev);
void dsi_runtime_put(struct platform_device *dsidev);
void dsi_dump_clocks(struct seq_file *s);
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
- const struct file_operations *debug_fops);
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
- const struct file_operations *debug_fops);
-int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
@@ -314,13 +306,6 @@ void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
struct platform_device *dsi_get_dsidev_from_id(int module);
#else
-static inline int dsi_init_platform_driver(void)
-{
- return 0;
-}
-static inline void dsi_uninit_platform_driver(void)
-{
-}
static inline int dsi_runtime_get(struct platform_device *dsidev)
{
return 0;
@@ -377,28 +362,14 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module)
#endif
/* DPI */
-#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init(void);
-void dpi_exit(void);
-int dpi_init_display(struct omap_dss_device *dssdev);
-#else
-static inline int dpi_init(void)
-{
- return 0;
-}
-static inline void dpi_exit(void)
-{
-}
-#endif
+int dpi_init_platform_driver(void) __init;
+void dpi_uninit_platform_driver(void) __exit;
/* DISPC */
-int dispc_init_platform_driver(void);
-void dispc_uninit_platform_driver(void);
+int dispc_init_platform_driver(void) __init;
+void dispc_uninit_platform_driver(void) __exit;
void dispc_dump_clocks(struct seq_file *s);
-void dispc_dump_irqs(struct seq_file *s);
-void dispc_dump_regs(struct seq_file *s);
void dispc_irq_handler(void);
-void dispc_fake_vsync_irq(void);
int dispc_runtime_get(void);
void dispc_runtime_put(void);
@@ -409,12 +380,12 @@ void dispc_disable_sidle(void);
void dispc_lcd_enable_signal_polarity(bool act_high);
void dispc_lcd_enable_signal(bool enable);
void dispc_pck_free_enable(bool enable);
-void dispc_set_digit_size(u16 width, u16 height);
void dispc_enable_fifomerge(bool enable);
void dispc_enable_gamma_table(bool enable);
void dispc_set_loadmode(enum omap_dss_load_mode mode);
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+ const struct omap_video_timings *timings);
unsigned long dispc_fclk_rate(void);
void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
struct dispc_clock_info *cinfo);
@@ -424,15 +395,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
- u32 *fifo_low, u32 *fifo_high, bool use_fifomerge);
+ u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+ bool manual_update);
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, bool replication);
+ bool ilace, bool replication,
+ const struct omap_video_timings *mgr_timings);
int dispc_ovl_enable(enum omap_plane plane, bool enable);
void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel);
void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
bool dispc_mgr_go_busy(enum omap_channel channel);
@@ -445,12 +417,13 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
enum omap_lcd_display_type type);
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
struct omap_video_timings *timings);
void dispc_mgr_set_pol_freq(enum omap_channel channel,
enum omap_panel_config config, u8 acbi, u8 acb);
unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
+unsigned long dispc_core_clk_rate(void);
int dispc_mgr_set_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(enum omap_channel channel,
@@ -460,19 +433,10 @@ void dispc_mgr_setup(enum omap_channel channel,
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
-int venc_init_platform_driver(void);
-void venc_uninit_platform_driver(void);
-void venc_dump_regs(struct seq_file *s);
-int venc_init_display(struct omap_dss_device *display);
+int venc_init_platform_driver(void) __init;
+void venc_uninit_platform_driver(void) __exit;
unsigned long venc_get_pixel_clock(void);
#else
-static inline int venc_init_platform_driver(void)
-{
- return 0;
-}
-static inline void venc_uninit_platform_driver(void)
-{
-}
static inline unsigned long venc_get_pixel_clock(void)
{
WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
@@ -482,23 +446,10 @@ static inline unsigned long venc_get_pixel_clock(void)
/* HDMI */
#ifdef CONFIG_OMAP4_DSS_HDMI
-int hdmi_init_platform_driver(void);
-void hdmi_uninit_platform_driver(void);
-int hdmi_init_display(struct omap_dss_device *dssdev);
+int hdmi_init_platform_driver(void) __init;
+void hdmi_uninit_platform_driver(void) __exit;
unsigned long hdmi_get_pixel_clock(void);
-void hdmi_dump_regs(struct seq_file *s);
#else
-static inline int hdmi_init_display(struct omap_dss_device *dssdev)
-{
- return 0;
-}
-static inline int hdmi_init_platform_driver(void)
-{
- return 0;
-}
-static inline void hdmi_uninit_platform_driver(void)
-{
-}
static inline unsigned long hdmi_get_pixel_clock(void)
{
WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
@@ -514,22 +465,18 @@ int omapdss_hdmi_read_edid(u8 *buf, int len);
bool omapdss_hdmi_detect(void);
int hdmi_panel_init(void);
void hdmi_panel_exit(void);
+#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO
+int hdmi_audio_enable(void);
+void hdmi_audio_disable(void);
+int hdmi_audio_start(void);
+void hdmi_audio_stop(void);
+bool hdmi_mode_has_audio(void);
+int hdmi_audio_config(struct omap_dss_audio *audio);
+#endif
/* RFBI */
-#ifdef CONFIG_OMAP2_DSS_RFBI
-int rfbi_init_platform_driver(void);
-void rfbi_uninit_platform_driver(void);
-void rfbi_dump_regs(struct seq_file *s);
-int rfbi_init_display(struct omap_dss_device *display);
-#else
-static inline int rfbi_init_platform_driver(void)
-{
- return 0;
-}
-static inline void rfbi_uninit_platform_driver(void)
-{
-}
-#endif
+int rfbi_init_platform_driver(void) __init;
+void rfbi_uninit_platform_driver(void) __exit;
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index ce14aa6dd672..938709724f0c 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -52,6 +52,8 @@ struct omap_dss_features {
const char * const *clksrc_names;
const struct dss_param_range *dss_params;
+ const enum omap_dss_rotation_type supported_rotation_types;
+
const u32 buffer_size_unit;
const u32 burst_size_unit;
};
@@ -311,6 +313,8 @@ static const struct dss_param_range omap2_dss_param_range[] = {
 * scaler cannot scale an image with width more than 768.
*/
[FEAT_PARAM_LINEWIDTH] = { 1, 768 },
+ [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
+ [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap3_dss_param_range[] = {
@@ -324,6 +328,8 @@ static const struct dss_param_range omap3_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
+ [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
+ [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap4_dss_param_range[] = {
@@ -337,6 +343,8 @@ static const struct dss_param_range omap4_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
+ [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
+ [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -399,6 +407,7 @@ static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
FEAT_FIR_COEF_V,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
};
static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
@@ -416,6 +425,7 @@ static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
FEAT_FIR_COEF_V,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
};
static const enum dss_feat_id omap4_dss_feat_list[] = {
@@ -434,6 +444,7 @@ static const enum dss_feat_id omap4_dss_feat_list[] = {
FEAT_FIR_COEF_V,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
};
/* OMAP2 DSS Features */
@@ -451,6 +462,7 @@ static const struct omap_dss_features omap2_dss_features = {
.overlay_caps = omap2_dss_overlay_caps,
.clksrc_names = omap2_dss_clk_source_names,
.dss_params = omap2_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -470,6 +482,7 @@ static const struct omap_dss_features omap3430_dss_features = {
.overlay_caps = omap3430_dss_overlay_caps,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -488,6 +501,7 @@ static const struct omap_dss_features omap3630_dss_features = {
.overlay_caps = omap3630_dss_overlay_caps,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -508,6 +522,7 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.overlay_caps = omap4_dss_overlay_caps,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -527,6 +542,7 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
.overlay_caps = omap4_dss_overlay_caps,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -546,6 +562,7 @@ static const struct omap_dss_features omap4_dss_features = {
.overlay_caps = omap4_dss_overlay_caps,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -562,13 +579,17 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
.pll_enable = ti_hdmi_4xxx_pll_enable,
.pll_disable = ti_hdmi_4xxx_pll_disable,
.video_enable = ti_hdmi_4xxx_wp_video_start,
+ .video_disable = ti_hdmi_4xxx_wp_video_stop,
.dump_wrapper = ti_hdmi_4xxx_wp_dump,
.dump_core = ti_hdmi_4xxx_core_dump,
.dump_pll = ti_hdmi_4xxx_pll_dump,
.dump_phy = ti_hdmi_4xxx_phy_dump,
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
.audio_enable = ti_hdmi_4xxx_wp_audio_enable,
+ .audio_disable = ti_hdmi_4xxx_wp_audio_disable,
+ .audio_start = ti_hdmi_4xxx_audio_start,
+ .audio_stop = ti_hdmi_4xxx_audio_stop,
+ .audio_config = ti_hdmi_4xxx_audio_config,
#endif
};
@@ -662,6 +683,11 @@ void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
*end = omap_current_dss_features->reg_fields[id].end;
}
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type)
+{
+ return omap_current_dss_features->supported_rotation_types & rot_type;
+}
+
void dss_features_init(void)
{
if (cpu_is_omap24xx())
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index c332e7ddfce1..bdf469f080e7 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -62,6 +62,7 @@ enum dss_feat_id {
FEAT_FIFO_MERGE,
/* An unknown HW bug causing the normal FIFO thresholds not to work */
FEAT_OMAP3_DSI_FIFO_BUG,
+ FEAT_BURST_2D,
};
/* DSS register field id */
@@ -91,6 +92,8 @@ enum dss_range_param {
FEAT_PARAM_DSIPLL_LPDIV,
FEAT_PARAM_DOWNSCALE,
FEAT_PARAM_LINEWIDTH,
+ FEAT_PARAM_MGR_WIDTH,
+ FEAT_PARAM_MGR_HEIGHT,
};
/* DSS Feature Functions */
@@ -108,6 +111,8 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
u32 dss_feat_get_burst_size_unit(void); /* in bytes */
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
+
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(void);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index c4b4f6950a92..26a2430a7028 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -33,12 +33,6 @@
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <video/omapdss.h>
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include "ti_hdmi_4xxx_ip.h"
-#endif
#include "ti_hdmi.h"
#include "dss.h"
@@ -63,7 +57,6 @@
static struct {
struct mutex lock;
- struct omap_display_platform_data *pdata;
struct platform_device *pdev;
struct hdmi_ip_data ip_data;
@@ -130,25 +123,12 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
- /*
- * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled.
- * This should be removed later.
- */
- r = dss_runtime_get();
- if (r < 0)
- goto err_get_dss;
-
r = pm_runtime_get_sync(&hdmi.pdev->dev);
WARN_ON(r < 0);
if (r < 0)
- goto err_get_hdmi;
+ return r;
return 0;
-
-err_get_hdmi:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static void hdmi_runtime_put(void)
@@ -158,16 +138,10 @@ static void hdmi_runtime_put(void)
DSSDBG("hdmi_runtime_put\n");
r = pm_runtime_put_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0);
-
- /*
- * HACK: This is added to complement the dss_runtime_get() call in
- * hdmi_runtime_get(). This should be removed later.
- */
- dss_runtime_put();
+ WARN_ON(r < 0 && r != -ENOSYS);
}
-int hdmi_init_display(struct omap_dss_device *dssdev)
+static int __init hdmi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -344,7 +318,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
- hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
/* config the PLL and PHY hdmi_set_pll_pwrfirst */
r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
@@ -376,10 +350,11 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
dispc_enable_gamma_table(0);
/* tv size */
- dispc_set_digit_size(dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
+ dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
- hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1);
+ r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
+ if (r)
+ goto err_vid_enable;
r = dss_mgr_enable(dssdev->manager);
if (r)
@@ -388,7 +363,8 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
return 0;
err_mgr_enable:
- hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
+err_vid_enable:
hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
err:
@@ -400,7 +376,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
{
dss_mgr_disable(dssdev->manager);
- hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
hdmi_runtime_put();
@@ -436,10 +412,12 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
r = hdmi_power_on(dssdev);
if (r)
DSSERR("failed to power on device\n");
+ } else {
+ dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
}
}
-void hdmi_dump_regs(struct seq_file *s)
+static void hdmi_dump_regs(struct seq_file *s)
{
mutex_lock(&hdmi.lock);
@@ -555,248 +533,201 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-
-static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
+static int hdmi_get_clocks(struct platform_device *pdev)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_codec *codec = rtd->codec;
- struct platform_device *pdev = to_platform_device(codec->dev);
- struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
- int err = 0;
+ struct clk *clk;
- if (!(ip_data->ops) && !(ip_data->ops->audio_enable)) {
- dev_err(&pdev->dev, "Cannot enable/disable audio\n");
- return -ENODEV;
+ clk = clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
}
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ip_data->ops->audio_enable(ip_data, true);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- ip_data->ops->audio_enable(ip_data, false);
- break;
- default:
- err = -EINVAL;
- }
- return err;
-}
-
-static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_codec *codec = rtd->codec;
- struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
- struct hdmi_audio_format audio_format;
- struct hdmi_audio_dma audio_dma;
- struct hdmi_core_audio_config core_cfg;
- struct hdmi_core_infoframe_audio aud_if_cfg;
- int err, n, cts;
- enum hdmi_core_audio_sample_freq sample_freq;
-
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- core_cfg.i2s_cfg.word_max_length =
- HDMI_AUDIO_I2S_MAX_WORD_20BITS;
- core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
- core_cfg.i2s_cfg.in_length_bits =
- HDMI_AUDIO_I2S_INPUT_LENGTH_16;
- core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
- audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
- audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
- audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
- audio_dma.transfer_size = 0x10;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- core_cfg.i2s_cfg.word_max_length =
- HDMI_AUDIO_I2S_MAX_WORD_24BITS;
- core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
- core_cfg.i2s_cfg.in_length_bits =
- HDMI_AUDIO_I2S_INPUT_LENGTH_24;
- audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
- audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
- audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
- core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
- audio_dma.transfer_size = 0x20;
- break;
- default:
+ hdmi.sys_clk = clk;
+
+ return 0;
+}
+
+static void hdmi_put_clocks(void)
+{
+ if (hdmi.sys_clk)
+ clk_put(hdmi.sys_clk);
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
+{
+ u32 deep_color;
+ bool deep_color_correct = false;
+ u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
+
+ if (n == NULL || cts == NULL)
return -EINVAL;
- }
- switch (params_rate(params)) {
+ /* TODO: When implemented, query deep color mode here. */
+ deep_color = 100;
+
+ /*
+ * When using deep color, the default N value (as in the HDMI
+	 * specification) yields a non-integer CTS. Hence, we
+ * modify it while keeping the restrictions described in
+ * section 7.2.1 of the HDMI 1.4a specification.
+ */
+ switch (sample_freq) {
case 32000:
- sample_freq = HDMI_AUDIO_FS_32000;
+ case 48000:
+ case 96000:
+ case 192000:
+ if (deep_color == 125)
+ if (pclk == 27027 || pclk == 74250)
+ deep_color_correct = true;
+ if (deep_color == 150)
+ if (pclk == 27027)
+ deep_color_correct = true;
break;
case 44100:
- sample_freq = HDMI_AUDIO_FS_44100;
- break;
- case 48000:
- sample_freq = HDMI_AUDIO_FS_48000;
+ case 88200:
+ case 176400:
+ if (deep_color == 125)
+ if (pclk == 27027)
+ deep_color_correct = true;
break;
default:
return -EINVAL;
}
- err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts);
- if (err < 0)
- return err;
-
- /* Audio wrapper config */
- audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
- audio_format.active_chnnls_msk = 0x03;
- audio_format.type = HDMI_AUDIO_TYPE_LPCM;
- audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
- /* Disable start/stop signals of IEC 60958 blocks */
- audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
+ if (deep_color_correct) {
+ switch (sample_freq) {
+ case 32000:
+ *n = 8192;
+ break;
+ case 44100:
+ *n = 12544;
+ break;
+ case 48000:
+ *n = 8192;
+ break;
+ case 88200:
+ *n = 25088;
+ break;
+ case 96000:
+ *n = 16384;
+ break;
+ case 176400:
+ *n = 50176;
+ break;
+ case 192000:
+ *n = 32768;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (sample_freq) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ case 88200:
+ *n = 12544;
+ break;
+ case 96000:
+ *n = 12288;
+ break;
+ case 176400:
+ *n = 25088;
+ break;
+ case 192000:
+ *n = 24576;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+ *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
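+	/*
+	 * Example: for 48 kHz audio, no deep color (deep_color == 100) and a
+	 * 74.25 MHz pixel clock, N = 6144 and
+	 * CTS = 74250 * (6144 / 128) * 100 / (48000 / 10) = 74250, matching
+	 * the value recommended by the HDMI specification.
+	 */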
- audio_dma.block_size = 0xC0;
- audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
- audio_dma.fifo_threshold = 0x20; /* in number of samples */
+ return 0;
+}
- hdmi_wp_audio_config_dma(ip_data, &audio_dma);
- hdmi_wp_audio_config_format(ip_data, &audio_format);
+int hdmi_audio_enable(void)
+{
+ DSSDBG("audio_enable\n");
- /*
- * I2S config
- */
- core_cfg.i2s_cfg.en_high_bitrate_aud = false;
- /* Only used with high bitrate audio */
- core_cfg.i2s_cfg.cbit_order = false;
- /* Serial data and word select should change on sck rising edge */
- core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
- core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
- /* Set I2S word select polarity */
- core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
- core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
- /* Set serial data to word select shift. See Phillips spec. */
- core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
- /* Enable one of the four available serial data channels */
- core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
-
- /* Core audio config */
- core_cfg.freq_sample = sample_freq;
- core_cfg.n = n;
- core_cfg.cts = cts;
- if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
- core_cfg.aud_par_busclk = 0;
- core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
- core_cfg.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
- } else {
- core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
- core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
- core_cfg.use_mclk = true;
- }
+ return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
+}
- if (core_cfg.use_mclk)
- core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS;
- core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
- core_cfg.en_spdif = false;
- /* Use sample frequency from channel status word */
- core_cfg.fs_override = true;
- /* Enable ACR packets */
- core_cfg.en_acr_pkt = true;
- /* Disable direct streaming digital audio */
- core_cfg.en_dsd_audio = false;
- /* Use parallel audio interface */
- core_cfg.en_parallel_aud_input = true;
-
- hdmi_core_audio_config(ip_data, &core_cfg);
+void hdmi_audio_disable(void)
+{
+ DSSDBG("audio_disable\n");
- /*
- * Configure packet
- * info frame audio see doc CEA861-D page 74
- */
- aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
- aud_if_cfg.db1_channel_count = 2;
- aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
- aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
- aud_if_cfg.db4_channel_alloc = 0x00;
- aud_if_cfg.db5_downmix_inh = false;
- aud_if_cfg.db5_lsv = 0;
-
- hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg);
- return 0;
+ hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
}
-static int hdmi_audio_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+int hdmi_audio_start(void)
{
- if (!hdmi.ip_data.cfg.cm.mode) {
- pr_err("Current video settings do not support audio.\n");
- return -EIO;
- }
- return 0;
+ DSSDBG("audio_start\n");
+
+ return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
}
-static int hdmi_audio_codec_probe(struct snd_soc_codec *codec)
+void hdmi_audio_stop(void)
{
- struct hdmi_ip_data *priv = &hdmi.ip_data;
+ DSSDBG("audio_stop\n");
- snd_soc_codec_set_drvdata(codec, priv);
- return 0;
+ hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
}
-static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
- .probe = hdmi_audio_codec_probe,
-};
+bool hdmi_mode_has_audio(void)
+{
+	return hdmi.ip_data.cfg.cm.mode == HDMI_HDMI;
+}
-static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
- .hw_params = hdmi_audio_hw_params,
- .trigger = hdmi_audio_trigger,
- .startup = hdmi_audio_startup,
-};
+int hdmi_audio_config(struct omap_dss_audio *audio)
+{
+ return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
+}
-static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
- .name = "hdmi-audio-codec",
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_32000 |
- SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
- },
- .ops = &hdmi_audio_codec_ops,
-};
#endif
-static int hdmi_get_clocks(struct platform_device *pdev)
+static void __init hdmi_probe_pdata(struct platform_device *pdev)
{
- struct clk *clk;
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int r, i;
- clk = clk_get(&pdev->dev, "sys_clk");
- if (IS_ERR(clk)) {
- DSSERR("can't get sys_clk\n");
- return PTR_ERR(clk);
- }
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
- hdmi.sys_clk = clk;
+ if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
+ continue;
- return 0;
-}
+ r = hdmi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
-static void hdmi_put_clocks(void)
-{
- if (hdmi.sys_clk)
- clk_put(hdmi.sys_clk);
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
}
/* HDMI HW IP initialisation */
-static int omapdss_hdmihw_probe(struct platform_device *pdev)
+static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
{
struct resource *hdmi_mem;
int r;
- hdmi.pdata = pdev->dev.platform_data;
hdmi.pdev = pdev;
mutex_init(&hdmi.lock);
@@ -830,28 +761,18 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi_panel_init();
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+ dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+
+ hdmi_probe_pdata(pdev);
- /* Register ASoC codec DAI */
- r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
- &hdmi_codec_dai_drv, 1);
- if (r) {
- DSSERR("can't register ASoC HDMI audio codec\n");
- return r;
- }
-#endif
return 0;
}
-static int omapdss_hdmihw_remove(struct platform_device *pdev)
+static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
{
- hdmi_panel_exit();
+ omap_dss_unregister_child_devices(&pdev->dev);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- snd_soc_unregister_codec(&pdev->dev);
-#endif
+ hdmi_panel_exit();
pm_runtime_disable(&pdev->dev);
@@ -867,7 +788,6 @@ static int hdmi_runtime_suspend(struct device *dev)
clk_disable(hdmi.sys_clk);
dispc_runtime_put();
- dss_runtime_put();
return 0;
}
@@ -876,23 +796,13 @@ static int hdmi_runtime_resume(struct device *dev)
{
int r;
- r = dss_runtime_get();
- if (r < 0)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r < 0)
- goto err_get_dispc;
-
+ return r;
clk_enable(hdmi.sys_clk);
return 0;
-
-err_get_dispc:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static const struct dev_pm_ops hdmi_pm_ops = {
@@ -901,8 +811,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
};
static struct platform_driver omapdss_hdmihw_driver = {
- .probe = omapdss_hdmihw_probe,
- .remove = omapdss_hdmihw_remove,
+ .remove = __exit_p(omapdss_hdmihw_remove),
.driver = {
.name = "omapdss_hdmi",
.owner = THIS_MODULE,
@@ -910,12 +819,12 @@ static struct platform_driver omapdss_hdmihw_driver = {
},
};
-int hdmi_init_platform_driver(void)
+int __init hdmi_init_platform_driver(void)
{
- return platform_driver_register(&omapdss_hdmihw_driver);
+ return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
}
-void hdmi_uninit_platform_driver(void)
+void __exit hdmi_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omapdss_hdmihw_driver);
+ platform_driver_unregister(&omapdss_hdmihw_driver);
}
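
The hdmi.c hunks above drop the in-driver ASoC codec and expose a small audio API (hdmi_audio_config/enable/start/stop, hdmi_mode_has_audio) that simply delegates to the IP-specific ops. Below is a minimal sketch, not part of the patch, of the call order a user of that API is expected to follow; it only uses the wrappers introduced above and mirrors the state machine added to hdmi_panel.c further down, with error handling kept to the essentials.

/* Illustrative sketch only: expected ordering of the new audio wrappers. */
static int hdmi_audio_playback_sketch(struct omap_dss_audio *audio)
{
	int r;

	if (!hdmi_mode_has_audio())	/* audio is only carried in HDMI mode, not DVI */
		return -EPERM;

	r = hdmi_audio_config(audio);	/* ACR, I2S and infoframe setup */
	if (r)
		return r;

	r = hdmi_audio_enable();	/* enable the wrapper audio path */
	if (r)
		return r;

	r = hdmi_audio_start();		/* start transmitting samples */
	if (r) {
		hdmi_audio_disable();
		return r;
	}

	/* ... stream plays; when it ends: */
	hdmi_audio_stop();
	hdmi_audio_disable();

	return 0;
}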
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 533d5dc634d2..1179e3c4b1c7 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -30,7 +30,12 @@
#include "dss.h"
static struct {
- struct mutex hdmi_lock;
+ /* This protects the panel ops, mainly when accessing the HDMI IP. */
+ struct mutex lock;
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+ /* This protects the audio ops, specifically. */
+ spinlock_t audio_lock;
+#endif
} hdmi;
@@ -54,12 +59,168 @@ static void hdmi_panel_remove(struct omap_dss_device *dssdev)
}
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&hdmi.lock);
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+ /* enable audio only if the display is active and supports audio */
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+ !hdmi_mode_has_audio()) {
+ DSSERR("audio not supported or display is off\n");
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_audio_enable();
+
+ if (!r)
+ dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+err:
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+ hdmi_audio_disable();
+
+ dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
+
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+ /*
+	 * No need to check the panel state. It was checked when transitioning
+ * to AUDIO_ENABLED.
+ */
+ if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) {
+ DSSERR("audio start from invalid state\n");
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_audio_start();
+
+ if (!r)
+ dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
+
+err:
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+ return r;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+ hdmi_audio_stop();
+ dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+ bool r = false;
+
+ mutex_lock(&hdmi.lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ goto err;
+
+ if (!hdmi_mode_has_audio())
+ goto err;
+
+ r = true;
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&hdmi.lock);
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+ /* config audio only if the display is active and supports audio */
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+ !hdmi_mode_has_audio()) {
+ DSSERR("audio not supported or display is off\n");
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_audio_config(audio);
+
+ if (!r)
+ dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
+
+err:
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+#else
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+ return false;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ return -EPERM;
+}
+#endif
+
static int hdmi_panel_enable(struct omap_dss_device *dssdev)
{
int r = 0;
DSSDBG("ENTER hdmi_panel_enable\n");
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
r = -EINVAL;
@@ -75,40 +236,52 @@ static int hdmi_panel_enable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
static void hdmi_panel_disable(struct omap_dss_device *dssdev)
{
- mutex_lock(&hdmi.hdmi_lock);
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ mutex_lock(&hdmi.lock);
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ /*
+ * TODO: notify audio users that the display was disabled. For
+ * now, disable audio locally to not break our audio state
+ * machine.
+ */
+ hdmi_panel_audio_disable(dssdev);
omapdss_hdmi_display_disable(dssdev);
+ }
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
}
static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
{
int r = 0;
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
r = -EINVAL;
goto err;
}
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ /*
+ * TODO: notify audio users that the display was suspended. For now,
+ * disable audio locally to not break our audio state machine.
+ */
+ hdmi_panel_audio_disable(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
omapdss_hdmi_display_disable(dssdev);
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -117,7 +290,7 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
{
int r = 0;
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
r = -EINVAL;
@@ -129,11 +302,12 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
DSSERR("failed to power on\n");
goto err;
}
+ /* TODO: notify audio users that the panel resumed. */
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -141,11 +315,11 @@ err:
static void hdmi_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
*timings = dssdev->panel.timings;
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
}
static void hdmi_set_timings(struct omap_dss_device *dssdev,
@@ -153,12 +327,18 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev,
{
DSSDBG("hdmi_set_timings\n");
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
+
+ /*
+ * TODO: notify audio users that there was a timings change. For
+ * now, disable audio locally to not break our audio state machine.
+ */
+ hdmi_panel_audio_disable(dssdev);
dssdev->panel.timings = *timings;
omapdss_hdmi_display_set_timing(dssdev);
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
}
static int hdmi_check_timings(struct omap_dss_device *dssdev,
@@ -168,11 +348,11 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
DSSDBG("hdmi_check_timings\n");
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
r = omapdss_hdmi_display_check_timing(dssdev, timings);
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -180,7 +360,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
{
int r;
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
r = omapdss_hdmi_display_enable(dssdev);
@@ -194,7 +374,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
omapdss_hdmi_display_disable(dssdev);
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -203,7 +383,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
{
int r;
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
r = omapdss_hdmi_display_enable(dssdev);
@@ -217,7 +397,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
omapdss_hdmi_display_disable(dssdev);
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -234,6 +414,12 @@ static struct omap_dss_driver hdmi_driver = {
.check_timings = hdmi_check_timings,
.read_edid = hdmi_read_edid,
.detect = hdmi_detect,
+ .audio_enable = hdmi_panel_audio_enable,
+ .audio_disable = hdmi_panel_audio_disable,
+ .audio_start = hdmi_panel_audio_start,
+ .audio_stop = hdmi_panel_audio_stop,
+ .audio_supported = hdmi_panel_audio_supported,
+ .audio_config = hdmi_panel_audio_config,
.driver = {
.name = "hdmi_panel",
.owner = THIS_MODULE,
@@ -242,7 +428,11 @@ static struct omap_dss_driver hdmi_driver = {
int hdmi_panel_init(void)
{
- mutex_init(&hdmi.hdmi_lock);
+ mutex_init(&hdmi.lock);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+ spin_lock_init(&hdmi.audio_lock);
+#endif
omap_dss_register_driver(&hdmi_driver);
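
For quick reference, the audio handlers added above move dssdev->audio_state as summarized in this comment-only sketch (not part of the patch); each handler additionally takes the locks and performs the display-state checks shown in the diff.

/*
 * audio_config()  : any state (display active, HDMI mode) -> OMAP_DSS_AUDIO_CONFIGURED
 * audio_enable()  : any state (display active, HDMI mode) -> OMAP_DSS_AUDIO_ENABLED
 * audio_start()   : OMAP_DSS_AUDIO_ENABLED only           -> OMAP_DSS_AUDIO_PLAYING
 * audio_stop()    : normally from PLAYING                 -> OMAP_DSS_AUDIO_ENABLED
 * audio_disable() : any state                             -> OMAP_DSS_AUDIO_DISABLED
 */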
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index e7364603f6a1..0cbcde4c688a 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -654,9 +654,20 @@ static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
return 0;
}
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings)
+{
+ if (!dispc_mgr_timings_ok(mgr->id, timings)) {
+ DSSERR("check_manager: invalid timings\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int dss_mgr_check(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev,
struct omap_overlay_manager_info *info,
+ const struct omap_video_timings *mgr_timings,
struct omap_overlay_info **overlay_infos)
{
struct omap_overlay *ovl;
@@ -668,6 +679,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
return r;
}
+ r = dss_mgr_check_timings(mgr, mgr_timings);
+ if (r)
+ return r;
+
list_for_each_entry(ovl, &mgr->overlays, list) {
struct omap_overlay_info *oi;
int r;
@@ -677,7 +692,7 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
if (oi == NULL)
continue;
- r = dss_ovl_check(ovl, oi, dssdev);
+ r = dss_ovl_check(ovl, oi, mgr_timings);
if (r)
return r;
}
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 6e821810deec..b0ba60f88dd2 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -628,19 +628,23 @@ int dss_ovl_simple_check(struct omap_overlay *ovl,
return -EINVAL;
}
+ if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
+ DSSERR("check_overlay: rotation type %d not supported\n",
+ info->rotation_type);
+ return -EINVAL;
+ }
+
return 0;
}
-int dss_ovl_check(struct omap_overlay *ovl,
- struct omap_overlay_info *info, struct omap_dss_device *dssdev)
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+ const struct omap_video_timings *mgr_timings)
{
u16 outw, outh;
u16 dw, dh;
- if (dssdev == NULL)
- return 0;
-
- dssdev->driver->get_resolution(dssdev, &dw, &dh);
+ dw = mgr_timings->x_res;
+ dh = mgr_timings->y_res;
if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
outw = info->width;
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 788a0ef6323a..7985fa12b9b4 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -141,7 +141,7 @@ static void rfbi_runtime_put(void)
DSSDBG("rfbi_runtime_put\n");
r = pm_runtime_put_sync(&rfbi.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
void rfbi_bus_lock(void)
@@ -304,13 +304,23 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
u16 height, void (*callback)(void *data), void *data)
{
u32 l;
+ struct omap_video_timings timings = {
+ .hsw = 1,
+ .hfp = 1,
+ .hbp = 1,
+ .vsw = 1,
+ .vfp = 0,
+ .vbp = 0,
+ .x_res = width,
+ .y_res = height,
+ };
/*BUG_ON(callback == 0);*/
BUG_ON(rfbi.framedone_callback != NULL);
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dispc_mgr_set_lcd_size(dssdev->manager->id, width, height);
+ dss_mgr_set_timings(dssdev->manager, &timings);
dispc_mgr_enable(dssdev->manager->id, true);
@@ -766,6 +776,16 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
u16 *x, u16 *y, u16 *w, u16 *h)
{
u16 dw, dh;
+ struct omap_video_timings timings = {
+ .hsw = 1,
+ .hfp = 1,
+ .hbp = 1,
+ .vsw = 1,
+ .vfp = 0,
+ .vbp = 0,
+ .x_res = *w,
+ .y_res = *h,
+ };
dssdev->driver->get_resolution(dssdev, &dw, &dh);
@@ -784,7 +804,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
if (*w == 0 || *h == 0)
return -EINVAL;
- dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
+ dss_mgr_set_timings(dssdev->manager, &timings);
return 0;
}
@@ -799,7 +819,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(omap_rfbi_update);
-void rfbi_dump_regs(struct seq_file *s)
+static void rfbi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
@@ -900,15 +920,39 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_rfbi_display_disable);
-int rfbi_init_display(struct omap_dss_device *dssdev)
+static int __init rfbi_init_display(struct omap_dss_device *dssdev)
{
rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
return 0;
}
+static void __init rfbi_probe_pdata(struct platform_device *pdev)
+{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int i, r;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
+ continue;
+
+ r = rfbi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
/* RFBI HW IP initialisation */
-static int omap_rfbihw_probe(struct platform_device *pdev)
+static int __init omap_rfbihw_probe(struct platform_device *pdev)
{
u32 rev;
struct resource *rfbi_mem;
@@ -956,6 +1000,10 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
rfbi_runtime_put();
+ dss_debugfs_create_file("rfbi", rfbi_dump_regs);
+
+ rfbi_probe_pdata(pdev);
+
return 0;
err_runtime_get:
@@ -963,8 +1011,9 @@ err_runtime_get:
return r;
}
-static int omap_rfbihw_remove(struct platform_device *pdev)
+static int __exit omap_rfbihw_remove(struct platform_device *pdev)
{
+ omap_dss_unregister_child_devices(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -972,7 +1021,6 @@ static int omap_rfbihw_remove(struct platform_device *pdev)
static int rfbi_runtime_suspend(struct device *dev)
{
dispc_runtime_put();
- dss_runtime_put();
return 0;
}
@@ -981,20 +1029,11 @@ static int rfbi_runtime_resume(struct device *dev)
{
int r;
- r = dss_runtime_get();
- if (r < 0)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r < 0)
- goto err_get_dispc;
+ return r;
return 0;
-
-err_get_dispc:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static const struct dev_pm_ops rfbi_pm_ops = {
@@ -1003,8 +1042,7 @@ static const struct dev_pm_ops rfbi_pm_ops = {
};
static struct platform_driver omap_rfbihw_driver = {
- .probe = omap_rfbihw_probe,
- .remove = omap_rfbihw_remove,
+ .remove = __exit_p(omap_rfbihw_remove),
.driver = {
.name = "omapdss_rfbi",
.owner = THIS_MODULE,
@@ -1012,12 +1050,12 @@ static struct platform_driver omap_rfbihw_driver = {
},
};
-int rfbi_init_platform_driver(void)
+int __init rfbi_init_platform_driver(void)
{
- return platform_driver_register(&omap_rfbihw_driver);
+ return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
}
-void rfbi_uninit_platform_driver(void)
+void __exit rfbi_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_rfbihw_driver);
+ platform_driver_unregister(&omap_rfbihw_driver);
}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 8266ca0d666b..3a43dc2a9b46 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/export.h>
+#include <linux/platform_device.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -71,10 +72,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_reg_enable;
- r = dss_runtime_get();
- if (r)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r)
goto err_get_dispc;
@@ -107,7 +104,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
}
- dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+ dss_mgr_set_timings(dssdev->manager, t);
r = dss_set_clock_div(&dss_cinfo);
if (r)
@@ -137,8 +134,6 @@ err_set_dss_clock_div:
err_calc_clock_div:
dispc_runtime_put();
err_get_dispc:
- dss_runtime_put();
-err_get_dss:
regulator_disable(sdi.vdds_sdi_reg);
err_reg_enable:
omap_dss_stop_device(dssdev);
@@ -154,7 +149,6 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
dss_sdi_disable();
dispc_runtime_put();
- dss_runtime_put();
regulator_disable(sdi.vdds_sdi_reg);
@@ -162,7 +156,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_sdi_display_disable);
-int sdi_init_display(struct omap_dss_device *dssdev)
+static int __init sdi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("SDI init\n");
@@ -182,11 +176,58 @@ int sdi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-int sdi_init(void)
+static void __init sdi_probe_pdata(struct platform_device *pdev)
+{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int i, r;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
+ continue;
+
+ r = sdi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
+static int __init omap_sdi_probe(struct platform_device *pdev)
{
+ sdi_probe_pdata(pdev);
+
+ return 0;
+}
+
+static int __exit omap_sdi_remove(struct platform_device *pdev)
+{
+ omap_dss_unregister_child_devices(&pdev->dev);
+
return 0;
}
-void sdi_exit(void)
+static struct platform_driver omap_sdi_driver = {
+ .remove = __exit_p(omap_sdi_remove),
+ .driver = {
+ .name = "omapdss_sdi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init sdi_init_platform_driver(void)
+{
+ return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
+}
+
+void __exit sdi_uninit_platform_driver(void)
{
+ platform_driver_unregister(&omap_sdi_driver);
}
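
sdi.c now follows the same registration pattern applied to the hdmi and rfbi drivers in this patch: the probe routine is __init, handed to platform_driver_probe() instead of being stored in the platform_driver, and remove is wrapped in __exit_p(). A stripped-down sketch of that pattern is shown below; the names are placeholders, not part of the patch.

/* Illustrative sketch: init-time-only probe with platform_driver_probe(). */
static int __init example_probe(struct platform_device *pdev)
{
	/* Runs once during boot; the .init.text it lives in is freed later. */
	return 0;
}

static int __exit example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	/* No .probe: devices can only be bound when example_init() runs. */
	.remove = __exit_p(example_remove),
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

int __init example_init(void)
{
	/* Registers the driver and probes already-registered devices now. */
	return platform_driver_probe(&example_driver, example_probe);
}

void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}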
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index 1f58b84d6901..e734cb444bc7 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -96,7 +96,9 @@ struct ti_hdmi_ip_ops {
void (*pll_disable)(struct hdmi_ip_data *ip_data);
- void (*video_enable)(struct hdmi_ip_data *ip_data, bool start);
+ int (*video_enable)(struct hdmi_ip_data *ip_data);
+
+ void (*video_disable)(struct hdmi_ip_data *ip_data);
void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
@@ -106,9 +108,17 @@ struct ti_hdmi_ip_ops {
void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+ int (*audio_enable)(struct hdmi_ip_data *ip_data);
+
+ void (*audio_disable)(struct hdmi_ip_data *ip_data);
+
+ int (*audio_start)(struct hdmi_ip_data *ip_data);
+
+ void (*audio_stop)(struct hdmi_ip_data *ip_data);
+
+ int (*audio_config)(struct hdmi_ip_data *ip_data,
+ struct omap_dss_audio *audio);
#endif
};
@@ -173,7 +183,8 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start);
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
@@ -181,8 +192,13 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+ struct omap_dss_audio *audio);
#endif
#endif
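
The ops table in ti_hdmi.h now separates video enable/disable and, under CONFIG_OMAP4_DSS_HDMI_AUDIO, adds five audio entry points. The sketch below shows how such a table could be wired to the OMAP4 functions declared above; the real table lives in hdmi.c, which is not part of this hunk, so treat the initializer as illustrative only.

/* Hypothetical wiring of the new ops to the ti_hdmi_4xxx_* implementations. */
static const struct ti_hdmi_ip_ops omap4_hdmi_ops_sketch = {
	.video_enable	= ti_hdmi_4xxx_wp_video_start,
	.video_disable	= ti_hdmi_4xxx_wp_video_stop,
#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
	.audio_enable	= ti_hdmi_4xxx_wp_audio_enable,
	.audio_disable	= ti_hdmi_4xxx_wp_audio_disable,
	.audio_start	= ti_hdmi_4xxx_audio_start,
	.audio_stop	= ti_hdmi_4xxx_audio_stop,
	.audio_config	= ti_hdmi_4xxx_audio_config,
#endif
	/* .cfg, .pll_enable, .phy_enable and the rest stay as before. */
};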
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index bfe6fe65c8be..4dae1b291079 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -29,9 +29,14 @@
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#endif
#include "ti_hdmi_4xxx_ip.h"
#include "dss.h"
+#include "dss_features.h"
static inline void hdmi_write_reg(void __iomem *base_addr,
const u16 idx, u32 val)
@@ -298,9 +303,9 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
- NULL, hpd_irq_handler,
- IRQF_DISABLED | IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING, "hpd", ip_data);
+ NULL, hpd_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, "hpd", ip_data);
if (r) {
DSSERR("HPD IRQ request failed\n");
hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
@@ -699,9 +704,15 @@ static void hdmi_wp_init(struct omap_video_timings *timings,
}
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start)
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
+ return 0;
+}
+
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
{
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31);
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
}
static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
@@ -886,10 +897,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
#define CORE_REG(i, name) name(i)
#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_pll_base(ip_data), r))
-#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
+ hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
+#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(hdmi_av_base(ip_data), r))
+#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
(i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
- hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r)))
+ hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
DUMPCORE(HDMI_CORE_SYS_VND_IDL);
DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
@@ -898,6 +911,13 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPCORE(HDMI_CORE_SYS_SRST);
DUMPCORE(HDMI_CORE_CTRL1);
DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+ DUMPCORE(HDMI_CORE_SYS_DE_DLY);
+ DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
+ DUMPCORE(HDMI_CORE_SYS_DE_TOP);
+ DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
+ DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
+ DUMPCORE(HDMI_CORE_SYS_DE_LINL);
+ DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
DUMPCORE(HDMI_CORE_SYS_VID_MODE);
DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
@@ -907,102 +927,91 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPCORE(HDMI_CORE_SYS_INTR4);
DUMPCORE(HDMI_CORE_SYS_UMASK1);
DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
- DUMPCORE(HDMI_CORE_SYS_DE_DLY);
- DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
- DUMPCORE(HDMI_CORE_SYS_DE_TOP);
- DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
- DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
- DUMPCORE(HDMI_CORE_SYS_DE_LINL);
- DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
- DUMPCORE(HDMI_CORE_DDC_CMD);
- DUMPCORE(HDMI_CORE_DDC_STATUS);
DUMPCORE(HDMI_CORE_DDC_ADDR);
+ DUMPCORE(HDMI_CORE_DDC_SEGM);
DUMPCORE(HDMI_CORE_DDC_OFFSET);
DUMPCORE(HDMI_CORE_DDC_COUNT1);
DUMPCORE(HDMI_CORE_DDC_COUNT2);
+ DUMPCORE(HDMI_CORE_DDC_STATUS);
+ DUMPCORE(HDMI_CORE_DDC_CMD);
DUMPCORE(HDMI_CORE_DDC_DATA);
- DUMPCORE(HDMI_CORE_DDC_SEGM);
- DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
- DUMPCORE(HDMI_CORE_AV_DPD);
- DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
- DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
- DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
- DUMPCORE(HDMI_CORE_AV_AVI_VERS);
- DUMPCORE(HDMI_CORE_AV_AVI_LEN);
- DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
+ DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
+ DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
+ DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
+ DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
+ DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
+ DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
+ DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
+ DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
+ DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
+ DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
+ DUMPCOREAV(HDMI_CORE_AV_ASRC);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
+ DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
+ DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
+ DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
+ DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
+ DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
+ DUMPCOREAV(HDMI_CORE_AV_DPD);
+ DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
+ DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
+ DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
+ DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
+ DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
+ DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
+ DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
+ DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
+ DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
+ DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
+ DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE);
-
- DUMPCORE(HDMI_CORE_AV_ACR_CTRL);
- DUMPCORE(HDMI_CORE_AV_FREQ_SVAL);
- DUMPCORE(HDMI_CORE_AV_N_SVAL1);
- DUMPCORE(HDMI_CORE_AV_N_SVAL2);
- DUMPCORE(HDMI_CORE_AV_N_SVAL3);
- DUMPCORE(HDMI_CORE_AV_CTS_SVAL1);
- DUMPCORE(HDMI_CORE_AV_CTS_SVAL2);
- DUMPCORE(HDMI_CORE_AV_CTS_SVAL3);
- DUMPCORE(HDMI_CORE_AV_CTS_HVAL1);
- DUMPCORE(HDMI_CORE_AV_CTS_HVAL2);
- DUMPCORE(HDMI_CORE_AV_CTS_HVAL3);
- DUMPCORE(HDMI_CORE_AV_AUD_MODE);
- DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL);
- DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS);
- DUMPCORE(HDMI_CORE_AV_SWAP_I2S);
- DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH);
- DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP);
- DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST0);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST1);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST2);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST4);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST5);
- DUMPCORE(HDMI_CORE_AV_ASRC);
- DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN);
- DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
- DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT);
- DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
- DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
- DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
- DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL);
- DUMPCORE(HDMI_CORE_AV_DPD);
- DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
- DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
- DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
- DUMPCORE(HDMI_CORE_AV_AVI_VERS);
- DUMPCORE(HDMI_CORE_AV_AVI_LEN);
- DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
- DUMPCORE(HDMI_CORE_AV_SPD_TYPE);
- DUMPCORE(HDMI_CORE_AV_SPD_VERS);
- DUMPCORE(HDMI_CORE_AV_SPD_LEN);
- DUMPCORE(HDMI_CORE_AV_SPD_CHSUM);
- DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE);
- DUMPCORE(HDMI_CORE_AV_AUDIO_VERS);
- DUMPCORE(HDMI_CORE_AV_AUDIO_LEN);
- DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM);
- DUMPCORE(HDMI_CORE_AV_MPEG_TYPE);
- DUMPCORE(HDMI_CORE_AV_MPEG_VERS);
- DUMPCORE(HDMI_CORE_AV_MPEG_LEN);
- DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM);
- DUMPCORE(HDMI_CORE_AV_CP_BYTE1);
- DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID);
+ DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
}
void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
@@ -1016,9 +1025,8 @@ void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
}
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
struct hdmi_audio_format *aud_fmt)
{
u32 r;
@@ -1037,7 +1045,7 @@ void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
}
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
struct hdmi_audio_dma *aud_dma)
{
u32 r;
@@ -1055,7 +1063,7 @@ void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
}
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
struct hdmi_core_audio_config *cfg)
{
u32 r;
@@ -1106,27 +1114,33 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
cfg->fs_override, 1, 1);
- /* I2S parameters */
- REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4,
- cfg->freq_sample, 3, 0);
-
+ /*
+ * Set IEC-60958-3 channel status word. It is passed to the IP
+ * just as it is received. The user of the driver is responsible
+ * for its contents.
+ */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
+ cfg->iec60958_cfg->status[0]);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
+ cfg->iec60958_cfg->status[1]);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
+ cfg->iec60958_cfg->status[2]);
+ /* yes, this is correct: status[3] goes to CHST4 register */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
+ cfg->iec60958_cfg->status[3]);
+ /* yes, this is correct: status[4] goes to CHST5 register */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
+ cfg->iec60958_cfg->status[4]);
+
+ /* set I2S parameters */
r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
- r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
- r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
- r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
- r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5);
- r = FLD_MOD(r, cfg->freq_sample, 7, 4);
- r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
- r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
- hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r);
-
REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
cfg->i2s_cfg.in_length_bits, 3, 0);
@@ -1138,12 +1152,19 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
r = FLD_MOD(r, cfg->en_spdif, 1, 1);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
+
+ /* Audio channel mappings */
+ /* TODO: Make channel mapping dynamic. For now, map channels
+ * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
+ * HDMI speaker order is different. See CEA-861 Section 6.6.2.
+ */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
}
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
- struct hdmi_core_infoframe_audio *info_aud)
+static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
+ struct snd_cea_861_aud_if *info_aud)
{
- u8 val;
u8 sum = 0, checksum = 0;
void __iomem *av_base = hdmi_av_base(ip_data);
@@ -1157,24 +1178,23 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
sum += 0x84 + 0x001 + 0x00a;
- val = (info_aud->db1_coding_type << 4)
- | (info_aud->db1_channel_count - 1);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val);
- sum += val;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
+ info_aud->db1_ct_cc);
+ sum += info_aud->db1_ct_cc;
- val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val);
- sum += val;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
+ info_aud->db2_sf_ss);
+ sum += info_aud->db2_sf_ss;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
+ sum += info_aud->db3;
- val = info_aud->db4_channel_alloc;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val);
- sum += val;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
+ sum += info_aud->db4_ca;
- val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val);
- sum += val;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
+ info_aud->db5_dminh_lsv);
+ sum += info_aud->db5_dminh_lsv;
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
@@ -1192,70 +1212,212 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
*/
}
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
- u32 sample_freq, u32 *n, u32 *cts)
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+ struct omap_dss_audio *audio)
{
- u32 r;
- u32 deep_color = 0;
- u32 pclk = ip_data->cfg.timings.pixel_clock;
-
- if (n == NULL || cts == NULL)
+ struct hdmi_audio_format audio_format;
+ struct hdmi_audio_dma audio_dma;
+ struct hdmi_core_audio_config core;
+ int err, n, cts, channel_count;
+ unsigned int fs_nr;
+ bool word_length_16b = false;
+
+ if (!audio || !audio->iec || !audio->cea || !ip_data)
return -EINVAL;
+
+ core.iec60958_cfg = audio->iec;
/*
- * Obtain current deep color configuration. This needed
- * to calculate the TMDS clock based on the pixel clock.
+ * In the IEC-60958 status word, check if the audio sample word length
+	 * is 16 bits, as several optimizations can be performed in that case.
*/
- r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0);
- switch (r) {
- case 1: /* No deep color selected */
- deep_color = 100;
+ if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
+ if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
+ word_length_16b = true;
+
+	/* I2S configuration. See the Philips I2S specification */
+ if (word_length_16b)
+ core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+ else
+ core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+ /*
+	 * The I2S input word length is twice the length given in the
+	 * IEC-60958 status word. If the word size is greater than 20 bits,
+	 * increment by one.
+ */
+ core.i2s_cfg.in_length_bits = audio->iec->status[4]
+ & IEC958_AES4_CON_WORDLEN;
+ if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
+ core.i2s_cfg.in_length_bits++;
+ core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+ core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+ core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+ core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+
+ /* convert sample frequency to a number */
+ switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
+ case IEC958_AES3_CON_FS_32000:
+ fs_nr = 32000;
+ break;
+ case IEC958_AES3_CON_FS_44100:
+ fs_nr = 44100;
+ break;
+ case IEC958_AES3_CON_FS_48000:
+ fs_nr = 48000;
break;
- case 2: /* 10-bit deep color selected */
- deep_color = 125;
+ case IEC958_AES3_CON_FS_88200:
+ fs_nr = 88200;
break;
- case 3: /* 12-bit deep color selected */
- deep_color = 150;
+ case IEC958_AES3_CON_FS_96000:
+ fs_nr = 96000;
+ break;
+ case IEC958_AES3_CON_FS_176400:
+ fs_nr = 176400;
+ break;
+ case IEC958_AES3_CON_FS_192000:
+ fs_nr = 192000;
break;
default:
return -EINVAL;
}
- switch (sample_freq) {
- case 32000:
- if ((deep_color == 125) && ((pclk == 54054)
- || (pclk == 74250)))
- *n = 8192;
- else
- *n = 4096;
+ err = hdmi_compute_acr(fs_nr, &n, &cts);
+
+ /* Audio clock regeneration settings */
+ core.n = n;
+ core.cts = cts;
+ if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+ core.aud_par_busclk = 0;
+ core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+ core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+ } else {
+ core.aud_par_busclk = (((128 * 31) - 1) << 8);
+ core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+ core.use_mclk = true;
+ }
+
+ if (core.use_mclk)
+ core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
+
+ /* Audio channels settings */
+ channel_count = (audio->cea->db1_ct_cc &
+ CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
+
+ switch (channel_count) {
+ case 2:
+ audio_format.active_chnnls_msk = 0x03;
+ break;
+ case 3:
+ audio_format.active_chnnls_msk = 0x07;
+ break;
+ case 4:
+ audio_format.active_chnnls_msk = 0x0f;
+ break;
+ case 5:
+ audio_format.active_chnnls_msk = 0x1f;
break;
- case 44100:
- *n = 6272;
+ case 6:
+ audio_format.active_chnnls_msk = 0x3f;
break;
- case 48000:
- if ((deep_color == 125) && ((pclk == 54054)
- || (pclk == 74250)))
- *n = 8192;
- else
- *n = 6144;
+ case 7:
+ audio_format.active_chnnls_msk = 0x7f;
+ break;
+ case 8:
+ audio_format.active_chnnls_msk = 0xff;
break;
default:
- *n = 0;
return -EINVAL;
}
- /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
- *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+ /*
+ * the HDMI IP needs to enable four stereo channels when transmitting
+ * more than 2 audio channels
+ */
+ if (channel_count == 2) {
+ audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
+ core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+ core.layout = HDMI_AUDIO_LAYOUT_2CH;
+ } else {
+ audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
+ core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+ HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
+ HDMI_AUDIO_I2S_SD3_EN;
+ core.layout = HDMI_AUDIO_LAYOUT_8CH;
+ }
+
+ core.en_spdif = false;
+ /* use sample frequency from channel status word */
+ core.fs_override = true;
+ /* enable ACR packets */
+ core.en_acr_pkt = true;
+ /* disable direct streaming digital audio */
+ core.en_dsd_audio = false;
+ /* use parallel audio interface */
+ core.en_parallel_aud_input = true;
+
+ /* DMA settings */
+ if (word_length_16b)
+ audio_dma.transfer_size = 0x10;
+ else
+ audio_dma.transfer_size = 0x20;
+ audio_dma.block_size = 0xC0;
+ audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
+ audio_dma.fifo_threshold = 0x20; /* in number of samples */
+
+ /* audio FIFO format settings */
+ if (word_length_16b) {
+ audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
+ audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
+ audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+ } else {
+ audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
+ audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
+ audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+ }
+ audio_format.type = HDMI_AUDIO_TYPE_LPCM;
+ audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
+ /* disable start/stop signals of IEC 60958 blocks */
+ audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
+
+	/* configure DMA and audio FIFO format */
+	ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
+	ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
+
+	/* configure the core */
+	ti_hdmi_4xxx_core_audio_config(ip_data, &core);
+
+	/* configure the CEA-861 audio infoframe */
+	ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
return 0;
}
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable)
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, true, 31, 31);
+ return 0;
+}
+
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, false, 31, 31);
+}
+
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
{
REG_FLD_MOD(hdmi_av_base(ip_data),
- HDMI_CORE_AV_AUD_MODE, enable, 0, 0);
+ HDMI_CORE_AV_AUD_MODE, true, 0, 0);
REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+ HDMI_WP_AUDIO_CTRL, true, 30, 30);
+ return 0;
+}
+
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
+{
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_MODE, false, 0, 0);
REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+ HDMI_WP_AUDIO_CTRL, false, 30, 30);
}
#endif
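
The removed hdmi_config_audio_acr() is replaced by hdmi_compute_acr(), declared in ti_hdmi.h above. Whatever its implementation, the values it returns must satisfy the HDMI Audio Clock Regeneration relation 128 * fs = fTMDS * N / CTS, i.e. CTS = fTMDS * N / (128 * fs). The standalone snippet below is a worked example, not part of the patch, using the spec-recommended N values that the removed code also used as its defaults, and ignoring the deep-color special cases it handled.

/* Worked ACR example: CTS for a 148.5 MHz (1080p60) TMDS clock. */
#include <stdio.h>

int main(void)
{
	const struct { unsigned fs, n; } acr[] = {
		{ 32000, 4096 },	/* 32 kHz family */
		{ 44100, 6272 },	/* 44.1 kHz family */
		{ 48000, 6144 },	/* 48 kHz family */
	};
	const unsigned long long tmds = 148500000ULL;
	unsigned i;

	for (i = 0; i < sizeof(acr) / sizeof(acr[0]); i++) {
		unsigned long long cts = tmds * acr[i].n / (128ULL * acr[i].fs);
		/* prints 148500, 165000 and 148500 respectively */
		printf("fs=%u Hz  N=%u  CTS=%llu\n", acr[i].fs, acr[i].n, cts);
	}
	return 0;
}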
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
index a14d1a0e6e41..8366ae19e82e 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
@@ -24,11 +24,6 @@
#include <linux/string.h>
#include <video/omapdss.h>
#include "ti_hdmi.h"
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#endif
/* HDMI Wrapper */
@@ -57,6 +52,13 @@
#define HDMI_CORE_SYS_SRST 0x14
#define HDMI_CORE_CTRL1 0x20
#define HDMI_CORE_SYS_SYS_STAT 0x24
+#define HDMI_CORE_SYS_DE_DLY 0xC8
+#define HDMI_CORE_SYS_DE_CTRL 0xCC
+#define HDMI_CORE_SYS_DE_TOP 0xD0
+#define HDMI_CORE_SYS_DE_CNTL 0xD8
+#define HDMI_CORE_SYS_DE_CNTH 0xDC
+#define HDMI_CORE_SYS_DE_LINL 0xE0
+#define HDMI_CORE_SYS_DE_LINH_1 0xE4
#define HDMI_CORE_SYS_VID_ACEN 0x124
#define HDMI_CORE_SYS_VID_MODE 0x128
#define HDMI_CORE_SYS_INTR_STATE 0x1C0
@@ -66,50 +68,24 @@
#define HDMI_CORE_SYS_INTR4 0x1D0
#define HDMI_CORE_SYS_UMASK1 0x1D4
#define HDMI_CORE_SYS_TMDS_CTRL 0x208
-#define HDMI_CORE_SYS_DE_DLY 0xC8
-#define HDMI_CORE_SYS_DE_CTRL 0xCC
-#define HDMI_CORE_SYS_DE_TOP 0xD0
-#define HDMI_CORE_SYS_DE_CNTL 0xD8
-#define HDMI_CORE_SYS_DE_CNTH 0xDC
-#define HDMI_CORE_SYS_DE_LINL 0xE0
-#define HDMI_CORE_SYS_DE_LINH_1 0xE4
+
#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
+#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
/* HDMI DDC E-DID */
-#define HDMI_CORE_DDC_CMD 0x3CC
-#define HDMI_CORE_DDC_STATUS 0x3C8
#define HDMI_CORE_DDC_ADDR 0x3B4
+#define HDMI_CORE_DDC_SEGM 0x3B8
#define HDMI_CORE_DDC_OFFSET 0x3BC
#define HDMI_CORE_DDC_COUNT1 0x3C0
#define HDMI_CORE_DDC_COUNT2 0x3C4
+#define HDMI_CORE_DDC_STATUS 0x3C8
+#define HDMI_CORE_DDC_CMD 0x3CC
#define HDMI_CORE_DDC_DATA 0x3D0
-#define HDMI_CORE_DDC_SEGM 0x3B8
/* HDMI IP Core Audio Video */
-#define HDMI_CORE_AV_HDMI_CTRL 0xBC
-#define HDMI_CORE_AV_DPD 0xF4
-#define HDMI_CORE_AV_PB_CTRL1 0xF8
-#define HDMI_CORE_AV_PB_CTRL2 0xFC
-#define HDMI_CORE_AV_AVI_TYPE 0x100
-#define HDMI_CORE_AV_AVI_VERS 0x104
-#define HDMI_CORE_AV_AVI_LEN 0x108
-#define HDMI_CORE_AV_AVI_CHSUM 0x10C
-#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
-#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
-#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
-#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
-#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
-#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
-#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
-#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
-#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
-#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
-#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
-#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
#define HDMI_CORE_AV_ACR_CTRL 0x4
#define HDMI_CORE_AV_FREQ_SVAL 0x8
#define HDMI_CORE_AV_N_SVAL1 0xC
@@ -148,25 +124,39 @@
#define HDMI_CORE_AV_AVI_VERS 0x104
#define HDMI_CORE_AV_AVI_LEN 0x108
#define HDMI_CORE_AV_AVI_CHSUM 0x10C
+#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
#define HDMI_CORE_AV_SPD_TYPE 0x180
#define HDMI_CORE_AV_SPD_VERS 0x184
#define HDMI_CORE_AV_SPD_LEN 0x188
#define HDMI_CORE_AV_SPD_CHSUM 0x18C
+#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
#define HDMI_CORE_AV_AUDIO_TYPE 0x200
#define HDMI_CORE_AV_AUDIO_VERS 0x204
#define HDMI_CORE_AV_AUDIO_LEN 0x208
#define HDMI_CORE_AV_AUDIO_CHSUM 0x20C
+#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
#define HDMI_CORE_AV_MPEG_TYPE 0x280
#define HDMI_CORE_AV_MPEG_VERS 0x284
#define HDMI_CORE_AV_MPEG_LEN 0x288
#define HDMI_CORE_AV_MPEG_CHSUM 0x28C
+#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
+#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
#define HDMI_CORE_AV_CP_BYTE1 0x37C
+#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
#define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC
+
#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
+#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
+
/* PLL */
#define PLLCTRL_PLL_CONTROL 0x0
@@ -284,35 +274,6 @@ enum hdmi_core_infoframe {
HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
- HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
- HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
- HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
- HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
- HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
- HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
- HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
- HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
- HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
- HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
- HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
- HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
- HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
- HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
- HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
- HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
- HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
- HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
- HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
- HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
- HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
- HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
- HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
- HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
- HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
- HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
};
enum hdmi_packing_mode {
@@ -322,17 +283,6 @@ enum hdmi_packing_mode {
HDMI_PACK_ALREADYPACKED = 7
};
-enum hdmi_core_audio_sample_freq {
- HDMI_AUDIO_FS_32000 = 0x3,
- HDMI_AUDIO_FS_44100 = 0x0,
- HDMI_AUDIO_FS_48000 = 0x2,
- HDMI_AUDIO_FS_88200 = 0x8,
- HDMI_AUDIO_FS_96000 = 0xA,
- HDMI_AUDIO_FS_176400 = 0xC,
- HDMI_AUDIO_FS_192000 = 0xE,
- HDMI_AUDIO_FS_NOT_INDICATED = 0x1
-};
-
enum hdmi_core_audio_layout {
HDMI_AUDIO_LAYOUT_2CH = 0,
HDMI_AUDIO_LAYOUT_8CH = 1
@@ -387,37 +337,12 @@ enum hdmi_audio_blk_strt_end_sig {
};
enum hdmi_audio_i2s_config {
- HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
- HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
- HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
- HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
- HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
- HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
- HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
- HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
- HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
- HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
- HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
- HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
- HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
- HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
- HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
- HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
- HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
- HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
- HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
- HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
- HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
- HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
- HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
- HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
- HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
HDMI_AUDIO_I2S_SD0_EN = 1,
@@ -446,20 +371,6 @@ struct hdmi_core_video_config {
enum hdmi_core_tclkselclkmult tclk_sel_clkmult;
};
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_audio {
- u8 db1_coding_type;
- u8 db1_channel_count;
- u8 db2_sample_freq;
- u8 db2_sample_size;
- u8 db4_channel_alloc;
- bool db5_downmix_inh;
- u8 db5_lsv; /* Level shift values for downmix */
-};
-
struct hdmi_core_packet_enable_repeat {
u32 audio_pkt;
u32 audio_pkt_repeat;
@@ -496,15 +407,10 @@ struct hdmi_audio_dma {
};
struct hdmi_core_audio_i2s_config {
- u8 word_max_length;
- u8 word_length;
u8 in_length_bits;
u8 justification;
- u8 en_high_bitrate_aud;
u8 sck_edge_mode;
- u8 cbit_order;
u8 vbit;
- u8 ws_polarity;
u8 direction;
u8 shift;
u8 active_sds;
@@ -512,7 +418,7 @@ struct hdmi_core_audio_i2s_config {
struct hdmi_core_audio_config {
struct hdmi_core_audio_i2s_config i2s_cfg;
- enum hdmi_core_audio_sample_freq freq_sample;
+ struct snd_aes_iec958 *iec60958_cfg;
bool fs_override;
u32 n;
u32 cts;
@@ -527,17 +433,4 @@ struct hdmi_core_audio_config {
bool en_spdif;
};
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
- u32 sample_freq, u32 *n, u32 *cts);
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
- struct hdmi_core_infoframe_audio *info_aud);
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
- struct hdmi_core_audio_config *cfg);
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
- struct hdmi_audio_dma *aud_dma);
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
- struct hdmi_audio_format *aud_fmt);
-#endif
#endif
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 9c3daf71750c..3907c8b6ecbc 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -402,7 +402,7 @@ static void venc_runtime_put(void)
DSSDBG("venc_runtime_put\n");
r = pm_runtime_put_sync(&venc.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static const struct venc_config *venc_timings_to_config(
@@ -415,6 +415,7 @@ static const struct venc_config *venc_timings_to_config(
return &venc_config_ntsc_trm;
BUG();
+ return NULL;
}
static int venc_power_on(struct omap_dss_device *dssdev)
@@ -440,10 +441,11 @@ static int venc_power_on(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, l);
- dispc_set_digit_size(dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res/2);
+ dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
- regulator_enable(venc.vdda_dac_reg);
+ r = regulator_enable(venc.vdda_dac_reg);
+ if (r)
+ goto err;
if (dssdev->platform_enable)
dssdev->platform_enable(dssdev);
@@ -485,16 +487,68 @@ unsigned long venc_get_pixel_clock(void)
return 13500000;
}
+static ssize_t display_output_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ const char *ret;
+
+ switch (dssdev->phy.venc.type) {
+ case OMAP_DSS_VENC_TYPE_COMPOSITE:
+ ret = "composite";
+ break;
+ case OMAP_DSS_VENC_TYPE_SVIDEO:
+ ret = "svideo";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ret);
+}
+
+static ssize_t display_output_type_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ enum omap_dss_venc_type new_type;
+
+ if (sysfs_streq("composite", buf))
+ new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+ else if (sysfs_streq("svideo", buf))
+ new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
+ else
+ return -EINVAL;
+
+ mutex_lock(&venc.venc_lock);
+
+ if (dssdev->phy.venc.type != new_type) {
+ dssdev->phy.venc.type = new_type;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ venc_power_off(dssdev);
+ venc_power_on(dssdev);
+ }
+ }
+
+ mutex_unlock(&venc.venc_lock);
+
+ return size;
+}
+
+static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
+ display_output_type_show, display_output_type_store);
+
/* driver */
static int venc_panel_probe(struct omap_dss_device *dssdev)
{
dssdev->panel.timings = omap_dss_pal_timings;
- return 0;
+ return device_create_file(&dssdev->dev, &dev_attr_output_type);
}
static void venc_panel_remove(struct omap_dss_device *dssdev)
{
+ device_remove_file(&dssdev->dev, &dev_attr_output_type);
}
static int venc_panel_enable(struct omap_dss_device *dssdev)
@@ -577,12 +631,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
return venc_panel_enable(dssdev);
}
-static void venc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static void venc_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -597,6 +645,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
/* turn the venc off and on to get new timings to use */
venc_panel_disable(dssdev);
venc_panel_enable(dssdev);
+ } else {
+ dss_mgr_set_timings(dssdev->manager, timings);
}
}
@@ -661,7 +711,6 @@ static struct omap_dss_driver venc_driver = {
.get_resolution = omapdss_default_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
- .get_timings = venc_get_timings,
.set_timings = venc_set_timings,
.check_timings = venc_check_timings,
@@ -675,7 +724,7 @@ static struct omap_dss_driver venc_driver = {
};
/* driver end */
-int venc_init_display(struct omap_dss_device *dssdev)
+static int __init venc_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -695,7 +744,7 @@ int venc_init_display(struct omap_dss_device *dssdev)
return 0;
}
-void venc_dump_regs(struct seq_file *s)
+static void venc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
@@ -779,8 +828,32 @@ static void venc_put_clocks(void)
clk_put(venc.tv_dac_clk);
}
+static void __init venc_probe_pdata(struct platform_device *pdev)
+{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int r, i;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
+ continue;
+
+ r = venc_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
/* VENC HW IP initialisation */
-static int omap_venchw_probe(struct platform_device *pdev)
+static int __init omap_venchw_probe(struct platform_device *pdev)
{
u8 rev_id;
struct resource *venc_mem;
@@ -824,6 +897,10 @@ static int omap_venchw_probe(struct platform_device *pdev)
if (r)
goto err_reg_panel_driver;
+ dss_debugfs_create_file("venc", venc_dump_regs);
+
+ venc_probe_pdata(pdev);
+
return 0;
err_reg_panel_driver:
@@ -833,12 +910,15 @@ err_runtime_get:
return r;
}
-static int omap_venchw_remove(struct platform_device *pdev)
+static int __exit omap_venchw_remove(struct platform_device *pdev)
{
+ omap_dss_unregister_child_devices(&pdev->dev);
+
if (venc.vdda_dac_reg != NULL) {
regulator_put(venc.vdda_dac_reg);
venc.vdda_dac_reg = NULL;
}
+
omap_dss_unregister_driver(&venc_driver);
pm_runtime_disable(&pdev->dev);
@@ -853,7 +933,6 @@ static int venc_runtime_suspend(struct device *dev)
clk_disable(venc.tv_dac_clk);
dispc_runtime_put();
- dss_runtime_put();
return 0;
}
@@ -862,23 +941,14 @@ static int venc_runtime_resume(struct device *dev)
{
int r;
- r = dss_runtime_get();
- if (r < 0)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r < 0)
- goto err_get_dispc;
+ return r;
if (venc.tv_dac_clk)
clk_enable(venc.tv_dac_clk);
return 0;
-
-err_get_dispc:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static const struct dev_pm_ops venc_pm_ops = {
@@ -887,8 +957,7 @@ static const struct dev_pm_ops venc_pm_ops = {
};
static struct platform_driver omap_venchw_driver = {
- .probe = omap_venchw_probe,
- .remove = omap_venchw_remove,
+ .remove = __exit_p(omap_venchw_remove),
.driver = {
.name = "omapdss_venc",
.owner = THIS_MODULE,
@@ -896,18 +965,18 @@ static struct platform_driver omap_venchw_driver = {
},
};
-int venc_init_platform_driver(void)
+int __init venc_init_platform_driver(void)
{
if (cpu_is_omap44xx())
return 0;
- return platform_driver_register(&omap_venchw_driver);
+ return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
}
-void venc_uninit_platform_driver(void)
+void __exit venc_uninit_platform_driver(void)
{
if (cpu_is_omap44xx())
return;
- return platform_driver_unregister(&omap_venchw_driver);
+ platform_driver_unregister(&omap_venchw_driver);
}
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 6a09ef87e14f..c6cf372d22c5 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -70,7 +70,7 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
DBG("omapfb_setup_plane\n");
- if (ofbi->num_overlays != 1) {
+ if (ofbi->num_overlays == 0) {
r = -EINVAL;
goto out;
}
@@ -185,7 +185,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
- if (ofbi->num_overlays != 1) {
+ if (ofbi->num_overlays == 0) {
memset(pi, 0, sizeof(*pi));
} else {
struct omap_overlay *ovl;
@@ -225,6 +225,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
down_write_nested(&rg->lock, rg->id);
atomic_inc(&rg->lock_count);
+ if (rg->size == size && rg->type == mi->type)
+ goto out;
+
if (atomic_read(&rg->map_count)) {
r = -EBUSY;
goto out;
@@ -247,12 +250,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
}
}
- if (rg->size != size || rg->type != mi->type) {
- r = omapfb_realloc_fbmem(fbi, size, mi->type);
- if (r) {
- dev_err(fbdev->dev, "realloc fbmem failed\n");
- goto out;
- }
+ r = omapfb_realloc_fbmem(fbi, size, mi->type);
+ if (r) {
+ dev_err(fbdev->dev, "realloc fbmem failed\n");
+ goto out;
}
out:
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index b00db4068d21..3450ea0966c9 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -179,6 +179,7 @@ static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
break;
default:
BUG();
+ return 0;
}
offset *= vrfb->bytespp;
@@ -1502,7 +1503,7 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
fbnum = simple_strtoul(p, &p, 10);
- if (p == param)
+ if (p == start)
return -EINVAL;
if (*p != ':')
@@ -2307,7 +2308,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return 0;
}
-static int omapfb_probe(struct platform_device *pdev)
+static int __init omapfb_probe(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = NULL;
int r = 0;
@@ -2448,7 +2449,7 @@ err0:
return r;
}
-static int omapfb_remove(struct platform_device *pdev)
+static int __exit omapfb_remove(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
@@ -2462,8 +2463,7 @@ static int omapfb_remove(struct platform_device *pdev)
}
static struct platform_driver omapfb_driver = {
- .probe = omapfb_probe,
- .remove = omapfb_remove,
+ .remove = __exit_p(omapfb_remove),
.driver = {
.name = "omapfb",
.owner = THIS_MODULE,
@@ -2474,7 +2474,7 @@ static int __init omapfb_init(void)
{
DBG("omapfb_init\n");
- if (platform_driver_register(&omapfb_driver)) {
+ if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
printk(KERN_ERR "failed to register omapfb driver\n");
return -ENODEV;
}
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index c0bdc9b54ecf..30361a09aecd 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -166,6 +166,7 @@ static inline struct omapfb_display_data *get_display_data(
/* This should never happen */
BUG();
+ return NULL;
}
static inline void omapfb_lock(struct omapfb2_device *fbdev)
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index 4e5b960c32c8..7e990220ad2a 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -179,8 +179,10 @@ void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
pixel_size_exp = 2;
else if (bytespp == 2)
pixel_size_exp = 1;
- else
+ else {
BUG();
+ return;
+ }
vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 1d71c08a818f..0b4ae0cebeda 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -316,12 +316,9 @@ pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
ret = wait_event_interruptible_timeout(priv->wait_idle,
!priv->shared->hw_running, HZ*4);
- if (ret < 0)
+ if (ret != 0)
break;
- if (ret > 0)
- continue;
-
if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
priv->shared->num_interrupts == num) {
QERROR("TIMEOUT");
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index f3105160bf98..ea7b661e7229 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -47,7 +47,7 @@
#ifdef CONFIG_FB_S3C_DEBUG_REGWRITE
#undef writel
#define writel(v, r) do { \
- printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \
+ pr_debug("%s: %08x => %p\n", __func__, (unsigned int)v, r); \
__raw_writel(v, r); \
} while (0)
#endif /* FB_S3C_DEBUG_REGWRITE */
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
result = (unsigned int)tmp / 1000;
dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
- pixclk, clk, result, clk / result);
+ pixclk, clk, result, result ? clk / result : clk);
return result;
}
@@ -495,7 +495,6 @@ static int s3c_fb_set_par(struct fb_info *info)
u32 alpha = 0;
u32 data;
u32 pagewidth;
- int clkdiv;
dev_dbg(sfb->dev, "setting framebuffer parameters\n");
@@ -532,48 +531,9 @@ static int s3c_fb_set_par(struct fb_info *info)
/* disable the window whilst we update it */
writel(0, regs + WINCON(win_no));
- /* use platform specified window as the basis for the lcd timings */
-
- if (win_no == sfb->pdata->default_win) {
- clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
-
- data = sfb->pdata->vidcon0;
- data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
-
- if (clkdiv > 1)
- data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
- else
- data &= ~VIDCON0_CLKDIR; /* 1:1 clock */
-
- /* write the timing data to the panel */
-
- if (sfb->variant.is_2443)
- data |= (1 << 5);
-
- writel(data, regs + VIDCON0);
-
+ if (!sfb->output_on)
s3c_fb_enable(sfb, 1);
- data = VIDTCON0_VBPD(var->upper_margin - 1) |
- VIDTCON0_VFPD(var->lower_margin - 1) |
- VIDTCON0_VSPW(var->vsync_len - 1);
-
- writel(data, regs + sfb->variant.vidtcon);
-
- data = VIDTCON1_HBPD(var->left_margin - 1) |
- VIDTCON1_HFPD(var->right_margin - 1) |
- VIDTCON1_HSPW(var->hsync_len - 1);
-
- /* VIDTCON1 */
- writel(data, regs + sfb->variant.vidtcon + 4);
-
- data = VIDTCON2_LINEVAL(var->yres - 1) |
- VIDTCON2_HOZVAL(var->xres - 1) |
- VIDTCON2_LINEVAL_E(var->yres - 1) |
- VIDTCON2_HOZVAL_E(var->xres - 1);
- writel(data, regs + sfb->variant.vidtcon + 8);
- }
-
/* write the buffer address */
/* start and end registers stride is 8 */
@@ -839,6 +799,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
struct s3c_fb *sfb = win->parent;
unsigned int index = win->index;
u32 wincon;
+ u32 output_on = sfb->output_on;
dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
@@ -877,34 +838,18 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
shadow_protect_win(win, 1);
writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
- shadow_protect_win(win, 0);
/* Check the enabled state to see if we need to be running the
* main LCD interface, as if there are no active windows then
* it is highly likely that we also do not need to output
* anything.
*/
-
- /* We could do something like the following code, but the current
- * system of using framebuffer events means that we cannot make
- * the distinction between just window 0 being inactive and all
- * the windows being down.
- *
- * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
- */
-
- /* we're stuck with this until we can do something about overriding
- * the power control using the blanking event for a single fb.
- */
- if (index == sfb->pdata->default_win) {
- shadow_protect_win(win, 1);
- s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
- shadow_protect_win(win, 0);
- }
+ s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
+ shadow_protect_win(win, 0);
pm_runtime_put_sync(sfb->dev);
- return 0;
+ return output_on == sfb->output_on;
}
/**
@@ -1111,7 +1056,7 @@ static struct fb_ops s3c_fb_ops = {
*
* Calculate the pixel clock when none has been given through platform data.
*/
-static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode)
+static void s3c_fb_missing_pixclock(struct fb_videomode *mode)
{
u64 pixclk = 1000000000000ULL;
u32 div;
@@ -1144,11 +1089,11 @@ static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb,
dev_dbg(sfb->dev, "allocating memory for display\n");
- real_size = windata->win_mode.xres * windata->win_mode.yres;
+ real_size = windata->xres * windata->yres;
virt_size = windata->virtual_x * windata->virtual_y;
dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n",
- real_size, windata->win_mode.xres, windata->win_mode.yres,
+ real_size, windata->xres, windata->yres,
virt_size, windata->virtual_x, windata->virtual_y);
size = (real_size > virt_size) ? real_size : virt_size;
@@ -1230,7 +1175,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
struct s3c_fb_win **res)
{
struct fb_var_screeninfo *var;
- struct fb_videomode *initmode;
+ struct fb_videomode initmode;
struct s3c_fb_pd_win *windata;
struct s3c_fb_win *win;
struct fb_info *fbinfo;
@@ -1251,11 +1196,11 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
}
windata = sfb->pdata->win[win_no];
- initmode = &windata->win_mode;
+ initmode = *sfb->pdata->vtiming;
WARN_ON(windata->max_bpp == 0);
- WARN_ON(windata->win_mode.xres == 0);
- WARN_ON(windata->win_mode.yres == 0);
+ WARN_ON(windata->xres == 0);
+ WARN_ON(windata->yres == 0);
win = fbinfo->par;
*res = win;
@@ -1294,7 +1239,9 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
}
/* setup the initial video mode from the window */
- fb_videomode_to_var(&fbinfo->var, initmode);
+ initmode.xres = windata->xres;
+ initmode.yres = windata->yres;
+ fb_videomode_to_var(&fbinfo->var, &initmode);
fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
fbinfo->fix.accel = FB_ACCEL_NONE;
@@ -1339,6 +1286,53 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
}
/**
+ * s3c_fb_set_rgb_timing() - set video timing for rgb interface.
+ * @sfb: The base resources for the hardware.
+ *
+ * Set horizontal and vertical lcd rgb interface timing.
+ */
+static void s3c_fb_set_rgb_timing(struct s3c_fb *sfb)
+{
+ struct fb_videomode *vmode = sfb->pdata->vtiming;
+ void __iomem *regs = sfb->regs;
+ int clkdiv;
+ u32 data;
+
+ if (!vmode->pixclock)
+ s3c_fb_missing_pixclock(vmode);
+
+ clkdiv = s3c_fb_calc_pixclk(sfb, vmode->pixclock);
+
+ data = sfb->pdata->vidcon0;
+ data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
+
+ if (clkdiv > 1)
+ data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
+ else
+ data &= ~VIDCON0_CLKDIR; /* 1:1 clock */
+
+ if (sfb->variant.is_2443)
+ data |= (1 << 5);
+ writel(data, regs + VIDCON0);
+
+ data = VIDTCON0_VBPD(vmode->upper_margin - 1) |
+ VIDTCON0_VFPD(vmode->lower_margin - 1) |
+ VIDTCON0_VSPW(vmode->vsync_len - 1);
+ writel(data, regs + sfb->variant.vidtcon);
+
+ data = VIDTCON1_HBPD(vmode->left_margin - 1) |
+ VIDTCON1_HFPD(vmode->right_margin - 1) |
+ VIDTCON1_HSPW(vmode->hsync_len - 1);
+ writel(data, regs + sfb->variant.vidtcon + 4);
+
+ data = VIDTCON2_LINEVAL(vmode->yres - 1) |
+ VIDTCON2_HOZVAL(vmode->xres - 1) |
+ VIDTCON2_LINEVAL_E(vmode->yres - 1) |
+ VIDTCON2_HOZVAL_E(vmode->xres - 1);
+ writel(data, regs + sfb->variant.vidtcon + 8);
+}
+
+/**
* s3c_fb_clear_win() - clear hardware window registers.
* @sfb: The base resources for the hardware.
* @win: The window to process.
@@ -1354,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
writel(0, regs + VIDOSD_A(win, sfb->variant));
writel(0, regs + VIDOSD_B(win, sfb->variant));
writel(0, regs + VIDOSD_C(win, sfb->variant));
- reg = readl(regs + SHADOWCON);
- writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON);
+
+ if (sfb->variant.has_shadowcon) {
+ reg = readl(sfb->regs + SHADOWCON);
+ reg &= ~(SHADOWCON_WINx_PROTECT(win) |
+ SHADOWCON_CHx_ENABLE(win) |
+ SHADOWCON_CHx_LOCAL_ENABLE(win));
+ writel(reg, sfb->regs + SHADOWCON);
+ }
}
static int __devinit s3c_fb_probe(struct platform_device *pdev)
@@ -1481,15 +1481,14 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
writel(0xffffff, regs + WKEYCON1);
}
+ s3c_fb_set_rgb_timing(sfb);
+
/* we have the register setup, start allocating framebuffers */
for (win = 0; win < fbdrv->variant.nr_windows; win++) {
if (!pd->win[win])
continue;
- if (!pd->win[win]->win_mode.pixclock)
- s3c_fb_missing_pixclock(&pd->win[win]->win_mode);
-
ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
&sfb->windows[win]);
if (ret < 0) {
@@ -1564,6 +1563,8 @@ static int s3c_fb_suspend(struct device *dev)
struct s3c_fb_win *win;
int win_no;
+ pm_runtime_get_sync(sfb->dev);
+
for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
win = sfb->windows[win_no];
if (!win)
@@ -1577,6 +1578,9 @@ static int s3c_fb_suspend(struct device *dev)
clk_disable(sfb->lcd_clk);
clk_disable(sfb->bus_clk);
+
+ pm_runtime_put_sync(sfb->dev);
+
return 0;
}
@@ -1589,6 +1593,8 @@ static int s3c_fb_resume(struct device *dev)
int win_no;
u32 reg;
+ pm_runtime_get_sync(sfb->dev);
+
clk_enable(sfb->bus_clk);
if (!sfb->variant.has_clksel)
@@ -1623,6 +1629,8 @@ static int s3c_fb_resume(struct device *dev)
shadow_protect_win(win, 0);
}
+ s3c_fb_set_rgb_timing(sfb);
+
/* restore framebuffers */
for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
win = sfb->windows[win_no];
@@ -1633,6 +1641,8 @@ static int s3c_fb_resume(struct device *dev)
s3c_fb_set_par(win->fbinfo);
}
+ pm_runtime_put_sync(sfb->dev);
+
return 0;
}
#endif
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index cee7803a0a1c..f3d3b9ce4751 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r
/* following part not present in X11 driver */
cr67 = vga_in8(0x3d5, par) & 0xf;
vga_out8(0x3d5, 0x50 | cr67, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x67, par);
/* end of part */
vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par)
vga_out8(0x3d4, 0x66, par);
cr66 = vga_in8(0x3d5, par);
vga_out8(0x3d5, cr66 | 0x02, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x66, par);
vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */
- udelay(10000);
+ mdelay(10);
/*
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par)
vga_out8(0x3d4, 0x3f, par);
cr3f = vga_in8(0x3d5, par);
vga_out8(0x3d5, cr3f | 0x08, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x3f, par);
vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */
- udelay(10000);
+ mdelay(10);
/* Savage ramdac speeds */
par->numClocks = 4;
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index eafb19da2c07..930e550e752a 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -31,6 +31,7 @@
#include "sh_mobile_lcdcfb.h"
+/* HDMI Core Control Register (HTOP0) */
#define HDMI_SYSTEM_CTRL 0x00 /* System control */
#define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control,
bits 19..16 of 20-bit N for Audio Clock Regeneration packet */
@@ -201,6 +202,68 @@
#define HDMI_REVISION_ID 0xF1 /* Revision ID */
#define HDMI_TEST_MODE 0xFE /* Test mode */
+/* HDMI Control Register (HTOP1) */
+#define HDMI_HTOP1_TEST_MODE 0x0000 /* Test mode */
+#define HDMI_HTOP1_VIDEO_INPUT 0x0008 /* VideoInput */
+#define HDMI_HTOP1_CORE_RSTN 0x000C /* CoreResetn */
+#define HDMI_HTOP1_PLLBW 0x0018 /* PLLBW */
+#define HDMI_HTOP1_CLK_TO_PHY 0x001C /* Clk to Phy */
+#define HDMI_HTOP1_VIDEO_INPUT2 0x0020 /* VideoInput2 */
+#define HDMI_HTOP1_TISEMP0_1 0x0024 /* tisemp0-1 */
+#define HDMI_HTOP1_TISEMP2_C 0x0028 /* tisemp2-c */
+#define HDMI_HTOP1_TISIDRV 0x002C /* tisidrv */
+#define HDMI_HTOP1_TISEN 0x0034 /* tisen */
+#define HDMI_HTOP1_TISDREN 0x0038 /* tisdren */
+#define HDMI_HTOP1_CISRANGE 0x003C /* cisrange */
+#define HDMI_HTOP1_ENABLE_SELECTOR 0x0040 /* Enable Selector */
+#define HDMI_HTOP1_MACRO_RESET 0x0044 /* Macro reset */
+#define HDMI_HTOP1_PLL_CALIBRATION 0x0048 /* PLL calibration */
+#define HDMI_HTOP1_RE_CALIBRATION 0x004C /* Re-calibration */
+#define HDMI_HTOP1_CURRENT 0x0050 /* Current */
+#define HDMI_HTOP1_PLL_LOCK_DETECT 0x0054 /* PLL lock detect */
+#define HDMI_HTOP1_PHY_TEST_MODE 0x0058 /* PHY Test Mode */
+#define HDMI_HTOP1_CLK_SET 0x0080 /* Clock Set */
+#define HDMI_HTOP1_DDC_FAIL_SAFE 0x0084 /* DDC fail safe */
+#define HDMI_HTOP1_PRBS 0x0088 /* PRBS */
+#define HDMI_HTOP1_EDID_AINC_CONTROL 0x008C /* EDID ainc Control */
+#define HDMI_HTOP1_HTOP_DCL_MODE 0x00FC /* Deep Color Mode */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0 0x0100 /* Deep Color:FRC COEF0 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1 0x0104 /* Deep Color:FRC COEF1 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2 0x0108 /* Deep Color:FRC COEF2 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3 0x010C /* Deep Color:FRC COEF3 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C 0x0110 /* Deep Color:FRC COEF0C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C 0x0114 /* Deep Color:FRC COEF1C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C 0x0118 /* Deep Color:FRC COEF2C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C 0x011C /* Deep Color:FRC COEF3C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_MODE 0x0120 /* Deep Color:FRC Mode */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START1 0x0124 /* Deep Color:Rect Start1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1 0x0128 /* Deep Color:Rect Size1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START2 0x012C /* Deep Color:Rect Start2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2 0x0130 /* Deep Color:Rect Size2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START3 0x0134 /* Deep Color:Rect Start3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3 0x0138 /* Deep Color:Rect Size3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START4 0x013C /* Deep Color:Rect Start4 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4 0x0140 /* Deep Color:Rect Size4 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1 0x0144 /* Deep Color:Fil Para Y1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2 0x0148 /* Deep Color:Fil Para Y1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1 0x014C /* Deep Color:Fil Para CB1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2 0x0150 /* Deep Color:Fil Para CB1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1 0x0154 /* Deep Color:Fil Para CR1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2 0x0158 /* Deep Color:Fil Para CR1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1 0x015C /* Deep Color:Fil Para Y2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2 0x0160 /* Deep Color:Fil Para Y2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1 0x0164 /* Deep Color:Fil Para CB2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2 0x0168 /* Deep Color:Fil Para CB2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1 0x016C /* Deep Color:Fil Para CR2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2 0x0170 /* Deep Color:Fil Para CR2_2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1 0x0174 /* Deep Color:Cor Para Y1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1 0x0178 /* Deep Color:Cor Para CB1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1 0x017C /* Deep Color:Cor Para CR1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2 0x0180 /* Deep Color:Cor Para Y2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2 0x0184 /* Deep Color:Cor Para CB2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2 0x0188 /* Deep Color:Cor Para CR2 */
+#define HDMI_HTOP1_EDID_DATA_READ 0x0200 /* EDID Data Read 128Byte:0x03FC */
+
enum hotplug_state {
HDMI_HOTPLUG_DISCONNECTED,
HDMI_HOTPLUG_CONNECTED,
@@ -211,6 +274,7 @@ struct sh_hdmi {
struct sh_mobile_lcdc_entity entity;
void __iomem *base;
+ void __iomem *htop1;
enum hotplug_state hp_state; /* hot-plug status */
u8 preprogrammed_vic; /* use a pre-programmed VIC or
the external mode */
@@ -222,20 +286,66 @@ struct sh_hdmi {
struct delayed_work edid_work;
struct fb_videomode mode;
struct fb_monspecs monspec;
+
+ /* register access functions */
+ void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg);
+ u8 (*read)(struct sh_hdmi *hdmi, u8 reg);
};
#define entity_to_sh_hdmi(e) container_of(e, struct sh_hdmi, entity)
-static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg)
{
iowrite8(data, hdmi->base + reg);
}
-static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+static u8 __hdmi_read8(struct sh_hdmi *hdmi, u8 reg)
{
return ioread8(hdmi->base + reg);
}
+static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+ iowrite32((u32)data, hdmi->base + (reg * 4));
+ udelay(100);
+}
+
+static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg)
+{
+ return (u8)ioread32(hdmi->base + (reg * 4));
+}
+
+static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+ hdmi->write(hdmi, data, reg);
+}
+
+static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+{
+ return hdmi->read(hdmi, reg);
+}
+
+static void hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg)
+{
+ u8 val = hdmi_read(hdmi, reg);
+
+ val &= ~mask;
+ val |= (data & mask);
+
+ hdmi_write(hdmi, val, reg);
+}
+
+static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg)
+{
+ iowrite32(data, hdmi->htop1 + reg);
+ udelay(100);
+}
+
+static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg)
+{
+ return ioread32(hdmi->htop1 + reg);
+}
+
/*
* HDMI sound
*/
@@ -693,11 +803,11 @@ static void sh_hdmi_configure(struct sh_hdmi *hdmi)
msleep(10);
/* PS mode b->d, reset PLLA and PLLB */
- hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL);
udelay(10);
- hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL);
}
static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
@@ -746,7 +856,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
/* Read EDID */
dev_dbg(hdmi->dev, "Read back EDID code:");
for (i = 0; i < 128; i++) {
- edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
+ edid[i] = (hdmi->htop1) ?
+ (u8)hdmi_htop1_read(hdmi, HDMI_HTOP1_EDID_DATA_READ + (i * 4)) :
+ hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
#ifdef DEBUG
if ((i % 16) == 0) {
printk(KERN_CONT "\n");
@@ -917,13 +1029,13 @@ static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id)
u8 status1, status2, mask1, mask2;
/* mode_b and PLLA and PLLB reset */
- hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL);
/* How long shall reset be held? */
udelay(10);
/* mode_b and PLLA and PLLB reset release */
- hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL);
status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);
@@ -1001,7 +1113,7 @@ static int sh_hdmi_display_on(struct sh_mobile_lcdc_entity *entity)
*/
if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
/* PS mode d->e. All functions are active */
- hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL);
dev_dbg(hdmi->dev, "HDMI running\n");
}
@@ -1016,7 +1128,7 @@ static void sh_hdmi_display_off(struct sh_mobile_lcdc_entity *entity)
dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi);
/* PS mode e->a */
- hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL);
}
static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = {
@@ -1110,10 +1222,58 @@ out:
dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi);
}
+static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
+{
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE);
+ hdmi_htop1_write(hdmi, 0x0000000b, 0x0010);
+ hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2);
+ hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE);
+ hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW);
+ hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+ hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+ hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+ hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+ hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE);
+ msleep(100);
+ hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR);
+ msleep(100);
+ hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+ hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+ hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+ hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_CLK_TO_PHY);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2);
+ hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET);
+}
+
static int __init sh_hdmi_probe(struct platform_device *pdev)
{
struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *htop1_res;
int irq = platform_get_irq(pdev, 0), ret;
struct sh_hdmi *hdmi;
long rate;
@@ -1121,6 +1281,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
if (!res || !pdata || irq < 0)
return -ENODEV;
+ htop1_res = NULL;
+ if (pdata->flags & HDMI_HAS_HTOP1) {
+ htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!htop1_res) {
+ dev_err(&pdev->dev, "htop1 needs register base\n");
+ return -EINVAL;
+ }
+ }
+
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) {
dev_err(&pdev->dev, "Cannot allocate device data\n");
@@ -1138,6 +1307,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
goto egetclk;
}
+ /* select register access functions */
+ if (pdata->flags & HDMI_32BIT_REG) {
+ hdmi->write = __hdmi_write32;
+ hdmi->read = __hdmi_read32;
+ } else {
+ hdmi->write = __hdmi_write8;
+ hdmi->read = __hdmi_read8;
+ }
+
/* An arbitrary relaxed pixclock just to get things started: from standard 480p */
rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037));
if (rate > 0)
@@ -1176,6 +1354,24 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
+ /* init interrupt polarity */
+ if (pdata->flags & HDMI_OUTPUT_PUSH_PULL)
+ hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL);
+
+ if (pdata->flags & HDMI_OUTPUT_POLARITY_HI)
+ hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL);
+
+ /* enable htop1 register if needed */
+ if (htop1_res) {
+ hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res));
+ if (!hdmi->htop1) {
+ dev_err(&pdev->dev, "control register region already claimed\n");
+ ret = -ENOMEM;
+ goto emap_htop1;
+ }
+ sh_hdmi_htop1_init(hdmi);
+ }
+
/* Product and revision IDs are 0 in sh-mobile version */
dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID));
@@ -1199,6 +1395,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
ecodec:
free_irq(irq, hdmi);
ereqirq:
+ if (hdmi->htop1)
+ iounmap(hdmi->htop1);
+emap_htop1:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
iounmap(hdmi->base);
@@ -1230,6 +1429,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
clk_disable(hdmi->hdmi_clk);
clk_put(hdmi->hdmi_clk);
+ if (hdmi->htop1)
+ iounmap(hdmi->htop1);
iounmap(hdmi->base);
release_mem_region(res->start, resource_size(res));
kfree(hdmi);
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h
index aff73842d877..85d6738b6c64 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/sis/init.h
@@ -105,51 +105,6 @@ static const unsigned short ModeIndex_1920x1440[] = {0x68, 0x69, 0x00, 0x6b};
static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00};
static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e};
-static const unsigned short SiS_DRAMType[17][5]={
- {0x0C,0x0A,0x02,0x40,0x39},
- {0x0D,0x0A,0x01,0x40,0x48},
- {0x0C,0x09,0x02,0x20,0x35},
- {0x0D,0x09,0x01,0x20,0x44},
- {0x0C,0x08,0x02,0x10,0x31},
- {0x0D,0x08,0x01,0x10,0x40},
- {0x0C,0x0A,0x01,0x20,0x34},
- {0x0C,0x09,0x01,0x08,0x32},
- {0x0B,0x08,0x02,0x08,0x21},
- {0x0C,0x08,0x01,0x08,0x30},
- {0x0A,0x08,0x02,0x04,0x11},
- {0x0B,0x0A,0x01,0x10,0x28},
- {0x09,0x08,0x02,0x02,0x01},
- {0x0B,0x09,0x01,0x08,0x24},
- {0x0B,0x08,0x01,0x04,0x20},
- {0x0A,0x08,0x01,0x02,0x10},
- {0x09,0x08,0x01,0x01,0x00}
-};
-
-static const unsigned short SiS_SDRDRAM_TYPE[13][5] =
-{
- { 2,12, 9,64,0x35},
- { 1,13, 9,64,0x44},
- { 2,12, 8,32,0x31},
- { 2,11, 9,32,0x25},
- { 1,12, 9,32,0x34},
- { 1,13, 8,32,0x40},
- { 2,11, 8,16,0x21},
- { 1,12, 8,16,0x30},
- { 1,11, 9,16,0x24},
- { 1,11, 8, 8,0x20},
- { 2, 9, 8, 4,0x01},
- { 1,10, 8, 4,0x10},
- { 1, 9, 8, 2,0x00}
-};
-
-static const unsigned short SiS_DDRDRAM_TYPE[4][5] =
-{
- { 2,12, 9,64,0x35},
- { 2,12, 8,32,0x31},
- { 2,11, 8,16,0x21},
- { 2, 9, 8, 4,0x01}
-};
-
static const unsigned char SiS_MDA_DAC[] =
{
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 078ca2167d6f..a7a48db64ce2 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -4222,6 +4222,26 @@ sisfb_post_300_buswidth(struct sis_video_info *ivideo)
return 1; /* 32bit */
}
+static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
+ {0x0C,0x0A,0x02,0x40,0x39},
+ {0x0D,0x0A,0x01,0x40,0x48},
+ {0x0C,0x09,0x02,0x20,0x35},
+ {0x0D,0x09,0x01,0x20,0x44},
+ {0x0C,0x08,0x02,0x10,0x31},
+ {0x0D,0x08,0x01,0x10,0x40},
+ {0x0C,0x0A,0x01,0x20,0x34},
+ {0x0C,0x09,0x01,0x08,0x32},
+ {0x0B,0x08,0x02,0x08,0x21},
+ {0x0C,0x08,0x01,0x08,0x30},
+ {0x0A,0x08,0x02,0x04,0x11},
+ {0x0B,0x0A,0x01,0x10,0x28},
+ {0x09,0x08,0x02,0x02,0x01},
+ {0x0B,0x09,0x01,0x08,0x24},
+ {0x0B,0x08,0x01,0x04,0x20},
+ {0x0A,0x08,0x01,0x02,0x10},
+ {0x09,0x08,0x01,0x01,0x00}
+};
+
static int __devinit
sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
int PseudoRankCapacity, int PseudoAdrPinCount,
@@ -4231,27 +4251,8 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
unsigned short sr14;
unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
- static const unsigned short SiS_DRAMType[17][5] = {
- {0x0C,0x0A,0x02,0x40,0x39},
- {0x0D,0x0A,0x01,0x40,0x48},
- {0x0C,0x09,0x02,0x20,0x35},
- {0x0D,0x09,0x01,0x20,0x44},
- {0x0C,0x08,0x02,0x10,0x31},
- {0x0D,0x08,0x01,0x10,0x40},
- {0x0C,0x0A,0x01,0x20,0x34},
- {0x0C,0x09,0x01,0x08,0x32},
- {0x0B,0x08,0x02,0x08,0x21},
- {0x0C,0x08,0x01,0x08,0x30},
- {0x0A,0x08,0x02,0x04,0x11},
- {0x0B,0x0A,0x01,0x10,0x28},
- {0x09,0x08,0x02,0x02,0x01},
- {0x0B,0x09,0x01,0x08,0x24},
- {0x0B,0x08,0x01,0x04,0x20},
- {0x0A,0x08,0x01,0x02,0x10},
- {0x09,0x08,0x01,0x01,0x00}
- };
- for(k = 0; k <= 16; k++) {
+ for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
RankCapacity = buswidth * SiS_DRAMType[k][3];
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 30f7a815a62b..5b6abc6de84b 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -1036,6 +1036,6 @@ static void __exit xxxfb_exit(void)
*/
module_init(xxxfb_init);
-module_exit(xxxfb_remove);
+module_exit(xxxfb_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index ccbfef5e828f..af3ef27ad36c 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -846,7 +846,7 @@ static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
}
}
-int ufx_handle_damage(struct ufx_data *dev, int x, int y,
+static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
int width, int height)
{
size_t packed_line_len = ALIGN((width * 2), 4);
@@ -1083,7 +1083,7 @@ static int ufx_ops_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 7af1e8166182..8af64148294b 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -893,7 +893,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 0c8837565bc7..c80e770e1800 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1276,17 +1276,12 @@ static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
static ssize_t viafb_dfph_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- char buf[20];
- u8 reg_val = 0;
- unsigned long length;
- if (count < 1)
- return -EINVAL;
- length = count > 20 ? 20 : count;
- if (copy_from_user(&buf[0], buffer, length))
- return -EFAULT;
- buf[length - 1] = '\0'; /*Ensure end string */
- if (kstrtou8(buf, 0, &reg_val) < 0)
- return -EINVAL;
+ int err;
+ u8 reg_val;
+ err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+ if (err)
+ return err;
+
viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
return count;
}
@@ -1316,17 +1311,12 @@ static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
static ssize_t viafb_dfpl_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- char buf[20];
- u8 reg_val = 0;
- unsigned long length;
- if (count < 1)
- return -EINVAL;
- length = count > 20 ? 20 : count;
- if (copy_from_user(&buf[0], buffer, length))
- return -EFAULT;
- buf[length - 1] = '\0'; /*Ensure end string */
- if (kstrtou8(buf, 0, &reg_val) < 0)
- return -EINVAL;
+ int err;
+ u8 reg_val;
+ err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+ if (err)
+ return err;
+
viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
return count;
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfbc15ca38dd..0908e6044333 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -47,7 +47,7 @@ struct virtio_balloon
struct task_struct *thread;
/* Waiting for host to ack the pages we released. */
- struct completion acked;
+ wait_queue_head_t acked;
/* Number of balloon pages we've told the Host we're not using. */
unsigned int num_pages;
@@ -89,29 +89,25 @@ static struct page *balloon_pfn_to_page(u32 pfn)
static void balloon_ack(struct virtqueue *vq)
{
- struct virtio_balloon *vb;
- unsigned int len;
+ struct virtio_balloon *vb = vq->vdev->priv;
- vb = virtqueue_get_buf(vq, &len);
- if (vb)
- complete(&vb->acked);
+ wake_up(&vb->acked);
}
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
struct scatterlist sg;
+ unsigned int len;
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
- init_completion(&vb->acked);
-
/* We should always be able to add one buffer to an empty queue. */
if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
- wait_for_completion(&vb->acked);
+ wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}
static void set_page_pfns(u32 pfns[], struct page *page)
@@ -231,12 +227,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
*/
static void stats_request(struct virtqueue *vq)
{
- struct virtio_balloon *vb;
- unsigned int len;
+ struct virtio_balloon *vb = vq->vdev->priv;
- vb = virtqueue_get_buf(vq, &len);
- if (!vb)
- return;
vb->need_stats_update = 1;
wake_up(&vb->config_change);
}
@@ -245,11 +237,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
{
struct virtqueue *vq;
struct scatterlist sg;
+ unsigned int len;
vb->need_stats_update = 0;
update_balloon_stats(vb);
vq = vb->stats_vq;
+ if (!virtqueue_get_buf(vq, &len))
+ return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
@@ -358,6 +353,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
INIT_LIST_HEAD(&vb->pages);
vb->num_pages = 0;
init_waitqueue_head(&vb->config_change);
+ init_waitqueue_head(&vb->acked);
vb->vdev = vdev;
vb->need_stats_update = 0;
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index fd2c7bd9dfbe..6743bde038cc 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -16,7 +16,7 @@ config W1_CON
depends on CONNECTOR
bool "Userspace communication over connector"
default y
- --- help ---
+ ---help---
This allows to communicate with userspace using connector. For more
information see <file:Documentation/connector/connector.txt>.
There are three types of messages between w1 core and userspace:
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a3b6a74c67a7..1cc61a700fa8 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -138,7 +138,7 @@ static int __devinit mxc_w1_probe(struct platform_device *pdev)
goto failed_ioremap;
}
- clk_enable(mdev->clk);
+ clk_prepare_enable(mdev->clk);
__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev->bus_master.data = mdev;
@@ -178,7 +178,7 @@ static int __devexit mxc_w1_remove(struct platform_device *pdev)
iounmap(mdev->regs);
release_mem_region(res->start, resource_size(res));
- clk_disable(mdev->clk);
+ clk_disable_unprepare(mdev->clk);
clk_put(mdev->clk);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 37096246c937..fe819b76de56 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -64,6 +64,18 @@ config SOFT_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called softdog.
+config DA9052_WATCHDOG
+ tristate "Dialog DA9052 Watchdog"
+ depends on PMIC_DA9052
+ select WATCHDOG_CORE
+ help
+ Support for the watchdog in the DA9052 PMIC. A watchdog trigger
+ causes a system reset.
+
+ Say Y here to include support for the DA9052 watchdog.
+ Alternatively say M to compile the driver as a module,
+ which will be called da9052_wdt.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
@@ -87,6 +99,7 @@ config WM8350_WATCHDOG
config ARM_SP805_WATCHDOG
tristate "ARM SP805 Watchdog"
depends on ARM_AMBA
+ select WATCHDOG_CORE
help
ARM Primecell SP805 Watchdog timer. This will reboot your system when
the timeout is reached.
@@ -129,17 +142,6 @@ config 977_WATCHDOG
Not sure? It's safe to say N.
-config IXP2000_WATCHDOG
- tristate "IXP2000 Watchdog"
- depends on ARCH_IXP2000
- help
- Say Y here if to include support for the watchdog timer
- in the Intel IXP2000(2400, 2800, 2850) network processors.
- This driver can be built as a module by choosing M. The module
- will be called ixp2000_wdt.
-
- Say N if you are unsure.
-
config IXP4XX_WATCHDOG
tristate "IXP4xx Watchdog"
depends on ARCH_IXP4XX
@@ -543,7 +545,7 @@ config WAFER_WDT
config I6300ESB_WDT
tristate "Intel 6300ESB Timer/Watchdog"
- depends on X86 && PCI
+ depends on PCI
---help---
Hardware driver for the watchdog timer built into the Intel
6300ESB controller hub.
@@ -551,6 +553,19 @@ config I6300ESB_WDT
To compile this driver as a module, choose M here: the
module will be called i6300esb.
+config IE6XX_WDT
+ tristate "Intel Atom E6xx Watchdog"
+ depends on X86 && PCI
+ select WATCHDOG_CORE
+ select MFD_CORE
+ select LPC_SCH
+ ---help---
+ Hardware driver for the watchdog timer built into the Intel
+ Atom E6XX (TunnelCreek) processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ie6xx_wdt.
+
config INTEL_SCU_WATCHDOG
bool "Intel SCU Watchdog for Mobile Platforms"
depends on X86_MRST
@@ -563,6 +578,7 @@ config INTEL_SCU_WATCHDOG
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
+ select LPC_ICH
---help---
Hardware driver for the intel TCO timer based watchdog devices.
These drivers are included in the Intel 82801 I/O Controller
@@ -607,7 +623,12 @@ config IT87_WDT
depends on X86 && EXPERIMENTAL
---help---
This is the driver for the hardware watchdog on the ITE IT8702,
- IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 Super I/O chips.
+ IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 and IT8728
+ Super I/O chips.
+
+ If the driver does not work, then make sure that the game port in
+ the BIOS is enabled.
+
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
amount of time.
@@ -780,7 +801,7 @@ config SMSC37B787_WDT
config VIA_WDT
tristate "VIA Watchdog Timer"
- depends on X86
+ depends on X86 && PCI
select WATCHDOG_CORE
---help---
This is the driver for the hardware watchdog timer on VIA
@@ -937,7 +958,7 @@ config BCM47XX_WDT
tristate "Broadcom BCM47xx Watchdog Timer"
depends on BCM47XX
help
- Hardware driver for the Broadcom BCM47xx Watchog Timer.
+ Hardware driver for the Broadcom BCM47xx Watchdog Timer.
config RC32434_WDT
tristate "IDT RC32434 SoC Watchdog Timer"
@@ -1138,6 +1159,7 @@ config ZVM_WATCHDOG
config SH_WDT
tristate "SuperH Watchdog"
depends on SUPERH && (CPU_SH3 || CPU_SH4)
+ select WATCHDOG_CORE
help
This driver adds watchdog support for the integrated watchdog in the
SuperH processors. If you have one of these processors and wish
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index e8f479a16402..572b39bed06a 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o
obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o
obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
obj-$(CONFIG_977_WATCHDOG) += wdt977.o
-obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
@@ -81,6 +80,7 @@ obj-$(CONFIG_IB700_WDT) += ib700wdt.o
obj-$(CONFIG_IBMASR) += ibmasr.o
obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
+obj-$(CONFIG_IE6XX_WDT) += ie6xx_wdt.o
obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o
ifeq ($(CONFIG_ITCO_VENDOR_SUPPORT),y)
obj-$(CONFIG_ITCO_WDT) += iTCO_vendor_support.o
@@ -163,6 +163,7 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
+obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 639ae9a23fbc..dc30dbd21cf1 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -282,29 +282,19 @@ static int __devinit ar7_wdt_probe(struct platform_device *pdev)
platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!ar7_regs_wdt) {
pr_err("could not get registers resource\n");
- rc = -ENODEV;
- goto out;
- }
-
- if (!request_mem_region(ar7_regs_wdt->start,
- resource_size(ar7_regs_wdt), LONGNAME)) {
- pr_warn("watchdog I/O region busy\n");
- rc = -EBUSY;
- goto out;
+ return -ENODEV;
}
- ar7_wdt = ioremap(ar7_regs_wdt->start, resource_size(ar7_regs_wdt));
+ ar7_wdt = devm_request_and_ioremap(&pdev->dev, ar7_regs_wdt);
if (!ar7_wdt) {
pr_err("could not ioremap registers\n");
- rc = -ENXIO;
- goto out_mem_region;
+ return -ENXIO;
}
vbus_clk = clk_get(NULL, "vbus");
if (IS_ERR(vbus_clk)) {
pr_err("could not get vbus clock\n");
- rc = PTR_ERR(vbus_clk);
- goto out_mem_region;
+ return PTR_ERR(vbus_clk);
}
ar7_wdt_disable_wdt();
@@ -314,24 +304,21 @@ static int __devinit ar7_wdt_probe(struct platform_device *pdev)
rc = misc_register(&ar7_wdt_miscdev);
if (rc) {
pr_err("unable to register misc device\n");
- goto out_alloc;
+ goto out;
}
- goto out;
+ return 0;
-out_alloc:
- iounmap(ar7_wdt);
-out_mem_region:
- release_mem_region(ar7_regs_wdt->start, resource_size(ar7_regs_wdt));
out:
+ clk_put(vbus_clk);
+ vbus_clk = NULL;
return rc;
}
static int __devexit ar7_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ar7_wdt_miscdev);
- iounmap(ar7_wdt);
- release_mem_region(ar7_regs_wdt->start, resource_size(ar7_regs_wdt));
-
+ clk_put(vbus_clk);
+ vbus_clk = NULL;
return 0;
}
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
new file mode 100644
index 000000000000..3f75129eb0a9
--- /dev/null
+++ b/drivers/watchdog/da9052_wdt.c
@@ -0,0 +1,251 @@
+/*
+ * System monitoring driver for DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Anthony Olech <Anthony.Olech@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/watchdog.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
+#define DA9052_DEF_TIMEOUT 4
+#define DA9052_TWDMIN 256
+
+struct da9052_wdt_data {
+ struct watchdog_device wdt;
+ struct da9052 *da9052;
+ struct kref kref;
+ unsigned long jpast;
+};
+
+static const struct {
+ u8 reg_val;
+ int time; /* Seconds */
+} da9052_wdt_maps[] = {
+ { 1, 2 },
+ { 2, 4 },
+ { 3, 8 },
+ { 4, 16 },
+ { 5, 32 },
+ { 5, 33 }, /* Actual time 32.768s so include both, 32s and 33s */
+ { 6, 65 },
+ { 6, 66 }, /* Actual time 65.536s so include both, 65s and 66s */
+ { 7, 131 },
+};
+
+
+static void da9052_wdt_release_resources(struct kref *r)
+{
+ struct da9052_wdt_data *driver_data =
+ container_of(r, struct da9052_wdt_data, kref);
+
+ kfree(driver_data);
+}
+
+static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9052 *da9052 = driver_data->da9052;
+ int ret, i;
+
+ /*
+ * Disable the Watchdog timer before setting
+ * new time out.
+ */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE, 0);
+ if (ret < 0) {
+ dev_err(da9052->dev, "Failed to disable watchdog bit, %d\n",
+ ret);
+ return ret;
+ }
+ if (timeout) {
+ /*
+ * To change the timeout, da9052 needs to
+ * be disabled for at least 150 us.
+ */
+ udelay(150);
+
+ /* Set the desired timeout */
+ for (i = 0; i < ARRAY_SIZE(da9052_wdt_maps); i++)
+ if (da9052_wdt_maps[i].time == timeout)
+ break;
+
+ if (i == ARRAY_SIZE(da9052_wdt_maps))
+ ret = -EINVAL;
+ else
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE,
+ da9052_wdt_maps[i].reg_val);
+ if (ret < 0) {
+ dev_err(da9052->dev,
+ "Failed to update timescale bit, %d\n", ret);
+ return ret;
+ }
+
+ wdt_dev->timeout = timeout;
+ driver_data->jpast = jiffies;
+ }
+
+ return 0;
+}
+
+static void da9052_wdt_ref(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_get(&driver_data->kref);
+}
+
+static void da9052_wdt_unref(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_put(&driver_data->kref, da9052_wdt_release_resources);
+}
+
+static int da9052_wdt_start(struct watchdog_device *wdt_dev)
+{
+ return da9052_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9052_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ return da9052_wdt_set_timeout(wdt_dev, 0);
+}
+
+static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9052 *da9052 = driver_data->da9052;
+ unsigned long msec, jnow = jiffies;
+ int ret;
+
+ /*
+ * We have a minimum time for watchdog window called TWDMIN. A write
+ * to the watchdog before this elapsed time should cause an error.
+ */
+ msec = (jnow - driver_data->jpast) * 1000/HZ;
+ if (msec < DA9052_TWDMIN)
+ mdelay(msec);
+
+ /* Reset the watchdog timer */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_WATCHDOG, 1 << 7);
+ if (ret < 0)
+ goto err_strobe;
+
+ /*
+ * FIXME: Reset the watchdog core, in general PMIC
+ * is supposed to do this
+ */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_WATCHDOG, 0 << 7);
+err_strobe:
+ return ret;
+}
+
+static struct watchdog_info da9052_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "DA9052 Watchdog",
+};
+
+static const struct watchdog_ops da9052_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = da9052_wdt_start,
+ .stop = da9052_wdt_stop,
+ .ping = da9052_wdt_ping,
+ .set_timeout = da9052_wdt_set_timeout,
+ .ref = da9052_wdt_ref,
+ .unref = da9052_wdt_unref,
+};
+
+
+static int __devinit da9052_wdt_probe(struct platform_device *pdev)
+{
+ struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+ struct da9052_wdt_data *driver_data;
+ struct watchdog_device *da9052_wdt;
+ int ret;
+
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(da9052->dev, "Unable to allocate watchdog device\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ driver_data->da9052 = da9052;
+
+ da9052_wdt = &driver_data->wdt;
+
+ da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+ da9052_wdt->info = &da9052_wdt_info;
+ da9052_wdt->ops = &da9052_wdt_ops;
+ watchdog_set_drvdata(da9052_wdt, driver_data);
+
+ kref_init(&driver_data->kref);
+
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = watchdog_register_device(&driver_data->wdt);
+ if (ret != 0) {
+ dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+ goto err;
+ }
+
+ dev_set_drvdata(&pdev->dev, driver_data);
+err:
+ return ret;
+}
+
+static int __devexit da9052_wdt_remove(struct platform_device *pdev)
+{
+ struct da9052_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+ watchdog_unregister_device(&driver_data->wdt);
+ kref_put(&driver_data->kref, da9052_wdt_release_resources);
+
+ return 0;
+}
+
+static struct platform_driver da9052_wdt_driver = {
+ .probe = da9052_wdt_probe,
+ .remove = __devexit_p(da9052_wdt_remove),
+ .driver = {
+ .name = "da9052-watchdog",
+ },
+};
+
+module_platform_driver(da9052_wdt_driver);
+
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-watchdog");
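
/*
 * Aside: a self-contained sketch of the ping-window arithmetic used in
 * da9052_wdt_ping() above. The HZ and TWDMIN values here are assumed purely
 * for the example.
 */
#define EXAMPLE_HZ	100	/* assumed tick rate */
#define EXAMPLE_TWDMIN	256	/* assumed minimum window, in milliseconds */

static unsigned long example_window_delay(unsigned long jnow, unsigned long jpast)
{
	unsigned long msec = (jnow - jpast) * 1000 / EXAMPLE_HZ;

	/* still inside the minimum window: wait out the remainder */
	return (msec < EXAMPLE_TWDMIN) ? EXAMPLE_TWDMIN - msec : 0;
}

/*
 * e.g. 10 ticks after the last update: (10 * 1000) / 100 = 100 ms elapsed,
 * so 256 - 100 = 156 ms still have to pass before the strobe may be written.
 */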
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 23885f2d56a0..1eff743ec497 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -146,7 +146,7 @@ struct cmn_registers {
} __attribute__((packed));
static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump;
+static unsigned int allow_kdump = 1;
static unsigned int is_icru;
static DEFINE_SPINLOCK(rom_lock);
static void *cru_rom_addr;
@@ -756,6 +756,8 @@ error:
static void hpwdt_exit_nmi_decoding(void)
{
unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
+ unregister_nmi_handler(NMI_SERR, "hpwdt");
+ unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
if (cru_rom_addr)
iounmap(cru_rom_addr);
}
@@ -861,16 +863,6 @@ static struct pci_driver hpwdt_driver = {
.remove = __devexit_p(hpwdt_exit),
};
-static void __exit hpwdt_cleanup(void)
-{
- pci_unregister_driver(&hpwdt_driver);
-}
-
-static int __init hpwdt_init(void)
-{
- return pci_register_driver(&hpwdt_driver);
-}
-
MODULE_AUTHOR("Tom Mingarelli");
MODULE_DESCRIPTION("hp watchdog driver");
MODULE_LICENSE("GPL");
@@ -889,5 +881,4 @@ module_param(allow_kdump, int, 0);
MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
#endif /* !CONFIG_HPWDT_NMI_DECODING */
-module_init(hpwdt_init);
-module_exit(hpwdt_cleanup);
+module_pci_driver(hpwdt_driver);
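
/*
 * For context, module_pci_driver() generates exactly the init/exit
 * boilerplate removed above; it is roughly equivalent to this open-coded
 * form (a simplified sketch of the generic module_driver() helper):
 */
static int __init hpwdt_driver_init(void)
{
	return pci_register_driver(&hpwdt_driver);
}
module_init(hpwdt_driver_init);

static void __exit hpwdt_driver_exit(void)
{
	pci_unregister_driver(&hpwdt_driver);
}
module_exit(hpwdt_driver_exit);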
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index 738032a36bcf..276877d5b6a3 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -492,19 +492,7 @@ static struct pci_driver esb_driver = {
.shutdown = esb_shutdown,
};
-static int __init watchdog_init(void)
-{
- return pci_register_driver(&esb_driver);
-}
-
-static void __exit watchdog_cleanup(void)
-{
- pci_unregister_driver(&esb_driver);
- pr_info("Watchdog Module Unloaded\n");
-}
-
-module_init(watchdog_init);
-module_exit(watchdog_cleanup);
+module_pci_driver(esb_driver);
MODULE_AUTHOR("Ross Biro and David Härdeman");
MODULE_DESCRIPTION("Watchdog driver for Intel 6300ESB chipsets");
diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h
index 9e27e6422f66..3c57b45537a2 100644
--- a/drivers/watchdog/iTCO_vendor.h
+++ b/drivers/watchdog/iTCO_vendor.h
@@ -1,8 +1,8 @@
/* iTCO Vendor Specific Support hooks */
#ifdef CONFIG_ITCO_VENDOR_SUPPORT
-extern void iTCO_vendor_pre_start(unsigned long, unsigned int);
-extern void iTCO_vendor_pre_stop(unsigned long);
-extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int);
+extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
+extern void iTCO_vendor_pre_stop(struct resource *);
+extern void iTCO_vendor_pre_keepalive(struct resource *, unsigned int);
extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
extern int iTCO_vendor_check_noreboot_on(void);
#else
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index 2721d29ce243..b6b2f90b5d44 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -35,11 +35,6 @@
#include "iTCO_vendor.h"
-/* iTCO defines */
-#define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */
-#define TCOBASE (acpibase + 0x60) /* TCO base address */
-#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
-
/* List of vendor support modes */
/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
#define SUPERMICRO_OLD_BOARD 1
@@ -82,24 +77,24 @@ MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
* 20.6 seconds.
*/
-static void supermicro_old_pre_start(unsigned long acpibase)
+static void supermicro_old_pre_start(struct resource *smires)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
- outl(val32, SMI_EN); /* Needed to activate watchdog */
+ outl(val32, smires->start); /* Needed to activate watchdog */
}
-static void supermicro_old_pre_stop(unsigned long acpibase)
+static void supermicro_old_pre_stop(struct resource *smires)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
val32 |= 0x00002000; /* Turn on SMI clearing watchdog */
- outl(val32, SMI_EN); /* Needed to deactivate watchdog */
+ outl(val32, smires->start); /* Needed to deactivate watchdog */
}
/*
@@ -270,66 +265,66 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
* Don't use this fix if you don't need to!!!
*/
-static void broken_bios_start(unsigned long acpibase)
+static void broken_bios_start(struct resource *smires)
{
unsigned long val32;
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
val32 &= 0xffffdffe;
- outl(val32, SMI_EN);
+ outl(val32, smires->start);
}
-static void broken_bios_stop(unsigned long acpibase)
+static void broken_bios_stop(struct resource *smires)
{
unsigned long val32;
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
/* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
val32 |= 0x00002001;
- outl(val32, SMI_EN);
+ outl(val32, smires->start);
}
/*
* Generic Support Functions
*/
-void iTCO_vendor_pre_start(unsigned long acpibase,
+void iTCO_vendor_pre_start(struct resource *smires,
unsigned int heartbeat)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
- supermicro_old_pre_start(acpibase);
+ supermicro_old_pre_start(smires);
break;
case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_start(heartbeat);
break;
case BROKEN_BIOS:
- broken_bios_start(acpibase);
+ broken_bios_start(smires);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);
-void iTCO_vendor_pre_stop(unsigned long acpibase)
+void iTCO_vendor_pre_stop(struct resource *smires)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
- supermicro_old_pre_stop(acpibase);
+ supermicro_old_pre_stop(smires);
break;
case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_stop();
break;
case BROKEN_BIOS:
- broken_bios_stop(acpibase);
+ broken_bios_stop(smires);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);
-void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
+void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat)
{
if (vendorsupport == SUPERMICRO_NEW_BOARD)
supermicro_new_pre_set_heartbeat(heartbeat);
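
/*
 * Aside: an illustrative sketch of the new calling convention. Instead of a
 * raw ACPI base address, callers now hand the vendor hooks the SMI_EN I/O
 * resource and the hooks poke smires->start directly (assumes
 * <linux/ioport.h> and <linux/io.h>; the port number is made up for the
 * example).
 */
static struct resource example_smi_res = {
	.start	= 0x0430,		/* assumed SMI_EN port */
	.end	= 0x0433,
	.flags	= IORESOURCE_IO,
};

static void example_disable_tco_smi(struct resource *smires)
{
	u32 val32 = inl(smires->start);

	val32 &= ~(1 << 13);		/* TCO_EN = 0, as in the hooks above */
	outl(val32, smires->start);
}
/* usage: example_disable_tco_smi(&example_smi_res); */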
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 9fecb95645a3..9c2c27c3b424 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,316 +66,16 @@
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
#include "iTCO_vendor.h"
-/* TCO related info */
-enum iTCO_chipsets {
- TCO_ICH = 0, /* ICH */
- TCO_ICH0, /* ICH0 */
- TCO_ICH2, /* ICH2 */
- TCO_ICH2M, /* ICH2-M */
- TCO_ICH3, /* ICH3-S */
- TCO_ICH3M, /* ICH3-M */
- TCO_ICH4, /* ICH4 */
- TCO_ICH4M, /* ICH4-M */
- TCO_CICH, /* C-ICH */
- TCO_ICH5, /* ICH5 & ICH5R */
- TCO_6300ESB, /* 6300ESB */
- TCO_ICH6, /* ICH6 & ICH6R */
- TCO_ICH6M, /* ICH6-M */
- TCO_ICH6W, /* ICH6W & ICH6RW */
- TCO_631XESB, /* 631xESB/632xESB */
- TCO_ICH7, /* ICH7 & ICH7R */
- TCO_ICH7DH, /* ICH7DH */
- TCO_ICH7M, /* ICH7-M & ICH7-U */
- TCO_ICH7MDH, /* ICH7-M DH */
- TCO_NM10, /* NM10 */
- TCO_ICH8, /* ICH8 & ICH8R */
- TCO_ICH8DH, /* ICH8DH */
- TCO_ICH8DO, /* ICH8DO */
- TCO_ICH8M, /* ICH8M */
- TCO_ICH8ME, /* ICH8M-E */
- TCO_ICH9, /* ICH9 */
- TCO_ICH9R, /* ICH9R */
- TCO_ICH9DH, /* ICH9DH */
- TCO_ICH9DO, /* ICH9DO */
- TCO_ICH9M, /* ICH9M */
- TCO_ICH9ME, /* ICH9M-E */
- TCO_ICH10, /* ICH10 */
- TCO_ICH10R, /* ICH10R */
- TCO_ICH10D, /* ICH10D */
- TCO_ICH10DO, /* ICH10DO */
- TCO_PCH, /* PCH Desktop Full Featured */
- TCO_PCHM, /* PCH Mobile Full Featured */
- TCO_P55, /* P55 */
- TCO_PM55, /* PM55 */
- TCO_H55, /* H55 */
- TCO_QM57, /* QM57 */
- TCO_H57, /* H57 */
- TCO_HM55, /* HM55 */
- TCO_Q57, /* Q57 */
- TCO_HM57, /* HM57 */
- TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
- TCO_QS57, /* QS57 */
- TCO_3400, /* 3400 */
- TCO_3420, /* 3420 */
- TCO_3450, /* 3450 */
- TCO_EP80579, /* EP80579 */
- TCO_CPT, /* Cougar Point */
- TCO_CPTD, /* Cougar Point Desktop */
- TCO_CPTM, /* Cougar Point Mobile */
- TCO_PBG, /* Patsburg */
- TCO_DH89XXCC, /* DH89xxCC */
- TCO_PPT, /* Panther Point */
- TCO_LPT, /* Lynx Point */
-};
-
-static struct {
- char *name;
- unsigned int iTCO_version;
-} iTCO_chipset_info[] __devinitdata = {
- {"ICH", 1},
- {"ICH0", 1},
- {"ICH2", 1},
- {"ICH2-M", 1},
- {"ICH3-S", 1},
- {"ICH3-M", 1},
- {"ICH4", 1},
- {"ICH4-M", 1},
- {"C-ICH", 1},
- {"ICH5 or ICH5R", 1},
- {"6300ESB", 1},
- {"ICH6 or ICH6R", 2},
- {"ICH6-M", 2},
- {"ICH6W or ICH6RW", 2},
- {"631xESB/632xESB", 2},
- {"ICH7 or ICH7R", 2},
- {"ICH7DH", 2},
- {"ICH7-M or ICH7-U", 2},
- {"ICH7-M DH", 2},
- {"NM10", 2},
- {"ICH8 or ICH8R", 2},
- {"ICH8DH", 2},
- {"ICH8DO", 2},
- {"ICH8M", 2},
- {"ICH8M-E", 2},
- {"ICH9", 2},
- {"ICH9R", 2},
- {"ICH9DH", 2},
- {"ICH9DO", 2},
- {"ICH9M", 2},
- {"ICH9M-E", 2},
- {"ICH10", 2},
- {"ICH10R", 2},
- {"ICH10D", 2},
- {"ICH10DO", 2},
- {"PCH Desktop Full Featured", 2},
- {"PCH Mobile Full Featured", 2},
- {"P55", 2},
- {"PM55", 2},
- {"H55", 2},
- {"QM57", 2},
- {"H57", 2},
- {"HM55", 2},
- {"Q57", 2},
- {"HM57", 2},
- {"PCH Mobile SFF Full Featured", 2},
- {"QS57", 2},
- {"3400", 2},
- {"3420", 2},
- {"3450", 2},
- {"EP80579", 2},
- {"Cougar Point", 2},
- {"Cougar Point Desktop", 2},
- {"Cougar Point Mobile", 2},
- {"Patsburg", 2},
- {"DH89xxCC", 2},
- {"Panther Point", 2},
- {"Lynx Point", 2},
- {NULL, 0}
-};
-
-/*
- * This data only exists for exporting the supported PCI ids
- * via MODULE_DEVICE_TABLE. We do not actually register a
- * pci_driver, because the I/O Controller Hub has also other
- * functions that probably will be registered by other drivers.
- */
-static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
- { PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
- { PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
- { PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
- { PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
- { PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
- { PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
- { PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
- { PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
- { PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
- { PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
- { PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
- { PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
- { PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
- { PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
- { PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
- { PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
- { PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
- { PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
- { PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
- { PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
- { PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
- { PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
- { PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
- { PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
- { PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
- { PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
- { PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
- { PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
- { PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
- { PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
- { PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
- { PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
- { PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
- { PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
- { PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
- { PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
- { PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
- { PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
- { PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
- { PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
- { PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
- { PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
- { PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
- { PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
- { PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
- { PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
- { PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
- { PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
- { PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
- { PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
- { PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
- { PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
- { PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
- { PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
- { PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
- { PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
- { 0, }, /* End of list */
-};
-MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
-
/* Address definitions for the TCO */
/* TCO base address */
-#define TCOBASE (iTCO_wdt_private.ACPIBASE + 0x60)
+#define TCOBASE (iTCO_wdt_private.tco_res->start)
/* SMI Control and Enable Register */
-#define SMI_EN (iTCO_wdt_private.ACPIBASE + 0x30)
+#define SMI_EN (iTCO_wdt_private.smi_res->start)
#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */
#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */
@@ -393,19 +93,18 @@ static char expect_release;
static struct { /* this is private data for the iTCO_wdt device */
/* TCO version/generation */
unsigned int iTCO_version;
- /* The device's ACPIBASE address (TCOBASE = ACPIBASE+0x60) */
- unsigned long ACPIBASE;
+ struct resource *tco_res;
+ struct resource *smi_res;
+ struct resource *gcs_res;
/* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/
unsigned long __iomem *gcs;
/* the lock for io operations */
spinlock_t io_lock;
+ struct platform_device *dev;
/* the PCI-device */
struct pci_dev *pdev;
} iTCO_wdt_private;
-/* the watchdog platform device */
-static struct platform_device *iTCO_wdt_platform_device;
-
/* module parameters */
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
@@ -485,7 +184,7 @@ static int iTCO_wdt_start(void)
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_start(iTCO_wdt_private.ACPIBASE, heartbeat);
+ iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, heartbeat);
/* disable chipset's NO_REBOOT bit */
if (iTCO_wdt_unset_NO_REBOOT_bit()) {
@@ -519,7 +218,7 @@ static int iTCO_wdt_stop(void)
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_stop(iTCO_wdt_private.ACPIBASE);
+ iTCO_vendor_pre_stop(iTCO_wdt_private.smi_res);
/* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
val = inw(TCO1_CNT);
@@ -541,7 +240,7 @@ static int iTCO_wdt_keepalive(void)
{
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat);
+ iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, heartbeat);
/* Reload the timer by writing to the TCO Timer Counter register */
if (iTCO_wdt_private.iTCO_version == 2)
@@ -786,83 +485,120 @@ static struct miscdevice iTCO_wdt_miscdev = {
* Init & exit routines
*/
-static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
- const struct pci_device_id *ent, struct platform_device *dev)
+static void __devexit iTCO_wdt_cleanup(void)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ iTCO_wdt_stop();
+
+ /* Deregister */
+ misc_deregister(&iTCO_wdt_miscdev);
+
+ /* release resources */
+ release_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res));
+ release_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res));
+ if (iTCO_wdt_private.iTCO_version == 2) {
+ iounmap(iTCO_wdt_private.gcs);
+ release_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+ }
+
+ iTCO_wdt_private.tco_res = NULL;
+ iTCO_wdt_private.smi_res = NULL;
+ iTCO_wdt_private.gcs_res = NULL;
+ iTCO_wdt_private.gcs = NULL;
+}
+
+static int __devinit iTCO_wdt_probe(struct platform_device *dev)
{
- int ret;
- u32 base_address;
- unsigned long RCBA;
+ int ret = -ENODEV;
unsigned long val32;
+ struct lpc_ich_info *ich_info = dev->dev.platform_data;
+
+ if (!ich_info)
+ goto out;
+
+ spin_lock_init(&iTCO_wdt_private.io_lock);
+
+ iTCO_wdt_private.tco_res =
+ platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_TCO);
+ if (!iTCO_wdt_private.tco_res)
+ goto out;
+
+ iTCO_wdt_private.smi_res =
+ platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_SMI);
+ if (!iTCO_wdt_private.smi_res)
+ goto out;
+
+ iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+ iTCO_wdt_private.dev = dev;
+ iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
/*
- * Find the ACPI/PM base I/O address which is the base
- * for the TCO registers (TCOBASE=ACPIBASE + 0x60)
- * ACPIBASE is bits [15:7] from 0x40-0x43
+ * Get the Memory-Mapped GCS register, we need it for the
+ * NO_REBOOT flag (TCO v2).
*/
- pci_read_config_dword(pdev, 0x40, &base_address);
- base_address &= 0x0000ff80;
- if (base_address == 0x00000000) {
- /* Something's wrong here, ACPIBASE has to be set */
- pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n");
- return -ENODEV;
- }
- iTCO_wdt_private.iTCO_version =
- iTCO_chipset_info[ent->driver_data].iTCO_version;
- iTCO_wdt_private.ACPIBASE = base_address;
- iTCO_wdt_private.pdev = pdev;
-
- /* Get the Memory-Mapped GCS register, we need it for the
- NO_REBOOT flag (TCO v2). To get access to it you have to
- read RCBA from PCI Config space 0xf0 and use it as base.
- GCS = RCBA + ICH6_GCS(0x3410). */
if (iTCO_wdt_private.iTCO_version == 2) {
- pci_read_config_dword(pdev, 0xf0, &base_address);
- if ((base_address & 1) == 0) {
- pr_err("RCBA is disabled by hardware/BIOS, device disabled\n");
- ret = -ENODEV;
+ iTCO_wdt_private.gcs_res = platform_get_resource(dev,
+ IORESOURCE_MEM,
+ ICH_RES_MEM_GCS);
+
+ if (!iTCO_wdt_private.gcs_res)
+ goto out;
+
+ if (!request_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res), dev->name)) {
+ ret = -EBUSY;
goto out;
}
- RCBA = base_address & 0xffffc000;
- iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4);
+ iTCO_wdt_private.gcs = ioremap(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+ if (!iTCO_wdt_private.gcs) {
+ ret = -EIO;
+ goto unreg_gcs;
+ }
}
/* Check chipset's NO_REBOOT bit */
if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
ret = -ENODEV; /* Cannot reset NO_REBOOT bit */
- goto out_unmap;
+ goto unmap_gcs;
}
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
iTCO_wdt_set_NO_REBOOT_bit();
/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
- if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
- pr_err("I/O address 0x%04lx already in use, device disabled\n",
- SMI_EN);
- ret = -EIO;
- goto out_unmap;
+ if (!request_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res), dev->name)) {
+ pr_err("I/O address 0x%04llx already in use, device disabled\n",
+ (u64)SMI_EN);
+ ret = -EBUSY;
+ goto unmap_gcs;
}
if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
- /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
+ /*
+ * Bit 13: TCO_EN -> 0
+ * Disables TCO logic generating an SMI#
+ */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN);
}
- /* The TCO I/O registers reside in a 32-byte range pointed to
- by the TCOBASE value */
- if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) {
- pr_err("I/O address 0x%04lx already in use, device disabled\n",
- TCOBASE);
- ret = -EIO;
- goto unreg_smi_en;
+ if (!request_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res), dev->name)) {
+ pr_err("I/O address 0x%04llx already in use, device disabled\n",
+ (u64)TCOBASE);
+ ret = -EBUSY;
+ goto unreg_smi;
}
- pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n",
- iTCO_chipset_info[ent->driver_data].name,
- iTCO_chipset_info[ent->driver_data].iTCO_version,
- TCOBASE);
+ pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
+ ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
/* Clear out the (probably old) status */
outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
@@ -883,7 +619,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
if (ret != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
- goto unreg_region;
+ goto unreg_tco;
}
pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
@@ -891,62 +627,31 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
return 0;
-unreg_region:
- release_region(TCOBASE, 0x20);
-unreg_smi_en:
- release_region(SMI_EN, 4);
-out_unmap:
+unreg_tco:
+ release_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res));
+unreg_smi:
+ release_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res));
+unmap_gcs:
if (iTCO_wdt_private.iTCO_version == 2)
iounmap(iTCO_wdt_private.gcs);
-out:
- iTCO_wdt_private.ACPIBASE = 0;
- return ret;
-}
-
-static void __devexit iTCO_wdt_cleanup(void)
-{
- /* Stop the timer before we leave */
- if (!nowayout)
- iTCO_wdt_stop();
-
- /* Deregister */
- misc_deregister(&iTCO_wdt_miscdev);
- release_region(TCOBASE, 0x20);
- release_region(SMI_EN, 4);
+unreg_gcs:
if (iTCO_wdt_private.iTCO_version == 2)
- iounmap(iTCO_wdt_private.gcs);
- pci_dev_put(iTCO_wdt_private.pdev);
- iTCO_wdt_private.ACPIBASE = 0;
-}
-
-static int __devinit iTCO_wdt_probe(struct platform_device *dev)
-{
- int ret = -ENODEV;
- int found = 0;
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *ent;
-
- spin_lock_init(&iTCO_wdt_private.io_lock);
-
- for_each_pci_dev(pdev) {
- ent = pci_match_id(iTCO_wdt_pci_tbl, pdev);
- if (ent) {
- found++;
- ret = iTCO_wdt_init(pdev, ent, dev);
- if (!ret)
- break;
- }
- }
-
- if (!found)
- pr_info("No device detected\n");
+ release_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+out:
+ iTCO_wdt_private.tco_res = NULL;
+ iTCO_wdt_private.smi_res = NULL;
+ iTCO_wdt_private.gcs_res = NULL;
+ iTCO_wdt_private.gcs = NULL;
return ret;
}
static int __devexit iTCO_wdt_remove(struct platform_device *dev)
{
- if (iTCO_wdt_private.ACPIBASE)
+ if (iTCO_wdt_private.tco_res || iTCO_wdt_private.smi_res)
iTCO_wdt_cleanup();
return 0;
@@ -977,23 +682,11 @@ static int __init iTCO_wdt_init_module(void)
if (err)
return err;
- iTCO_wdt_platform_device = platform_device_register_simple(DRV_NAME,
- -1, NULL, 0);
- if (IS_ERR(iTCO_wdt_platform_device)) {
- err = PTR_ERR(iTCO_wdt_platform_device);
- goto unreg_platform_driver;
- }
-
return 0;
-
-unreg_platform_driver:
- platform_driver_unregister(&iTCO_wdt_driver);
- return err;
}
static void __exit iTCO_wdt_cleanup_module(void)
{
- platform_device_unregister(iTCO_wdt_platform_device);
platform_driver_unregister(&iTCO_wdt_driver);
pr_info("Watchdog Module Unloaded\n");
}
@@ -1006,3 +699,4 @@ MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRV_NAME);
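
/*
 * Aside: with the TCO v2 GCS region now handed over as a platform resource,
 * clearing the NO_REBOOT flag boils down to flipping bit 5 of the ioremapped
 * GCS register, roughly along these lines (sketch only; assumes
 * <linux/types.h> and <linux/io.h>):
 */
static int example_unset_no_reboot(void __iomem *gcs)
{
	u32 val32 = readl(gcs);

	val32 &= ~(1 << 5);		/* NO_REBOOT is GCS bit 5 on TCO v2 */
	writel(val32, gcs);

	/* some BIOSes keep the bit latched; report whether it really cleared */
	return (readl(gcs) & (1 << 5)) ? -EIO : 0;
}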
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
new file mode 100644
index 000000000000..5f0d776f902c
--- /dev/null
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -0,0 +1,348 @@
+/*
+ * Intel Atom E6xx Watchdog driver
+ *
+ * Copyright (C) 2011 Alexander Stein
+ * <alexander.stein@systec-electronic.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+
+#define DRIVER_NAME "ie6xx_wdt"
+
+#define PV1 0x00
+#define PV2 0x04
+
+#define RR0 0x0c
+#define RR1 0x0d
+#define WDT_RELOAD 0x01
+#define WDT_TOUT 0x02
+
+#define WDTCR 0x10
+#define WDT_PRE_SEL 0x04
+#define WDT_RESET_SEL 0x08
+#define WDT_RESET_EN 0x10
+#define WDT_TOUT_EN 0x20
+
+#define DCR 0x14
+
+#define WDTLR 0x18
+#define WDT_LOCK 0x01
+#define WDT_ENABLE 0x02
+#define WDT_TOUT_CNF 0x03
+
+#define MIN_TIME 1
+#define MAX_TIME (10 * 60) /* 10 minutes */
+#define DEFAULT_TIME 60
+
+static unsigned int timeout = DEFAULT_TIME;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout,
+ "Default Watchdog timer setting ("
+	__MODULE_STRING(DEFAULT_TIME) "s). "
+	"The range is from 1 to 600");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static u8 resetmode = 0x10;
+module_param(resetmode, byte, 0);
+MODULE_PARM_DESC(resetmode,
+ "Resetmode bits: 0x08 warm reset (cold reset otherwise), "
+ "0x10 reset enable, 0x20 disable toggle GPIO[4] (default=0x10)");
+
+static struct {
+ unsigned short sch_wdtba;
+	spinlock_t unlock_sequence;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+} ie6xx_wdt_data;
+
+/*
+ * This unlock sequence is required before writing to the preload and reload
+ * registers; hold ie6xx_wdt_data.unlock_sequence around it so the two-write
+ * sequence cannot be interrupted.
+ */
+static void ie6xx_wdt_unlock_registers(void)
+{
+ outb(0x80, ie6xx_wdt_data.sch_wdtba + RR0);
+ outb(0x86, ie6xx_wdt_data.sch_wdtba + RR0);
+}
+
+static int ie6xx_wdt_ping(struct watchdog_device *wdd)
+{
+ spin_lock(&ie6xx_wdt_data.unlock_sequence);
+ ie6xx_wdt_unlock_registers();
+ outb(WDT_RELOAD, ie6xx_wdt_data.sch_wdtba + RR1);
+ spin_unlock(&ie6xx_wdt_data.unlock_sequence);
+ return 0;
+}
+
+static int ie6xx_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
+{
+ u32 preload;
+ u64 clock;
+ u8 wdtcr;
+
+ /* Watchdog clock is PCI Clock (33MHz) */
+ clock = 33000000;
+ /* and the preload value is loaded into [34:15] of the down counter */
+ preload = (t * clock) >> 15;
+ /*
+ * Manual states preload must be one less.
+ * Does not wrap as t is at least 1
+ */
+ preload -= 1;
+
+ spin_lock(&ie6xx_wdt_data.unlock_sequence);
+
+ /* Set ResetMode & Enable prescaler for range 10ms to 10 min */
+ wdtcr = resetmode & 0x38;
+ outb(wdtcr, ie6xx_wdt_data.sch_wdtba + WDTCR);
+
+ ie6xx_wdt_unlock_registers();
+ outl(0, ie6xx_wdt_data.sch_wdtba + PV1);
+
+ ie6xx_wdt_unlock_registers();
+ outl(preload, ie6xx_wdt_data.sch_wdtba + PV2);
+
+ ie6xx_wdt_unlock_registers();
+ outb(WDT_RELOAD | WDT_TOUT, ie6xx_wdt_data.sch_wdtba + RR1);
+
+ spin_unlock(&ie6xx_wdt_data.unlock_sequence);
+
+ wdd->timeout = t;
+ return 0;
+}
+
+static int ie6xx_wdt_start(struct watchdog_device *wdd)
+{
+ ie6xx_wdt_set_timeout(wdd, wdd->timeout);
+
+ /* Enable the watchdog timer */
+ spin_lock(&ie6xx_wdt_data.unlock_sequence);
+ outb(WDT_ENABLE, ie6xx_wdt_data.sch_wdtba + WDTLR);
+ spin_unlock(&ie6xx_wdt_data.unlock_sequence);
+
+ return 0;
+}
+
+static int ie6xx_wdt_stop(struct watchdog_device *wdd)
+{
+ if (inb(ie6xx_wdt_data.sch_wdtba + WDTLR) & WDT_LOCK)
+ return -1;
+
+ /* Disable the watchdog timer */
+ spin_lock(&ie6xx_wdt_data.unlock_sequence);
+ outb(0, ie6xx_wdt_data.sch_wdtba + WDTLR);
+ spin_unlock(&ie6xx_wdt_data.unlock_sequence);
+
+ return 0;
+}
+
+static const struct watchdog_info ie6xx_wdt_info = {
+ .identity = "Intel Atom E6xx Watchdog",
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops ie6xx_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = ie6xx_wdt_start,
+ .stop = ie6xx_wdt_stop,
+ .ping = ie6xx_wdt_ping,
+ .set_timeout = ie6xx_wdt_set_timeout,
+};
+
+static struct watchdog_device ie6xx_wdt_dev = {
+ .info = &ie6xx_wdt_info,
+ .ops = &ie6xx_wdt_ops,
+ .min_timeout = MIN_TIME,
+ .max_timeout = MAX_TIME,
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+static int ie6xx_wdt_dbg_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "PV1 = 0x%08x\n",
+ inl(ie6xx_wdt_data.sch_wdtba + PV1));
+ seq_printf(s, "PV2 = 0x%08x\n",
+ inl(ie6xx_wdt_data.sch_wdtba + PV2));
+ seq_printf(s, "RR = 0x%08x\n",
+ inw(ie6xx_wdt_data.sch_wdtba + RR0));
+ seq_printf(s, "WDTCR = 0x%08x\n",
+ inw(ie6xx_wdt_data.sch_wdtba + WDTCR));
+ seq_printf(s, "DCR = 0x%08x\n",
+ inl(ie6xx_wdt_data.sch_wdtba + DCR));
+ seq_printf(s, "WDTLR = 0x%08x\n",
+ inw(ie6xx_wdt_data.sch_wdtba + WDTLR));
+
+ seq_printf(s, "\n");
+ return 0;
+}
+
+static int ie6xx_wdt_dbg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ie6xx_wdt_dbg_show, NULL);
+}
+
+static const struct file_operations ie6xx_wdt_dbg_operations = {
+ .open = ie6xx_wdt_dbg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __devinit ie6xx_wdt_debugfs_init(void)
+{
+ /* /sys/kernel/debug/ie6xx_wdt */
+ ie6xx_wdt_data.debugfs = debugfs_create_file("ie6xx_wdt",
+ S_IFREG | S_IRUGO, NULL, NULL, &ie6xx_wdt_dbg_operations);
+}
+
+static void __devexit ie6xx_wdt_debugfs_exit(void)
+{
+ debugfs_remove(ie6xx_wdt_data.debugfs);
+}
+
+#else
+static void __devinit ie6xx_wdt_debugfs_init(void)
+{
+}
+
+static void __devexit ie6xx_wdt_debugfs_exit(void)
+{
+}
+#endif
+
+static int __devinit ie6xx_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ u8 wdtlr;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_region(res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "Watchdog region 0x%llx already in use!\n",
+ (u64)res->start);
+ return -EBUSY;
+ }
+
+ ie6xx_wdt_data.sch_wdtba = res->start;
+ dev_dbg(&pdev->dev, "WDT = 0x%X\n", ie6xx_wdt_data.sch_wdtba);
+
+ ie6xx_wdt_dev.timeout = timeout;
+ watchdog_set_nowayout(&ie6xx_wdt_dev, nowayout);
+
+ spin_lock_init(&ie6xx_wdt_data.unlock_sequence);
+
+ wdtlr = inb(ie6xx_wdt_data.sch_wdtba + WDTLR);
+ if (wdtlr & WDT_LOCK)
+ dev_warn(&pdev->dev,
+ "Watchdog Timer is Locked (Reg=0x%x)\n", wdtlr);
+
+ ie6xx_wdt_debugfs_init();
+
+ ret = watchdog_register_device(&ie6xx_wdt_dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+			"Watchdog timer: cannot register device (err=%d)\n",
+ ret);
+ goto misc_register_error;
+ }
+
+ return 0;
+
+misc_register_error:
+ ie6xx_wdt_debugfs_exit();
+ release_region(res->start, resource_size(res));
+ ie6xx_wdt_data.sch_wdtba = 0;
+ return ret;
+}
+
+static int __devexit ie6xx_wdt_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ ie6xx_wdt_stop(NULL);
+ watchdog_unregister_device(&ie6xx_wdt_dev);
+ ie6xx_wdt_debugfs_exit();
+ release_region(res->start, resource_size(res));
+ ie6xx_wdt_data.sch_wdtba = 0;
+
+ return 0;
+}
+
+static struct platform_driver ie6xx_wdt_driver = {
+ .probe = ie6xx_wdt_probe,
+ .remove = __devexit_p(ie6xx_wdt_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ie6xx_wdt_init(void)
+{
+	/* Check that the timeout boot parameter is within the valid range. */
+ if ((timeout < MIN_TIME) ||
+ (timeout > MAX_TIME)) {
+ pr_err("Watchdog timer: value of timeout %d (dec) "
+ "is out of range from %d to %d (dec)\n",
+ timeout, MIN_TIME, MAX_TIME);
+ return -EINVAL;
+ }
+
+ return platform_driver_register(&ie6xx_wdt_driver);
+}
+
+static void __exit ie6xx_wdt_exit(void)
+{
+ platform_driver_unregister(&ie6xx_wdt_driver);
+}
+
+late_initcall(ie6xx_wdt_init);
+module_exit(ie6xx_wdt_exit);
+
+MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>");
+MODULE_DESCRIPTION("Intel Atom E6xx Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRIVER_NAME);
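
/*
 * Worked example of the preload arithmetic in ie6xx_wdt_set_timeout() above
 * (illustrative only): with the 33 MHz PCI clock and the value landing in
 * bits [34:15] of the down counter, a 60 second timeout gives
 *
 *	preload = ((60 * 33000000) >> 15) - 1
 *	        = (1980000000 / 32768) - 1
 *	        = 60424 - 1
 *	        = 60423
 *
 * which fits comfortably in the 20-bit preload field; even the 600 second
 * maximum stays below 2^20.
 */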
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 7a2b734fcdc7..bcfab2b00ad2 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -121,7 +121,7 @@ static void imx2_wdt_start(void)
{
if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
/* at our first start we enable clock and do initialisations */
- clk_enable(imx2_wdt.clk);
+ clk_prepare_enable(imx2_wdt.clk);
imx2_wdt_setup();
} else /* delete the timer that pings the watchdog after close */
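
/*
 * Aside: clk_prepare_enable() is the common-clock helper that pairs
 * clk_prepare() with clk_enable() and unwinds the prepare step if the enable
 * fails, roughly (assumes <linux/clk.h>):
 */
static inline int example_clk_prepare_enable(struct clk *clk)
{
	int ret = clk_prepare(clk);

	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);
	return ret;
}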
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index 8a741bcb5124..d3dcc6988b5f 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,7 +12,8 @@
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
- * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721 and IT8726.
+ * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726
+ * and IT8728.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -84,6 +85,7 @@
#define IT8720_ID 0x8720
#define IT8721_ID 0x8721
#define IT8726_ID 0x8726 /* the data sheet suggest wrongly 0x8716 */
+#define IT8728_ID 0x8728
/* GPIO Configuration Registers LDN=0x07 */
#define WDTCTRL 0x71
@@ -95,7 +97,7 @@
#define WDT_CIRINT 0x80
#define WDT_MOUSEINT 0x40
#define WDT_KYBINT 0x20
-#define WDT_GAMEPORT 0x10 /* not in it8718, it8720, it8721 */
+#define WDT_GAMEPORT 0x10 /* not in it8718, it8720, it8721, it8728 */
#define WDT_FORCE 0x02
#define WDT_ZERO 0x01
@@ -616,6 +618,7 @@ static int __init it87_wdt_init(void)
case IT8718_ID:
case IT8720_ID:
case IT8721_ID:
+ case IT8728_ID:
max_units = 65535;
try_gameport = 0;
break;
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
deleted file mode 100644
index 3f047a58d3ae..000000000000
--- a/drivers/watchdog/ixp2000_wdt.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * drivers/char/watchdog/ixp2000_wdt.c
- *
- * Watchdog driver for Intel IXP2000 network processors
- *
- * Adapted from the IXP4xx watchdog driver by Lennert Buytenhek.
- * The original version carries these notices:
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright 2004 (c) MontaVista, Software, Inc.
- * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/uaccess.h>
-#include <mach/hardware.h>
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */
-static unsigned long wdt_status;
-static DEFINE_SPINLOCK(wdt_lock);
-
-#define WDT_IN_USE 0
-#define WDT_OK_TO_CLOSE 1
-
-static unsigned long wdt_tick_rate;
-
-static void wdt_enable(void)
-{
- spin_lock(&wdt_lock);
- ixp2000_reg_write(IXP2000_RESET0, *(IXP2000_RESET0) | WDT_RESET_ENABLE);
- ixp2000_reg_write(IXP2000_TWDE, WDT_ENABLE);
- ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate);
- ixp2000_reg_write(IXP2000_T4_CTL, TIMER_DIVIDER_256 | TIMER_ENABLE);
- spin_unlock(&wdt_lock);
-}
-
-static void wdt_disable(void)
-{
- spin_lock(&wdt_lock);
- ixp2000_reg_write(IXP2000_T4_CTL, 0);
- spin_unlock(&wdt_lock);
-}
-
-static void wdt_keepalive(void)
-{
- spin_lock(&wdt_lock);
- ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate);
- spin_unlock(&wdt_lock);
-}
-
-static int ixp2000_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(WDT_IN_USE, &wdt_status))
- return -EBUSY;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- wdt_enable();
-
- return nonseekable_open(inode, file);
-}
-
-static ssize_t ixp2000_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
-{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- set_bit(WDT_OK_TO_CLOSE, &wdt_status);
- }
- }
- wdt_keepalive();
- }
-
- return len;
-}
-
-
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING,
- .identity = "IXP2000 Watchdog",
-};
-
-static long ixp2000_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret = -ENOTTY;
- int time;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_enable();
- ret = 0;
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(time, (int *)arg);
- if (ret)
- break;
-
- if (time <= 0 || time > 60) {
- ret = -EINVAL;
- break;
- }
-
- heartbeat = time;
- wdt_keepalive();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(heartbeat, (int *)arg);
- break;
- }
-
- return ret;
-}
-
-static int ixp2000_wdt_release(struct inode *inode, struct file *file)
-{
- if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
- wdt_disable();
- else
- pr_crit("Device closed unexpectedly - timer will not stop\n");
- clear_bit(WDT_IN_USE, &wdt_status);
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- return 0;
-}
-
-
-static const struct file_operations ixp2000_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = ixp2000_wdt_write,
- .unlocked_ioctl = ixp2000_wdt_ioctl,
- .open = ixp2000_wdt_open,
- .release = ixp2000_wdt_release,
-};
-
-static struct miscdevice ixp2000_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &ixp2000_wdt_fops,
-};
-
-static int __init ixp2000_wdt_init(void)
-{
- if ((*IXP2000_PRODUCT_ID & 0x001ffef0) == 0x00000000) {
- pr_info("Unable to use IXP2000 watchdog due to IXP2800 erratum #25\n");
- return -EIO;
- }
- wdt_tick_rate = (*IXP2000_T1_CLD * HZ) / 256;
- return misc_register(&ixp2000_wdt_miscdev);
-}
-
-static void __exit ixp2000_wdt_exit(void)
-{
- misc_deregister(&ixp2000_wdt_miscdev);
-}
-
-module_init(ixp2000_wdt_init);
-module_exit(ixp2000_wdt_exit);
-
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("IXP2000 Network Processor Watchdog");
-
-module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)");
-
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index a9593a3a32a0..2e74c3a8ee58 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -13,14 +13,15 @@
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <lantiq.h>
+#include <lantiq_soc.h>
-/* Section 3.4 of the datasheet
+/*
+ * Section 3.4 of the datasheet
* The password sequence protects the WDT control register from unintended
* write actions, which might cause malfunction of the WDT.
*
@@ -70,7 +71,8 @@ ltq_wdt_disable(void)
{
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
- /* write the second password magic with no config
+ /*
+ * write the second password magic with no config
* this turns the watchdog off
*/
ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
@@ -184,7 +186,7 @@ static struct miscdevice ltq_wdt_miscdev = {
.fops = &ltq_wdt_fops,
};
-static int __init
+static int __devinit
ltq_wdt_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -194,28 +196,27 @@ ltq_wdt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot obtain I/O memory region");
return -ENOENT;
}
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "cannot request I/O memory region");
- return -EBUSY;
- }
- ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
+
+ ltq_wdt_membase = devm_request_and_ioremap(&pdev->dev, res);
if (!ltq_wdt_membase) {
dev_err(&pdev->dev, "cannot remap I/O memory region\n");
return -ENOMEM;
}
/* we do not need to enable the clock as it is always running */
- clk = clk_get(&pdev->dev, "io");
- WARN_ON(!clk);
+ clk = clk_get_io();
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return -ENOENT;
+ }
ltq_io_region_clk_rate = clk_get_rate(clk);
clk_put(clk);
+ /* find out if the watchdog caused the last reboot */
if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
ltq_wdt_bootstatus = WDIOF_CARDRESET;
+ dev_info(&pdev->dev, "Init done\n");
return misc_register(&ltq_wdt_miscdev);
}
@@ -227,33 +228,26 @@ ltq_wdt_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ltq_wdt_match[] = {
+ { .compatible = "lantiq,wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_wdt_match);
static struct platform_driver ltq_wdt_driver = {
+ .probe = ltq_wdt_probe,
.remove = __devexit_p(ltq_wdt_remove),
.driver = {
- .name = "ltq_wdt",
+ .name = "wdt",
.owner = THIS_MODULE,
+ .of_match_table = ltq_wdt_match,
},
};
-static int __init
-init_ltq_wdt(void)
-{
- return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
-}
-
-static void __exit
-exit_ltq_wdt(void)
-{
- return platform_driver_unregister(&ltq_wdt_driver);
-}
-
-module_init(init_ltq_wdt);
-module_exit(exit_ltq_wdt);
+module_platform_driver(ltq_wdt_driver);
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
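
/*
 * For reference, devm_request_and_ioremap() used in the probe above folds
 * together the two managed steps the old code did by hand; it behaves
 * roughly like this sketch (error handling reduced to returning NULL;
 * assumes <linux/device.h>, <linux/ioport.h> and <linux/io.h>):
 */
static void __iomem *example_request_and_ioremap(struct device *dev,
						 struct resource *res)
{
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     dev_name(dev)))
		return NULL;

	return devm_ioremap_nocache(dev, res->start, resource_size(res));
}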
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 788aa158e78c..0f5736949c61 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -24,8 +24,8 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/clk.h>
#include <mach/bridge-regs.h>
-#include <plat/orion_wdt.h>
/*
* Watchdog timer block registers.
@@ -41,6 +41,7 @@
static bool nowayout = WATCHDOG_NOWAYOUT;
static int heartbeat = -1; /* module parameter (seconds) */
static unsigned int wdt_max_duration; /* (seconds) */
+static struct clk *clk;
static unsigned int wdt_tclk;
static void __iomem *wdt_reg;
static unsigned long wdt_status;
@@ -237,16 +238,16 @@ static struct miscdevice orion_wdt_miscdev = {
static int __devinit orion_wdt_probe(struct platform_device *pdev)
{
- struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
int ret;
- if (pdata) {
- wdt_tclk = pdata->tclk;
- } else {
- pr_err("misses platform data\n");
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "Orion Watchdog missing clock\n");
return -ENODEV;
}
+ clk_prepare_enable(clk);
+ wdt_tclk = clk_get_rate(clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -282,6 +283,9 @@ static int __devexit orion_wdt_remove(struct platform_device *pdev)
if (!ret)
orion_wdt_miscdev.parent = NULL;
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+
return ret;
}
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index c891399bed6a..ee6900da8678 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -707,6 +707,7 @@ static int __devinit pcipcwd_card_init(struct pci_dev *pdev,
goto err_out_disable_device;
}
+ spin_lock_init(&pcipcwd_private.io_lock);
pcipcwd_private.pdev = pdev;
pcipcwd_private.io_addr = pci_resource_start(pdev, 0);
@@ -814,22 +815,7 @@ static struct pci_driver pcipcwd_driver = {
.remove = __devexit_p(pcipcwd_card_exit),
};
-static int __init pcipcwd_init_module(void)
-{
- spin_lock_init(&pcipcwd_private.io_lock);
-
- return pci_register_driver(&pcipcwd_driver);
-}
-
-static void __exit pcipcwd_cleanup_module(void)
-{
- pci_unregister_driver(&pcipcwd_driver);
-
- pr_info("Watchdog Module Unloaded\n");
-}
-
-module_init(pcipcwd_init_module);
-module_exit(pcipcwd_cleanup_module);
+module_pci_driver(pcipcwd_driver);
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("Berkshire PCI-PC Watchdog driver");
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 6b8432f61d05..87722e126058 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <mach/hardware.h>
/* WatchDog Timer - Chapter 23 Page 207 */
@@ -201,10 +202,19 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id pnx4008_wdt_match[] = {
+ { .compatible = "nxp,pnx4008-wdt" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pnx4008_wdt_match);
+#endif
+
static struct platform_driver platform_wdt_driver = {
.driver = {
.name = "pnx4008-watchdog",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pnx4008_wdt_match),
},
.probe = pnx4008_wdt_probe,
.remove = __devexit_p(pnx4008_wdt_remove),
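
/*
 * Note on of_match_ptr(), used here and in the s3c2410 change below: it
 * compiles the match-table reference away on kernels built without device
 * tree support, roughly
 *
 *	#ifdef CONFIG_OF
 *	#define of_match_ptr(_ptr)	(_ptr)
 *	#else
 *	#define of_match_ptr(_ptr)	NULL
 *	#endif
 *
 * which is why the explicit "#define s3c2410_wdt_match NULL" fallback can be
 * dropped further down.
 */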
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 04e5a6de47d7..200ece5e2a22 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -40,6 +40,7 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <mach/map.h>
@@ -201,7 +202,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
writel(count, wdt_base + S3C2410_WTDAT);
writel(wtcon, wdt_base + S3C2410_WTCON);
- wdd->timeout = timeout;
+ wdd->timeout = (count * divisor) / freq;
return 0;
}
@@ -503,8 +504,6 @@ static const struct of_device_id s3c2410_wdt_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
-#else
-#define s3c2410_wdt_match NULL
#endif
static struct platform_driver s3c2410wdt_driver = {
@@ -516,7 +515,7 @@ static struct platform_driver s3c2410wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-wdt",
- .of_match_table = s3c2410_wdt_match,
+ .of_match_table = of_match_ptr(s3c2410_wdt_match),
},
};
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index bd86f32d63ab..f8477002b728 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -41,7 +41,6 @@
#define DRV_NAME "sch311x_wdt"
/* Runtime registers */
-#define RESGEN 0x1d
#define GP60 0x47
#define WDT_TIME_OUT 0x65
#define WDT_VAL 0x66
@@ -69,10 +68,6 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static unsigned short therm_trip;
-module_param(therm_trip, ushort, 0);
-MODULE_PARM_DESC(therm_trip, "Should a ThermTrip trigger the reset generator");
-
#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
module_param(timeout, int, 0);
@@ -358,26 +353,16 @@ static struct miscdevice sch311x_wdt_miscdev = {
static int __devinit sch311x_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- unsigned char val;
int err;
spin_lock_init(&sch311x_wdt_data.io_lock);
- if (!request_region(sch311x_wdt_data.runtime_reg + RESGEN, 1,
- DRV_NAME)) {
- dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
- sch311x_wdt_data.runtime_reg + RESGEN,
- sch311x_wdt_data.runtime_reg + RESGEN);
- err = -EBUSY;
- goto exit;
- }
-
if (!request_region(sch311x_wdt_data.runtime_reg + GP60, 1, DRV_NAME)) {
dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
sch311x_wdt_data.runtime_reg + GP60,
sch311x_wdt_data.runtime_reg + GP60);
err = -EBUSY;
- goto exit_release_region;
+ goto exit;
}
if (!request_region(sch311x_wdt_data.runtime_reg + WDT_TIME_OUT, 4,
@@ -386,7 +371,7 @@ static int __devinit sch311x_wdt_probe(struct platform_device *pdev)
sch311x_wdt_data.runtime_reg + WDT_TIME_OUT,
sch311x_wdt_data.runtime_reg + WDT_CTRL);
err = -EBUSY;
- goto exit_release_region2;
+ goto exit_release_region;
}
/* Make sure that the watchdog is not running */
@@ -414,24 +399,13 @@ static int __devinit sch311x_wdt_probe(struct platform_device *pdev)
/* Get status at boot */
sch311x_wdt_get_status(&sch311x_wdt_data.boot_status);
- /* enable watchdog */
- /* -- Reset Generator --
- * Bit 0 Enable Watchdog Timer Generation: 0* = Enabled, 1 = Disabled
- * Bit 1 Thermtrip Source Select: O* = No Source, 1 = Source
- * Bit 2 WDT2_CTL: WDT input bit
- * Bit 3-7 Reserved
- */
- outb(0, sch311x_wdt_data.runtime_reg + RESGEN);
- val = therm_trip ? 0x06 : 0x04;
- outb(val, sch311x_wdt_data.runtime_reg + RESGEN);
-
sch311x_wdt_miscdev.parent = dev;
err = misc_register(&sch311x_wdt_miscdev);
if (err != 0) {
dev_err(dev, "cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, err);
- goto exit_release_region3;
+ goto exit_release_region2;
}
dev_info(dev,
@@ -440,12 +414,10 @@ static int __devinit sch311x_wdt_probe(struct platform_device *pdev)
return 0;
-exit_release_region3:
- release_region(sch311x_wdt_data.runtime_reg + WDT_TIME_OUT, 4);
exit_release_region2:
- release_region(sch311x_wdt_data.runtime_reg + GP60, 1);
+ release_region(sch311x_wdt_data.runtime_reg + WDT_TIME_OUT, 4);
exit_release_region:
- release_region(sch311x_wdt_data.runtime_reg + RESGEN, 1);
+ release_region(sch311x_wdt_data.runtime_reg + GP60, 1);
sch311x_wdt_data.runtime_reg = 0;
exit:
return err;
@@ -461,7 +433,6 @@ static int __devexit sch311x_wdt_remove(struct platform_device *pdev)
misc_deregister(&sch311x_wdt_miscdev);
release_region(sch311x_wdt_data.runtime_reg + WDT_TIME_OUT, 4);
release_region(sch311x_wdt_data.runtime_reg + GP60, 1);
- release_region(sch311x_wdt_data.runtime_reg + RESGEN, 1);
sch311x_wdt_data.runtime_reg = 0;
return 0;
}
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 93958a7763e6..e5b59bebcdb1 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -3,7 +3,7 @@
*
* Watchdog driver for integrated watchdog in the SuperH processors.
*
- * Copyright (C) 2001 - 2010 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (C) 2001 - 2012 Paul Mundt <lethal@linux-sh.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -25,16 +25,15 @@
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/reboot.h>
-#include <linux/notifier.h>
-#include <linux/ioport.h>
+#include <linux/pm_runtime.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
+#include <linux/clk.h>
#include <asm/watchdog.h>
#define DRV_NAME "sh-wdt"
@@ -69,10 +68,6 @@
static int clock_division_ratio = WTCSR_CKS_4096;
#define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4))
-static const struct watchdog_info sh_wdt_info;
-static struct platform_device *sh_wdt_dev;
-static DEFINE_SPINLOCK(shwdt_lock);
-
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -81,19 +76,22 @@ static unsigned long next_heartbeat;
struct sh_wdt {
void __iomem *base;
struct device *dev;
+ struct clk *clk;
+ spinlock_t lock;
struct timer_list timer;
-
- unsigned long enabled;
- char expect_close;
};
-static void sh_wdt_start(struct sh_wdt *wdt)
+static int sh_wdt_start(struct watchdog_device *wdt_dev)
{
+ struct sh_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
u8 csr;
- spin_lock_irqsave(&shwdt_lock, flags);
+ pm_runtime_get_sync(wdt->dev);
+ clk_enable(wdt->clk);
+
+ spin_lock_irqsave(&wdt->lock, flags);
next_heartbeat = jiffies + (heartbeat * HZ);
mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
@@ -122,15 +120,18 @@ static void sh_wdt_start(struct sh_wdt *wdt)
csr &= ~RSTCSR_RSTS;
sh_wdt_write_rstcsr(csr);
#endif
- spin_unlock_irqrestore(&shwdt_lock, flags);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ return 0;
}
-static void sh_wdt_stop(struct sh_wdt *wdt)
+static int sh_wdt_stop(struct watchdog_device *wdt_dev)
{
+ struct sh_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
u8 csr;
- spin_lock_irqsave(&shwdt_lock, flags);
+ spin_lock_irqsave(&wdt->lock, flags);
del_timer(&wdt->timer);
@@ -138,28 +139,39 @@ static void sh_wdt_stop(struct sh_wdt *wdt)
csr &= ~WTCSR_TME;
sh_wdt_write_csr(csr);
- spin_unlock_irqrestore(&shwdt_lock, flags);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ clk_disable(wdt->clk);
+ pm_runtime_put_sync(wdt->dev);
+
+ return 0;
}
-static inline void sh_wdt_keepalive(struct sh_wdt *wdt)
+static int sh_wdt_keepalive(struct watchdog_device *wdt_dev)
{
+ struct sh_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
- spin_lock_irqsave(&shwdt_lock, flags);
+ spin_lock_irqsave(&wdt->lock, flags);
next_heartbeat = jiffies + (heartbeat * HZ);
- spin_unlock_irqrestore(&shwdt_lock, flags);
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
+ return 0;
}
-static int sh_wdt_set_heartbeat(int t)
+static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
{
+ struct sh_wdt *wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
if (unlikely(t < 1 || t > 3600)) /* arbitrary upper limit */
return -EINVAL;
- spin_lock_irqsave(&shwdt_lock, flags);
+ spin_lock_irqsave(&wdt->lock, flags);
heartbeat = t;
- spin_unlock_irqrestore(&shwdt_lock, flags);
+ wdt_dev->timeout = t;
+ spin_unlock_irqrestore(&wdt->lock, flags);
+
return 0;
}
@@ -168,7 +180,7 @@ static void sh_wdt_ping(unsigned long data)
struct sh_wdt *wdt = (struct sh_wdt *)data;
unsigned long flags;
- spin_lock_irqsave(&shwdt_lock, flags);
+ spin_lock_irqsave(&wdt->lock, flags);
if (time_before(jiffies, next_heartbeat)) {
u8 csr;
@@ -182,137 +194,9 @@ static void sh_wdt_ping(unsigned long data)
} else
dev_warn(wdt->dev, "Heartbeat lost! Will not ping "
"the watchdog\n");
- spin_unlock_irqrestore(&shwdt_lock, flags);
-}
-
-static int sh_wdt_open(struct inode *inode, struct file *file)
-{
- struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev);
-
- if (test_and_set_bit(0, &wdt->enabled))
- return -EBUSY;
- if (nowayout)
- __module_get(THIS_MODULE);
-
- file->private_data = wdt;
-
- sh_wdt_start(wdt);
-
- return nonseekable_open(inode, file);
-}
-
-static int sh_wdt_close(struct inode *inode, struct file *file)
-{
- struct sh_wdt *wdt = file->private_data;
-
- if (wdt->expect_close == 42) {
- sh_wdt_stop(wdt);
- } else {
- dev_crit(wdt->dev, "Unexpected close, not "
- "stopping watchdog!\n");
- sh_wdt_keepalive(wdt);
- }
-
- clear_bit(0, &wdt->enabled);
- wdt->expect_close = 0;
-
- return 0;
-}
-
-static ssize_t sh_wdt_write(struct file *file, const char *buf,
- size_t count, loff_t *ppos)
-{
- struct sh_wdt *wdt = file->private_data;
-
- if (count) {
- if (!nowayout) {
- size_t i;
-
- wdt->expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- wdt->expect_close = 42;
- }
- }
- sh_wdt_keepalive(wdt);
- }
-
- return count;
+ spin_unlock_irqrestore(&wdt->lock, flags);
}
-static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sh_wdt *wdt = file->private_data;
- int new_heartbeat;
- int options, retval = -EINVAL;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user((struct watchdog_info *)arg,
- &sh_wdt_info, sizeof(sh_wdt_info)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int *)arg);
- case WDIOC_SETOPTIONS:
- if (get_user(options, (int *)arg))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD) {
- sh_wdt_stop(wdt);
- retval = 0;
- }
-
- if (options & WDIOS_ENABLECARD) {
- sh_wdt_start(wdt);
- retval = 0;
- }
-
- return retval;
- case WDIOC_KEEPALIVE:
- sh_wdt_keepalive(wdt);
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_heartbeat, (int *)arg))
- return -EFAULT;
-
- if (sh_wdt_set_heartbeat(new_heartbeat))
- return -EINVAL;
-
- sh_wdt_keepalive(wdt);
- /* Fall */
- case WDIOC_GETTIMEOUT:
- return put_user(heartbeat, (int *)arg);
- default:
- return -ENOTTY;
- }
- return 0;
-}
-
-static int sh_wdt_notify_sys(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev);
-
- if (code == SYS_DOWN || code == SYS_HALT)
- sh_wdt_stop(wdt);
-
- return NOTIFY_DONE;
-}
-
-static const struct file_operations sh_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = sh_wdt_write,
- .unlocked_ioctl = sh_wdt_ioctl,
- .open = sh_wdt_open,
- .release = sh_wdt_close,
-};
-
static const struct watchdog_info sh_wdt_info = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
@@ -320,14 +204,17 @@ static const struct watchdog_info sh_wdt_info = {
.identity = "SH WDT",
};
-static struct notifier_block sh_wdt_notifier = {
- .notifier_call = sh_wdt_notify_sys,
+static const struct watchdog_ops sh_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sh_wdt_start,
+ .stop = sh_wdt_stop,
+ .ping = sh_wdt_keepalive,
+ .set_timeout = sh_wdt_set_heartbeat,
};
-static struct miscdevice sh_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &sh_wdt_fops,
+static struct watchdog_device sh_wdt_dev = {
+ .info = &sh_wdt_info,
+ .ops = &sh_wdt_ops,
};
static int __devinit sh_wdt_probe(struct platform_device *pdev)
@@ -347,39 +234,49 @@ static int __devinit sh_wdt_probe(struct platform_device *pdev)
if (unlikely(!res))
return -EINVAL;
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), DRV_NAME))
- return -EBUSY;
-
wdt = devm_kzalloc(&pdev->dev, sizeof(struct sh_wdt), GFP_KERNEL);
- if (unlikely(!wdt)) {
- rc = -ENOMEM;
- goto out_release;
- }
+ if (unlikely(!wdt))
+ return -ENOMEM;
wdt->dev = &pdev->dev;
-	wdt->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	wdt->clk = clk_get(&pdev->dev, NULL);
+	wdt->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(wdt->clk)) {
+ /*
+	 * Clock framework support is optional; continue
+	 * anyway if we don't find a matching clock.
+ */
+ wdt->clk = NULL;
+ }
+
+ wdt->base = devm_request_and_ioremap(wdt->dev, res);
if (unlikely(!wdt->base)) {
- rc = -ENXIO;
- goto out_err;
+ rc = -EADDRNOTAVAIL;
+ goto err;
}
- rc = register_reboot_notifier(&sh_wdt_notifier);
+ watchdog_set_nowayout(&sh_wdt_dev, nowayout);
+ watchdog_set_drvdata(&sh_wdt_dev, wdt);
+
+ spin_lock_init(&wdt->lock);
+
+ rc = sh_wdt_set_heartbeat(&sh_wdt_dev, heartbeat);
if (unlikely(rc)) {
- dev_err(&pdev->dev,
- "Can't register reboot notifier (err=%d)\n", rc);
- goto out_unmap;
+ /* Default timeout if invalid */
+ sh_wdt_set_heartbeat(&sh_wdt_dev, WATCHDOG_HEARTBEAT);
+
+ dev_warn(&pdev->dev,
+ "heartbeat value must be 1<=x<=3600, using %d\n",
+ sh_wdt_dev.timeout);
}
- sh_wdt_miscdev.parent = wdt->dev;
+ dev_info(&pdev->dev, "configured with heartbeat=%d sec (nowayout=%d)\n",
+ sh_wdt_dev.timeout, nowayout);
- rc = misc_register(&sh_wdt_miscdev);
+ rc = watchdog_register_device(&sh_wdt_dev);
if (unlikely(rc)) {
- dev_err(&pdev->dev,
- "Can't register miscdev on minor=%d (err=%d)\n",
- sh_wdt_miscdev.minor, rc);
- goto out_unreg;
+ dev_err(&pdev->dev, "Can't register watchdog (err=%d)\n", rc);
+ goto err;
}
init_timer(&wdt->timer);
@@ -388,20 +285,15 @@ static int __devinit sh_wdt_probe(struct platform_device *pdev)
wdt->timer.expires = next_ping_period(clock_division_ratio);
platform_set_drvdata(pdev, wdt);
- sh_wdt_dev = pdev;
dev_info(&pdev->dev, "initialized.\n");
+ pm_runtime_enable(&pdev->dev);
+
return 0;
-out_unreg:
- unregister_reboot_notifier(&sh_wdt_notifier);
-out_unmap:
- devm_iounmap(&pdev->dev, wdt->base);
-out_err:
- devm_kfree(&pdev->dev, wdt);
-out_release:
- devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+err:
+ clk_put(wdt->clk);
return rc;
}
@@ -409,36 +301,35 @@ out_release:
static int __devexit sh_wdt_remove(struct platform_device *pdev)
{
struct sh_wdt *wdt = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
platform_set_drvdata(pdev, NULL);
- misc_deregister(&sh_wdt_miscdev);
+ watchdog_unregister_device(&sh_wdt_dev);
- sh_wdt_dev = NULL;
-
- unregister_reboot_notifier(&sh_wdt_notifier);
- devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
- devm_iounmap(&pdev->dev, wdt->base);
- devm_kfree(&pdev->dev, wdt);
+ pm_runtime_disable(&pdev->dev);
+ clk_put(wdt->clk);
return 0;
}
+static void sh_wdt_shutdown(struct platform_device *pdev)
+{
+ sh_wdt_stop(&sh_wdt_dev);
+}
+
static struct platform_driver sh_wdt_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
- .probe = sh_wdt_probe,
- .remove = __devexit_p(sh_wdt_remove),
+ .probe = sh_wdt_probe,
+ .remove = __devexit_p(sh_wdt_remove),
+ .shutdown = sh_wdt_shutdown,
};
static int __init sh_wdt_init(void)
{
- int rc;
-
if (unlikely(clock_division_ratio < 0x5 ||
clock_division_ratio > 0x7)) {
clock_division_ratio = WTCSR_CKS_4096;
@@ -447,17 +338,6 @@ static int __init sh_wdt_init(void)
clock_division_ratio);
}
- rc = sh_wdt_set_heartbeat(heartbeat);
- if (unlikely(rc)) {
- heartbeat = WATCHDOG_HEARTBEAT;
-
- pr_info("heartbeat value must be 1<=x<=3600, using %d\n",
- heartbeat);
- }
-
- pr_info("configured with heartbeat=%d sec (nowayout=%d)\n",
- heartbeat, nowayout);
-
return platform_driver_register(&sh_wdt_driver);
}
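
The hunks above convert shwdt from a hand-rolled miscdevice (open/write/ioctl) to the generic watchdog framework. As a minimal sketch, assuming a trivial piece of hardware and example names throughout, a framework driver of this shape only needs a watchdog_info, a watchdog_ops and a watchdog_device:

    #include <linux/module.h>
    #include <linux/watchdog.h>

    static int my_wdt_start(struct watchdog_device *wdd)
    {
            /* poke the hardware here */
            return 0;
    }

    static int my_wdt_stop(struct watchdog_device *wdd)
    {
            /* halt the hardware here */
            return 0;
    }

    static const struct watchdog_info my_wdt_info = {
            .options  = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
            .identity = "example wdt",
    };

    static const struct watchdog_ops my_wdt_ops = {
            .owner = THIS_MODULE,
            .start = my_wdt_start,
            .stop  = my_wdt_stop,
    };

    static struct watchdog_device my_wdt_dev = {
            .info = &my_wdt_info,
            .ops  = &my_wdt_ops,
    };

    /*
     * probe:  watchdog_set_nowayout(&my_wdt_dev, nowayout);
     *         return watchdog_register_device(&my_wdt_dev);
     * remove: watchdog_unregister_device(&my_wdt_dev);
     */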
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 59108e48ada3..ae5e82cb83fa 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -313,7 +313,7 @@ static unsigned char __devinit sp5100_tco_setupdevice(void)
tcobase_phys = val;
tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
- if (tcobase == 0) {
+ if (!tcobase) {
pr_err("failed to get tcobase address\n");
goto unreg_mem_region;
}
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index bbb170e50055..e4841c36798b 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for ARM SP805 watchdog module
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2 or later. This program is licensed "as is" without any
@@ -16,20 +16,17 @@
#include <linux/amba/bus.h>
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/math64.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
/* default timeout in seconds */
@@ -56,6 +53,7 @@
/**
* struct sp805_wdt: sp805 wdt device structure
+ * @wdd: instance of struct watchdog_device
* @lock: spin lock protecting dev structure and io access
* @base: base address of wdt
* @clk: clock structure of wdt
@@ -65,24 +63,24 @@
* @timeout: current programmed timeout
*/
struct sp805_wdt {
+ struct watchdog_device wdd;
spinlock_t lock;
void __iomem *base;
struct clk *clk;
struct amba_device *adev;
- unsigned long status;
- #define WDT_BUSY 0
- #define WDT_CAN_BE_CLOSED 1
unsigned int load_val;
unsigned int timeout;
};
-/* local variables */
-static struct sp805_wdt *wdt;
static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Set to 1 to keep watchdog running after device release");
/* This routine finds the load value that will reset the system in the required timeout */
-static void wdt_setload(unsigned int timeout)
+static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
rate = clk_get_rate(wdt->clk);
@@ -103,11 +101,14 @@ static void wdt_setload(unsigned int timeout)
/* roundup timeout to closest positive integer value */
wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
spin_unlock(&wdt->lock);
+
+ return 0;
}
/* returns number of seconds left for reset to occur */
-static u32 wdt_timeleft(void)
+static unsigned int wdt_timeleft(struct watchdog_device *wdd)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
rate = clk_get_rate(wdt->clk);
@@ -123,166 +124,96 @@ static u32 wdt_timeleft(void)
return div_u64(load, rate);
}
-/* enables watchdog timers reset */
-static void wdt_enable(void)
+static int wdt_config(struct watchdog_device *wdd, bool ping)
{
- spin_lock(&wdt->lock);
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+ int ret;
- writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
- writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
- writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
- writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
- writel_relaxed(LOCK, wdt->base + WDTLOCK);
+ if (!ping) {
+ ret = clk_prepare(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock prepare fail");
+ return ret;
+ }
- /* Flush posted writes. */
- readl_relaxed(wdt->base + WDTLOCK);
- spin_unlock(&wdt->lock);
-}
+ ret = clk_enable(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock enable fail");
+ clk_unprepare(wdt->clk);
+ return ret;
+ }
+ }
-/* disables watchdog timers reset */
-static void wdt_disable(void)
-{
spin_lock(&wdt->lock);
writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
- writel_relaxed(0, wdt->base + WDTCONTROL);
+ writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+
+ if (!ping) {
+ writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+ writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
+ WDTCONTROL);
+ }
+
writel_relaxed(LOCK, wdt->base + WDTLOCK);
/* Flush posted writes. */
readl_relaxed(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
+
+ return 0;
}
-static ssize_t sp805_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
+static int wdt_ping(struct watchdog_device *wdd)
{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- /* Check for Magic Close character */
- if (c == 'V') {
- set_bit(WDT_CAN_BE_CLOSED,
- &wdt->status);
- break;
- }
- }
- }
- wdt_enable();
- }
- return len;
+ return wdt_config(wdd, true);
}
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
- .identity = MODULE_NAME,
-};
-
-static long sp805_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+/* enables watchdog timers reset */
+static int wdt_enable(struct watchdog_device *wdd)
{
- int ret = -ENOTTY;
- unsigned int timeout;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_enable();
- ret = 0;
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(timeout, (unsigned int *)arg);
- if (ret)
- break;
-
- wdt_setload(timeout);
-
- wdt_enable();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(wdt->timeout, (unsigned int *)arg);
- break;
- case WDIOC_GETTIMELEFT:
- ret = put_user(wdt_timeleft(), (unsigned int *)arg);
- break;
- }
- return ret;
+ return wdt_config(wdd, false);
}
-static int sp805_wdt_open(struct inode *inode, struct file *file)
+/* disables watchdog timers reset */
+static int wdt_disable(struct watchdog_device *wdd)
{
- int ret = 0;
-
- if (test_and_set_bit(WDT_BUSY, &wdt->status))
- return -EBUSY;
-
- ret = clk_enable(wdt->clk);
- if (ret) {
- dev_err(&wdt->adev->dev, "clock enable fail");
- goto err;
- }
-
- wdt_enable();
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
- /* can not be closed, once enabled */
- clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
- return nonseekable_open(inode, file);
+ spin_lock(&wdt->lock);
-err:
- clear_bit(WDT_BUSY, &wdt->status);
- return ret;
-}
+ writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
+ writel_relaxed(0, wdt->base + WDTCONTROL);
+ writel_relaxed(LOCK, wdt->base + WDTLOCK);
-static int sp805_wdt_release(struct inode *inode, struct file *file)
-{
- if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) {
- clear_bit(WDT_BUSY, &wdt->status);
- dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n");
- return 0;
- }
+ /* Flush posted writes. */
+ readl_relaxed(wdt->base + WDTLOCK);
+ spin_unlock(&wdt->lock);
- wdt_disable();
clk_disable(wdt->clk);
- clear_bit(WDT_BUSY, &wdt->status);
+ clk_unprepare(wdt->clk);
return 0;
}
-static const struct file_operations sp805_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = sp805_wdt_write,
- .unlocked_ioctl = sp805_wdt_ioctl,
- .open = sp805_wdt_open,
- .release = sp805_wdt_release,
+static const struct watchdog_info wdt_info = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = MODULE_NAME,
};
-static struct miscdevice sp805_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &sp805_wdt_fops,
+static const struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_enable,
+ .stop = wdt_disable,
+ .ping = wdt_ping,
+ .set_timeout = wdt_setload,
+ .get_timeleft = wdt_timeleft,
};
static int __devinit
sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
+ struct sp805_wdt *wdt;
int ret = 0;
if (!devm_request_mem_region(&adev->dev, adev->res.start,
@@ -315,19 +246,26 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
}
wdt->adev = adev;
+ wdt->wdd.info = &wdt_info;
+ wdt->wdd.ops = &wdt_ops;
+
spin_lock_init(&wdt->lock);
- wdt_setload(DEFAULT_TIMEOUT);
+ watchdog_set_nowayout(&wdt->wdd, nowayout);
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT);
- ret = misc_register(&sp805_wdt_miscdev);
- if (ret < 0) {
- dev_warn(&adev->dev, "cannot register misc device\n");
- goto err_misc_register;
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret) {
+ dev_err(&adev->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+ goto err_register;
}
+ amba_set_drvdata(adev, wdt);
dev_info(&adev->dev, "registration successful\n");
return 0;
-err_misc_register:
+err_register:
clk_put(wdt->clk);
err:
dev_err(&adev->dev, "Probe Failed!!!\n");
@@ -336,7 +274,11 @@ err:
static int __devexit sp805_wdt_remove(struct amba_device *adev)
{
- misc_deregister(&sp805_wdt_miscdev);
+ struct sp805_wdt *wdt = amba_get_drvdata(adev);
+
+ watchdog_unregister_device(&wdt->wdd);
+ amba_set_drvdata(adev, NULL);
+ watchdog_set_drvdata(&wdt->wdd, NULL);
clk_put(wdt->clk);
return 0;
@@ -345,28 +287,22 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
#ifdef CONFIG_PM
static int sp805_wdt_suspend(struct device *dev)
{
- if (test_bit(WDT_BUSY, &wdt->status)) {
- wdt_disable();
- clk_disable(wdt->clk);
- }
+ struct sp805_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ return wdt_disable(&wdt->wdd);
return 0;
}
static int sp805_wdt_resume(struct device *dev)
{
- int ret = 0;
+ struct sp805_wdt *wdt = dev_get_drvdata(dev);
- if (test_bit(WDT_BUSY, &wdt->status)) {
- ret = clk_enable(wdt->clk);
- if (ret) {
- dev_err(dev, "clock enable fail");
- return ret;
- }
- wdt_enable();
- }
+ if (watchdog_active(&wdt->wdd))
+ return wdt_enable(&wdt->wdd);
- return ret;
+ return 0;
}
#endif /* CONFIG_PM */
@@ -395,11 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
module_amba_driver(sp805_wdt_driver);
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
- "Set to 1 to keep watchdog running after device release");
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
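
One detail worth noting in the rework above: wdt_config() now pairs clk_prepare() with clk_enable() when (re)starting the watchdog, and wdt_disable() undoes them in reverse order. A small sketch of that ordering with example names, since clk_prepare() may sleep while clk_enable()/clk_disable() are safe in atomic context:

    #include <linux/clk.h>

    static int example_clk_on(struct clk *clk)
    {
            int ret;

            ret = clk_prepare(clk);         /* may sleep */
            if (ret)
                    return ret;

            ret = clk_enable(clk);          /* atomic-context safe */
            if (ret)
                    clk_unprepare(clk);     /* roll back on failure */

            return ret;
    }

    static void example_clk_off(struct clk *clk)
    {
            clk_disable(clk);
            clk_unprepare(clk);
    }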
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 465e08273c97..aa50da3ccfe3 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -91,7 +91,7 @@ static inline void wdt_reset(void)
static void wdt_timer_tick(unsigned long data)
{
if (time_before(jiffies, next_heartbeat) ||
- (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+ (!watchdog_active(&wdt_dev))) {
wdt_reset();
mod_timer(&timer, jiffies + WDT_HEARTBEAT);
} else
@@ -202,6 +202,9 @@ static int __devinit wdt_probe(struct pci_dev *pdev,
goto err_out_release;
}
+ if (timeout < 1 || timeout > WDT_TIMEOUT_MAX)
+ timeout = WDT_TIMEOUT;
+
wdt_dev.timeout = timeout;
watchdog_set_nowayout(&wdt_dev, nowayout);
if (readl(wdt_mem) & VIA_WDT_FIRED)
@@ -250,20 +253,7 @@ static struct pci_driver wdt_driver = {
.remove = __devexit_p(wdt_remove),
};
-static int __init wdt_init(void)
-{
- if (timeout < 1 || timeout > WDT_TIMEOUT_MAX)
- timeout = WDT_TIMEOUT;
- return pci_register_driver(&wdt_driver);
-}
-
-static void __exit wdt_exit(void)
-{
- pci_unregister_driver(&wdt_driver);
-}
-
-module_init(wdt_init);
-module_exit(wdt_exit);
+module_pci_driver(wdt_driver);
MODULE_AUTHOR("Marc Vertes");
MODULE_DESCRIPTION("Driver for watchdog timer on VIA chipset");
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 14d768bfa267..6aa46a90ff02 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -34,8 +34,13 @@
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/watchdog.h> /* For watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
+#include <linux/idr.h> /* For ida_* macros */
+#include <linux/err.h> /* For IS_ERR macros */
-#include "watchdog_dev.h" /* For watchdog_dev_register/... */
+#include "watchdog_core.h" /* For watchdog_dev_register/... */
+
+static DEFINE_IDA(watchdog_ida);
+static struct class *watchdog_class;
/**
* watchdog_register_device() - register a watchdog device
@@ -49,7 +54,7 @@
*/
int watchdog_register_device(struct watchdog_device *wdd)
{
- int ret;
+ int ret, id, devno;
if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
return -EINVAL;
@@ -74,10 +79,38 @@ int watchdog_register_device(struct watchdog_device *wdd)
* corrupted in a later stage then we expect a kernel panic!
*/
- /* We only support 1 watchdog device via the /dev/watchdog interface */
+ mutex_init(&wdd->lock);
+ id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ wdd->id = id;
+
ret = watchdog_dev_register(wdd);
if (ret) {
- pr_err("error registering /dev/watchdog (err=%d)\n", ret);
+ ida_simple_remove(&watchdog_ida, id);
+ if (!(id == 0 && ret == -EBUSY))
+ return ret;
+
+ /* Retry in case a legacy watchdog module exists */
+ id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ wdd->id = id;
+
+ ret = watchdog_dev_register(wdd);
+ if (ret) {
+ ida_simple_remove(&watchdog_ida, id);
+ return ret;
+ }
+ }
+
+ devno = wdd->cdev.dev;
+ wdd->dev = device_create(watchdog_class, wdd->parent, devno,
+ NULL, "watchdog%d", wdd->id);
+ if (IS_ERR(wdd->dev)) {
+ watchdog_dev_unregister(wdd);
+ ida_simple_remove(&watchdog_ida, id);
+ ret = PTR_ERR(wdd->dev);
return ret;
}
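
The registration path above hands out the watchdog%d numbers with the ida helpers, retrying from id 1 when the legacy minor is busy. A minimal sketch of the ida_simple_get()/ida_simple_remove() pattern (example names; range 0..31 mirrors MAX_DOGS):

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_alloc_id(void)
    {
            /* smallest free id in [0, 32) */
            return ida_simple_get(&example_ida, 0, 32, GFP_KERNEL);
    }

    static void example_free_id(int id)
    {
            ida_simple_remove(&example_ida, id);
    }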
@@ -95,6 +128,7 @@ EXPORT_SYMBOL_GPL(watchdog_register_device);
void watchdog_unregister_device(struct watchdog_device *wdd)
{
int ret;
+ int devno = wdd->cdev.dev;
if (wdd == NULL)
return;
@@ -102,9 +136,41 @@ void watchdog_unregister_device(struct watchdog_device *wdd)
ret = watchdog_dev_unregister(wdd);
if (ret)
pr_err("error unregistering /dev/watchdog (err=%d)\n", ret);
+ device_destroy(watchdog_class, devno);
+ ida_simple_remove(&watchdog_ida, wdd->id);
+ wdd->dev = NULL;
}
EXPORT_SYMBOL_GPL(watchdog_unregister_device);
+static int __init watchdog_init(void)
+{
+ int err;
+
+ watchdog_class = class_create(THIS_MODULE, "watchdog");
+ if (IS_ERR(watchdog_class)) {
+ pr_err("couldn't create class\n");
+ return PTR_ERR(watchdog_class);
+ }
+
+ err = watchdog_dev_init();
+ if (err < 0) {
+ class_destroy(watchdog_class);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+ watchdog_dev_exit();
+ class_destroy(watchdog_class);
+ ida_destroy(&watchdog_ida);
+}
+
+subsys_initcall(watchdog_init);
+module_exit(watchdog_exit);
+
MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("WatchDog Timer Driver Core");
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_core.h
index bc7612be25ce..6c951418fca7 100644
--- a/drivers/watchdog/watchdog_dev.h
+++ b/drivers/watchdog/watchdog_core.h
@@ -26,8 +26,12 @@
* This material is provided "AS-IS" and at no charge.
*/
+#define MAX_DOGS 32 /* Maximum number of watchdog devices */
+
/*
* Functions/procedures to be called by the core
*/
-int watchdog_dev_register(struct watchdog_device *);
-int watchdog_dev_unregister(struct watchdog_device *);
+extern int watchdog_dev_register(struct watchdog_device *);
+extern int watchdog_dev_unregister(struct watchdog_device *);
+extern int __init watchdog_dev_init(void);
+extern void __exit watchdog_dev_exit(void);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 8558da912c42..ef8edecfc526 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -42,10 +42,12 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-/* make sure we only register one /dev/watchdog device */
-static unsigned long watchdog_dev_busy;
+#include "watchdog_core.h"
+
+/* the dev_t structure to store the dynamically allocated watchdog devices */
+static dev_t watchdog_devt;
/* the watchdog device behind /dev/watchdog */
-static struct watchdog_device *wdd;
+static struct watchdog_device *old_wdd;
/*
* watchdog_ping: ping the watchdog.
@@ -59,13 +61,26 @@ static struct watchdog_device *wdd;
static int watchdog_ping(struct watchdog_device *wddev)
{
- if (test_bit(WDOG_ACTIVE, &wddev->status)) {
- if (wddev->ops->ping)
- return wddev->ops->ping(wddev); /* ping the watchdog */
- else
- return wddev->ops->start(wddev); /* restart watchdog */
+ int err = 0;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_ping;
}
- return 0;
+
+ if (!watchdog_active(wddev))
+ goto out_ping;
+
+ if (wddev->ops->ping)
+ err = wddev->ops->ping(wddev); /* ping the watchdog */
+ else
+ err = wddev->ops->start(wddev); /* restart watchdog */
+
+out_ping:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -79,16 +94,25 @@ static int watchdog_ping(struct watchdog_device *wddev)
static int watchdog_start(struct watchdog_device *wddev)
{
- int err;
+ int err = 0;
- if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
- err = wddev->ops->start(wddev);
- if (err < 0)
- return err;
+ mutex_lock(&wddev->lock);
- set_bit(WDOG_ACTIVE, &wddev->status);
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_start;
}
- return 0;
+
+ if (watchdog_active(wddev))
+ goto out_start;
+
+ err = wddev->ops->start(wddev);
+ if (err == 0)
+ set_bit(WDOG_ACTIVE, &wddev->status);
+
+out_start:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -103,22 +127,155 @@ static int watchdog_start(struct watchdog_device *wddev)
static int watchdog_stop(struct watchdog_device *wddev)
{
- int err = -EBUSY;
+ int err = 0;
- if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
- pr_info("%s: nowayout prevents watchdog to be stopped!\n",
- wddev->info->identity);
- return err;
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_stop;
}
- if (test_bit(WDOG_ACTIVE, &wddev->status)) {
- err = wddev->ops->stop(wddev);
- if (err < 0)
- return err;
+ if (!watchdog_active(wddev))
+ goto out_stop;
+ if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
+ dev_info(wddev->dev, "nowayout prevents watchdog being stopped!\n");
+ err = -EBUSY;
+ goto out_stop;
+ }
+
+ err = wddev->ops->stop(wddev);
+ if (err == 0)
clear_bit(WDOG_ACTIVE, &wddev->status);
+
+out_stop:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_get_status: wrapper to get the watchdog status
+ * @wddev: the watchdog device to get the status from
+ * @status: the status of the watchdog device
+ *
+ * Get the watchdog's status flags.
+ */
+
+static int watchdog_get_status(struct watchdog_device *wddev,
+ unsigned int *status)
+{
+ int err = 0;
+
+ *status = 0;
+ if (!wddev->ops->status)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_status;
}
- return 0;
+
+ *status = wddev->ops->status(wddev);
+
+out_status:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_set_timeout: set the watchdog timer timeout
+ * @wddev: the watchdog device to set the timeout for
+ * @timeout: timeout to set in seconds
+ */
+
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+ unsigned int timeout)
+{
+ int err;
+
+ if ((wddev->ops->set_timeout == NULL) ||
+ !(wddev->info->options & WDIOF_SETTIMEOUT))
+ return -EOPNOTSUPP;
+
+ if ((wddev->max_timeout != 0) &&
+ (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+ return -EINVAL;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_timeout;
+ }
+
+ err = wddev->ops->set_timeout(wddev, timeout);
+
+out_timeout:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_get_timeleft: wrapper to get the time left before a reboot
+ * @wddev: the watchdog device to get the remaining time from
+ * @timeleft: the time that's left
+ *
+ * Get the time before a watchdog will reboot (if not pinged).
+ */
+
+static int watchdog_get_timeleft(struct watchdog_device *wddev,
+ unsigned int *timeleft)
+{
+ int err = 0;
+
+ *timeleft = 0;
+ if (!wddev->ops->get_timeleft)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_timeleft;
+ }
+
+ *timeleft = wddev->ops->get_timeleft(wddev);
+
+out_timeleft:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_ioctl_op: call the watchdog driver's ioctl op if defined
+ * @wddev: the watchdog device to do the ioctl on
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ */
+
+static int watchdog_ioctl_op(struct watchdog_device *wddev, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ if (!wddev->ops->ioctl)
+ return -ENOIOCTLCMD;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_ioctl;
+ }
+
+ err = wddev->ops->ioctl(wddev, cmd, arg);
+
+out_ioctl:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -136,6 +293,7 @@ static int watchdog_stop(struct watchdog_device *wddev)
static ssize_t watchdog_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ struct watchdog_device *wdd = file->private_data;
size_t i;
char c;
@@ -175,23 +333,24 @@ static ssize_t watchdog_write(struct file *file, const char __user *data,
static long watchdog_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ struct watchdog_device *wdd = file->private_data;
void __user *argp = (void __user *)arg;
int __user *p = argp;
unsigned int val;
int err;
- if (wdd->ops->ioctl) {
- err = wdd->ops->ioctl(wdd, cmd, arg);
- if (err != -ENOIOCTLCMD)
- return err;
- }
+ err = watchdog_ioctl_op(wdd, cmd, arg);
+ if (err != -ENOIOCTLCMD)
+ return err;
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, wdd->info,
sizeof(struct watchdog_info)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
- val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+ err = watchdog_get_status(wdd, &val);
+ if (err == -ENODEV)
+ return err;
return put_user(val, p);
case WDIOC_GETBOOTSTATUS:
return put_user(wdd->bootstatus, p);
@@ -215,15 +374,9 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
watchdog_ping(wdd);
return 0;
case WDIOC_SETTIMEOUT:
- if ((wdd->ops->set_timeout == NULL) ||
- !(wdd->info->options & WDIOF_SETTIMEOUT))
- return -EOPNOTSUPP;
if (get_user(val, p))
return -EFAULT;
- if ((wdd->max_timeout != 0) &&
- (val < wdd->min_timeout || val > wdd->max_timeout))
- return -EINVAL;
- err = wdd->ops->set_timeout(wdd, val);
+ err = watchdog_set_timeout(wdd, val);
if (err < 0)
return err;
/* If the watchdog is active then we send a keepalive ping
@@ -237,21 +390,21 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
return -EOPNOTSUPP;
return put_user(wdd->timeout, p);
case WDIOC_GETTIMELEFT:
- if (!wdd->ops->get_timeleft)
- return -EOPNOTSUPP;
-
- return put_user(wdd->ops->get_timeleft(wdd), p);
+ err = watchdog_get_timeleft(wdd, &val);
+ if (err)
+ return err;
+ return put_user(val, p);
default:
return -ENOTTY;
}
}
/*
- * watchdog_open: open the /dev/watchdog device.
+ * watchdog_open: open the /dev/watchdog* devices.
* @inode: inode of device
* @file: file handle to device
*
- * When the /dev/watchdog device gets opened, we start the watchdog.
+ * When the /dev/watchdog* device gets opened, we start the watchdog.
* Watch out: the /dev/watchdog device is single open, so we make sure
* it can only be opened once.
*/
@@ -259,6 +412,13 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
static int watchdog_open(struct inode *inode, struct file *file)
{
int err = -EBUSY;
+ struct watchdog_device *wdd;
+
+ /* Get the corresponding watchdog device */
+ if (imajor(inode) == MISC_MAJOR)
+ wdd = old_wdd;
+ else
+ wdd = container_of(inode->i_cdev, struct watchdog_device, cdev);
/* the watchdog is single open! */
if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
@@ -275,6 +435,11 @@ static int watchdog_open(struct inode *inode, struct file *file)
if (err < 0)
goto out_mod;
+ file->private_data = wdd;
+
+ if (wdd->ops->ref)
+ wdd->ops->ref(wdd);
+
/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
return nonseekable_open(inode, file);
@@ -286,9 +451,9 @@ out:
}
/*
- * watchdog_release: release the /dev/watchdog device.
- * @inode: inode of device
- * @file: file handle to device
+ * watchdog_release: release the watchdog device.
+ * @inode: inode of device
+ * @file: file handle to device
*
* This is the code for when /dev/watchdog gets closed. We will only
* stop the watchdog when we have received the magic char (and nowayout
@@ -297,6 +462,7 @@ out:
static int watchdog_release(struct inode *inode, struct file *file)
{
+ struct watchdog_device *wdd = file->private_data;
int err = -EBUSY;
/*
@@ -310,7 +476,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
/* If the watchdog was not stopped, send a keepalive ping */
if (err < 0) {
- pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+ mutex_lock(&wdd->lock);
+ if (!test_bit(WDOG_UNREGISTERED, &wdd->status))
+ dev_crit(wdd->dev, "watchdog did not stop!\n");
+ mutex_unlock(&wdd->lock);
watchdog_ping(wdd);
}
@@ -320,6 +489,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
/* make sure that /dev/watchdog can be re-opened */
clear_bit(WDOG_DEV_OPEN, &wdd->status);
+ /* Note wdd may be gone after this, do not use after this! */
+ if (wdd->ops->unref)
+ wdd->ops->unref(wdd);
+
return 0;
}
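
watchdog_open() above resolves the device either from old_wdd (when entered through the legacy misc minor) or by mapping inode->i_cdev back to the watchdog_device that embeds it. A minimal sketch of that container_of() lookup with example types:

    #include <linux/cdev.h>
    #include <linux/fs.h>

    struct example_dev {
            struct cdev cdev;       /* embedded, so i_cdev points inside us */
            int id;
    };

    static int example_open(struct inode *inode, struct file *file)
    {
            struct example_dev *d =
                    container_of(inode->i_cdev, struct example_dev, cdev);

            file->private_data = d;
            return nonseekable_open(inode, file);
    }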
@@ -338,62 +511,92 @@ static struct miscdevice watchdog_miscdev = {
};
/*
- * watchdog_dev_register:
+ * watchdog_dev_register: register a watchdog device
* @watchdog: watchdog device
*
- * Register a watchdog device as /dev/watchdog. /dev/watchdog
- * is actually a miscdevice and thus we set it up like that.
+ * Register a watchdog device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
*/
int watchdog_dev_register(struct watchdog_device *watchdog)
{
- int err;
-
- /* Only one device can register for /dev/watchdog */
- if (test_and_set_bit(0, &watchdog_dev_busy)) {
- pr_err("only one watchdog can use /dev/watchdog\n");
- return -EBUSY;
+ int err, devno;
+
+ if (watchdog->id == 0) {
+ watchdog_miscdev.parent = watchdog->parent;
+ err = misc_register(&watchdog_miscdev);
+ if (err != 0) {
+ pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+ watchdog->info->identity, WATCHDOG_MINOR, err);
+ if (err == -EBUSY)
+ pr_err("%s: a legacy watchdog module is probably present.\n",
+ watchdog->info->identity);
+ return err;
+ }
+ old_wdd = watchdog;
}
- wdd = watchdog;
-
- err = misc_register(&watchdog_miscdev);
- if (err != 0) {
- pr_err("%s: cannot register miscdev on minor=%d (err=%d)\n",
- watchdog->info->identity, WATCHDOG_MINOR, err);
- goto out;
+ /* Fill in the data structures */
+ devno = MKDEV(MAJOR(watchdog_devt), watchdog->id);
+ cdev_init(&watchdog->cdev, &watchdog_fops);
+ watchdog->cdev.owner = watchdog->ops->owner;
+
+ /* Add the device */
+ err = cdev_add(&watchdog->cdev, devno, 1);
+ if (err) {
+ pr_err("watchdog%d unable to add device %d:%d\n",
+ watchdog->id, MAJOR(watchdog_devt), watchdog->id);
+ if (watchdog->id == 0) {
+ misc_deregister(&watchdog_miscdev);
+ old_wdd = NULL;
+ }
}
-
- return 0;
-
-out:
- wdd = NULL;
- clear_bit(0, &watchdog_dev_busy);
return err;
}
/*
- * watchdog_dev_unregister:
+ * watchdog_dev_unregister: unregister a watchdog device
* @watchdog: watchdog device
*
- * Deregister the /dev/watchdog device.
+ * Unregister the watchdog and if needed the legacy /dev/watchdog device.
*/
int watchdog_dev_unregister(struct watchdog_device *watchdog)
{
- /* Check that a watchdog device was registered in the past */
- if (!test_bit(0, &watchdog_dev_busy) || !wdd)
- return -ENODEV;
-
- /* We can only unregister the watchdog device that was registered */
- if (watchdog != wdd) {
- pr_err("%s: watchdog was not registered as /dev/watchdog\n",
- watchdog->info->identity);
- return -ENODEV;
+ mutex_lock(&watchdog->lock);
+ set_bit(WDOG_UNREGISTERED, &watchdog->status);
+ mutex_unlock(&watchdog->lock);
+
+ cdev_del(&watchdog->cdev);
+ if (watchdog->id == 0) {
+ misc_deregister(&watchdog_miscdev);
+ old_wdd = NULL;
}
-
- misc_deregister(&watchdog_miscdev);
- wdd = NULL;
- clear_bit(0, &watchdog_dev_busy);
return 0;
}
+
+/*
+ * watchdog_dev_init: init dev part of watchdog core
+ *
+ * Allocate a range of chardev nodes to use for watchdog devices
+ */
+
+int __init watchdog_dev_init(void)
+{
+ int err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
+ if (err < 0)
+ pr_err("watchdog: unable to allocate char dev region\n");
+ return err;
+}
+
+/*
+ * watchdog_dev_exit: exit dev part of watchdog core
+ *
+ * Release the range of chardev nodes used for watchdog devices
+ */
+
+void __exit watchdog_dev_exit(void)
+{
+ unregister_chrdev_region(watchdog_devt, MAX_DOGS);
+}
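
Summing up the new chardev plumbing: watchdog_dev_init() reserves a region of MAX_DOGS minors once, and each watchdog_dev_register() adds its own struct cdev into it. A minimal sketch with example names (example_fops is assumed to exist elsewhere):

    #include <linux/cdev.h>
    #include <linux/fs.h>

    static dev_t example_devt;
    static struct cdev example_cdev;
    extern const struct file_operations example_fops;   /* assumed */

    static int example_dev_init(void)
    {
            int err = alloc_chrdev_region(&example_devt, 0, 32, "example");
            if (err)
                    return err;

            cdev_init(&example_cdev, &example_fops);
            err = cdev_add(&example_cdev, MKDEV(MAJOR(example_devt), 0), 1);
            if (err)
                    unregister_chrdev_region(example_devt, 32);
            return err;
    }

    static void example_dev_exit(void)
    {
            cdev_del(&example_cdev);
            unregister_chrdev_region(example_devt, 32);
    }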
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 1c888c7d4cce..e32654efdbb6 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -739,39 +739,7 @@ static struct pci_driver wdtpci_driver = {
.remove = __devexit_p(wdtpci_remove_one),
};
-
-/**
- * wdtpci_cleanup:
- *
- * Unload the watchdog. You cannot do this with any file handles open.
- * If your watchdog is set to continue ticking on close and you unload
- * it, well it keeps ticking. We won't get the interrupt but the board
- * will not touch PC memory so all is fine. You just have to load a new
- * module in xx seconds or reboot.
- */
-
-static void __exit wdtpci_cleanup(void)
-{
- pci_unregister_driver(&wdtpci_driver);
-}
-
-
-/**
- * wdtpci_init:
- *
- * Set up the WDT watchdog board. All we have to do is grab the
- * resources we require and bitch if anyone beat us to them.
- * The open() function will actually kick the board off.
- */
-
-static int __init wdtpci_init(void)
-{
- return pci_register_driver(&wdtpci_driver);
-}
-
-
-module_init(wdtpci_init);
-module_exit(wdtpci_cleanup);
+module_pci_driver(wdtpci_driver);
MODULE_AUTHOR("JP Nollmann, Alan Cox");
MODULE_DESCRIPTION("Driver for the ICS PCI-WDT500/501 watchdog cards");
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index b1815c5ed7a7..87d66d236c3e 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -247,8 +247,9 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT;
if (pdata->update_gpio) {
- ret = gpio_request(pdata->update_gpio,
- "Watchdog update");
+ ret = gpio_request_one(pdata->update_gpio,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+ "Watchdog update");
if (ret < 0) {
dev_err(wm831x->dev,
"Failed to request update GPIO: %d\n",
@@ -256,14 +257,6 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
goto err;
}
- ret = gpio_direction_output(pdata->update_gpio, 0);
- if (ret != 0) {
- dev_err(wm831x->dev,
- "gpio_direction_output returned: %d\n",
- ret);
- goto err_gpio;
- }
-
driver_data->update_gpio = pdata->update_gpio;
/* Make sure the watchdog takes hardware updates */
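
gpio_request_one() collapses the gpio_request() plus gpio_direction_output() sequence of the old code into a single call. A one-line sketch with an example label (GPIOF_OUT_INIT_LOW is the shorthand for the GPIOF_DIR_OUT | GPIOF_INIT_LOW combination used above):

    #include <linux/gpio.h>

    static int example_claim_update_gpio(unsigned int gpio)
    {
            /* replaces gpio_request() + gpio_direction_output(gpio, 0) */
            return gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "Watchdog update");
    }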
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index ea20c51d24c7..8d2501e604dd 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -71,7 +71,7 @@ config XEN_DEV_EVTCHN
tristate "Xen /dev/xen/evtchn device"
default y
help
- The evtchn driver allows a userspace process to triger event
+ The evtchn driver allows a userspace process to trigger event
channels and to receive notification of an event channel
firing.
If in doubt, say yes.
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 9adc5be57b13..fc3488631136 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
-obj-$(CONFIG_XEN_DOM0) += pci.o
+obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
new file mode 100644
index 000000000000..119d42a2bf57
--- /dev/null
+++ b/drivers/xen/acpi.c
@@ -0,0 +1,62 @@
+/******************************************************************************
+ * acpi.c
+ * acpi file for domain 0 kernel
+ *
+ * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Copyright (c) 2011 Yu Ke ke.yu@intel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <xen/acpi.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+ u32 pm1a_cnt, u32 pm1b_cnt)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_enter_acpi_sleep,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u = {
+ .enter_acpi_sleep = {
+ .pm1a_cnt_val = (u16)pm1a_cnt,
+ .pm1b_cnt_val = (u16)pm1b_cnt,
+ .sleep_state = sleep_state,
+ },
+ },
+ };
+
+ if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
+ WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
+ "Email xen-devel@lists.xensource.com Thank you.\n", \
+ pm1a_cnt, pm1b_cnt);
+ return -1;
+ }
+
+ HYPERVISOR_dom0_op(&op);
+ return 1;
+}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0a8a17cd80be..7595581d032c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -611,7 +611,7 @@ static void disable_pirq(struct irq_data *data)
disable_dynirq(data);
}
-static int find_irq_by_gsi(unsigned gsi)
+int xen_irq_from_gsi(unsigned gsi)
{
struct irq_info *info;
@@ -625,6 +625,7 @@ static int find_irq_by_gsi(unsigned gsi)
return -1;
}
+EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
/*
* Do not make any assumptions regarding the relationship between the
@@ -644,7 +645,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
mutex_lock(&irq_mapping_update_lock);
- irq = find_irq_by_gsi(gsi);
+ irq = xen_irq_from_gsi(gsi);
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
irq, gsi);
@@ -826,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn)
handle_edge_irq, "event");
xen_irq_info_evtchn_init(irq, evtchn);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
}
out:
@@ -861,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_IPI);
}
out:
@@ -938,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_VIRQ);
}
out:
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f100ce20b16b..0bfc1ef11259 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -38,6 +38,7 @@
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/hardirq.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -426,10 +427,8 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
nflags = *pflags;
do {
flags = nflags;
- if (flags & (GTF_reading|GTF_writing)) {
- printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+ if (flags & (GTF_reading|GTF_writing))
return 0;
- }
} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
return 1;
@@ -458,12 +457,103 @@ static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
return 1;
}
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
return gnttab_interface->end_foreign_access_ref(ref, readonly);
}
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+ if (_gnttab_end_foreign_access_ref(ref, readonly))
+ return 1;
+ pr_warn("WARNING: g.e. %#x still in use!\n", ref);
+ return 0;
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+struct deferred_entry {
+ struct list_head list;
+ grant_ref_t ref;
+ bool ro;
+ uint16_t warn_delay;
+ struct page *page;
+};
+static LIST_HEAD(deferred_list);
+static void gnttab_handle_deferred(unsigned long);
+static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
+
+static void gnttab_handle_deferred(unsigned long unused)
+{
+ unsigned int nr = 10;
+ struct deferred_entry *first = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ while (nr--) {
+ struct deferred_entry *entry
+ = list_first_entry(&deferred_list,
+ struct deferred_entry, list);
+
+ if (entry == first)
+ break;
+ list_del(&entry->list);
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
+ put_free_entry(entry->ref);
+ if (entry->page) {
+ pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+ entry->ref, page_to_pfn(entry->page));
+ __free_page(entry->page);
+ } else
+ pr_info("freeing g.e. %#x\n", entry->ref);
+ kfree(entry);
+ entry = NULL;
+ } else {
+ if (!--entry->warn_delay)
+ pr_info("g.e. %#x still pending\n",
+ entry->ref);
+ if (!first)
+ first = entry;
+ }
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ if (entry)
+ list_add_tail(&entry->list, &deferred_list);
+ else if (list_empty(&deferred_list))
+ break;
+ }
+ if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
+ deferred_timer.expires = jiffies + HZ;
+ add_timer(&deferred_timer);
+ }
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+
+static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+ struct page *page)
+{
+ struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ const char *what = KERN_WARNING "leaking";
+
+ if (entry) {
+ unsigned long flags;
+
+ entry->ref = ref;
+ entry->ro = readonly;
+ entry->page = page;
+ entry->warn_delay = 60;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ list_add_tail(&entry->list, &deferred_list);
+ if (!timer_pending(&deferred_timer)) {
+ deferred_timer.expires = jiffies + HZ;
+ add_timer(&deferred_timer);
+ }
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ what = KERN_DEBUG "deferring";
+ }
+ printk("%s g.e. %#x (pfn %#lx)\n",
+ what, ref, page ? page_to_pfn(page) : -1);
+}
+
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page)
{
@@ -471,12 +561,9 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
put_free_entry(ref);
if (page != 0)
free_page(page);
- } else {
- /* XXX This needs to be fixed so that the ref and page are
- placed on a list to be freed up later. */
- printk(KERN_WARNING
- "WARNING: leaking g.e. and page still in use!\n");
- }
+ } else
+ gnttab_add_deferred(ref, readonly,
+ page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
@@ -741,6 +828,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count)
{
int i, ret;
+ bool lazy = false;
pte_t *pte;
unsigned long mfn;
@@ -751,6 +839,11 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
+
for (i = 0; i < count; i++) {
/* Do not add to override if the map failed. */
if (map_ops[i].status)
@@ -769,6 +862,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
}
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
+
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
@@ -777,6 +873,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct page **pages, unsigned int count, bool clear_pte)
{
int i, ret;
+ bool lazy = false;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
@@ -785,12 +882,20 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
+
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i], clear_pte);
if (ret)
return ret;
}
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
+
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
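
Both gnttab_map_refs() and gnttab_unmap_refs() now batch their per-page m2p override updates under lazy MMU mode when called from process context. A sketch of that guard pattern (the loop body is elided; the explicit header includes are assumptions, the real file picks these symbols up indirectly):

    #include <linux/hardirq.h>
    #include <asm/paravirt.h>

    static void example_batch_updates(int count)
    {
            bool lazy = false;
            int i;

            /* only batch when not in interrupt and no lazy mode is active */
            if (!in_interrupt() &&
                paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
                    arch_enter_lazy_mmu_mode();
                    lazy = true;
            }

            for (i = 0; i < count; i++)
                    ;       /* per-page override update would go here */

            if (lazy)
                    arch_leave_lazy_mmu_mode();
    }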
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index b84bf0b6cc34..18fff88254eb 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev)
#ifdef CONFIG_ACPI
handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
- if (!handle)
+ if (!handle && pci_dev->bus->bridge)
handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
#ifdef CONFIG_PCI_IOV
if (!handle && pci_dev->is_virtfn)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index dcb79521e6c8..89f264c67420 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
}
/* returns 0 if the page was successfully put into frontswap, -1 if not */
-static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!)
*/
-static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s)
__setup("nofrontswap", no_frontswap);
static struct frontswap_ops __initdata tmem_frontswap_ops = {
- .put_page = tmem_frontswap_put_page,
- .get_page = tmem_frontswap_get_page,
+ .store = tmem_frontswap_store,
+ .load = tmem_frontswap_load,
.invalidate_page = tmem_frontswap_flush_page,
.invalidate_area = tmem_frontswap_flush_area,
.init = tmem_frontswap_init
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 0b48579a9cd6..7ff2569e17ae 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -29,6 +29,7 @@
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
+#include <xen/xen.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 146c94897016..7d041cb6da26 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -105,6 +105,12 @@ static unsigned int selfballoon_interval __read_mostly = 5;
*/
static unsigned int selfballoon_min_usable_mb;
+/*
+ * Amount of RAM in MB to add to the target number of pages.
+ * Can be used to reserve some more room for caches and the like.
+ */
+static unsigned int selfballoon_reserved_mb;
+
static void selfballoon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
@@ -217,7 +223,8 @@ static void selfballoon_process(struct work_struct *work)
cur_pages = totalram_pages;
tgt_pages = cur_pages; /* default is no change */
goal_pages = percpu_counter_read_positive(&vm_committed_as) +
- totalreserve_pages;
+ totalreserve_pages +
+ MB2PAGES(selfballoon_reserved_mb);
#ifdef CONFIG_FRONTSWAP
/* allow space for frontswap pages to be repatriated */
if (frontswap_selfshrinking && frontswap_enabled)
@@ -397,6 +404,30 @@ static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
show_selfballoon_min_usable_mb,
store_selfballoon_min_usable_mb);
+SELFBALLOON_SHOW(selfballoon_reserved_mb, "%d\n",
+ selfballoon_reserved_mb);
+
+static ssize_t store_selfballoon_reserved_mb(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_reserved_mb = val;
+ return count;
+}
+
+static DEVICE_ATTR(selfballoon_reserved_mb, S_IRUGO | S_IWUSR,
+ show_selfballoon_reserved_mb,
+ store_selfballoon_reserved_mb);
+
#ifdef CONFIG_FRONTSWAP
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
@@ -480,6 +511,7 @@ static struct attribute *selfballoon_attrs[] = {
&dev_attr_selfballoon_downhysteresis.attr,
&dev_attr_selfballoon_uphysteresis.attr,
&dev_attr_selfballoon_min_usable_mb.attr,
+ &dev_attr_selfballoon_reserved_mb.attr,
#ifdef CONFIG_FRONTSWAP
&dev_attr_frontswap_selfshrinking.attr,
&dev_attr_frontswap_hysteresis.attr,
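
(Note: the new selfballoon_reserved_mb knob simply inflates goal_pages by MB2PAGES(selfballoon_reserved_mb) in selfballoon_process() above, so the balloon leaves that much extra headroom for caches and the like. A small standalone sketch of the conversion, assuming the usual 4 KiB page size and that MB2PAGES() is the conventional mb << (20 - PAGE_SHIFT) shift; both are assumptions for illustration, not taken from this hunk.)

#include <stdio.h>

/* Assumed for illustration: 4 KiB pages, i.e. PAGE_SHIFT == 12. */
#define PAGE_SHIFT      12
#define MB2PAGES(mb)    ((unsigned long)(mb) << (20 - PAGE_SHIFT))

int main(void)
{
        unsigned long reserved_mb = 512; /* e.g. value written to selfballoon_reserved_mb */

        /* 512 MB -> 512 * 256 = 131072 extra pages added to goal_pages */
        printf("%lu MB reserves %lu extra pages\n",
               reserved_mb, MB2PAGES(reserved_mb));
        return 0;
}
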
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 2eff7a6aaa20..52fe7ad07666 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -234,3 +234,9 @@ int xb_init_comms(void)
return 0;
}
+
+void xb_deinit_comms(void)
+{
+ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+ xenbus_irq = 0;
+}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index 6e42800fa499..c8abd3b8a6c4 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -35,6 +35,7 @@
int xs_init(void);
int xb_init_comms(void);
+void xb_deinit_comms(void);
/* Low level routines. */
int xb_write(const void *data, unsigned len);
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index 3d3be78c1093..be738c43104b 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -8,7 +8,11 @@
#include <xen/xen.h>
#include <xen/page.h>
+#include <xen/xenbus.h>
#include <xen/xenbus_dev.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <asm/xen/hypervisor.h>
#include "xenbus_comms.h"
@@ -22,6 +26,50 @@ static int xenbus_backend_open(struct inode *inode, struct file *filp)
return nonseekable_open(inode, filp);
}
+static long xenbus_alloc(domid_t domid)
+{
+ struct evtchn_alloc_unbound arg;
+ int err = -EEXIST;
+
+ xs_suspend();
+
+ /* If xenstored_ready is nonzero, that means we have already talked to
+ * xenstore and set up watches. These watches will be restored by
+ * xs_resume, but that requires communication over the port established
+ * below that is not visible to anyone until the ioctl returns.
+ *
+ * This can be resolved by splitting the ioctl into two parts
+ * (postponing the resume until xenstored is active) but this is
+ * unnecessarily complex for the intended use where xenstored is only
+ * started once - so return -EEXIST if it's already running.
+ */
+ if (xenstored_ready)
+ goto out_err;
+
+ gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
+ virt_to_mfn(xen_store_interface), 0 /* writable */);
+
+ arg.dom = DOMID_SELF;
+ arg.remote_dom = domid;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg);
+ if (err)
+ goto out_err;
+
+ if (xen_store_evtchn > 0)
+ xb_deinit_comms();
+
+ xen_store_evtchn = arg.port;
+
+ xs_resume();
+
+ return arg.port;
+
+ out_err:
+ xs_suspend_cancel();
+ return err;
+}
+
static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
if (!capable(CAP_SYS_ADMIN))
@@ -33,6 +81,9 @@ static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
return xen_store_evtchn;
return -ENODEV;
+ case IOCTL_XENBUS_BACKEND_SETUP:
+ return xenbus_alloc(data);
+
default:
return -ENOTTY;
}
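
(Note: together with the xb_deinit_comms() addition above, the new IOCTL_XENBUS_BACKEND_SETUP path lets a starting xenstored ask the kernel to grant the xenstore ring to a target domain and allocate an unbound event channel for it, with the port returned by the ioctl. Below is a rough userspace sketch of how a daemon might invoke it; the /dev/xen/xenbus_backend node, the header location, and the setup_xenstore_ring() helper name are assumptions for illustration, not something this hunk establishes.)

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/xenbus_dev.h>     /* assumed to provide IOCTL_XENBUS_BACKEND_SETUP */

int setup_xenstore_ring(unsigned int domid)
{
        int fd, port;

        /* Device node name is an assumption; it matches the usual
         * xenbus_backend misc-device naming. */
        fd = open("/dev/xen/xenbus_backend", O_RDWR);
        if (fd < 0) {
                perror("open xenbus_backend");
                return -1;
        }

        /* On success the kernel returns the freshly allocated event-channel
         * port; on failure -1 with errno set (EEXIST if xenstored already
         * talked to xenstore, per the comment in xenbus_alloc() above). */
        port = ioctl(fd, IOCTL_XENBUS_BACKEND_SETUP, domid);
        if (port < 0)
                perror("IOCTL_XENBUS_BACKEND_SETUP");

        close(fd);
        return port;
}
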